Commit 33ad2c9

stage2-wasm: packed store/load 128 bits
1 parent 911f452 commit 33ad2c9

File tree

4 files changed: +85 -59 lines


src/arch/wasm/CodeGen.zig

Lines changed: 85 additions & 53 deletions
@@ -759,6 +759,16 @@ fn resolveInst(cg: *CodeGen, ref: Air.Inst.Ref) InnerError!WValue {
     return result;
 }
 
+fn resolveValue(cg: *CodeGen, val: Value) InnerError!WValue {
+    const zcu = cg.pt.zcu;
+    const ty = val.typeOf(zcu);
+
+    return if (isByRef(ty, zcu, cg.target))
+        .{ .uav_ref = .{ .ip_index = val.toIntern() } }
+    else
+        try cg.lowerConstant(val, ty);
+}
+
 /// NOTE: if result == .stack, it will be stored in .local
 fn finishAir(cg: *CodeGen, inst: Air.Inst.Index, result: WValue, operands: []const Air.Inst.Ref) InnerError!void {
     assert(operands.len <= Liveness.bpi - 1);
@@ -2319,39 +2329,56 @@ fn airStore(cg: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void {
     } else {
         // at this point we have a non-natural alignment, we must
         // load the value, and then shift+or the rhs into the result location.
-        const int_elem_ty = try pt.intType(.unsigned, ptr_info.packed_offset.host_size * 8);
+        const host_size = ptr_info.packed_offset.host_size * 8;
+        const host_ty = try pt.intType(.unsigned, host_size);
+        const bit_size: u16 = @intCast(ty.bitSize(zcu));
+        const bit_offset = ptr_info.packed_offset.bit_offset;
+
+        const mask_val = try cg.resolveValue(val: {
+            const limbs = try cg.gpa.alloc(
+                std.math.big.Limb,
+                std.math.big.int.calcTwosCompLimbCount(host_size) + 1,
+            );
+            defer cg.gpa.free(limbs);
 
-        if (isByRef(int_elem_ty, zcu, cg.target)) {
-            return cg.fail("TODO: airStore for pointers to bitfields with backing type larger than 64bits", .{});
-        }
+            var mask_bigint: std.math.big.int.Mutable = .{ .limbs = limbs, .positive = undefined, .len = undefined };
+            mask_bigint.setTwosCompIntLimit(.max, .unsigned, host_size);
 
-        var mask = @as(u64, @intCast((@as(u65, 1) << @as(u7, @intCast(ty.bitSize(zcu)))) - 1));
-        mask <<= @as(u6, @intCast(ptr_info.packed_offset.bit_offset));
-        mask ^= ~@as(u64, 0);
-        const shift_val: WValue = if (ptr_info.packed_offset.host_size <= 4)
-            .{ .imm32 = ptr_info.packed_offset.bit_offset }
+            if (bit_size != host_size) {
+                mask_bigint.shiftRight(mask_bigint.toConst(), host_size - bit_size);
+            }
+            if (bit_offset != 0) {
+                mask_bigint.shiftLeft(mask_bigint.toConst(), bit_offset);
+            }
+            mask_bigint.bitNotWrap(mask_bigint.toConst(), .unsigned, host_size);
+
+            break :val try pt.intValue_big(host_ty, mask_bigint.toConst());
+        });
+
+        const shift_val: WValue = if (33 <= host_size and host_size <= 64)
+            .{ .imm64 = bit_offset }
         else
-            .{ .imm64 = ptr_info.packed_offset.bit_offset };
-        const mask_val: WValue = if (ptr_info.packed_offset.host_size <= 4)
-            .{ .imm32 = @as(u32, @truncate(mask)) }
+            .{ .imm32 = bit_offset };
+
+        if (host_size <= 64) {
+            try cg.emitWValue(lhs);
+        }
+        const loaded = if (host_size <= 64)
+            try cg.load(lhs, host_ty, 0)
         else
-            .{ .imm64 = mask };
-        const wrap_mask_val: WValue = if (ptr_info.packed_offset.host_size <= 4)
-            .{ .imm32 = @truncate(~@as(u64, 0) >> @intCast(64 - ty.bitSize(zcu))) }
+            lhs;
+        const anded = try cg.binOp(loaded, mask_val, host_ty, .@"and");
+        const extended_value = try cg.intcast(rhs, ty, host_ty);
+        const shifted_value = if (bit_offset > 0)
+            try cg.binOp(extended_value, shift_val, host_ty, .shl)
         else
-            .{ .imm64 = ~@as(u64, 0) >> @intCast(64 - ty.bitSize(zcu)) };
-
-        try cg.emitWValue(lhs);
-        const loaded = try cg.load(lhs, int_elem_ty, 0);
-        const anded = try cg.binOp(loaded, mask_val, int_elem_ty, .@"and");
-        const extended_value = try cg.intcast(rhs, ty, int_elem_ty);
-        const masked_value = try cg.binOp(extended_value, wrap_mask_val, int_elem_ty, .@"and");
-        const shifted_value = if (ptr_info.packed_offset.bit_offset > 0) shifted: {
-            break :shifted try cg.binOp(masked_value, shift_val, int_elem_ty, .shl);
-        } else masked_value;
-        const result = try cg.binOp(anded, shifted_value, int_elem_ty, .@"or");
-        // lhs is still on the stack
-        try cg.store(.stack, result, int_elem_ty, lhs.offset());
+            extended_value;
+        const result = try cg.binOp(anded, shifted_value, host_ty, .@"or");
+        if (host_size <= 64) {
+            try cg.store(.stack, result, host_ty, lhs.offset());
+        } else {
+            try cg.store(lhs, result, host_ty, lhs.offset());
+        }
     }
 
     return cg.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
@@ -2494,22 +2521,30 @@ fn airLoad(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         }
 
         if (ptr_info.packed_offset.host_size == 0) {
-            break :result try cg.load(operand, ty, 0);
-        }
-
-        // at this point we have a non-natural alignment, we must
-        // shift the value to obtain the correct bit.
-        const int_elem_ty = try pt.intType(.unsigned, ptr_info.packed_offset.host_size * 8);
-        const shift_val: WValue = if (ptr_info.packed_offset.host_size <= 4)
-            .{ .imm32 = ptr_info.packed_offset.bit_offset }
-        else if (ptr_info.packed_offset.host_size <= 8)
-            .{ .imm64 = ptr_info.packed_offset.bit_offset }
-        else
-            return cg.fail("TODO: airLoad where ptr to bitfield exceeds 64 bits", .{});
+            const loaded = try cg.load(operand, ty, 0);
+            const ty_size = ty.abiSize(zcu);
+            if (ty.isAbiInt(zcu) and ty_size * 8 > ty.bitSize(zcu)) {
+                const int_elem_ty = try pt.intType(.unsigned, @intCast(ty_size * 8));
+                break :result try cg.trunc(loaded, ty, int_elem_ty);
+            } else {
+                break :result loaded;
+            }
+        } else {
+            const int_elem_ty = try pt.intType(.unsigned, ptr_info.packed_offset.host_size * 8);
+            const shift_val: WValue = if (ptr_info.packed_offset.host_size <= 4)
+                .{ .imm32 = ptr_info.packed_offset.bit_offset }
+            else if (ptr_info.packed_offset.host_size <= 8)
+                .{ .imm64 = ptr_info.packed_offset.bit_offset }
+            else
+                .{ .imm32 = ptr_info.packed_offset.bit_offset };
 
-        const stack_loaded = try cg.load(operand, int_elem_ty, 0);
-        const shifted = try cg.binOp(stack_loaded, shift_val, int_elem_ty, .shr);
-        break :result try cg.trunc(shifted, ty, int_elem_ty);
+            const stack_loaded = if (ptr_info.packed_offset.host_size <= 8)
+                try cg.load(operand, int_elem_ty, 0)
+            else
+                operand;
+            const shifted = try cg.binOp(stack_loaded, shift_val, int_elem_ty, .shr);
+            break :result try cg.trunc(shifted, ty, int_elem_ty);
+        }
     };
     return cg.finishAir(inst, result, &.{ty_op.operand});
 }
@@ -3857,15 +3892,12 @@ fn airStructFieldVal(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const packed_struct = zcu.typeToPackedStruct(struct_ty).?;
     const offset = pt.structPackedFieldBitOffset(packed_struct, field_index);
     const backing_ty = Type.fromInterned(packed_struct.backingIntTypeUnordered(ip));
-    const wasm_bits = toWasmBits(backing_ty.intInfo(zcu).bits) orelse {
-        return cg.fail("TODO: airStructFieldVal for packed structs larger than 128 bits", .{});
-    };
-    const const_wvalue: WValue = if (wasm_bits == 32)
-        .{ .imm32 = offset }
-    else if (wasm_bits == 64)
+    const host_bits = backing_ty.intInfo(zcu).bits;
+
+    const const_wvalue: WValue = if (33 <= host_bits and host_bits <= 64)
         .{ .imm64 = offset }
     else
-        return cg.fail("TODO: airStructFieldVal for packed structs larger than 64 bits", .{});
+        .{ .imm32 = offset };
 
     // for first field we don't require any shifting
     const shifted_value = if (offset == 0)
@@ -4043,7 +4075,7 @@ fn airSwitchBr(cg: *CodeGen, inst: Air.Inst.Index, is_dispatch_loop: bool) Inner
     if (use_br_table) {
         const width = width_maybe.?;
 
-        const br_value_original = try cg.binOp(target, try cg.resolveInst(Air.internedToRef(min.?.toIntern())), target_ty, .sub);
+        const br_value_original = try cg.binOp(target, try cg.resolveValue(min.?), target_ty, .sub);
         _ = try cg.intcast(br_value_original, target_ty, Type.u32);
 
         const jump_table: Mir.JumpTable = .{ .length = width + 1 };
@@ -5232,7 +5264,7 @@ fn airAggregateInit(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
             }
         }
         if (sentinel) |s| {
-            const val = try cg.resolveInst(Air.internedToRef(s.toIntern()));
+            const val = try cg.resolveValue(s);
             try cg.store(offset, val, elem_ty, 0);
         }
     } else {
@@ -5243,7 +5275,7 @@ fn airAggregateInit(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         offset += elem_size;
     }
     if (sentinel) |s| {
-        const val = try cg.resolveInst(Air.internedToRef(s.toIntern()));
+        const val = try cg.resolveValue(s);
         try cg.store(result, val, elem_ty, offset);
     }
 }
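
For orientation (not part of the commit): the airStore path in the diff above performs a read-modify-write on the packed field's host integer: load the host, clear the field's bits with an inverted mask, shift the new value into place, or the two together, and store the result back. Below is a minimal sketch of that sequence in plain Zig, using u128 arithmetic instead of WValue operands and cg.binOp calls; the function and test names are illustrative, not from the repository, and `value` is assumed to already fit in `bit_size` bits.

const std = @import("std");

/// Writes `value` (`bit_size` bits wide) into `host` at `bit_offset`,
/// mirroring the mask/shift/or sequence the generated wasm performs.
fn storePacked(host: u128, value: u128, bit_offset: u7, bit_size: u8) u128 {
    // Mask with `bit_size` ones at `bit_offset`, inverted so it clears the
    // field's old bits (the backend builds the same mask as a big integer).
    const ones: u128 = if (bit_size == 128)
        std.math.maxInt(u128)
    else
        (@as(u128, 1) << @intCast(bit_size)) - 1;
    const keep_mask: u128 = ~(ones << bit_offset);

    const cleared = host & keep_mask; // `anded` in the backend
    const shifted = value << bit_offset; // `shifted_value`
    return cleared | shifted; // `result`, stored back to the host location
}

test "packed store round-trips through the host integer" {
    var host: u128 = 0;
    host = storePacked(host, 0xABC, 96, 12); // a u12 field at bit offset 96
    host = storePacked(host, 0x5, 0, 3); // a u3 field at bit offset 0
    try std.testing.expectEqual(@as(u128, 0xABC), (host >> 96) & 0xFFF);
    try std.testing.expectEqual(@as(u128, 0x5), host & 0x7);
}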

test/behavior/bitcast.zig

Lines changed: 0 additions & 2 deletions
@@ -480,7 +480,6 @@ test "@bitCast of packed struct of bools all true" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO
 
     const P = packed struct {
@@ -501,7 +500,6 @@ test "@bitCast of packed struct of bools all false" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO
 
     const P = packed struct {

test/behavior/packed-struct.zig

Lines changed: 0 additions & 1 deletion
@@ -1321,7 +1321,6 @@ test "packed struct with signed field" {
 test "assign packed struct initialized with RLS to packed struct literal field" {
     if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch.isWasm()) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
 
     const Inner = packed struct { x: u17 };

test/behavior/struct.zig

Lines changed: 0 additions & 3 deletions
@@ -421,9 +421,7 @@ const Foo96Bits = packed struct {
 test "packed struct 24bits" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
-    if (builtin.cpu.arch.isWasm()) return error.SkipZigTest; // TODO
     if (builtin.cpu.arch.isArm()) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
 
@@ -763,7 +761,6 @@ const S0 = struct {
 var g_foo: S0 = S0.init();
 
 test "packed struct with fp fields" {
-    if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
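
Taken together with the unskipped behavior tests above, the change allows code like the following to run on the self-hosted wasm backend: loading and storing fields of a packed struct whose backing integer is wider than 64 bits. A usage-level sketch follows; this exact test is not in the commit, and the type and values are illustrative.

const std = @import("std");

// A packed struct backed by a u128, so field accesses through pointers
// exercise the >64-bit host-size paths touched by this commit.
const Big = packed struct {
    a: u48 = 0,
    b: u17 = 0,
    c: u63 = 0, // 48 + 17 + 63 = 128 bits
};

test "store and load a field of a 128-bit packed struct through a pointer" {
    var big: Big = .{};
    const ptr = &big.b; // pointer to a bitfield inside the u128 host integer
    ptr.* = 0x1ABCD; // packed store: load host, mask, shift+or, store back
    try std.testing.expectEqual(@as(u17, 0x1ABCD), big.b);
    try std.testing.expectEqual(@as(u48, 0), big.a);
    try std.testing.expectEqual(@as(u63, 0), big.c);
}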
