@@ -759,6 +759,16 @@ fn resolveInst(cg: *CodeGen, ref: Air.Inst.Ref) InnerError!WValue {
     return result;
 }
 
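+/// Resolves a constant `Value` that has no AIR instruction of its own: by-ref
+/// types become a `uav_ref` to the interned value, everything else is lowered
+/// to an immediate with `lowerConstant`.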
+fn resolveValue(cg: *CodeGen, val: Value) InnerError!WValue {
+    const zcu = cg.pt.zcu;
+    const ty = val.typeOf(zcu);
+
+    return if (isByRef(ty, zcu, cg.target))
+        .{ .uav_ref = .{ .ip_index = val.toIntern() } }
+    else
+        try cg.lowerConstant(val, ty);
+}
+
 /// NOTE: if result == .stack, it will be stored in .local
 fn finishAir(cg: *CodeGen, inst: Air.Inst.Index, result: WValue, operands: []const Air.Inst.Ref) InnerError!void {
     assert(operands.len <= Liveness.bpi - 1);
@@ -2319,39 +2329,56 @@ fn airStore(cg: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void {
     } else {
         // at this point we have a non-natural alignment, we must
         // load the value, and then shift+or the rhs into the result location.
-        const int_elem_ty = try pt.intType(.unsigned, ptr_info.packed_offset.host_size * 8);
+        const host_size = ptr_info.packed_offset.host_size * 8;
+        const host_ty = try pt.intType(.unsigned, host_size);
+        const bit_size: u16 = @intCast(ty.bitSize(zcu));
+        const bit_offset = ptr_info.packed_offset.bit_offset;
+
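+        // Build a mask that is zero over the stored field and one everywhere else,
+        // so the `and` below preserves the untouched bits of the host integer.
+        // E.g. host_size = 32, bit_size = 5, bit_offset = 3 yields 0xffff_ff07.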
+        const mask_val = try cg.resolveValue(val: {
+            const limbs = try cg.gpa.alloc(
+                std.math.big.Limb,
+                std.math.big.int.calcTwosCompLimbCount(host_size) + 1,
+            );
+            defer cg.gpa.free(limbs);
 
-        if (isByRef(int_elem_ty, zcu, cg.target)) {
-            return cg.fail("TODO: airStore for pointers to bitfields with backing type larger than 64bits", .{});
-        }
+            var mask_bigint: std.math.big.int.Mutable = .{ .limbs = limbs, .positive = undefined, .len = undefined };
+            mask_bigint.setTwosCompIntLimit(.max, .unsigned, host_size);
 
-        var mask = @as(u64, @intCast((@as(u65, 1) << @as(u7, @intCast(ty.bitSize(zcu)))) - 1));
-        mask <<= @as(u6, @intCast(ptr_info.packed_offset.bit_offset));
-        mask ^= ~@as(u64, 0);
-        const shift_val: WValue = if (ptr_info.packed_offset.host_size <= 4)
-            .{ .imm32 = ptr_info.packed_offset.bit_offset }
+            if (bit_size != host_size) {
+                mask_bigint.shiftRight(mask_bigint.toConst(), host_size - bit_size);
+            }
+            if (bit_offset != 0) {
+                mask_bigint.shiftLeft(mask_bigint.toConst(), bit_offset);
+            }
+            mask_bigint.bitNotWrap(mask_bigint.toConst(), .unsigned, host_size);
+
+            break :val try pt.intValue_big(host_ty, mask_bigint.toConst());
+        });
+
+        const shift_val: WValue = if (33 <= host_size and host_size <= 64)
+            .{ .imm64 = bit_offset }
         else
-            .{ .imm64 = ptr_info.packed_offset.bit_offset };
-        const mask_val: WValue = if (ptr_info.packed_offset.host_size <= 4)
-            .{ .imm32 = @as(u32, @truncate(mask)) }
+            .{ .imm32 = bit_offset };
+
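+        // Backing integers up to 64 bits fit a wasm scalar: keep lhs on the stack,
+        // load the host value, and store the merged result through the value stack.
+        // Wider backing integers are operated on by reference instead.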
+        if (host_size <= 64) {
+            try cg.emitWValue(lhs);
+        }
+        const loaded = if (host_size <= 64)
+            try cg.load(lhs, host_ty, 0)
         else
-            .{ .imm64 = mask };
-        const wrap_mask_val: WValue = if (ptr_info.packed_offset.host_size <= 4)
-            .{ .imm32 = @truncate(~@as(u64, 0) >> @intCast(64 - ty.bitSize(zcu))) }
+            lhs;
+        const anded = try cg.binOp(loaded, mask_val, host_ty, .@"and");
+        const extended_value = try cg.intcast(rhs, ty, host_ty);
+        const shifted_value = if (bit_offset > 0)
+            try cg.binOp(extended_value, shift_val, host_ty, .shl)
         else
-            .{ .imm64 = ~@as(u64, 0) >> @intCast(64 - ty.bitSize(zcu)) };
-
-        try cg.emitWValue(lhs);
-        const loaded = try cg.load(lhs, int_elem_ty, 0);
-        const anded = try cg.binOp(loaded, mask_val, int_elem_ty, .@"and");
-        const extended_value = try cg.intcast(rhs, ty, int_elem_ty);
-        const masked_value = try cg.binOp(extended_value, wrap_mask_val, int_elem_ty, .@"and");
-        const shifted_value = if (ptr_info.packed_offset.bit_offset > 0) shifted: {
-            break :shifted try cg.binOp(masked_value, shift_val, int_elem_ty, .shl);
-        } else masked_value;
-        const result = try cg.binOp(anded, shifted_value, int_elem_ty, .@"or");
-        // lhs is still on the stack
-        try cg.store(.stack, result, int_elem_ty, lhs.offset());
+            extended_value;
+        const result = try cg.binOp(anded, shifted_value, host_ty, .@"or");
+        if (host_size <= 64) {
+            try cg.store(.stack, result, host_ty, lhs.offset());
+        } else {
+            try cg.store(lhs, result, host_ty, lhs.offset());
+        }
     }
 
     return cg.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
@@ -2494,22 +2521,30 @@ fn airLoad(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         }
 
         if (ptr_info.packed_offset.host_size == 0) {
-            break :result try cg.load(operand, ty, 0);
-        }
-
-        // at this point we have a non-natural alignment, we must
-        // shift the value to obtain the correct bit.
-        const int_elem_ty = try pt.intType(.unsigned, ptr_info.packed_offset.host_size * 8);
-        const shift_val: WValue = if (ptr_info.packed_offset.host_size <= 4)
-            .{ .imm32 = ptr_info.packed_offset.bit_offset }
-        else if (ptr_info.packed_offset.host_size <= 8)
-            .{ .imm64 = ptr_info.packed_offset.bit_offset }
-        else
-            return cg.fail("TODO: airLoad where ptr to bitfield exceeds 64 bits", .{});
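+            // No bit-packing: load directly. An ABI integer whose bit size is
+            // narrower than its ABI size (e.g. a u24, assuming a four-byte ABI
+            // size) is truncated afterwards so the unused high bits are cleared.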
+            const loaded = try cg.load(operand, ty, 0);
+            const ty_size = ty.abiSize(zcu);
+            if (ty.isAbiInt(zcu) and ty_size * 8 > ty.bitSize(zcu)) {
+                const int_elem_ty = try pt.intType(.unsigned, @intCast(ty_size * 8));
+                break :result try cg.trunc(loaded, ty, int_elem_ty);
+            } else {
+                break :result loaded;
+            }
+        } else {
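+            // Packed bitfield: load the host integer, shift the field down to bit
+            // zero, and truncate to the field's type. Hosts wider than 64 bits are
+            // already in memory (by-ref), so the operand is used directly.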
+            const int_elem_ty = try pt.intType(.unsigned, ptr_info.packed_offset.host_size * 8);
+            const shift_val: WValue = if (ptr_info.packed_offset.host_size <= 4)
+                .{ .imm32 = ptr_info.packed_offset.bit_offset }
+            else if (ptr_info.packed_offset.host_size <= 8)
+                .{ .imm64 = ptr_info.packed_offset.bit_offset }
+            else
+                .{ .imm32 = ptr_info.packed_offset.bit_offset };
 
-        const stack_loaded = try cg.load(operand, int_elem_ty, 0);
-        const shifted = try cg.binOp(stack_loaded, shift_val, int_elem_ty, .shr);
-        break :result try cg.trunc(shifted, ty, int_elem_ty);
+            const stack_loaded = if (ptr_info.packed_offset.host_size <= 8)
+                try cg.load(operand, int_elem_ty, 0)
+            else
+                operand;
+            const shifted = try cg.binOp(stack_loaded, shift_val, int_elem_ty, .shr);
+            break :result try cg.trunc(shifted, ty, int_elem_ty);
+        }
     };
     return cg.finishAir(inst, result, &.{ty_op.operand});
 }
@@ -3857,15 +3892,12 @@ fn airStructFieldVal(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         const packed_struct = zcu.typeToPackedStruct(struct_ty).?;
         const offset = pt.structPackedFieldBitOffset(packed_struct, field_index);
         const backing_ty = Type.fromInterned(packed_struct.backingIntTypeUnordered(ip));
-        const wasm_bits = toWasmBits(backing_ty.intInfo(zcu).bits) orelse {
-            return cg.fail("TODO: airStructFieldVal for packed structs larger than 128 bits", .{});
-        };
-        const const_wvalue: WValue = if (wasm_bits == 32)
-            .{ .imm32 = offset }
-        else if (wasm_bits == 64)
+        const host_bits = backing_ty.intInfo(zcu).bits;
+
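+        // The shift amount is emitted as an i64 immediate when the backing integer
+        // maps to a wasm i64 (33..64 bits), and as an i32 immediate otherwise.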
+        const const_wvalue: WValue = if (33 <= host_bits and host_bits <= 64)
             .{ .imm64 = offset }
         else
-            return cg.fail("TODO: airStructFieldVal for packed structs larger than 64 bits", .{});
+            .{ .imm32 = offset };
 
         // for first field we don't require any shifting
         const shifted_value = if (offset == 0)
@@ -4043,7 +4075,7 @@ fn airSwitchBr(cg: *CodeGen, inst: Air.Inst.Index, is_dispatch_loop: bool) Inner
     if (use_br_table) {
         const width = width_maybe.?;
 
-        const br_value_original = try cg.binOp(target, try cg.resolveInst(Air.internedToRef(min.?.toIntern())), target_ty, .sub);
+        const br_value_original = try cg.binOp(target, try cg.resolveValue(min.?), target_ty, .sub);
         _ = try cg.intcast(br_value_original, target_ty, Type.u32);
 
         const jump_table: Mir.JumpTable = .{ .length = width + 1 };
@@ -5232,7 +5264,7 @@ fn airAggregateInit(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                 }
             }
             if (sentinel) |s| {
-                const val = try cg.resolveInst(Air.internedToRef(s.toIntern()));
+                const val = try cg.resolveValue(s);
                 try cg.store(offset, val, elem_ty, 0);
             }
         } else {
@@ -5243,7 +5275,7 @@ fn airAggregateInit(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                 offset += elem_size;
             }
             if (sentinel) |s| {
-                const val = try cg.resolveInst(Air.internedToRef(s.toIntern()));
+                const val = try cg.resolveValue(s);
                 try cg.store(result, val, elem_ty, offset);
             }
         }