@@ -111,18 +111,18 @@ static void generateInstSeqImpl(int64_t Val, const MCSubtargetInfo &STI,
   assert(IsRV64 && "Can't emit >32-bit imm for non-RV64 target");
 
   // In the worst case, for a full 64-bit constant, a sequence of 8 instructions
-  // (i.e., LUI+ADDIW+SLLI+ADDI+SLLI+ADDI+SLLI+ADDI) has to be emitted. Note
-  // that the first two instructions (LUI+ADDIW) can contribute up to 32 bits
+  // (i.e., LUI+ADDI+SLLI+ADDI+SLLI+ADDI+SLLI+ADDI) has to be emitted. Note
+  // that the first two instructions (LUI+ADDI) can contribute up to 32 bits
   // while the following ADDI instructions contribute up to 12 bits each.
   //
   // On the first glance, implementing this seems to be possible by simply
-  // emitting the most significant 32 bits (LUI+ADDIW) followed by as many left
-  // shift (SLLI) and immediate additions (ADDI) as needed. However, due to the
-  // fact that ADDI performs a sign extended addition, doing it like that would
-  // only be possible when at most 11 bits of the ADDI instructions are used.
-  // Using all 12 bits of the ADDI instructions, like done by GAS, actually
-  // requires that the constant is processed starting with the least significant
-  // bit.
+  // emitting the most significant 32 bits (LUI+ADDI(W)) followed by as many
+  // left shift (SLLI) and immediate additions (ADDI) as needed. However, due to
+  // the fact that ADDI performs a sign extended addition, doing it like that
+  // would only be possible when at most 11 bits of the ADDI instructions are
+  // used. Using all 12 bits of the ADDI instructions, like done by GAS,
+  // actually requires that the constant is processed starting with the least
+  // significant bit.
   //
   // In the following, constants are processed from LSB to MSB but instruction
   // emission is performed from MSB to LSB by recursively calling
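
The comment block in the hunk above describes the LSB-first recursion. As a rough illustration only (not the LLVM implementation; the Inst record and materializeSketch name are made up here, and the trailing-zero shifts, the LUI adjustment below, and the Zb*/register handling of the real code are all omitted), the core idea could be sketched as:

#include <cstdint>
#include <vector>

struct Inst { const char *Opc; int64_t Imm; };  // hypothetical record, for illustration

// Peel the sign-extended low 12 bits, recurse on the remainder, and emit
// SLLI/ADDI on the way back out, so instructions appear MSB-to-LSB.
static void materializeSketch(int64_t Val, std::vector<Inst> &Seq) {
  if (Val >= INT32_MIN && Val <= INT32_MAX) {
    // Base case: a sign-extended 32-bit value needs at most LUI + ADDI(W).
    int64_t Hi20 = ((Val + 0x800) >> 12) & 0xFFFFF;
    int64_t Lo12 = Val & 0xFFF;
    if (Lo12 >= 0x800)
      Lo12 -= 0x1000;                         // sign-extend the low 12 bits
    if (Hi20 != 0)
      Seq.push_back({"LUI", Hi20});
    if (Lo12 != 0 || Hi20 == 0)               // real code may pick ADDI or ADDIW
      Seq.push_back({Hi20 != 0 ? "ADDIW" : "ADDI", Lo12});
    return;
  }
  int64_t Lo12 = Val & 0xFFF;
  if (Lo12 >= 0x800)
    Lo12 -= 0x1000;
  // Strip the low 12 bits (unsigned arithmetic avoids signed overflow) and
  // recurse; the division is exact because the low 12 bits are now zero.
  materializeSketch((int64_t)((uint64_t)Val - (uint64_t)Lo12) / 4096, Seq);
  Seq.push_back({"SLLI", 12});
  if (Lo12 != 0)
    Seq.push_back({"ADDI", Lo12});
}

On a full 64-bit worst case this peels 12 bits three times before hitting the 32-bit base case, reproducing the eight-instruction shape the comment mentions.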
@@ -145,7 +145,7 @@ static void generateInstSeqImpl(int64_t Val, const MCSubtargetInfo &STI,
     Val >>= ShiftAmount;
 
     // If the remaining bits don't fit in 12 bits, we might be able to reduce
-    // the // shift amount in order to use LUI which will zero the lower 12
+    // the shift amount in order to use LUI which will zero the lower 12
     // bits.
     if (ShiftAmount > 12 && !isInt<12>(Val)) {
      if (isInt<32>((uint64_t)Val << 12)) {
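
A made-up example of the adjustment this hunk is commenting on: if, after stripping the low 12 bits, the remainder is 0x12345000000 (0x12345 shifted left by 24), the plain recursion would need LUI+ADDIW to build 0x12345 and then a shift by 24. Reducing the shift to 12 and recursing on 0x12345000 instead lets a single LUI produce it, since LUI writes the upper 20 bits and zeros the lower 12, saving one instruction.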
@@ -344,8 +344,9 @@ InstSeq generateInstSeq(int64_t Val, const MCSubtargetInfo &STI) {
 
   // Perform optimization with BSETI in the Zbs extension.
   if (Res.size() > 2 && STI.hasFeature(RISCV::FeatureStdExtZbs)) {
-    // Create a simm32 value for LUI+ADDIW by forcing the upper 33 bits to zero.
-    // Xor that with original value to get which bits should be set by BSETI.
+    // Create a simm32 value for LUI+ADDI(W) by forcing the upper 33 bits to
+    // zero. Xor that with original value to get which bits should be set by
+    // BSETI.
     uint64_t Lo = Val & 0x7fffffff;
     uint64_t Hi = Val ^ Lo;
     assert(Hi != 0);
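
As a rough standalone sketch of the split this hunk describes (the bsetiBits helper is hypothetical, std::countr_zero needs C++20, and the cost comparison against the existing sequence is omitted):

#include <bit>
#include <cstdint>
#include <vector>

// Which bit indices a BSETI chain would have to set once LUI+ADDI(W) has
// produced the low 31 bits of Val.
static std::vector<unsigned> bsetiBits(uint64_t Val) {
  uint64_t Lo = Val & 0x7fffffff;  // simm32 part: bits 31..63 forced to zero
  uint64_t Hi = Val ^ Lo;          // every set bit here costs one BSETI
  std::vector<unsigned> Bits;
  while (Hi != 0) {
    Bits.push_back(std::countr_zero(Hi)); // lowest remaining set bit
    Hi &= Hi - 1;                         // clear it and continue
  }
  return Bits;
}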
@@ -372,8 +373,8 @@ InstSeq generateInstSeq(int64_t Val, const MCSubtargetInfo &STI) {
 
   // Perform optimization with BCLRI in the Zbs extension.
   if (Res.size() > 2 && STI.hasFeature(RISCV::FeatureStdExtZbs)) {
-    // Create a simm32 value for LUI+ADDIW by forcing the upper 33 bits to one.
-    // Xor that with original value to get which bits should be cleared by
+    // Create a simm32 value for LUI+ADDI(W) by forcing the upper 33 bits to
+    // one. Xor that with original value to get which bits should be cleared by
     // BCLRI.
     uint64_t Lo = Val | 0xffffffff80000000;
     uint64_t Hi = Val ^ Lo;
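
The BCLRI case mirrors the BSETI one: forcing bits 31..63 to one yields a negative simm32, and the XOR marks the high bits that must then be cleared. As a made-up example, 0xefffffff12345678 gives Lo = 0xffffffff92345678 (one LUI+ADDIW) and Hi = 0x1000000080000000, i.e. BCLRI of bits 60 and 31, four instructions in total.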