@@ -144,11 +144,6 @@ CPU_LE( vrev64.8 q12, q12 )
	veor.8		\dst_reg, \dst_reg, \src_reg
	.endm

-	.macro		__adrl, out, sym
-	movw		\out, #:lower16:\sym
-	movt		\out, #:upper16:\sym
-	.endm
-
	//
	// u16 crc_t10dif_pmull(u16 init_crc, const u8 *buf, size_t len);
	//
@@ -160,7 +155,7 @@ ENTRY(crc_t10dif_pmull)
	cmp		len, #256
	blt		.Lless_than_256_bytes

-	__adrl		fold_consts_ptr, .Lfold_across_128_bytes_consts
+	mov_l		fold_consts_ptr, .Lfold_across_128_bytes_consts

	// Load the first 128 data bytes. Byte swapping is necessary to make
	// the bit order match the polynomial coefficient order.
@@ -262,7 +257,7 @@ CPU_LE( vrev64.8 q0, q0 )
	vswp		q0l, q0h

	// q1 = high order part of second chunk: q7 left-shifted by 'len' bytes.
-	__adrl		r3, .Lbyteshift_table + 16
+	mov_l		r3, .Lbyteshift_table + 16
	sub		r3, r3, len
	vld1.8		{q2}, [r3]
	vtbl.8		q1l, {q7l-q7h}, q2l
@@ -324,7 +319,7 @@ CPU_LE( vrev64.8 q0, q0 )
.Lless_than_256_bytes:
	// Checksumming a buffer of length 16...255 bytes

-	__adrl		fold_consts_ptr, .Lfold_across_16_bytes_consts
+	mov_l		fold_consts_ptr, .Lfold_across_16_bytes_consts

	// Load the first 16 data bytes.
	vld1.64		{q7}, [buf]!
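Note: the removed __adrl macro was a local duplicate of the generic mov_l
macro provided by arch/arm/include/asm/assembler.h, which the patch switches
to at every call site. As a rough sketch of what mov_l expands to (based on
the in-tree definition; the exact form may vary between kernel versions):

	.macro		mov_l, dst:req, imm:req
	.if		__LINUX_ARM_ARCH__ < 7
	ldr		\dst, =\imm		@ pre-v7: load the constant from a literal pool
	.else
	movw		\dst, #:lower16:\imm	@ v7+: set the low 16 bits of the constant
	movt		\dst, #:upper16:\imm	@ v7+: set the high 16 bits, keeping the low half
	.endif
	.endm

On ARMv7 and later this generates the same movw/movt pair that __adrl
emitted unconditionally, so the substitution is behavior-preserving here;
mov_l additionally falls back to a literal-pool load on older architectures.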