@@ -26,8 +26,8 @@ SYM_FUNC_START_WEAK(memmove)
 	 */
 
 	/* Return if nothing to do */
-	beq a0, a1, return_from_memmove
-	beqz a2, return_from_memmove
+	beq a0, a1, .Lreturn_from_memmove
+	beqz a2, .Lreturn_from_memmove
 
 	/*
 	 * Register Uses
@@ -60,7 +60,7 @@ SYM_FUNC_START_WEAK(memmove)
 	 * small enough not to bother.
 	 */
 	andi t0, a2, -(2 * SZREG)
-	beqz t0, byte_copy
+	beqz t0, .Lbyte_copy
 
 	/*
 	 * Now solve for t5 and t6.
@@ -87,14 +87,14 @@ SYM_FUNC_START_WEAK(memmove)
 	 */
 	xor t0, a0, a1
 	andi t1, t0, (SZREG - 1)
-	beqz t1, coaligned_copy
+	beqz t1, .Lcoaligned_copy
 	/* Fall through to misaligned fixup copy */
 
-misaligned_fixup_copy:
-	bltu a1, a0, misaligned_fixup_copy_reverse
+.Lmisaligned_fixup_copy:
+	bltu a1, a0, .Lmisaligned_fixup_copy_reverse
 
-misaligned_fixup_copy_forward:
-	jal t0, byte_copy_until_aligned_forward
+.Lmisaligned_fixup_copy_forward:
+	jal t0, .Lbyte_copy_until_aligned_forward
 
 	andi a5, a1, (SZREG - 1) /* Find the alignment offset of src (a1) */
 	slli a6, a5, 3 /* Multiply by 8 to convert that to bits to shift */
@@ -153,10 +153,10 @@ misaligned_fixup_copy_forward:
 	mv t3, t6 /* Fix the dest pointer in case the loop was broken */
 
 	add a1, t3, a5 /* Restore the src pointer */
-	j byte_copy_forward /* Copy any remaining bytes */
+	j .Lbyte_copy_forward /* Copy any remaining bytes */
 
-misaligned_fixup_copy_reverse:
-	jal t0, byte_copy_until_aligned_reverse
+.Lmisaligned_fixup_copy_reverse:
+	jal t0, .Lbyte_copy_until_aligned_reverse
 
 	andi a5, a4, (SZREG - 1) /* Find the alignment offset of src (a4) */
 	slli a6, a5, 3 /* Multiply by 8 to convert that to bits to shift */
@@ -215,18 +215,18 @@ misaligned_fixup_copy_reverse:
 	mv t4, t5 /* Fix the dest pointer in case the loop was broken */
 
 	add a4, t4, a5 /* Restore the src pointer */
-	j byte_copy_reverse /* Copy any remaining bytes */
+	j .Lbyte_copy_reverse /* Copy any remaining bytes */
 
 	/*
 	 * Simple copy loops for SZREG co-aligned memory locations.
 	 * These also make calls to do byte copies for any unaligned
 	 * data at their terminations.
 	 */
-coaligned_copy:
-	bltu a1, a0, coaligned_copy_reverse
+.Lcoaligned_copy:
+	bltu a1, a0, .Lcoaligned_copy_reverse
 
-coaligned_copy_forward:
-	jal t0, byte_copy_until_aligned_forward
+.Lcoaligned_copy_forward:
+	jal t0, .Lbyte_copy_until_aligned_forward
 
 1:
 	REG_L t1, ( 0 * SZREG)(a1)
@@ -235,10 +235,10 @@ coaligned_copy_forward:
 	REG_S t1, (-1 * SZREG)(t3)
 	bne t3, t6, 1b
 
-	j byte_copy_forward /* Copy any remaining bytes */
+	j .Lbyte_copy_forward /* Copy any remaining bytes */
 
-coaligned_copy_reverse:
-	jal t0, byte_copy_until_aligned_reverse
+.Lcoaligned_copy_reverse:
+	jal t0, .Lbyte_copy_until_aligned_reverse
 
 1:
 	REG_L t1, (-1 * SZREG)(a4)
@@ -247,7 +247,7 @@ coaligned_copy_reverse:
 	REG_S t1, ( 0 * SZREG)(t4)
 	bne t4, t5, 1b
 
-	j byte_copy_reverse /* Copy any remaining bytes */
+	j .Lbyte_copy_reverse /* Copy any remaining bytes */
 
 	/*
 	 * These are basically sub-functions within the function. They
@@ -258,7 +258,7 @@ coaligned_copy_reverse:
 	 * up from where they were left and we avoid code duplication
 	 * without any overhead except the call in and return jumps.
 	 */
-byte_copy_until_aligned_forward:
+.Lbyte_copy_until_aligned_forward:
 	beq t3, t5, 2f
 1:
 	lb t1, 0(a1)
@@ -269,7 +269,7 @@ byte_copy_until_aligned_forward:
 2:
 	jalr zero, 0x0(t0) /* Return to multibyte copy loop */
 
-byte_copy_until_aligned_reverse:
+.Lbyte_copy_until_aligned_reverse:
 	beq t4, t6, 2f
 1:
 	lb t1, -1(a4)
@@ -285,10 +285,10 @@ byte_copy_until_aligned_reverse:
 	 * These will byte copy until they reach the end of data to copy.
 	 * At that point, they will call to return from memmove.
 	 */
-byte_copy:
-	bltu a1, a0, byte_copy_reverse
+.Lbyte_copy:
+	bltu a1, a0, .Lbyte_copy_reverse
 
-byte_copy_forward:
+.Lbyte_copy_forward:
 	beq t3, t4, 2f
 1:
 	lb t1, 0(a1)
@@ -299,7 +299,7 @@ byte_copy_forward:
 2:
 	ret
 
-byte_copy_reverse:
+.Lbyte_copy_reverse:
 	beq t4, t3, 2f
 1:
 	lb t1, -1(a4)
@@ -309,7 +309,7 @@ byte_copy_reverse:
 	bne t4, t3, 1b
 2:
 
-return_from_memmove:
+.Lreturn_from_memmove:
 	ret
 
 SYM_FUNC_END(memmove)
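
For readers skimming the hunks above: the change only renames the file's internal jump targets with the assembler's .L prefix; no instructions are altered. With the GNU assembler on ELF targets, symbols beginning with .L are assembler-local and are not emitted into the object file's symbol table, so the intermediate copy loops stop showing up as standalone symbols next to memmove itself. Below is a minimal, standalone sketch of that convention; it is illustrative only and not taken from the kernel sources (the function name and loop are made up):

	.text
	.globl example_func
example_func:			/* global symbol: appears in the symbol table */
	beqz a0, .Ldone		/* .L-prefixed label: assembler-local, no symbol emitted */
1:				/* numeric labels are local and reusable */
	addi a0, a0, -1
	bnez a0, 1b		/* 1b = nearest "1:" looking backwards */
.Ldone:
	ret

The same numeric-label convention accounts for the 1b and 2f branch targets that appear throughout the diff.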