@@ -169,28 +169,26 @@ _user_exception:

 	/* Save only live registers. */

-UABI_W	_bbsi.l	a2, 1, 1f
+UABI_W	_bbsi.l	a2, 1, .Lsave_window_registers
 	s32i	a4, a1, PT_AREG4
 	s32i	a5, a1, PT_AREG5
 	s32i	a6, a1, PT_AREG6
 	s32i	a7, a1, PT_AREG7
-UABI_W	_bbsi.l	a2, 2, 1f
+UABI_W	_bbsi.l	a2, 2, .Lsave_window_registers
 	s32i	a8, a1, PT_AREG8
 	s32i	a9, a1, PT_AREG9
 	s32i	a10, a1, PT_AREG10
 	s32i	a11, a1, PT_AREG11
-UABI_W	_bbsi.l	a2, 3, 1f
+UABI_W	_bbsi.l	a2, 3, .Lsave_window_registers
 	s32i	a12, a1, PT_AREG12
 	s32i	a13, a1, PT_AREG13
 	s32i	a14, a1, PT_AREG14
 	s32i	a15, a1, PT_AREG15

 #if defined(USER_SUPPORT_WINDOWED)
-	_bnei	a2, 1, 1f		# only one valid frame?
+	/* If only one valid frame skip saving regs. */

-	/* Only one valid frame, skip saving regs. */
-
-	j	2f
+	beqi	a2, 1, common_exception

 	/* Save the remaining registers.
 	 * We have to save all registers up to the first '1' from
@@ -199,8 +197,8 @@ UABI_W _bbsi.l a2, 3, 1f
 	 * All register frames starting from the top field to the marked '1'
 	 * must be saved.
 	 */
-
-1:	addi	a3, a2, -1		# eliminate '1' in bit 0: yyyyxxww0
+.Lsave_window_registers:
+	addi	a3, a2, -1		# eliminate '1' in bit 0: yyyyxxww0
 	neg	a3, a3			# yyyyxxww0 -> YYYYXXWW1+1
 	and	a3, a3, a2		# max. only one bit is set

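A quick worked example of the bit trick above (illustrative values only, not part of the patch): suppose the window mask in a2 is 0b1001, i.e. frames are marked at bits 0 and 3.

	addi	a3, a2, -1	# a3 = 0b1000       (the '1' in bit 0 is cleared)
	neg	a3, a3		# a3 = ...11111000  (two's complement)
	and	a3, a3, a2	# a3 = 0b1000       (only the first '1' above bit 0 survives)

So at most one bit is set in a3, marking the frame boundary the comments describe.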
@@ -241,7 +239,7 @@ UABI_W _bbsi.l a2, 3, 1f

 	/* We are back to the original stack pointer (a1) */
 #endif
-2:	/* Now, jump to the common exception handler. */
+	/* Now, jump to the common exception handler. */

 	j	common_exception

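The rename from numeric to named labels is mechanical, but it is what the whole patch is about. A minimal sketch of the difference (generic GNU assembler usage, label names hypothetical, not taken from the kernel): a numeric label such as 1: may be defined many times, and a reference like 1f binds to the nearest following definition, so a later edit that adds another 1: between a branch and its intended target silently retargets the branch. A .L-prefixed name is unique within the file, documents the destination at the branch site, and is treated as a local symbol that the ELF assembler keeps out of the object file's symbol table.

	# Numeric local label: "1f" means "the nearest following 1:".
	beqz	a2, 1f
	movi	a3, 0
1:	ret

	# Named local label: unique, self-documenting, stays local to the file.
	beqz	a2, .Lskip_clear	# hypothetical label name
	movi	a3, 0
.Lskip_clear:
	ret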
@@ -795,7 +793,7 @@ ENDPROC(kernel_exception)
 ENTRY(debug_exception)

 	rsr	a0, SREG_EPS + XCHAL_DEBUGLEVEL
-	bbsi.l	a0, PS_EXCM_BIT, 1f	# exception mode
+	bbsi.l	a0, PS_EXCM_BIT, .Ldebug_exception_in_exception	# exception mode

 	/* Set EPC1 and EXCCAUSE */

@@ -814,10 +812,10 @@ ENTRY(debug_exception)

 	/* Switch to kernel/user stack, restore jump vector, and save a0 */

-	bbsi.l	a2, PS_UM_BIT, 2f	# jump if user mode
-
+	bbsi.l	a2, PS_UM_BIT, .Ldebug_exception_user	# jump if user mode
 	addi	a2, a1, -16 - PT_KERNEL_SIZE	# assume kernel stack
-3:
+
+.Ldebug_exception_continue:
 	l32i	a0, a3, DT_DEBUG_SAVE
 	s32i	a1, a2, PT_AREG1
 	s32i	a0, a2, PT_AREG0
@@ -845,19 +843,21 @@ ENTRY(debug_exception)
 	bbsi.l	a2, PS_UM_BIT, _user_exception
 	j	_kernel_exception

-2:	rsr	a2, excsave1
+.Ldebug_exception_user:
+	rsr	a2, excsave1
 	l32i	a2, a2, EXC_TABLE_KSTK	# load kernel stack pointer
-	j	3b
+	j	.Ldebug_exception_continue

+.Ldebug_exception_in_exception:
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
 	/* Debug exception while in exception mode. This may happen when
 	 * window overflow/underflow handler or fast exception handler hits
 	 * data breakpoint, in which case save and disable all data
 	 * breakpoints, single-step faulting instruction and restore data
 	 * breakpoints.
 	 */
-1:
-	bbci.l	a0, PS_UM_BIT, 1b	# jump if kernel mode
+
+	bbci.l	a0, PS_UM_BIT, .Ldebug_exception_in_exception	# jump if kernel mode

 	rsr	a0, debugcause
 	bbsi.l	a0, DEBUGCAUSE_DBREAK_BIT, .Ldebug_save_dbreak
@@ -901,7 +901,7 @@ ENTRY(debug_exception)
 	rfi	XCHAL_DEBUGLEVEL
 #else
 	/* Debug exception while in exception mode. Should not happen. */
-1:	j	1b	// FIXME!!
+	j	.Ldebug_exception_in_exception	// FIXME!!
 #endif

 ENDPROC(debug_exception)
@@ -1630,12 +1630,13 @@ ENTRY(fast_second_level_miss)

 	GET_CURRENT(a1, a2)
 	l32i	a0, a1, TASK_MM		# tsk->mm
-	beqz	a0, 9f
+	beqz	a0, .Lfast_second_level_miss_no_mm

-8:	rsr	a3, excvaddr		# fault address
+.Lfast_second_level_miss_continue:
+	rsr	a3, excvaddr		# fault address
 	_PGD_OFFSET(a0, a3, a1)
 	l32i	a0, a0, 0		# read pmdval
-	beqz	a0, 2f
+	beqz	a0, .Lfast_second_level_miss_no_pmd

 	/* Read ptevaddr and convert to top of page-table page.
 	 *
@@ -1678,12 +1679,13 @@ ENTRY(fast_second_level_miss)
 	addi	a3, a3, DTLB_WAY_PGD
 	add	a1, a1, a3		# ... + way_number

-3:	wdtlb	a0, a1
+.Lfast_second_level_miss_wdtlb:
+	wdtlb	a0, a1
 	dsync

 	/* Exit critical section. */
-
-4:	rsr	a3, excsave1
+.Lfast_second_level_miss_skip_wdtlb:
+	rsr	a3, excsave1
 	movi	a0, 0
 	s32i	a0, a3, EXC_TABLE_FIXUP

@@ -1707,19 +1709,21 @@ ENTRY(fast_second_level_miss)
 	esync
 	rfde

-9:	l32i	a0, a1, TASK_ACTIVE_MM	# unlikely case mm == 0
-	bnez	a0, 8b
+.Lfast_second_level_miss_no_mm:
+	l32i	a0, a1, TASK_ACTIVE_MM	# unlikely case mm == 0
+	bnez	a0, .Lfast_second_level_miss_continue

 	/* Even more unlikely case active_mm == 0.
 	 * We can get here with NMI in the middle of context_switch that
 	 * touches vmalloc area.
 	 */
 	movi	a0, init_mm
-	j	8b
+	j	.Lfast_second_level_miss_continue

+.Lfast_second_level_miss_no_pmd:
 #if (DCACHE_WAY_SIZE > PAGE_SIZE)

-2:	/* Special case for cache aliasing.
+	/* Special case for cache aliasing.
 	 * We (should) only get here if a clear_user_page, copy_user_page
 	 * or the aliased cache flush functions got preemptively interrupted
 	 * by another task. Re-establish temporary mapping to the
@@ -1729,24 +1733,24 @@ ENTRY(fast_second_level_miss)
 	/* We shouldn't be in a double exception */

 	l32i	a0, a2, PT_DEPC
-	bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 2f
+	bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, .Lfast_second_level_miss_slow

 	/* Make sure the exception originated in the special functions */

 	movi	a0, __tlbtemp_mapping_start
 	rsr	a3, epc1
-	bltu	a3, a0, 2f
+	bltu	a3, a0, .Lfast_second_level_miss_slow
 	movi	a0, __tlbtemp_mapping_end
-	bgeu	a3, a0, 2f
+	bgeu	a3, a0, .Lfast_second_level_miss_slow

 	/* Check if excvaddr was in one of the TLBTEMP_BASE areas. */

 	movi	a3, TLBTEMP_BASE_1
 	rsr	a0, excvaddr
-	bltu	a0, a3, 2f
+	bltu	a0, a3, .Lfast_second_level_miss_slow

 	addi	a1, a0, -TLBTEMP_SIZE
-	bgeu	a1, a3, 2f
+	bgeu	a1, a3, .Lfast_second_level_miss_slow

 	/* Check if we have to restore an ITLB mapping. */

@@ -1772,19 +1776,19 @@ ENTRY(fast_second_level_miss)

 	mov	a0, a6
 	movnez	a0, a7, a3
-	j	3b
+	j	.Lfast_second_level_miss_wdtlb

 	/* ITLB entry. We only use dst in a6. */

 1:	witlb	a6, a1
 	isync
-	j	4b
+	j	.Lfast_second_level_miss_skip_wdtlb


 #endif	// DCACHE_WAY_SIZE > PAGE_SIZE

-
-2:	/* Invalid PGD, default exception handling */
+	/* Invalid PGD, default exception handling */
+.Lfast_second_level_miss_slow:

 	rsr	a1, depc
 	s32i	a1, a2, PT_AREG2
@@ -1824,12 +1828,13 @@ ENTRY(fast_store_prohibited)

 	GET_CURRENT(a1, a2)
 	l32i	a0, a1, TASK_MM		# tsk->mm
-	beqz	a0, 9f
+	beqz	a0, .Lfast_store_no_mm

-8:	rsr	a1, excvaddr		# fault address
+.Lfast_store_continue:
+	rsr	a1, excvaddr		# fault address
 	_PGD_OFFSET(a0, a1, a3)
 	l32i	a0, a0, 0
-	beqz	a0, 2f
+	beqz	a0, .Lfast_store_slow

 	/*
 	 * Note that we test _PAGE_WRITABLE_BIT only if PTE is present
@@ -1839,8 +1844,8 @@ ENTRY(fast_store_prohibited)
 	_PTE_OFFSET(a0, a1, a3)
 	l32i	a3, a0, 0		# read pteval
 	movi	a1, _PAGE_CA_INVALID
-	ball	a3, a1, 2f
-	bbci.l	a3, _PAGE_WRITABLE_BIT, 2f
+	ball	a3, a1, .Lfast_store_slow
+	bbci.l	a3, _PAGE_WRITABLE_BIT, .Lfast_store_slow

 	movi	a1, _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE
 	or	a3, a3, a1
@@ -1868,7 +1873,6 @@ ENTRY(fast_store_prohibited)
 	l32i	a2, a2, PT_DEPC

 	bgeui	a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
-
 	rsr	a2, depc
 	rfe

@@ -1878,10 +1882,12 @@ ENTRY(fast_store_prohibited)
 	esync
 	rfde

-9:	l32i	a0, a1, TASK_ACTIVE_MM	# unlikely case mm == 0
-	j	8b
+.Lfast_store_no_mm:
+	l32i	a0, a1, TASK_ACTIVE_MM	# unlikely case mm == 0
+	j	.Lfast_store_continue

-2:	/* If there was a problem, handle fault in C */
+	/* If there was a problem, handle fault in C */
+.Lfast_store_slow:
 	rsr	a1, excvaddr
 	pdtlb	a0, a1
 	bbci.l	a0, DTLB_HIT_BIT, 1f