 
 static inline void local_flush_tlb_all_asid(unsigned long asid)
 {
-        __asm__ __volatile__ ("sfence.vma x0, %0"
-                        :
-                        : "r" (asid)
-                        : "memory");
+        if (asid != FLUSH_TLB_NO_ASID)
+                __asm__ __volatile__ ("sfence.vma x0, %0"
+                                :
+                                : "r" (asid)
+                                : "memory");
+        else
+                local_flush_tlb_all();
 }
 
 static inline void local_flush_tlb_page_asid(unsigned long addr,
                                              unsigned long asid)
 {
-        __asm__ __volatile__ ("sfence.vma %0, %1"
-                        :
-                        : "r" (addr), "r" (asid)
-                        : "memory");
+        if (asid != FLUSH_TLB_NO_ASID)
+                __asm__ __volatile__ ("sfence.vma %0, %1"
+                                :
+                                : "r" (addr), "r" (asid)
+                                : "memory");
+        else
+                local_flush_tlb_page(addr);
 }
 
-static inline void local_flush_tlb_range(unsigned long start,
-                unsigned long size, unsigned long stride)
+/*
+ * Flush entire TLB if number of entries to be flushed is greater
+ * than the threshold below.
+ */
+static unsigned long tlb_flush_all_threshold __read_mostly = 64;
+
+static void local_flush_tlb_range_threshold_asid(unsigned long start,
+                                                 unsigned long size,
+                                                 unsigned long stride,
+                                                 unsigned long asid)
 {
-        if (size <= stride)
-                local_flush_tlb_page(start);
-        else
-                local_flush_tlb_all();
+        unsigned long nr_ptes_in_range = DIV_ROUND_UP(size, stride);
+        int i;
+
+        if (nr_ptes_in_range > tlb_flush_all_threshold) {
+                local_flush_tlb_all_asid(asid);
+                return;
+        }
+
+        for (i = 0; i < nr_ptes_in_range; ++i) {
+                local_flush_tlb_page_asid(start, asid);
+                start += stride;
+        }
 }
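Note: the hunk above caps the cost of a ranged flush: if the range covers more PTEs than tlb_flush_all_threshold, one full flush replaces the per-page sfence.vma loop. Below is a minimal userspace sketch of that decision only, assuming the kernel's DIV_ROUND_UP rounding; should_flush_all() and the main() driver are illustrative names, not part of the patch.

/* Illustrative model of the threshold decision above (not kernel code). */
#include <stdbool.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

static unsigned long tlb_flush_all_threshold = 64;

/* True when flushing page by page would exceed the threshold. */
static bool should_flush_all(unsigned long size, unsigned long stride)
{
        return DIV_ROUND_UP(size, stride) > tlb_flush_all_threshold;
}

int main(void)
{
        /* 64 pages of 4KiB: at the threshold, still flushed one page at a time. */
        printf("%d\n", should_flush_all(64UL << 12, 1UL << 12));        /* prints 0 */
        /* 65 pages of 4KiB: above the threshold, fall back to a full flush. */
        printf("%d\n", should_flush_all(65UL << 12, 1UL << 12));        /* prints 1 */
        return 0;
}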
 
 static inline void local_flush_tlb_range_asid(unsigned long start,
                 unsigned long size, unsigned long stride, unsigned long asid)
 {
         if (size <= stride)
                 local_flush_tlb_page_asid(start, asid);
-        else
+        else if (size == FLUSH_TLB_MAX_SIZE)
                 local_flush_tlb_all_asid(asid);
+        else
+                local_flush_tlb_range_threshold_asid(start, size, stride, asid);
 }
 
 static void __ipi_flush_tlb_all(void *info)
@@ -52,7 +76,7 @@ void flush_tlb_all(void)
         if (riscv_use_ipi_for_rfence())
                 on_each_cpu(__ipi_flush_tlb_all, NULL, 1);
         else
-                sbi_remote_sfence_vma(NULL, 0, -1);
+                sbi_remote_sfence_vma_asid(NULL, 0, FLUSH_TLB_MAX_SIZE, FLUSH_TLB_NO_ASID);
 }
 
 struct flush_tlb_range_data {
@@ -69,18 +93,12 @@ static void __ipi_flush_tlb_range_asid(void *info)
         local_flush_tlb_range_asid(d->start, d->size, d->stride, d->asid);
 }
 
-static void __ipi_flush_tlb_range(void *info)
-{
-        struct flush_tlb_range_data *d = info;
-
-        local_flush_tlb_range(d->start, d->size, d->stride);
-}
-
 static void __flush_tlb_range(struct mm_struct *mm, unsigned long start,
                               unsigned long size, unsigned long stride)
 {
         struct flush_tlb_range_data ftd;
         struct cpumask *cmask = mm_cpumask(mm);
+        unsigned long asid = FLUSH_TLB_NO_ASID;
         unsigned int cpuid;
         bool broadcast;
 
@@ -90,47 +108,32 @@ static void __flush_tlb_range(struct mm_struct *mm, unsigned long start,
         cpuid = get_cpu();
         /* check if the tlbflush needs to be sent to other CPUs */
         broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids;
-        if (static_branch_unlikely(&use_asid_allocator)) {
-                unsigned long asid = atomic_long_read(&mm->context.id) & asid_mask;
-
-                if (broadcast) {
-                        if (riscv_use_ipi_for_rfence()) {
-                                ftd.asid = asid;
-                                ftd.start = start;
-                                ftd.size = size;
-                                ftd.stride = stride;
-                                on_each_cpu_mask(cmask,
-                                                 __ipi_flush_tlb_range_asid,
-                                                 &ftd, 1);
-                        } else
-                                sbi_remote_sfence_vma_asid(cmask,
-                                                           start, size, asid);
-                } else {
-                        local_flush_tlb_range_asid(start, size, stride, asid);
-                }
+
+        if (static_branch_unlikely(&use_asid_allocator))
+                asid = atomic_long_read(&mm->context.id) & asid_mask;
+
+        if (broadcast) {
+                if (riscv_use_ipi_for_rfence()) {
+                        ftd.asid = asid;
+                        ftd.start = start;
+                        ftd.size = size;
+                        ftd.stride = stride;
+                        on_each_cpu_mask(cmask,
+                                         __ipi_flush_tlb_range_asid,
+                                         &ftd, 1);
+                } else
+                        sbi_remote_sfence_vma_asid(cmask,
+                                                   start, size, asid);
         } else {
-                if (broadcast) {
-                        if (riscv_use_ipi_for_rfence()) {
-                                ftd.asid = 0;
-                                ftd.start = start;
-                                ftd.size = size;
-                                ftd.stride = stride;
-                                on_each_cpu_mask(cmask,
-                                                 __ipi_flush_tlb_range,
-                                                 &ftd, 1);
-                        } else
-                                sbi_remote_sfence_vma(cmask, start, size);
-                } else {
-                        local_flush_tlb_range(start, size, stride);
-                }
+                local_flush_tlb_range_asid(start, size, stride, asid);
         }
 
         put_cpu();
 }
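Note: the restructuring above removes the duplicated broadcast logic by defaulting asid to the FLUSH_TLB_NO_ASID sentinel and overriding it only when the ASID allocator is in use, so one code path handles both configurations (flush_tlb_all() likewise switches to sbi_remote_sfence_vma_asid() with that sentinel). The standalone sketch below only illustrates this single-call-site shape; flush_one() and the hard-coded values are hypothetical, not part of the patch.

/* Illustrative sketch of the sentinel-based unification (not kernel code). */
#include <stdio.h>

#define FLUSH_TLB_NO_ASID       ((unsigned long)-1)

static int use_asid_allocator;  /* stand-in for the static branch */

static void flush_one(unsigned long start, unsigned long size, unsigned long asid)
{
        if (asid != FLUSH_TLB_NO_ASID)
                printf("flush [0x%lx, +0x%lx) for ASID %lu\n", start, size, asid);
        else
                printf("flush [0x%lx, +0x%lx) for all ASIDs\n", start, size);
}

int main(void)
{
        unsigned long asid = FLUSH_TLB_NO_ASID;

        /* One call site serves both configurations. */
        if (use_asid_allocator)
                asid = 42;      /* would come from mm->context.id & asid_mask */
        flush_one(0x1000, 0x2000, asid);
        return 0;
}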
 
 void flush_tlb_mm(struct mm_struct *mm)
 {
-        __flush_tlb_range(mm, 0, -1, PAGE_SIZE);
+        __flush_tlb_range(mm, 0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
 }
 
 void flush_tlb_mm_range(struct mm_struct *mm,