 #include <asm/pti.h>
 #include <asm/processor-flags.h>

-/*
- * The x86 feature is called PCID (Process Context IDentifier). It is similar
- * to what is traditionally called ASID on the RISC processors.
- *
- * We don't use the traditional ASID implementation, where each process/mm gets
- * its own ASID and flush/restart when we run out of ASID space.
- *
- * Instead we have a small per-cpu array of ASIDs and cache the last few mm's
- * that came by on this CPU, allowing cheaper switch_mm between processes on
- * this CPU.
- *
- * We end up with different spaces for different things. To avoid confusion we
- * use different names for each of them:
- *
- * ASID  - [0, TLB_NR_DYN_ASIDS-1]
- *         the canonical identifier for an mm
- *
- * kPCID - [1, TLB_NR_DYN_ASIDS]
- *         the value we write into the PCID part of CR3; corresponds to the
- *         ASID+1, because PCID 0 is special.
- *
- * uPCID - [2048 + 1, 2048 + TLB_NR_DYN_ASIDS]
- *         for KPTI each mm has two address spaces and thus needs two
- *         PCID values, but we can still do with a single ASID denomination
- *         for each mm. Corresponds to kPCID + 2048.
- *
- */
-
-/* There are 12 bits of space for ASIDs in CR3 */
-#define CR3_HW_ASID_BITS		12
-
-/*
- * When enabled, PAGE_TABLE_ISOLATION consumes a single bit for
- * user/kernel switches
- */
-#ifdef CONFIG_PAGE_TABLE_ISOLATION
-# define PTI_CONSUMED_PCID_BITS	1
-#else
-# define PTI_CONSUMED_PCID_BITS	0
-#endif
-
-#define CR3_AVAIL_PCID_BITS (X86_CR3_PCID_BITS - PTI_CONSUMED_PCID_BITS)
-
-/*
- * ASIDs are zero-based: 0->MAX_ASID_AVAILABLE are valid. -1 below to account
- * for them being zero-based. Another -1 is because PCID 0 is reserved for
- * use by non-PCID-aware users.
- */
-#define MAX_ASID_AVAILABLE ((1 << CR3_AVAIL_PCID_BITS) - 2)
-
-/*
- * 6 because 6 should be plenty and struct tlb_state will fit in two cache
- * lines.
- */
-#define TLB_NR_DYN_ASIDS	6
-
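For concreteness, the bit budget defined above works out as follows: with PAGE_TABLE_ISOLATION enabled, one of the 12 hardware PCID bits is reserved for the user/kernel switch, so CR3_AVAIL_PCID_BITS is 11 and MAX_ASID_AVAILABLE is (1 << 11) - 2 = 2046; the six dynamic ASIDs sit comfortably inside that range. A minimal standalone sketch of the same arithmetic (constants mirror the definitions in the hunk above; this is an illustration, not part of the patch):

#include <stdio.h>

/* Constants mirror the definitions removed in the hunk above. */
#define X86_CR3_PCID_BITS	12
#define PTI_CONSUMED_PCID_BITS	1	/* PAGE_TABLE_ISOLATION enabled */
#define CR3_AVAIL_PCID_BITS	(X86_CR3_PCID_BITS - PTI_CONSUMED_PCID_BITS)
#define MAX_ASID_AVAILABLE	((1 << CR3_AVAIL_PCID_BITS) - 2)
#define TLB_NR_DYN_ASIDS	6

int main(void)
{
	printf("available PCID bits : %d\n", CR3_AVAIL_PCID_BITS);	/* 11 */
	printf("max ASID available  : %d\n", MAX_ASID_AVAILABLE);	/* 2046 */
	printf("dynamic ASIDs in use: %d\n", TLB_NR_DYN_ASIDS);		/* 6 */
	return 0;
}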
-/*
- * Given @asid, compute kPCID
- */
-static inline u16 kern_pcid(u16 asid)
-{
-	VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);
-
-#ifdef CONFIG_PAGE_TABLE_ISOLATION
-	/*
-	 * Make sure that the dynamic ASID space does not conflict with the
-	 * bit we are using to switch between user and kernel ASIDs.
-	 */
-	BUILD_BUG_ON(TLB_NR_DYN_ASIDS >= (1 << X86_CR3_PTI_PCID_USER_BIT));
-
-	/*
-	 * The ASID being passed in here should have respected the
-	 * MAX_ASID_AVAILABLE and thus never have the switch bit set.
-	 */
-	VM_WARN_ON_ONCE(asid & (1 << X86_CR3_PTI_PCID_USER_BIT));
-#endif
-	/*
-	 * The dynamically-assigned ASIDs that get passed in are small
-	 * (<TLB_NR_DYN_ASIDS). They never have the high switch bit set,
-	 * so do not bother to clear it.
-	 *
-	 * If PCID is on, ASID-aware code paths put the ASID+1 into the
-	 * PCID bits. This serves two purposes. It prevents a nasty
-	 * situation in which PCID-unaware code saves CR3, loads some other
-	 * value (with PCID == 0), and then restores CR3, thus corrupting
-	 * the TLB for ASID 0 if the saved ASID was nonzero. It also means
-	 * that any bugs involving loading a PCID-enabled CR3 with
-	 * CR4.PCIDE off will trigger deterministically.
-	 */
-	return asid + 1;
-}
-
-/*
- * Given @asid, compute uPCID
- */
-static inline u16 user_pcid(u16 asid)
-{
-	u16 ret = kern_pcid(asid);
-#ifdef CONFIG_PAGE_TABLE_ISOLATION
-	ret |= 1 << X86_CR3_PTI_PCID_USER_BIT;
-#endif
-	return ret;
-}
-
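Taken together, kern_pcid() and user_pcid() implement the ASID -> kPCID/uPCID naming scheme from the comment at the top of the removed block. A standalone illustration of the resulting values, assuming X86_CR3_PTI_PCID_USER_BIT is 11 (which matches the 2048 offset mentioned in that comment); this is an editorial sketch, not kernel code:

#include <stdio.h>

#define TLB_NR_DYN_ASIDS		6
#define X86_CR3_PTI_PCID_USER_BIT	11	/* assumed; 1 << 11 == 2048 */

/* Same arithmetic as the kernel helpers above, stripped of the sanity checks. */
static unsigned short kern_pcid(unsigned short asid)
{
	return asid + 1;			/* PCID 0 is special, so shift by one */
}

static unsigned short user_pcid(unsigned short asid)
{
	return kern_pcid(asid) | (1 << X86_CR3_PTI_PCID_USER_BIT);
}

int main(void)
{
	unsigned short asid;

	for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++)
		printf("ASID %d -> kPCID %d, uPCID %d\n",
		       asid, kern_pcid(asid), user_pcid(asid));
	/* Prints ASID 0..5 -> kPCID 1..6, uPCID 2049..2054. */
	return 0;
}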
-struct pgd_t;
-static inline unsigned long build_cr3(pgd_t *pgd, u16 asid)
-{
-	if (static_cpu_has(X86_FEATURE_PCID)) {
-		return __sme_pa(pgd) | kern_pcid(asid);
-	} else {
-		VM_WARN_ON_ONCE(asid != 0);
-		return __sme_pa(pgd);
-	}
-}
-
-static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid)
-{
-	VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);
-	/*
-	 * Use boot_cpu_has() instead of this_cpu_has() as this function
-	 * might be called during early boot. This should work even after
-	 * boot because all CPUs have the same capabilities:
-	 */
-	VM_WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_PCID));
-	return __sme_pa(pgd) | kern_pcid(asid) | CR3_NOFLUSH;
-}
-
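The two builders above compose a full CR3 value: the page-aligned physical address of the PGD occupies the high bits, the kPCID fits in the low 12 bits (which are zero in a page-aligned address), and the no-flush variant additionally sets a high bit so the CPU keeps the TLB entries already tagged with that PCID. A standalone sketch of that layout, assuming CR3_NOFLUSH is bit 63 and using a made-up PGD address:

#include <stdio.h>
#include <stdint.h>

#define CR3_NOFLUSH	(1ULL << 63)	/* assumed "don't flush this PCID" bit */

/* pgd_pa stands in for __sme_pa(pgd): a 4 KiB-aligned physical address. */
static uint64_t build_cr3(uint64_t pgd_pa, uint16_t kpcid)
{
	return pgd_pa | kpcid;
}

static uint64_t build_cr3_noflush(uint64_t pgd_pa, uint16_t kpcid)
{
	return build_cr3(pgd_pa, kpcid) | CR3_NOFLUSH;
}

int main(void)
{
	uint64_t pgd_pa = 0x1234000ULL;	/* hypothetical PGD physical address */

	printf("cr3 (flush)   = %#llx\n",
	       (unsigned long long)build_cr3(pgd_pa, 3));		/* 0x1234003 */
	printf("cr3 (noflush) = %#llx\n",
	       (unsigned long long)build_cr3_noflush(pgd_pa, 3));
	return 0;
}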
 struct flush_tlb_info;

 void __flush_tlb_all(void);
@@ -153,6 +26,12 @@ void flush_tlb_others(const struct cpumask *cpumask,
 #include <asm/paravirt.h>
 #endif

+/*
+ * 6 because 6 should be plenty and struct tlb_state will fit in two cache
+ * lines.
+ */
+#define TLB_NR_DYN_ASIDS	6
+
 struct tlb_context {
 	u64 ctx_id;
 	u64 tlb_gen;
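TLB_NR_DYN_ASIDS, now defined next to struct tlb_context, sizes the small per-CPU cache of recently used mm's described in the comment block removed above: struct tlb_state keeps one tlb_context per dynamic ASID, and on a context switch that array is searched for the incoming mm so a cached PCID can be reused instead of flushed. A much-simplified standalone sketch of that lookup (the helper name and the round-robin eviction are illustrative, not the kernel's actual algorithm):

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define TLB_NR_DYN_ASIDS 6

struct tlb_context {
	uint64_t ctx_id;	/* identifies the mm that owns this slot */
	uint64_t tlb_gen;	/* generation counter, elided in this sketch */
};

static struct tlb_context ctxs[TLB_NR_DYN_ASIDS];
static uint16_t next_asid;

/* Return an ASID slot for @ctx_id; *need_flush tells the caller whether the
 * slot was recycled from another mm and its PCID must be flushed. */
static uint16_t pick_asid(uint64_t ctx_id, bool *need_flush)
{
	uint16_t asid;

	for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) {
		if (ctxs[asid].ctx_id == ctx_id) {
			*need_flush = false;	/* cache hit: cheap switch_mm */
			return asid;
		}
	}

	/* Cache miss: recycle a slot round-robin and ask for a flush. */
	next_asid = (uint16_t)((next_asid + 1) % TLB_NR_DYN_ASIDS);
	ctxs[next_asid].ctx_id = ctx_id;
	*need_flush = true;
	return next_asid;
}

int main(void)
{
	bool flush;
	uint16_t a1 = pick_asid(42, &flush);	/* miss: recycles a slot */
	uint16_t a2 = pick_asid(42, &flush);	/* hit: same ASID, no flush */

	printf("asid=%d then asid=%d, flush on second lookup: %d\n", a1, a2, flush);
	return 0;
}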