4 files changed, 32 insertions(+), 3 deletions(-)

@@ -24,6 +24,7 @@ struct hyp_pool {
 
 /* Allocation */
 void *hyp_alloc_pages(struct hyp_pool *pool, unsigned short order);
+void hyp_split_page(struct hyp_page *page);
 void hyp_get_page(struct hyp_pool *pool, void *addr);
 void hyp_put_page(struct hyp_pool *pool, void *addr);
 
@@ -35,7 +35,18 @@ const u8 pkvm_hyp_id = 1;
 
 static void *host_s2_zalloc_pages_exact(size_t size)
 {
-	return hyp_alloc_pages(&host_s2_pool, get_order(size));
+	void *addr = hyp_alloc_pages(&host_s2_pool, get_order(size));
+
+	hyp_split_page(hyp_virt_to_page(addr));
+
+	/*
+	 * The size of concatenated PGDs is always a power of two of PAGE_SIZE,
+	 * so there should be no need to free any of the tail pages to make the
+	 * allocation exact.
+	 */
+	WARN_ON(size != (PAGE_SIZE << get_order(size)));
+
+	return addr;
 }
 
 static void *host_s2_zalloc_page(void *pool)
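The WARN_ON encodes the reasoning in the comment: hyp_alloc_pages() can only hand back power-of-two multiples of PAGE_SIZE, so this "exact" allocator is only truly exact when the requested size already is one. A minimal sketch of the rounding arithmetic, assuming 4 KiB pages (MY_PAGE_SIZE and my_get_order are illustrative stand-ins, not kernel code):

#define MY_PAGE_SHIFT	12
#define MY_PAGE_SIZE	(1UL << MY_PAGE_SHIFT)

/* Rounds up to the smallest order whose block covers `size` bytes. */
static unsigned int my_get_order(unsigned long size)
{
	unsigned int order = 0;

	size = (size - 1) >> MY_PAGE_SHIFT;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

/*
 * my_get_order(16384) == 2 and (MY_PAGE_SIZE << 2) == 16384: exact.
 * my_get_order(12288) == 2 but (MY_PAGE_SIZE << 2) == 16384 != 12288:
 * the allocation overshoots by one page, and the WARN_ON above fires.
 */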
@@ -152,6 +152,7 @@ static inline void hyp_page_ref_inc(struct hyp_page *p)
 
 static inline int hyp_page_ref_dec_and_test(struct hyp_page *p)
 {
+	BUG_ON(!p->refcount);
 	p->refcount--;
 	return (p->refcount == 0);
 }
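The new BUG_ON turns a refcount underflow (a put without a matching get) into an immediate, loud failure instead of letting the unsigned counter wrap around and leave the page looking heavily referenced. A minimal sketch of the same defensive pattern outside the hypervisor, with assert() as a stand-in for BUG_ON (illustrative only):

#include <assert.h>

struct page_meta {
	unsigned short refcount;
};

/* Returns nonzero when the count drops to zero, as in the hunk above. */
static int ref_dec_and_test(struct page_meta *p)
{
	assert(p->refcount != 0);	/* decrementing 0 would wrap to 65535 */
	p->refcount--;
	return p->refcount == 0;
}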
@@ -193,6 +194,20 @@ void hyp_get_page(struct hyp_pool *pool, void *addr)
 	hyp_spin_unlock(&pool->lock);
 }
 
+void hyp_split_page(struct hyp_page *p)
+{
+	unsigned short order = p->order;
+	unsigned int i;
+
+	p->order = 0;
+	for (i = 1; i < (1 << order); i++) {
+		struct hyp_page *tail = p + i;
+
+		tail->order = 0;
+		hyp_set_page_refcounted(tail);
+	}
+}
+
 void *hyp_alloc_pages(struct hyp_pool *pool, unsigned short order)
 {
 	unsigned short i = order;
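hyp_split_page() demotes one order-N block into 2^N independent order-0 pages: the head keeps its existing refcount and merely has its order cleared, while each tail page is marked order-0 and given a refcount so hyp_put_page() can later release every PAGE_SIZE chunk individually. A walk-through of the loop for a hypothetical order-2 block:

/*
 * With order == 2 the block spans 1 << 2 == 4 struct hyp_page entries:
 *
 *   p + 0: head, p->order set to 0, refcount left untouched
 *   p + 1: tail, order = 0, hyp_set_page_refcounted()
 *   p + 2: tail, order = 0, hyp_set_page_refcounted()
 *   p + 3: tail, order = 0, hyp_set_page_refcounted()
 *
 * Afterwards the allocator sees four standalone pages, so each one can
 * be refcounted and freed on its own rather than only as a single
 * order-2 block.
 */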
@@ -1529,8 +1529,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 		 * when updating the PG_mte_tagged page flag, see
 		 * sanitise_mte_tags for more details.
 		 */
-		if (kvm_has_mte(kvm) && vma->vm_flags & VM_SHARED)
-			return -EINVAL;
+		if (kvm_has_mte(kvm) && vma->vm_flags & VM_SHARED) {
+			ret = -EINVAL;
+			break;
+		}
 
 		if (vma->vm_flags & VM_PFNMAP) {
 			/* IO region dirty page logging not allowed */
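Replacing the early return with ret = -EINVAL; break; matters because this check sits in a loop that runs with the mmap lock held: breaking out routes the error through the function's common exit path, which drops the lock, whereas the bare return leaked it. A simplified reconstruction of the surrounding control flow (the loop details are paraphrased from kvm_arch_prepare_memory_region and are not part of this hunk):

	mmap_read_lock(current->mm);
	do {
		/* ... look up the VMA covering hva ... */
		if (kvm_has_mte(kvm) && vma->vm_flags & VM_SHARED) {
			ret = -EINVAL;
			break;			/* fall through to the unlock */
		}
		/* ... */
	} while (hva < reg_end);
	/* ... */
	mmap_read_unlock(current->mm);		/* a bare return would skip this */
	return ret;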