@@ -18,12 +18,13 @@ struct shared_region {
     asid_t asid;
     struct mp_region region;
     cpumap_t sharing_cpus;
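+    /* MPU lock attribute of the region, carried to remote cores with the broadcast */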
+    bool lock;
 };
 
 void mem_handle_broadcast_region(uint32_t event, uint64_t data);
 bool mem_unmap_range(struct addr_space* as, vaddr_t vaddr, size_t size, bool broadcast);
 
-enum { MEM_INSERT_REGION, MEM_REMOVE_REGION };
+enum { MEM_INSERT_REGION, MEM_REMOVE_REGION, MEM_UPDATE_REGION };
 
 #define SHARED_REGION_POOL_SIZE_DEFAULT (128)
 #ifndef SHARED_REGION_POOL_SIZE
@@ -58,7 +59,8 @@ static int vmpu_node_cmp(node_t* _n1, node_t* _n2)
     }
 }
 
-static void mem_vmpu_set_entry(struct addr_space* as, mpid_t mpid, struct mp_region* mpr)
+static void mem_vmpu_set_entry(struct addr_space* as, mpid_t mpid, struct mp_region* mpr,
+    bool locked)
 {
     struct mpe* mpe = mem_vmpu_get_entry(as, mpid);
 
@@ -68,6 +70,7 @@ static void mem_vmpu_set_entry(struct addr_space* as, mpid_t mpid, struct mp_reg
     mpe->region.as_sec = mpr->as_sec;
     mpe->state = MPE_S_VALID;
     mpe->mpid = mpid;
+    mpe->lock = locked;
 
     list_insert_ordered(&cpu()->as.vmpu.ordered_list, (node_t*)&cpu()->as.vmpu.node[mpid],
         vmpu_node_cmp);
@@ -82,6 +85,7 @@ static void mem_vmpu_clear_entry(struct addr_space* as, mpid_t mpid)
     mpe->region.mem_flags = PTE_INVALID;
     mpe->region.as_sec = SEC_UNKNOWN;
     mpe->state = MPE_S_INVALID;
+    mpe->lock = false;
 }
 
 static void mem_vmpu_free_entry(struct addr_space* as, mpid_t mpid)
@@ -112,6 +116,7 @@ static mpid_t mem_vmpu_allocate_entry(struct addr_space* as)
 static mpid_t mem_vmpu_get_entry_by_addr(struct addr_space* as, vaddr_t addr)
 {
     mpid_t mpid = INVALID_MPID;
+    struct mpe* mpe;
 
     for (mpid_t i = 0; i < VMPU_NUM_ENTRIES; i++) {
-        struct mpe* mpe = mem_vmpu_get_entry(as, i);
+        mpe = mem_vmpu_get_entry(as, i);
@@ -150,7 +155,7 @@ static inline priv_t as_priv(struct addr_space* as)
     return priv;
 }
 
-static void as_init_boot_regions(void)
+static void mem_init_boot_regions(void)
 {
     /**
      * Add hypervisor mpu entries set up during boot to the vmpu:
@@ -164,8 +169,12 @@ static void as_init_boot_regions(void)
     vaddr_t image_noload_start = (vaddr_t)&_image_noload_start;
     vaddr_t image_end = (vaddr_t)&_image_end;
 
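+    /* On a non-unified layout the data/noload sections start at a separate VMA,
+     * exported by the linker script as _data_vma_start. */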
+#ifdef MEM_NON_UNIFIED
+    extern uint8_t _data_vma_start;
+    vaddr_t data_vma_start = (vaddr_t)&_data_vma_start;
+#endif
+
     struct mp_region mpr;
-    mpid_t mpid = 0;
 
     bool separate_noload_region = image_load_end != image_noload_start;
     vaddr_t first_region_end = separate_noload_region ? image_load_end : image_end;
@@ -176,18 +185,21 @@ static void as_init_boot_regions(void)
         .mem_flags = PTE_HYP_FLAGS,
         .as_sec = SEC_HYP_IMAGE,
     };
-    mem_vmpu_set_entry(&cpu()->as, mpid, &mpr);
-    mpid++;
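+    /* Boot regions now go through the regular mem_map() path: broadcast to
+     * other cores and mapped as locked entries. */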
+    mem_map(&cpu()->as, &mpr, true, true);
 
     if (separate_noload_region) {
         mpr = (struct mp_region){
+#ifdef MEM_NON_UNIFIED
+            .base = data_vma_start,
+            .size = (size_t)(image_end - data_vma_start),
+#else
             .base = image_noload_start,
             .size = (size_t)image_end - image_noload_start,
+#endif
             .mem_flags = PTE_HYP_FLAGS,
             .as_sec = SEC_HYP_IMAGE,
         };
-        mem_vmpu_set_entry(&cpu()->as, mpid, &mpr);
-        mpid++;
+        mem_map(&cpu()->as, &mpr, true, true);
     }
 
     mpr = (struct mp_region){
@@ -196,15 +208,14 @@ static void as_init_boot_regions(void)
         .mem_flags = PTE_HYP_FLAGS,
         .as_sec = SEC_HYP_PRIVATE,
     };
-    mem_vmpu_set_entry(&cpu()->as, mpid, &mpr);
-    mpid++;
+    mem_map(&cpu()->as, &mpr, true, true);
 }
 
 void mem_prot_init()
 {
     mpu_init();
     as_init(&cpu()->as, AS_HYP, HYP_ASID, BIT_MASK(0, PLAT_CPU_NUM), 0);
-    as_init_boot_regions();
+    mem_init_boot_regions();
     mpu_enable();
 }
 
@@ -250,7 +261,8 @@ static void mem_msg_handler(uint32_t event, uint64_t data)
 }
 CPU_MSG_HANDLER(mem_msg_handler, MEM_PROT_SYNC)
 
-static void mem_region_broadcast(struct addr_space* as, struct mp_region* mpr, uint32_t op)
+static void mem_region_broadcast(struct addr_space* as, struct mp_region* mpr, uint32_t op,
+    bool locked)
 {
     cpumap_t shared_cpus = as->cpus;
 
@@ -262,6 +274,7 @@ static void mem_region_broadcast(struct addr_space* as, struct mp_region* mpr, u
         .as_type = as->type,
         .asid = as->id,
         .region = *mpr,
+        .lock = locked,
     };
 
     for (cpuid_t cpuid = 0; cpuid < PLAT_CPU_NUM; cpuid++) {
@@ -278,23 +291,38 @@ static void mem_region_broadcast(struct addr_space* as, struct mp_region* mpr, u
 }
 
 static bool mem_vmpu_insert_region(struct addr_space* as, mpid_t mpid, struct mp_region* mpr,
-    bool broadcast)
+    bool broadcast, bool locked)
 {
     if (mpid == INVALID_MPID) {
         return false;
     }
 
-    if (mpu_map(as, mpr)) {
-        mem_vmpu_set_entry(as, mpid, mpr);
+    if (mpu_map(as, mpr, locked)) {
+        mem_vmpu_set_entry(as, mpid, mpr, locked);
         if (broadcast) {
-            mem_region_broadcast(as, mpr, MEM_INSERT_REGION);
+            mem_region_broadcast(as, mpr, MEM_INSERT_REGION, locked);
         }
         return true;
     }
 
     return false;
 }
 
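+/* Resize an existing VMPU entry in place via mpu_update(), so the region never
+ * passes through an unmapped window as it would with remove-and-reinsert. */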
+static bool mem_vmpu_update_region(struct addr_space* as, mpid_t mpid, struct mp_region merge_reg,
+    bool broadcast, bool locked)
+{
+    bool merged = false;
+
+    if (mpu_update(as, &merge_reg)) {
+        struct mpe* mpe = mem_vmpu_get_entry(as, mpid);
+        mpe->region = merge_reg;
+        if (broadcast) {
+            mem_region_broadcast(as, &mpe->region, MEM_UPDATE_REGION, locked);
+        }
+        merged = true;
+    }
+
+    return merged;
+}
+
 static bool mem_vmpu_remove_region(struct addr_space* as, mpid_t mpid, bool broadcast)
 {
     bool removed = false;
@@ -303,7 +331,7 @@ static bool mem_vmpu_remove_region(struct addr_space* as, mpid_t mpid, bool broa
 
     if ((mpe != NULL) && (mpe->state == MPE_S_VALID)) {
         if (broadcast) {
-            mem_region_broadcast(as, &mpe->region, MEM_REMOVE_REGION);
+            mem_region_broadcast(as, &mpe->region, MEM_REMOVE_REGION, mpe->lock);
         }
         mpu_unmap(as, &mpe->region);
         mem_vmpu_free_entry(as, mpid);
@@ -313,12 +341,12 @@ static bool mem_vmpu_remove_region(struct addr_space* as, mpid_t mpid, bool broa
     return removed;
 }
 
-static void mem_handle_broadcast_insert(struct addr_space* as, struct mp_region* mpr)
+static void mem_handle_broadcast_insert(struct addr_space* as, struct mp_region* mpr, bool locked)
 {
     if (as->type == AS_HYP) {
-        mem_map(&cpu()->as, mpr, false);
+        mem_map(&cpu()->as, mpr, false, locked);
     } else {
-        mpu_map(as, mpr);
+        mpu_map(as, mpr, locked);
     }
 }
 
@@ -331,6 +359,17 @@ static void mem_handle_broadcast_remove(struct addr_space* as, struct mp_region*
     }
 }
 
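+/* Remote side of MEM_UPDATE_REGION: the hypervisor space re-creates the
+ * mapping, while guest spaces are resized in place by the MPU driver. */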
+static void mem_handle_broadcast_update(struct addr_space* as, struct mp_region* mpr)
+{
+    // TODO: ARMV8M - check if this makes sense
+    if (as->type == AS_HYP) {
+        mem_unmap_range(&cpu()->as, mpr->base, mpr->size, false);
+        mem_map(&cpu()->as, mpr, false, false);
+    } else {
+        mpu_update(as, mpr);
+    }
+}
+
 void mem_handle_broadcast_region(uint32_t event, uint64_t data)
 {
     struct shared_region* sh_reg = (struct shared_region*)(uintptr_t)data;
@@ -349,11 +388,14 @@ void mem_handle_broadcast_region(uint32_t event, uint64_t data)
 
     switch (event) {
         case MEM_INSERT_REGION:
-            mem_handle_broadcast_insert(as, &sh_reg->region);
+            mem_handle_broadcast_insert(as, &sh_reg->region, sh_reg->lock);
             break;
         case MEM_REMOVE_REGION:
             mem_handle_broadcast_remove(as, &sh_reg->region);
             break;
+        case MEM_UPDATE_REGION:
+            mem_handle_broadcast_update(as, &sh_reg->region);
+            break;
         default:
             ERROR("unknown mem broadcast msg");
     }
@@ -382,7 +424,7 @@ static mpid_t mem_vmpu_find_overlapping_region(struct addr_space* as, struct mp_
     return mpid;
 }
 
-void mem_vmpu_coalesce_contiguous(struct addr_space* as, bool broadcast)
+void mem_vmpu_coalesce_contiguous(struct addr_space* as, bool broadcast, bool locked)
 {
     while (true) {
         bool merge = false;
@@ -398,10 +440,11 @@ void mem_vmpu_coalesce_contiguous(struct addr_space* as, bool broadcast)
             cur_reg = mem_vmpu_get_entry(as, cur->mpid);
             prev_reg = mem_vmpu_get_entry(as, prev->mpid);
 
-            bool contigous = prev_reg->region.base + prev_reg->region.size == cur_reg->region.base;
+            bool contiguous = prev_reg->region.base + prev_reg->region.size == cur_reg->region.base;
             bool perms_compatible =
-                mpu_perms_comptible(prev_reg->region.mem_flags.raw, cur_reg->region.mem_flags.raw);
-            if (contigous && perms_compatible) {
+                mpu_perms_compatible(prev_reg->region.mem_flags.raw, cur_reg->region.mem_flags.raw);
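+            /* Regions with different lock attributes must never be merged */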
+            bool lock_compatible = prev_reg->lock == cur_reg->lock;
+            if (contiguous && perms_compatible && lock_compatible) {
                 cur_mpid = cur->mpid;
                 prev_mpid = prev->mpid;
                 merge = true;
@@ -415,19 +458,16 @@ void mem_vmpu_coalesce_contiguous(struct addr_space* as, bool broadcast)
                 .size = prev_reg->region.size + cur_reg->region.size,
                 .mem_flags = cur_reg->region.mem_flags,
             };
-            mem_vmpu_remove_region(as, cur_mpid, broadcast);
-            mem_vmpu_remove_region(as, prev_mpid, broadcast);
-            mpid_t mpid = mem_vmpu_allocate_entry(as);
-            if (mpid != INVALID_MPID) {
-                mem_vmpu_insert_region(as, mpid, &merged_reg, broadcast);
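+            /* Grow the lower region in place and only then drop the upper one,
+             * instead of the previous remove/remove/re-insert sequence. */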
+            if (mem_vmpu_update_region(as, prev_mpid, merged_reg, broadcast, locked)) {
+                mem_vmpu_remove_region(as, cur_mpid, broadcast);
             }
         } else {
             break;
         }
     }
 }
 
-bool mem_map(struct addr_space* as, struct mp_region* mpr, bool broadcast)
+bool mem_map(struct addr_space* as, struct mp_region* mpr, bool broadcast, bool locked)
 {
     bool mapped = false;
 
@@ -443,15 +483,14 @@ bool mem_map(struct addr_space* as, struct mp_region* mpr, bool broadcast)
     spin_lock(&as->lock);
 
     if (mem_vmpu_find_overlapping_region(as, mpr) == INVALID_MPID) {
-        // TODO: check if it possible to merge with another region
         mpid_t mpid = mem_vmpu_allocate_entry(as);
         if (mpid != INVALID_MPID) {
-            mapped = mem_vmpu_insert_region(as, mpid, mpr, broadcast);
+            mapped = mem_vmpu_insert_region(as, mpid, mpr, broadcast, locked);
         }
     }
 
     if (mapped) {
-        mem_vmpu_coalesce_contiguous(as, broadcast);
+        mem_vmpu_coalesce_contiguous(as, broadcast, locked);
     }
 
     spin_unlock(&as->lock);
@@ -484,6 +523,8 @@ bool mem_unmap_range(struct addr_space* as, vaddr_t vaddr, size_t size, bool bro
         struct mpe* mpe = mem_vmpu_get_entry(as, mpid);
         reg = mpe->region;
 
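+        /* Remnants of the split inherit the original region's lock attribute */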
+        bool locked = mpe->lock;
+
         vaddr_t limit = vaddr + size;
         vaddr_t r_limit = reg.base + reg.size;
         vaddr_t r_base = reg.base;
@@ -498,14 +539,14 @@ bool mem_unmap_range(struct addr_space* as, vaddr_t vaddr, size_t size, bool bro
             top.base = limit;
             top.size = top_size;
             mpid_t top_mpid = mem_vmpu_allocate_entry(as);
-            mem_vmpu_insert_region(as, top_mpid, &top, true);
+            mem_vmpu_insert_region(as, top_mpid, &top, true, locked);
         }
 
         if (bottom_size > 0) {
             struct mp_region bottom = reg;
             bottom.size = bottom_size;
             mpid_t bottom_mpid = mem_vmpu_allocate_entry(as);
-            mem_vmpu_insert_region(as, bottom_mpid, &bottom, true);
+            mem_vmpu_insert_region(as, bottom_mpid, &bottom, true, locked);
         }
 
         size_t overlap_size = reg.size - top_size - bottom_size;
@@ -546,7 +587,7 @@ vaddr_t mem_map_cpy(struct addr_space* ass, struct addr_space* asd, vaddr_t vas,
     mpr = mpe->region;
     spin_unlock(&ass->lock);
 
-    if (mem_map(asd, &mpr, true)) {
+    if (mem_map(asd, &mpr, true, false)) {
         va_res = vas;
     } else {
         INFO("failed mem map on mem map cpy");
@@ -598,7 +639,7 @@ vaddr_t mem_alloc_map(struct addr_space* as, as_sec_t section, struct ppages* pp
         .mem_flags = flags,
     };
 
-    mem_map(as, &mpr, true);
+    mem_map(as, &mpr, true, false);
 
     return at;
 }