@@ -341,6 +341,22 @@ validatePartitions(umf_os_memory_provider_params_t *params) {
     return UMF_RESULT_SUCCESS;
 }
 
+static umf_result_t os_get_min_page_size(void *provider, void *ptr,
+                                         size_t *page_size);
+
+static umf_result_t validatePartSize(os_memory_provider_t *provider,
+                                     umf_os_memory_provider_params_t *params) {
+    size_t page_size;
+    os_get_min_page_size(provider, NULL, &page_size);
+    if (ALIGN_UP(params->part_size, page_size) < params->part_size) {
+        LOG_ERR("partition size (%zu) is too big, cannot align with a page "
+                "size (%zu)",
+                params->part_size, page_size);
+        return UMF_RESULT_ERROR_INVALID_ARGUMENT;
+    }
+    return UMF_RESULT_SUCCESS;
+}
+
 static void free_bitmaps(os_memory_provider_t *provider) {
     for (unsigned i = 0; i < provider->nodeset_len; i++) {
         hwloc_bitmap_free(provider->nodeset[i]);
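Note: the check in validatePartSize relies on round-up overflow wrapping the result below the input. A minimal standalone sketch of that idiom follows; the ALIGN_UP definition here is only an assumption (the common round-up-to-a-power-of-two macro), not necessarily the one in UMF's utils headers.

#include <stdint.h>
#include <stdio.h>

/* Assumed definition for illustration only. */
#define ALIGN_UP(value, align) (((value) + (align)-1) & ~((align)-1))

int main(void) {
    size_t page_size = 4096;
    size_t ok_size = 10 * 1024 + 1;    /* rounds up to 12 KiB, no overflow */
    size_t huge_size = SIZE_MAX - 100; /* value + align - 1 wraps around   */

    /* When the addition wraps, the aligned result is smaller than the
     * input, which is exactly the condition validatePartSize rejects. */
    printf("%d\n", ALIGN_UP(ok_size, page_size) < ok_size);     /* 0: accepted */
    printf("%d\n", ALIGN_UP(huge_size, page_size) < huge_size); /* 1: rejected */
    return 0;
}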
@@ -427,6 +443,14 @@ static umf_result_t translate_params(umf_os_memory_provider_params_t *in_params,
         return result;
     }
 
+    if (in_params->numa_mode == UMF_NUMA_MODE_INTERLEAVE) {
+        result = validatePartSize(provider, in_params);
+        if (result != UMF_RESULT_SUCCESS) {
+            LOG_ERR("incorrect partition size: %zu", in_params->part_size);
+            return result;
+        }
+    }
+
     int is_dedicated_node_bind = dedicated_node_bind(in_params);
     provider->numa_policy =
         translate_numa_mode(in_params->numa_mode, is_dedicated_node_bind);
@@ -574,9 +598,6 @@ static void os_finalize(void *provider) {
     umf_ba_global_free(os_provider);
 }
 
-static umf_result_t os_get_min_page_size(void *provider, void *ptr,
-                                         size_t *page_size);
-
 // TODO: this function should be re-enabled when CTL is implemented
 #if 0
 static void print_numa_nodes(os_memory_provider_t *os_provider, void *addr,
@@ -813,12 +834,12 @@ static membind_t membindFirst(os_memory_provider_t *provider, void *addr,
     membind_t membind;
     memset(&membind, 0, sizeof(membind));
 
-    membind.alloc_size = ALIGN_UP(size, page_size);
+    membind.alloc_size = size;
     membind.page_size = page_size;
     membind.addr = addr;
     membind.pages = membind.alloc_size / membind.page_size;
     if (provider->nodeset_len == 1) {
-        membind.bind_size = ALIGN_UP(size, membind.page_size);
+        membind.bind_size = size;
         membind.bitmap = provider->nodeset[0];
         return membind;
     }
@@ -924,7 +945,15 @@ static umf_result_t os_alloc(void *provider, size_t size, size_t alignment,
 
     // Bind memory to NUMA nodes if numa_policy is other than DEFAULT
     if (os_provider->numa_policy != HWLOC_MEMBIND_DEFAULT) {
-        membind_t membind = membindFirst(os_provider, addr, size, page_size);
+        size_t first_size = ALIGN_UP_SAFE(size, page_size);
+        if (first_size == 0) {
+            LOG_ERR("size is too big, page align failed");
+            (void)utils_munmap(addr, size);
+            return UMF_RESULT_ERROR_INVALID_ARGUMENT;
+        }
+
+        membind_t membind =
+            membindFirst(os_provider, addr, first_size, page_size);
         if (membind.bitmap == NULL) {
             goto err_unmap;
         }
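Note: os_alloc now treats 0 from ALIGN_UP_SAFE as "rounding up would overflow" and unmaps before returning. A self-contained sketch of an align-up helper with that contract is below; it is an assumed stand-in for illustration, not UMF's actual ALIGN_UP_SAFE implementation, and it assumes align is a power of two.

#include <stdint.h>
#include <stdio.h>

/* Returns value rounded up to a multiple of align, or 0 if that would
 * overflow size_t (align must be a power of two). */
static size_t align_up_safe(size_t value, size_t align) {
    if (value > SIZE_MAX - (align - 1)) {
        return 0; /* rounding up would wrap around */
    }
    return (value + align - 1) & ~(align - 1);
}

int main(void) {
    printf("%zu\n", align_up_safe(10000, 4096));        /* 12288 */
    printf("%zu\n", align_up_safe(SIZE_MAX - 1, 4096)); /* 0: caller unmaps and fails */
    return 0;
}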