@@ -834,12 +834,12 @@ static membind_t membindFirst(os_memory_provider_t *provider, void *addr,
     membind_t membind;
     memset(&membind, 0, sizeof(membind));

-    membind.alloc_size = ALIGN_UP(size, page_size);
+    membind.alloc_size = size;
     membind.page_size = page_size;
     membind.addr = addr;
     membind.pages = membind.alloc_size / membind.page_size;
     if (provider->nodeset_len == 1) {
-        membind.bind_size = ALIGN_UP(size, membind.page_size);
+        membind.bind_size = size;
         membind.bitmap = provider->nodeset[0];
         return membind;
     }
@@ -945,7 +945,15 @@ static umf_result_t os_alloc(void *provider, size_t size, size_t alignment,

     // Bind memory to NUMA nodes if numa_policy is other than DEFAULT
     if (os_provider->numa_policy != HWLOC_MEMBIND_DEFAULT) {
-        membind_t membind = membindFirst(os_provider, addr, size, page_size);
+        size_t first_size = ALIGN_UP_SAFE(size, page_size);
+        if (first_size == 0) {
+            LOG_ERR("size is too big, page align failed");
+            (void)utils_munmap(addr, size);
+            return UMF_RESULT_ERROR_INVALID_ARGUMENT;
+        }
+
+        membind_t membind =
+            membindFirst(os_provider, addr, first_size, page_size);
         if (membind.bitmap == NULL) {
             goto err_unmap;
         }
0 commit comments