
Commit b6df23e

remckee authored and rppt committed
memblock tests: add bottom-up NUMA tests for memblock_alloc_exact_nid_raw
Add tests for memblock_alloc_exact_nid_raw() where the simulated physical
memory is set up with multiple NUMA nodes. Additionally, all of these tests
set nid != NUMA_NO_NODE. These tests are run with a bottom-up allocation
direction.

The tested scenarios are:

Range unrestricted:
- region can be allocated in the specific node requested:
      + there are no previously reserved regions
      + the requested node is partially reserved but has enough space

Range restricted:
- region can be allocated in the specific node requested after dropping
  min_addr:
      + range partially overlaps with two different nodes, where the first
        node is the requested node
      + range partially overlaps with two different nodes, where the
        requested node ends before min_addr
      + range overlaps with multiple nodes along node boundaries, and the
        requested node ends before min_addr

Acked-by: David Hildenbrand <[email protected]>
Signed-off-by: Rebecca Mckeever <[email protected]>
Signed-off-by: Mike Rapoport <[email protected]>
Link: https://lore.kernel.org/r/935f0eed5e06fd44dc67d9f49b277923d7896bd3.1667802195.git.remckee0@gmail.com
1 parent bfc05a4 commit b6df23e
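
Editor's note: every new bottom-up test in this patch follows the same core call pattern, differing only in how size, min_addr, max_addr, and nid_req are derived for its scenario. The minimal sketch below is distilled from the diff that follows, not a standalone test; the per-scenario setup and the remaining ASSERT_* checks are omitted.

	/* Sketch only: size, min_addr, max_addr and nid_req vary per test */
	memblock_set_bottom_up(true);
	allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
						     min_addr, max_addr,
						     nid_req);
	/* the allocation must succeed and start at the requested node's base */
	ASSERT_NE(allocated_ptr, NULL);
	ASSERT_EQ(new_rgn->base, req_node->base);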

File tree

1 file changed (+282, -0 lines)

tools/testing/memblock/tests/alloc_exact_nid_api.c

Lines changed: 282 additions & 0 deletions
@@ -288,12 +288,286 @@ static int alloc_exact_nid_top_down_numa_no_overlap_low_check(void)
 	return 0;
 }
 
+/*
+ * A test that tries to allocate a memory region in a specific NUMA node that
+ * has enough memory to allocate a region of the requested size.
+ * Expect to allocate an aligned region at the beginning of the requested node.
+ */
+static int alloc_exact_nid_bottom_up_numa_simple_check(void)
+{
+	int nid_req = 3;
+	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
+	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
+	void *allocated_ptr = NULL;
+	phys_addr_t size;
+	phys_addr_t min_addr;
+	phys_addr_t max_addr;
+
+	PREFIX_PUSH();
+	setup_numa_memblock(node_fractions);
+
+	ASSERT_LE(SZ_4, req_node->size);
+	size = req_node->size / SZ_4;
+	min_addr = memblock_start_of_DRAM();
+	max_addr = memblock_end_of_DRAM();
+
+	allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
+						     min_addr, max_addr,
+						     nid_req);
+
+	ASSERT_NE(allocated_ptr, NULL);
+	ASSERT_MEM_NE(allocated_ptr, 0, size);
+
+	ASSERT_EQ(new_rgn->size, size);
+	ASSERT_EQ(new_rgn->base, req_node->base);
+	ASSERT_LE(region_end(new_rgn), region_end(req_node));
+
+	ASSERT_EQ(memblock.reserved.cnt, 1);
+	ASSERT_EQ(memblock.reserved.total_size, size);
+
+	test_pass_pop();
+
+	return 0;
+}
+
+/*
+ * A test that tries to allocate a memory region in a specific NUMA node that
+ * is partially reserved but has enough memory for the allocated region:
+ *
+ *  |           +---------------------------------------+         |
+ *  |           |               requested               |         |
+ *  +-----------+---------------------------------------+---------+
+ *
+ *  |           +------------------+-----+                        |
+ *  |           |     reserved     | new |                        |
+ *  +-----------+------------------+-----+------------------------+
+ *
+ * Expect to allocate an aligned region in the requested node that merges with
+ * the existing reserved region. The total size gets updated.
+ */
+static int alloc_exact_nid_bottom_up_numa_part_reserved_check(void)
+{
+	int nid_req = 4;
+	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
+	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
+	void *allocated_ptr = NULL;
+	struct region r1;
+	phys_addr_t size;
+	phys_addr_t min_addr;
+	phys_addr_t max_addr;
+	phys_addr_t total_size;
+
+	PREFIX_PUSH();
+	setup_numa_memblock(node_fractions);
+
+	ASSERT_LE(SZ_8, req_node->size);
+	r1.base = req_node->base;
+	r1.size = req_node->size / SZ_2;
+	size = r1.size / SZ_4;
+	min_addr = memblock_start_of_DRAM();
+	max_addr = memblock_end_of_DRAM();
+	total_size = size + r1.size;
+
+	memblock_reserve(r1.base, r1.size);
+	allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
+						     min_addr, max_addr,
+						     nid_req);
+
+	ASSERT_NE(allocated_ptr, NULL);
+	ASSERT_MEM_NE(allocated_ptr, 0, size);
+
+	ASSERT_EQ(new_rgn->size, total_size);
+	ASSERT_EQ(new_rgn->base, req_node->base);
+	ASSERT_LE(region_end(new_rgn), region_end(req_node));
+
+	ASSERT_EQ(memblock.reserved.cnt, 1);
+	ASSERT_EQ(memblock.reserved.total_size, total_size);
+
+	test_pass_pop();
+
+	return 0;
+}
+
+/*
+ * A test that tries to allocate a memory region that spans over the min_addr
+ * and max_addr range and overlaps with two different nodes, where the first
+ * node is the requested node:
+ *
+ *                                min_addr
+ *                                |         max_addr
+ *                                |         |
+ *                                v         v
+ *  |           +-----------------------+-----------+              |
+ *  |           |       requested       |   node3   |              |
+ *  +-----------+-----------------------+-----------+--------------+
+ *                                +         +
+ *  |           +-----------+                                      |
+ *  |           |    rgn    |                                      |
+ *  +-----------+-----------+--------------------------------------+
+ *
+ * Expect to drop the lower limit and allocate a memory region at the beginning
+ * of the requested node.
+ */
+static int alloc_exact_nid_bottom_up_numa_split_range_low_check(void)
+{
+	int nid_req = 2;
+	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
+	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
+	void *allocated_ptr = NULL;
+	phys_addr_t size = SZ_512;
+	phys_addr_t min_addr;
+	phys_addr_t max_addr;
+	phys_addr_t req_node_end;
+
+	PREFIX_PUSH();
+	setup_numa_memblock(node_fractions);
+
+	req_node_end = region_end(req_node);
+	min_addr = req_node_end - SZ_256;
+	max_addr = min_addr + size;
+
+	allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
+						     min_addr, max_addr,
+						     nid_req);
+
+	ASSERT_NE(allocated_ptr, NULL);
+	ASSERT_MEM_NE(allocated_ptr, 0, size);
+
+	ASSERT_EQ(new_rgn->size, size);
+	ASSERT_EQ(new_rgn->base, req_node->base);
+	ASSERT_LE(region_end(new_rgn), req_node_end);
+
+	ASSERT_EQ(memblock.reserved.cnt, 1);
+	ASSERT_EQ(memblock.reserved.total_size, size);
+
+	test_pass_pop();
+
+	return 0;
+}
+
+/*
+ * A test that tries to allocate a memory region that spans over the min_addr
+ * and max_addr range and overlaps with two different nodes, where the
+ * requested node ends before min_addr:
+ *
+ *                                          min_addr
+ *                                          |         max_addr
+ *                                          |         |
+ *                                          v         v
+ *  |    +---------------+        +-------------+---------+         |
+ *  |    |   requested   |        |    node1    |  node2  |         |
+ *  +----+---------------+--------+-------------+---------+---------+
+ *                                          +         +
+ *  |    +---------+                                                 |
+ *  |    |   rgn   |                                                 |
+ *  +----+---------+------------------------------------------------+
+ *
+ * Expect to drop the lower limit and allocate a memory region that starts at
+ * the beginning of the requested node.
+ */
+static int alloc_exact_nid_bottom_up_numa_no_overlap_split_check(void)
+{
+	int nid_req = 2;
+	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
+	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
+	struct memblock_region *node2 = &memblock.memory.regions[6];
+	void *allocated_ptr = NULL;
+	phys_addr_t size;
+	phys_addr_t min_addr;
+	phys_addr_t max_addr;
+
+	PREFIX_PUSH();
+	setup_numa_memblock(node_fractions);
+
+	size = SZ_512;
+	min_addr = node2->base - SZ_256;
+	max_addr = min_addr + size;
+
+	allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
+						     min_addr, max_addr,
+						     nid_req);
+
+	ASSERT_NE(allocated_ptr, NULL);
+	ASSERT_MEM_NE(allocated_ptr, 0, size);
+
+	ASSERT_EQ(new_rgn->size, size);
+	ASSERT_EQ(new_rgn->base, req_node->base);
+	ASSERT_LE(region_end(new_rgn), region_end(req_node));
+
+	ASSERT_EQ(memblock.reserved.cnt, 1);
+	ASSERT_EQ(memblock.reserved.total_size, size);
+
+	test_pass_pop();
+
+	return 0;
+}
+
+/*
+ * A test that tries to allocate memory within min_addr and max_addr range when
+ * the requested node and the range do not overlap, and requested node ends
+ * before min_addr. The range overlaps with multiple nodes along node
+ * boundaries:
+ *
+ *                         min_addr
+ *                         |                                 max_addr
+ *                         |                                 |
+ *                         v                                 v
+ *  |-----------+          +----------+----...----+----------+      |
+ *  | requested |          | min node |    ...    | max node |      |
+ *  +-----------+----------+----------+----...----+----------+------+
+ *                         +                                 +
+ *  |-----+                                                         |
+ *  | rgn |                                                         |
+ *  +-----+---------------------------------------------------------+
+ *
+ * Expect to drop the lower limit and allocate a memory region that starts at
+ * the beginning of the requested node.
+ */
+static int alloc_exact_nid_bottom_up_numa_no_overlap_low_check(void)
+{
+	int nid_req = 0;
+	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
+	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
+	struct memblock_region *min_node = &memblock.memory.regions[2];
+	struct memblock_region *max_node = &memblock.memory.regions[5];
+	void *allocated_ptr = NULL;
+	phys_addr_t size = SZ_64;
+	phys_addr_t max_addr;
+	phys_addr_t min_addr;
+
+	PREFIX_PUSH();
+	setup_numa_memblock(node_fractions);
+
+	min_addr = min_node->base;
+	max_addr = region_end(max_node);
+
+	allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
+						     min_addr, max_addr,
+						     nid_req);
+
+	ASSERT_NE(allocated_ptr, NULL);
+	ASSERT_MEM_NE(allocated_ptr, 0, size);
+
+	ASSERT_EQ(new_rgn->size, size);
+	ASSERT_EQ(new_rgn->base, req_node->base);
+	ASSERT_LE(region_end(new_rgn), region_end(req_node));
+
+	ASSERT_EQ(memblock.reserved.cnt, 1);
+	ASSERT_EQ(memblock.reserved.total_size, size);
+
+	test_pass_pop();
+
+	return 0;
+}
+
 /* Test case wrappers for NUMA tests */
 static int alloc_exact_nid_numa_simple_check(void)
 {
 	test_print("\tRunning %s...\n", __func__);
 	memblock_set_bottom_up(false);
 	alloc_exact_nid_top_down_numa_simple_check();
+	memblock_set_bottom_up(true);
+	alloc_exact_nid_bottom_up_numa_simple_check();
 
 	return 0;
 }
@@ -303,6 +577,8 @@ static int alloc_exact_nid_numa_part_reserved_check(void)
 	test_print("\tRunning %s...\n", __func__);
 	memblock_set_bottom_up(false);
 	alloc_exact_nid_top_down_numa_part_reserved_check();
+	memblock_set_bottom_up(true);
+	alloc_exact_nid_bottom_up_numa_part_reserved_check();
 
 	return 0;
 }
@@ -312,6 +588,8 @@ static int alloc_exact_nid_numa_split_range_low_check(void)
 	test_print("\tRunning %s...\n", __func__);
 	memblock_set_bottom_up(false);
 	alloc_exact_nid_top_down_numa_split_range_low_check();
+	memblock_set_bottom_up(true);
+	alloc_exact_nid_bottom_up_numa_split_range_low_check();
 
 	return 0;
 }
@@ -321,6 +599,8 @@ static int alloc_exact_nid_numa_no_overlap_split_check(void)
 	test_print("\tRunning %s...\n", __func__);
 	memblock_set_bottom_up(false);
 	alloc_exact_nid_top_down_numa_no_overlap_split_check();
+	memblock_set_bottom_up(true);
+	alloc_exact_nid_bottom_up_numa_no_overlap_split_check();
 
 	return 0;
 }
@@ -330,6 +610,8 @@ static int alloc_exact_nid_numa_no_overlap_low_check(void)
 	test_print("\tRunning %s...\n", __func__);
 	memblock_set_bottom_up(false);
 	alloc_exact_nid_top_down_numa_no_overlap_low_check();
+	memblock_set_bottom_up(true);
+	alloc_exact_nid_bottom_up_numa_no_overlap_low_check();
 
 	return 0;
 }
