
#define FUNC_NAME "memblock_alloc_exact_nid_raw"

+/*
+ * contains the fraction of MEM_SIZE contained in each node in basis point
+ * units (one hundredth of 1% or 1/10000)
+ */
+static const unsigned int node_fractions[] = {
+        2500, /* 1/4  */
+         625, /* 1/16 */
+        1250, /* 1/8  */
+        1250, /* 1/8  */
+         625, /* 1/16 */
+         625, /* 1/16 */
+        2500, /* 1/4  */
+         625, /* 1/16 */
+};
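+/* the fractions above sum to 10000, i.e. the eight nodes cover all of MEM_SIZE */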
+
+/*
+ * A test that tries to allocate a memory region in a specific NUMA node that
+ * has enough memory to allocate a region of the requested size.
+ * Expect to allocate an aligned region at the end of the requested node.
+ */
+static int alloc_exact_nid_top_down_numa_simple_check(void)
+{
+        int nid_req = 3;
+        struct memblock_region *new_rgn = &memblock.reserved.regions[0];
+        struct memblock_region *req_node = &memblock.memory.regions[nid_req];
+        void *allocated_ptr = NULL;
+        phys_addr_t size;
+        phys_addr_t min_addr;
+        phys_addr_t max_addr;
+
+        PREFIX_PUSH();
+        setup_numa_memblock(node_fractions);
+
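+        /* request a quarter of the node's memory; the allowed range spans all of DRAM */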
+        ASSERT_LE(SZ_4, req_node->size);
+        size = req_node->size / SZ_4;
+        min_addr = memblock_start_of_DRAM();
+        max_addr = memblock_end_of_DRAM();
+
+        allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
+                                                      min_addr, max_addr,
+                                                      nid_req);
+
+        ASSERT_NE(allocated_ptr, NULL);
+        ASSERT_MEM_NE(allocated_ptr, 0, size);
+
+        ASSERT_EQ(new_rgn->size, size);
+        ASSERT_EQ(new_rgn->base, region_end(req_node) - size);
+        ASSERT_LE(req_node->base, new_rgn->base);
+
+        ASSERT_EQ(memblock.reserved.cnt, 1);
+        ASSERT_EQ(memblock.reserved.total_size, size);
+
+        test_pass_pop();
+
+        return 0;
+}
+
+/*
+ * A test that tries to allocate a memory region in a specific NUMA node that
+ * is partially reserved but has enough memory for the allocated region:
+ *
+ *  |           +---------------------------------------+          |
+ *  |           |               requested               |          |
+ *  +-----------+---------------------------------------+----------+
+ *
+ *  |           +------------------+              +-----+          |
+ *  |           |     reserved     |              | new |          |
+ *  +-----------+------------------+--------------+-----+----------+
+ *
+ * Expect to allocate an aligned region at the end of the requested node. The
+ * region count and total size get updated.
+ */
+static int alloc_exact_nid_top_down_numa_part_reserved_check(void)
+{
+        int nid_req = 4;
+        struct memblock_region *new_rgn = &memblock.reserved.regions[1];
+        struct memblock_region *req_node = &memblock.memory.regions[nid_req];
+        void *allocated_ptr = NULL;
+        struct region r1;
+        phys_addr_t size;
+        phys_addr_t min_addr;
+        phys_addr_t max_addr;
+
+        PREFIX_PUSH();
+        setup_numa_memblock(node_fractions);
+
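+        /* reserve the first half of the node, then request a quarter of that half */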
+        ASSERT_LE(SZ_8, req_node->size);
+        r1.base = req_node->base;
+        r1.size = req_node->size / SZ_2;
+        size = r1.size / SZ_4;
+        min_addr = memblock_start_of_DRAM();
+        max_addr = memblock_end_of_DRAM();
+
+        memblock_reserve(r1.base, r1.size);
+        allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
+                                                      min_addr, max_addr,
+                                                      nid_req);
+
+        ASSERT_NE(allocated_ptr, NULL);
+        ASSERT_MEM_NE(allocated_ptr, 0, size);
+
+        ASSERT_EQ(new_rgn->size, size);
+        ASSERT_EQ(new_rgn->base, region_end(req_node) - size);
+        ASSERT_LE(req_node->base, new_rgn->base);
+
+        ASSERT_EQ(memblock.reserved.cnt, 2);
+        ASSERT_EQ(memblock.reserved.total_size, size + r1.size);
+
+        test_pass_pop();
+
+        return 0;
+}
+
+/*
+ * A test that tries to allocate a memory region that spans over the min_addr
+ * and max_addr range and overlaps with two different nodes, where the first
+ * node is the requested node:
+ *
+ *                                min_addr
+ *                                |           max_addr
+ *                                |           |
+ *                                v           v
+ *  |           +-----------------------+-----------+              |
+ *  |           |       requested       |   node3   |              |
+ *  +-----------+-----------------------+-----------+--------------+
+ *                                +           +
+ *  |                       +-----------+                          |
+ *  |                       |    rgn    |                          |
+ *  +-----------------------+-----------+--------------------------+
+ *
+ * Expect to drop the lower limit and allocate a memory region that ends at
+ * the end of the requested node.
+ */
+static int alloc_exact_nid_top_down_numa_split_range_low_check(void)
+{
+        int nid_req = 2;
+        struct memblock_region *new_rgn = &memblock.reserved.regions[0];
+        struct memblock_region *req_node = &memblock.memory.regions[nid_req];
+        void *allocated_ptr = NULL;
+        phys_addr_t size = SZ_512;
+        phys_addr_t min_addr;
+        phys_addr_t max_addr;
+        phys_addr_t req_node_end;
+
+        PREFIX_PUSH();
+        setup_numa_memblock(node_fractions);
+
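+        /* the allowed range straddles the requested node's end by SZ_256 on each side */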
+        req_node_end = region_end(req_node);
+        min_addr = req_node_end - SZ_256;
+        max_addr = min_addr + size;
+
+        allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
+                                                      min_addr, max_addr,
+                                                      nid_req);
+
+        ASSERT_NE(allocated_ptr, NULL);
+        ASSERT_MEM_NE(allocated_ptr, 0, size);
+
+        ASSERT_EQ(new_rgn->size, size);
+        ASSERT_EQ(new_rgn->base, req_node_end - size);
+        ASSERT_LE(req_node->base, new_rgn->base);
+
+        ASSERT_EQ(memblock.reserved.cnt, 1);
+        ASSERT_EQ(memblock.reserved.total_size, size);
+
+        test_pass_pop();
+
+        return 0;
+}
+
+/*
+ * A test that tries to allocate a memory region that spans over the min_addr
+ * and max_addr range and overlaps with two different nodes, where the requested
+ * node ends before min_addr:
+ *
+ *                                         min_addr
+ *                                         |         max_addr
+ *                                         |         |
+ *                                         v         v
+ *  |    +---------------+        +-------------+---------+          |
+ *  |    |   requested   |        |    node1    |  node2  |          |
+ *  +----+---------------+--------+-------------+---------+----------+
+ *                                         +         +
+ *  |          +---------+                                           |
+ *  |          |   rgn   |                                           |
+ *  +----------+---------+-------------------------------------------+
+ *
+ * Expect to drop the lower limit and allocate a memory region that ends at
+ * the end of the requested node.
+ */
+static int alloc_exact_nid_top_down_numa_no_overlap_split_check(void)
+{
+        int nid_req = 2;
+        struct memblock_region *new_rgn = &memblock.reserved.regions[0];
+        struct memblock_region *req_node = &memblock.memory.regions[nid_req];
+        struct memblock_region *node2 = &memblock.memory.regions[6];
+        void *allocated_ptr = NULL;
+        phys_addr_t size;
+        phys_addr_t min_addr;
+        phys_addr_t max_addr;
+
+        PREFIX_PUSH();
+        setup_numa_memblock(node_fractions);
+
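+        /* the allowed range straddles the node1/node2 boundary, entirely outside the requested node */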
+        size = SZ_512;
+        min_addr = node2->base - SZ_256;
+        max_addr = min_addr + size;
+
+        allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
+                                                      min_addr, max_addr,
+                                                      nid_req);
+
+        ASSERT_NE(allocated_ptr, NULL);
+        ASSERT_MEM_NE(allocated_ptr, 0, size);
+
+        ASSERT_EQ(new_rgn->size, size);
+        ASSERT_EQ(new_rgn->base, region_end(req_node) - size);
+        ASSERT_LE(req_node->base, new_rgn->base);
+
+        ASSERT_EQ(memblock.reserved.cnt, 1);
+        ASSERT_EQ(memblock.reserved.total_size, size);
+
+        test_pass_pop();
+
+        return 0;
+}
+
+/*
+ * A test that tries to allocate memory within min_addr and max_addr range
+ * when the requested node and the range do not overlap, and the requested
+ * node ends before min_addr. The range overlaps with multiple nodes along
+ * node boundaries:
+ *
+ *                          min_addr
+ *                          |                                 max_addr
+ *                          |                                 |
+ *                          v                                 v
+ *  |-----------+           +----------+----...----+----------+      |
+ *  | requested |           | min node |    ...    | max node |      |
+ *  +-----------+-----------+----------+----...----+----------+------+
+ *                          +                                 +
+ *  |     +-----+                                                    |
+ *  |     | rgn |                                                    |
+ *  +-----+-----+----------------------------------------------------+
+ *
+ * Expect to drop the lower limit and allocate a memory region that ends at
+ * the end of the requested node.
+ */
+static int alloc_exact_nid_top_down_numa_no_overlap_low_check(void)
+{
+        int nid_req = 0;
+        struct memblock_region *new_rgn = &memblock.reserved.regions[0];
+        struct memblock_region *req_node = &memblock.memory.regions[nid_req];
+        struct memblock_region *min_node = &memblock.memory.regions[2];
+        struct memblock_region *max_node = &memblock.memory.regions[5];
+        void *allocated_ptr = NULL;
+        phys_addr_t size = SZ_64;
+        phys_addr_t max_addr;
+        phys_addr_t min_addr;
+
+        PREFIX_PUSH();
+        setup_numa_memblock(node_fractions);
+
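+        /* the allowed range spans "min node" through "max node", none of which is the requested node */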
+        min_addr = min_node->base;
+        max_addr = region_end(max_node);
+
+        allocated_ptr = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES,
+                                                      min_addr, max_addr,
+                                                      nid_req);
+
+        ASSERT_NE(allocated_ptr, NULL);
+        ASSERT_MEM_NE(allocated_ptr, 0, size);
+
+        ASSERT_EQ(new_rgn->size, size);
+        ASSERT_EQ(new_rgn->base, region_end(req_node) - size);
+
+        ASSERT_EQ(memblock.reserved.cnt, 1);
+        ASSERT_EQ(memblock.reserved.total_size, size);
+
+        test_pass_pop();
+
+        return 0;
+}
+
+/* Test case wrappers for NUMA tests */
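+/* Each wrapper forces top-down allocation before running its check. */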
+static int alloc_exact_nid_numa_simple_check(void)
+{
+        test_print("\tRunning %s...\n", __func__);
+        memblock_set_bottom_up(false);
+        alloc_exact_nid_top_down_numa_simple_check();
+
+        return 0;
+}
+
+static int alloc_exact_nid_numa_part_reserved_check(void)
+{
+        test_print("\tRunning %s...\n", __func__);
+        memblock_set_bottom_up(false);
+        alloc_exact_nid_top_down_numa_part_reserved_check();
+
+        return 0;
+}
+
+static int alloc_exact_nid_numa_split_range_low_check(void)
+{
+        test_print("\tRunning %s...\n", __func__);
+        memblock_set_bottom_up(false);
+        alloc_exact_nid_top_down_numa_split_range_low_check();
+
+        return 0;
+}
+
+static int alloc_exact_nid_numa_no_overlap_split_check(void)
+{
+        test_print("\tRunning %s...\n", __func__);
+        memblock_set_bottom_up(false);
+        alloc_exact_nid_top_down_numa_no_overlap_split_check();
+
+        return 0;
+}
+
+static int alloc_exact_nid_numa_no_overlap_low_check(void)
+{
+        test_print("\tRunning %s...\n", __func__);
+        memblock_set_bottom_up(false);
+        alloc_exact_nid_top_down_numa_no_overlap_low_check();
+
+        return 0;
+}
+
+int __memblock_alloc_exact_nid_numa_checks(void)
+{
+        test_print("Running %s NUMA tests...\n", FUNC_NAME);
+
+        alloc_exact_nid_numa_simple_check();
+        alloc_exact_nid_numa_part_reserved_check();
+        alloc_exact_nid_numa_split_range_low_check();
+        alloc_exact_nid_numa_no_overlap_split_check();
+        alloc_exact_nid_numa_no_overlap_low_check();
+
+        return 0;
+}
+
int memblock_alloc_exact_nid_checks(void)
{
        prefix_reset();
@@ -13,6 +356,7 @@ int memblock_alloc_exact_nid_checks(void)
        dummy_physical_memory_init();

        memblock_alloc_exact_nid_range_checks();
+       memblock_alloc_exact_nid_numa_checks();

        dummy_physical_memory_cleanup();
