Skip to content

Commit 5e9388f

Browse files
wdebruij authored and Alexei Starovoitov committed
selftests/bpf: adapt one more case in test_lru_map to the new target_free
The below commit that updated BPF_MAP_TYPE_LRU_HASH free target, also updated tools/testing/selftests/bpf/test_lru_map to match. But that missed one case that passes with 4 cores, but fails at higher cpu counts. Update test_lru_sanity3 to also adjust its expectation of target_free. This time tested with 1, 4, 16, 64 and 384 cpu count. Fixes: d4adf1c ("bpf: Adjust free target to avoid global starvation of LRU map") Signed-off-by: Willem de Bruijn <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Alexei Starovoitov <[email protected]>
1 parent fa6f092 commit 5e9388f

File tree

1 file changed

+18
-15
lines changed

1 file changed

+18
-15
lines changed

tools/testing/selftests/bpf/test_lru_map.c

Lines changed: 18 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -138,6 +138,12 @@ static int sched_next_online(int pid, int *next_to_try)
138138
return ret;
139139
}
140140

141+
/* Derive target_free from map_size, same as bpf_common_lru_populate */
142+
static unsigned int __tgt_size(unsigned int map_size)
143+
{
144+
return (map_size / nr_cpus) / 2;
145+
}
146+
141147
/* Inverse of how bpf_common_lru_populate derives target_free from map_size. */
142148
static unsigned int __map_size(unsigned int tgt_free)
143149
{
@@ -410,12 +416,12 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
410416
printf("Pass\n");
411417
}
412418

413-
/* Size of the LRU map is 2*tgt_free
414-
* It is to test the active/inactive list rotation
415-
* Insert 1 to 2*tgt_free (+2*tgt_free keys)
416-
* Lookup key 1 to tgt_free*3/2
417-
* Add 1+2*tgt_free to tgt_free*5/2 (+tgt_free/2 keys)
418-
* => key 1+tgt_free*3/2 to 2*tgt_free are removed from LRU
419+
/* Test the active/inactive list rotation
420+
*
421+
* Fill the whole map, deplete the free list.
422+
* Reference all except the last lru->target_free elements.
423+
* Insert lru->target_free new elements. This triggers one shrink.
424+
* Verify that the non-referenced elements are replaced.
419425
*/
420426
static void test_lru_sanity3(int map_type, int map_flags, unsigned int tgt_free)
421427
{
@@ -434,8 +440,7 @@ static void test_lru_sanity3(int map_type, int map_flags, unsigned int tgt_free)
434440

435441
assert(sched_next_online(0, &next_cpu) != -1);
436442

437-
batch_size = tgt_free / 2;
438-
assert(batch_size * 2 == tgt_free);
443+
batch_size = __tgt_size(tgt_free);
439444

440445
map_size = tgt_free * 2;
441446
lru_map_fd = create_map(map_type, map_flags, map_size);
@@ -446,23 +451,21 @@ static void test_lru_sanity3(int map_type, int map_flags, unsigned int tgt_free)
446451

447452
value[0] = 1234;
448453

449-
/* Insert 1 to 2*tgt_free (+2*tgt_free keys) */
450-
end_key = 1 + (2 * tgt_free);
454+
/* Fill the map */
455+
end_key = 1 + map_size;
451456
for (key = 1; key < end_key; key++)
452457
assert(!bpf_map_update_elem(lru_map_fd, &key, value,
453458
BPF_NOEXIST));
454459

455-
/* Lookup key 1 to tgt_free*3/2 */
456-
end_key = tgt_free + batch_size;
460+
/* Reference all but the last batch_size */
461+
end_key = 1 + map_size - batch_size;
457462
for (key = 1; key < end_key; key++) {
458463
assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value));
459464
assert(!bpf_map_update_elem(expected_map_fd, &key, value,
460465
BPF_NOEXIST));
461466
}
462467

463-
/* Add 1+2*tgt_free to tgt_free*5/2
464-
* (+tgt_free/2 keys)
465-
*/
468+
/* Insert new batch_size: replaces the non-referenced elements */
466469
key = 2 * tgt_free + 1;
467470
end_key = key + batch_size;
468471
for (; key < end_key; key++) {

0 commit comments

Comments
 (0)