Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
43 changes: 43 additions & 0 deletions kernel/bpf/hashtab.c
Original file line number Diff line number Diff line change
Expand Up @@ -1207,6 +1207,27 @@ static long htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value
b = __select_bucket(htab, hash);
head = &b->head;

ret = htab_lock_bucket(b, &flags);
if (ret)
goto err_lock_bucket;

l_old = lookup_elem_raw(head, hash, key, key_size);

ret = check_flags(htab, l_old, map_flags);
if (ret)
goto err;

if (l_old) {
bpf_lru_node_set_ref(&l_old->lru_node);
copy_map_value(&htab->map, htab_elem_value(l_old, map->key_size), value);
check_and_free_fields(htab, l_old);
}

htab_unlock_bucket(b, flags);

if (l_old)
return 0;

/* For LRU, we need to alloc before taking bucket's
* spinlock because getting free nodes from LRU may need
* to remove older elements from htab and this removal
Expand Down Expand Up @@ -1337,6 +1358,28 @@ static long __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
b = __select_bucket(htab, hash);
head = &b->head;

ret = htab_lock_bucket(b, &flags);
if (ret)
goto err_lock_bucket;

l_old = lookup_elem_raw(head, hash, key, key_size);

ret = check_flags(htab, l_old, map_flags);
if (ret)
goto err;

if (l_old) {
bpf_lru_node_set_ref(&l_old->lru_node);
/* per-cpu hash map can update value in-place */
pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
value, onallcpus);
}

htab_unlock_bucket(b, flags);

if (l_old)
return 0;

/* For LRU, we need to alloc before taking bucket's
* spinlock because LRU's elem alloc may need
* to remove older elem from htab and this removal
Expand Down
73 changes: 73 additions & 0 deletions tools/testing/selftests/bpf/prog_tests/htab_update.c
Original file line number Diff line number Diff line change
Expand Up @@ -143,3 +143,76 @@ void test_htab_update(void)
if (test__start_subtest("concurrent_update"))
test_concurrent_update();
}

/* Shared checker for LRU hash map flavors: fill the map to capacity,
 * overwrite the value of an existing key, and verify the update happened
 * in-place — the new value is visible and no other entry was evicted.
 */
static void test_lru_hash_map_update_elem(enum bpf_map_type map_type)
{
	const u64 pattern = 0xDEADC0DE;
	int max_entries = 128;
	int nr_cpus, map_fd, err, i, key;
	u64 *buf;

	nr_cpus = libbpf_num_possible_cpus();
	if (!ASSERT_GT(nr_cpus, 0, "libbpf_num_possible_cpus"))
		return;

	/* One u64 slot per possible CPU so the same buffer serves both the
	 * plain and the per-cpu map flavors.
	 */
	buf = calloc(nr_cpus, sizeof(u64));
	if (!ASSERT_OK_PTR(buf, "calloc values"))
		return;
	for (i = 0; i < nr_cpus; i++)
		buf[i] = pattern;

	map_fd = bpf_map_create(map_type, "test_lru", sizeof(int), sizeof(u64), max_entries, NULL);
	if (!ASSERT_GE(map_fd, 0, "bpf_map_create")) {
		free(buf);
		return;
	}

	/* Fill every slot of the map with the known pattern. */
	for (key = 0; key < max_entries; key++) {
		err = bpf_map_update_elem(map_fd, &key, buf, 0);
		if (!ASSERT_OK(err, "bpf_map_update_elem"))
			goto out;
	}

	/* Updating an already-present key must not trigger LRU eviction. */

	key = 0;
	memset(buf, 0, nr_cpus * sizeof(u64));
	err = bpf_map_update_elem(map_fd, &key, buf, 0);
	if (!ASSERT_OK(err, "bpf_map_update_elem"))
		goto out;

	/* The overwritten key must now read back the new (zero) value. */
	err = bpf_map_lookup_elem(map_fd, &key, buf);
	if (!ASSERT_OK(err, "bpf_map_lookup_elem"))
		goto out;
	if (!ASSERT_EQ(*buf, 0, "bpf_map_lookup_elem value"))
		goto out;

	/* Every other key must still hold its original value. */
	for (key = 1; key < max_entries; key++) {
		err = bpf_map_lookup_elem(map_fd, &key, buf);
		if (!ASSERT_OK(err, "bpf_map_lookup_elem"))
			goto out;
		if (!ASSERT_EQ(*buf, pattern, "bpf_map_lookup_elem value"))
			goto out;
	}

out:
	close(map_fd);
	free(buf);
}

/* Exercise in-place update behavior for the plain LRU hash map. */
static void test_update_lru_hash_map(void)
{
	test_lru_hash_map_update_elem(BPF_MAP_TYPE_LRU_HASH);
}

/* Exercise in-place update behavior for the per-cpu LRU hash map. */
static void test_update_lru_percpu_hash_map(void)
{
	test_lru_hash_map_update_elem(BPF_MAP_TYPE_LRU_PERCPU_HASH);
}

/* Selftest entry point: run each LRU-hash update scenario as its own
 * subtest so they can be selected and reported individually.
 */
void test_update_lru_hash_maps(void)
{
	if (test__start_subtest("lru_hash"))
		test_update_lru_hash_map();
	if (test__start_subtest("lru_percpu_hash"))
		test_update_lru_percpu_hash_map();
}
Loading