Skip to content

Commit b2531d4

Browse files
anakryiko authored and borkmann committed
selftests/bpf: Convert some selftests to high-level BPF map APIs
Convert a bunch of selftests to using newly added high-level BPF map APIs. This change exposed that map_kptr selftests allocated too big buffer, which is fixed in this patch as well. Signed-off-by: Andrii Nakryiko <[email protected]> Signed-off-by: Daniel Borkmann <[email protected]> Link: https://lore.kernel.org/bpf/[email protected]
1 parent 737d064 commit b2531d4

File tree

8 files changed

+61
-47
lines changed

8 files changed

+61
-47
lines changed

tools/testing/selftests/bpf/prog_tests/core_autosize.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -167,7 +167,7 @@ void test_core_autosize(void)
167167
if (!ASSERT_OK_PTR(bss_map, "bss_map_find"))
168168
goto cleanup;
169169

170-
err = bpf_map_lookup_elem(bpf_map__fd(bss_map), &zero, (void *)&out);
170+
err = bpf_map__lookup_elem(bss_map, &zero, sizeof(zero), &out, sizeof(out), 0);
171171
if (!ASSERT_OK(err, "bss_lookup"))
172172
goto cleanup;
173173

tools/testing/selftests/bpf/prog_tests/core_retro.c

Lines changed: 9 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -6,31 +6,32 @@
66

77
void test_core_retro(void)
88
{
9-
int err, zero = 0, res, duration = 0, my_pid = getpid();
9+
int err, zero = 0, res, my_pid = getpid();
1010
struct test_core_retro *skel;
1111

1212
/* load program */
1313
skel = test_core_retro__open_and_load();
14-
if (CHECK(!skel, "skel_load", "skeleton open/load failed\n"))
14+
if (!ASSERT_OK_PTR(skel, "skel_load"))
1515
goto out_close;
1616

17-
err = bpf_map_update_elem(bpf_map__fd(skel->maps.exp_tgid_map), &zero, &my_pid, 0);
18-
if (CHECK(err, "map_update", "failed to set expected PID: %d\n", errno))
17+
err = bpf_map__update_elem(skel->maps.exp_tgid_map, &zero, sizeof(zero),
18+
&my_pid, sizeof(my_pid), 0);
19+
if (!ASSERT_OK(err, "map_update"))
1920
goto out_close;
2021

2122
/* attach probe */
2223
err = test_core_retro__attach(skel);
23-
if (CHECK(err, "attach_kprobe", "err %d\n", err))
24+
if (!ASSERT_OK(err, "attach_kprobe"))
2425
goto out_close;
2526

2627
/* trigger */
2728
usleep(1);
2829

29-
err = bpf_map_lookup_elem(bpf_map__fd(skel->maps.results), &zero, &res);
30-
if (CHECK(err, "map_lookup", "failed to lookup result: %d\n", errno))
30+
err = bpf_map__lookup_elem(skel->maps.results, &zero, sizeof(zero), &res, sizeof(res), 0);
31+
if (!ASSERT_OK(err, "map_lookup"))
3132
goto out_close;
3233

33-
CHECK(res != my_pid, "pid_check", "got %d != exp %d\n", res, my_pid);
34+
ASSERT_EQ(res, my_pid, "pid_check");
3435

3536
out_close:
3637
test_core_retro__destroy(skel);

tools/testing/selftests/bpf/prog_tests/for_each.c

Lines changed: 17 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -10,9 +10,10 @@ static unsigned int duration;
1010

1111
static void test_hash_map(void)
1212
{
13-
int i, err, hashmap_fd, max_entries, percpu_map_fd;
13+
int i, err, max_entries;
1414
struct for_each_hash_map_elem *skel;
1515
__u64 *percpu_valbuf = NULL;
16+
size_t percpu_val_sz;
1617
__u32 key, num_cpus;
1718
__u64 val;
1819
LIBBPF_OPTS(bpf_test_run_opts, topts,
@@ -25,26 +26,27 @@ static void test_hash_map(void)
2526
if (!ASSERT_OK_PTR(skel, "for_each_hash_map_elem__open_and_load"))
2627
return;
2728

28-
hashmap_fd = bpf_map__fd(skel->maps.hashmap);
2929
max_entries = bpf_map__max_entries(skel->maps.hashmap);
3030
for (i = 0; i < max_entries; i++) {
3131
key = i;
3232
val = i + 1;
33-
err = bpf_map_update_elem(hashmap_fd, &key, &val, BPF_ANY);
33+
err = bpf_map__update_elem(skel->maps.hashmap, &key, sizeof(key),
34+
&val, sizeof(val), BPF_ANY);
3435
if (!ASSERT_OK(err, "map_update"))
3536
goto out;
3637
}
3738

3839
num_cpus = bpf_num_possible_cpus();
39-
percpu_map_fd = bpf_map__fd(skel->maps.percpu_map);
40-
percpu_valbuf = malloc(sizeof(__u64) * num_cpus);
40+
percpu_val_sz = sizeof(__u64) * num_cpus;
41+
percpu_valbuf = malloc(percpu_val_sz);
4142
if (!ASSERT_OK_PTR(percpu_valbuf, "percpu_valbuf"))
4243
goto out;
4344

4445
key = 1;
4546
for (i = 0; i < num_cpus; i++)
4647
percpu_valbuf[i] = i + 1;
47-
err = bpf_map_update_elem(percpu_map_fd, &key, percpu_valbuf, BPF_ANY);
48+
err = bpf_map__update_elem(skel->maps.percpu_map, &key, sizeof(key),
49+
percpu_valbuf, percpu_val_sz, BPF_ANY);
4850
if (!ASSERT_OK(err, "percpu_map_update"))
4951
goto out;
5052

@@ -58,7 +60,7 @@ static void test_hash_map(void)
5860
ASSERT_EQ(skel->bss->hashmap_elems, max_entries, "hashmap_elems");
5961

6062
key = 1;
61-
err = bpf_map_lookup_elem(hashmap_fd, &key, &val);
63+
err = bpf_map__lookup_elem(skel->maps.hashmap, &key, sizeof(key), &val, sizeof(val), 0);
6264
ASSERT_ERR(err, "hashmap_lookup");
6365

6466
ASSERT_EQ(skel->bss->percpu_called, 1, "percpu_called");
@@ -75,9 +77,10 @@ static void test_hash_map(void)
7577
static void test_array_map(void)
7678
{
7779
__u32 key, num_cpus, max_entries;
78-
int i, arraymap_fd, percpu_map_fd, err;
80+
int i, err;
7981
struct for_each_array_map_elem *skel;
8082
__u64 *percpu_valbuf = NULL;
83+
size_t percpu_val_sz;
8184
__u64 val, expected_total;
8285
LIBBPF_OPTS(bpf_test_run_opts, topts,
8386
.data_in = &pkt_v4,
@@ -89,7 +92,6 @@ static void test_array_map(void)
8992
if (!ASSERT_OK_PTR(skel, "for_each_array_map_elem__open_and_load"))
9093
return;
9194

92-
arraymap_fd = bpf_map__fd(skel->maps.arraymap);
9395
expected_total = 0;
9496
max_entries = bpf_map__max_entries(skel->maps.arraymap);
9597
for (i = 0; i < max_entries; i++) {
@@ -98,21 +100,23 @@ static void test_array_map(void)
98100
/* skip the last iteration for expected total */
99101
if (i != max_entries - 1)
100102
expected_total += val;
101-
err = bpf_map_update_elem(arraymap_fd, &key, &val, BPF_ANY);
103+
err = bpf_map__update_elem(skel->maps.arraymap, &key, sizeof(key),
104+
&val, sizeof(val), BPF_ANY);
102105
if (!ASSERT_OK(err, "map_update"))
103106
goto out;
104107
}
105108

106109
num_cpus = bpf_num_possible_cpus();
107-
percpu_map_fd = bpf_map__fd(skel->maps.percpu_map);
108-
percpu_valbuf = malloc(sizeof(__u64) * num_cpus);
110+
percpu_val_sz = sizeof(__u64) * num_cpus;
111+
percpu_valbuf = malloc(percpu_val_sz);
109112
if (!ASSERT_OK_PTR(percpu_valbuf, "percpu_valbuf"))
110113
goto out;
111114

112115
key = 0;
113116
for (i = 0; i < num_cpus; i++)
114117
percpu_valbuf[i] = i + 1;
115-
err = bpf_map_update_elem(percpu_map_fd, &key, percpu_valbuf, BPF_ANY);
118+
err = bpf_map__update_elem(skel->maps.percpu_map, &key, sizeof(key),
119+
percpu_valbuf, percpu_val_sz, BPF_ANY);
116120
if (!ASSERT_OK(err, "percpu_map_update"))
117121
goto out;
118122

tools/testing/selftests/bpf/prog_tests/lookup_and_delete.c

Lines changed: 9 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -112,7 +112,8 @@ static void test_lookup_and_delete_hash(void)
112112

113113
/* Lookup and delete element. */
114114
key = 1;
115-
err = bpf_map_lookup_and_delete_elem(map_fd, &key, &value);
115+
err = bpf_map__lookup_and_delete_elem(skel->maps.hash_map,
116+
&key, sizeof(key), &value, sizeof(value), 0);
116117
if (!ASSERT_OK(err, "bpf_map_lookup_and_delete_elem"))
117118
goto cleanup;
118119

@@ -147,7 +148,8 @@ static void test_lookup_and_delete_percpu_hash(void)
147148

148149
/* Lookup and delete element. */
149150
key = 1;
150-
err = bpf_map_lookup_and_delete_elem(map_fd, &key, value);
151+
err = bpf_map__lookup_and_delete_elem(skel->maps.hash_map,
152+
&key, sizeof(key), value, sizeof(value), 0);
151153
if (!ASSERT_OK(err, "bpf_map_lookup_and_delete_elem"))
152154
goto cleanup;
153155

@@ -191,7 +193,8 @@ static void test_lookup_and_delete_lru_hash(void)
191193
goto cleanup;
192194

193195
/* Lookup and delete element 3. */
194-
err = bpf_map_lookup_and_delete_elem(map_fd, &key, &value);
196+
err = bpf_map__lookup_and_delete_elem(skel->maps.hash_map,
197+
&key, sizeof(key), &value, sizeof(value), 0);
195198
if (!ASSERT_OK(err, "bpf_map_lookup_and_delete_elem"))
196199
goto cleanup;
197200

@@ -240,10 +243,10 @@ static void test_lookup_and_delete_lru_percpu_hash(void)
240243
value[i] = 0;
241244

242245
/* Lookup and delete element 3. */
243-
err = bpf_map_lookup_and_delete_elem(map_fd, &key, value);
244-
if (!ASSERT_OK(err, "bpf_map_lookup_and_delete_elem")) {
246+
err = bpf_map__lookup_and_delete_elem(skel->maps.hash_map,
247+
&key, sizeof(key), value, sizeof(value), 0);
248+
if (!ASSERT_OK(err, "bpf_map_lookup_and_delete_elem"))
245249
goto cleanup;
246-
}
247250

248251
/* Check if only one CPU has set the value. */
249252
for (i = 0; i < nr_cpus; i++) {

tools/testing/selftests/bpf/prog_tests/map_kptr.c

Lines changed: 14 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -91,7 +91,7 @@ static void test_map_kptr_success(bool test_run)
9191
);
9292
struct map_kptr *skel;
9393
int key = 0, ret;
94-
char buf[24];
94+
char buf[16];
9595

9696
skel = map_kptr__open_and_load();
9797
if (!ASSERT_OK_PTR(skel, "map_kptr__open_and_load"))
@@ -107,24 +107,29 @@ static void test_map_kptr_success(bool test_run)
107107
if (test_run)
108108
return;
109109

110-
ret = bpf_map_update_elem(bpf_map__fd(skel->maps.array_map), &key, buf, 0);
110+
ret = bpf_map__update_elem(skel->maps.array_map,
111+
&key, sizeof(key), buf, sizeof(buf), 0);
111112
ASSERT_OK(ret, "array_map update");
112-
ret = bpf_map_update_elem(bpf_map__fd(skel->maps.array_map), &key, buf, 0);
113+
ret = bpf_map__update_elem(skel->maps.array_map,
114+
&key, sizeof(key), buf, sizeof(buf), 0);
113115
ASSERT_OK(ret, "array_map update2");
114116

115-
ret = bpf_map_update_elem(bpf_map__fd(skel->maps.hash_map), &key, buf, 0);
117+
ret = bpf_map__update_elem(skel->maps.hash_map,
118+
&key, sizeof(key), buf, sizeof(buf), 0);
116119
ASSERT_OK(ret, "hash_map update");
117-
ret = bpf_map_delete_elem(bpf_map__fd(skel->maps.hash_map), &key);
120+
ret = bpf_map__delete_elem(skel->maps.hash_map, &key, sizeof(key), 0);
118121
ASSERT_OK(ret, "hash_map delete");
119122

120-
ret = bpf_map_update_elem(bpf_map__fd(skel->maps.hash_malloc_map), &key, buf, 0);
123+
ret = bpf_map__update_elem(skel->maps.hash_malloc_map,
124+
&key, sizeof(key), buf, sizeof(buf), 0);
121125
ASSERT_OK(ret, "hash_malloc_map update");
122-
ret = bpf_map_delete_elem(bpf_map__fd(skel->maps.hash_malloc_map), &key);
126+
ret = bpf_map__delete_elem(skel->maps.hash_malloc_map, &key, sizeof(key), 0);
123127
ASSERT_OK(ret, "hash_malloc_map delete");
124128

125-
ret = bpf_map_update_elem(bpf_map__fd(skel->maps.lru_hash_map), &key, buf, 0);
129+
ret = bpf_map__update_elem(skel->maps.lru_hash_map,
130+
&key, sizeof(key), buf, sizeof(buf), 0);
126131
ASSERT_OK(ret, "lru_hash_map update");
127-
ret = bpf_map_delete_elem(bpf_map__fd(skel->maps.lru_hash_map), &key);
132+
ret = bpf_map__delete_elem(skel->maps.lru_hash_map, &key, sizeof(key), 0);
128133
ASSERT_OK(ret, "lru_hash_map delete");
129134

130135
map_kptr__destroy(skel);

tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@ void test_stacktrace_build_id(void)
88
int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
99
struct test_stacktrace_build_id *skel;
1010
int err, stack_trace_len;
11-
__u32 key, previous_key, val, duration = 0;
11+
__u32 key, prev_key, val, duration = 0;
1212
char buf[256];
1313
int i, j;
1414
struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
@@ -58,7 +58,7 @@ void test_stacktrace_build_id(void)
5858
"err %d errno %d\n", err, errno))
5959
goto cleanup;
6060

61-
err = bpf_map_get_next_key(stackmap_fd, NULL, &key);
61+
err = bpf_map__get_next_key(skel->maps.stackmap, NULL, &key, sizeof(key));
6262
if (CHECK(err, "get_next_key from stackmap",
6363
"err %d, errno %d\n", err, errno))
6464
goto cleanup;
@@ -79,8 +79,8 @@ void test_stacktrace_build_id(void)
7979
if (strstr(buf, build_id) != NULL)
8080
build_id_matches = 1;
8181
}
82-
previous_key = key;
83-
} while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
82+
prev_key = key;
83+
} while (bpf_map__get_next_key(skel->maps.stackmap, &prev_key, &key, sizeof(key)) == 0);
8484

8585
/* stack_map_get_build_id_offset() is racy and sometimes can return
8686
* BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;

tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c

Lines changed: 6 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,7 @@ void test_stacktrace_build_id_nmi(void)
2727
.type = PERF_TYPE_HARDWARE,
2828
.config = PERF_COUNT_HW_CPU_CYCLES,
2929
};
30-
__u32 key, previous_key, val, duration = 0;
30+
__u32 key, prev_key, val, duration = 0;
3131
char buf[256];
3232
int i, j;
3333
struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
@@ -100,15 +100,16 @@ void test_stacktrace_build_id_nmi(void)
100100
"err %d errno %d\n", err, errno))
101101
goto cleanup;
102102

103-
err = bpf_map_get_next_key(stackmap_fd, NULL, &key);
103+
err = bpf_map__get_next_key(skel->maps.stackmap, NULL, &key, sizeof(key));
104104
if (CHECK(err, "get_next_key from stackmap",
105105
"err %d, errno %d\n", err, errno))
106106
goto cleanup;
107107

108108
do {
109109
char build_id[64];
110110

111-
err = bpf_map_lookup_elem(stackmap_fd, &key, id_offs);
111+
err = bpf_map__lookup_elem(skel->maps.stackmap, &key, sizeof(key),
112+
id_offs, sizeof(id_offs), 0);
112113
if (CHECK(err, "lookup_elem from stackmap",
113114
"err %d, errno %d\n", err, errno))
114115
goto cleanup;
@@ -121,8 +122,8 @@ void test_stacktrace_build_id_nmi(void)
121122
if (strstr(buf, build_id) != NULL)
122123
build_id_matches = 1;
123124
}
124-
previous_key = key;
125-
} while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
125+
prev_key = key;
126+
} while (bpf_map__get_next_key(skel->maps.stackmap, &prev_key, &key, sizeof(key)) == 0);
126127

127128
/* stack_map_get_build_id_offset() is racy and sometimes can return
128129
* BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;

tools/testing/selftests/bpf/prog_tests/timer_mim.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -35,7 +35,7 @@ static int timer_mim(struct timer_mim *timer_skel)
3535
ASSERT_EQ(timer_skel->bss->ok, 1 | 2, "ok");
3636

3737
close(bpf_map__fd(timer_skel->maps.inner_htab));
38-
err = bpf_map_delete_elem(bpf_map__fd(timer_skel->maps.outer_arr), &key1);
38+
err = bpf_map__delete_elem(timer_skel->maps.outer_arr, &key1, sizeof(key1), 0);
3939
ASSERT_EQ(err, 0, "delete inner map");
4040

4141
/* check that timer_cb[12] are no longer running */

0 commit comments

Comments (0)