Skip to content

Commit a496d0c

Browse files
namhyungAlexei Starovoitov
authored andcommitted
selftests/bpf: Add a test for kmem_cache_iter
The test traverses all slab caches using the kmem_cache_iter and saves the data into the slab_result array map, and checks whether the current task's pointer comes from the "task_struct" slab cache using bpf_get_kmem_cache(). It also compares the result array with /proc/slabinfo if available (when CONFIG_SLUB_DEBUG is on). Note that many of the fields in slabinfo are transient, so it only compares the name and objsize fields. Signed-off-by: Namhyung Kim <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Alexei Starovoitov <[email protected]>
1 parent a992d7a commit a496d0c

File tree

3 files changed

+209
-0
lines changed

3 files changed

+209
-0
lines changed
Lines changed: 115 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,115 @@
1+
// SPDX-License-Identifier: GPL-2.0
2+
/* Copyright (c) 2024 Google */
3+
4+
#include <test_progs.h>
5+
#include <bpf/libbpf.h>
6+
#include <bpf/btf.h>
7+
#include "kmem_cache_iter.skel.h"
8+
9+
#define SLAB_NAME_MAX 32
10+
11+
/* Userspace mirror of the BPF-side result entry: one slab cache's name
 * and object size, as stored in the slab_result array map.
 */
struct kmem_cache_result {
	char name[SLAB_NAME_MAX];
	long obj_size;
};
15+
16+
static void subtest_kmem_cache_iter_check_task_struct(struct kmem_cache_iter *skel)
17+
{
18+
LIBBPF_OPTS(bpf_test_run_opts, opts,
19+
.flags = 0, /* Run it with the current task */
20+
);
21+
int prog_fd = bpf_program__fd(skel->progs.check_task_struct);
22+
23+
/* Get task_struct and check it if's from a slab cache */
24+
ASSERT_OK(bpf_prog_test_run_opts(prog_fd, &opts), "prog_test_run");
25+
26+
/* The BPF program should set 'found' variable */
27+
ASSERT_EQ(skel->bss->task_struct_found, 1, "task_struct_found");
28+
}
29+
30+
static void subtest_kmem_cache_iter_check_slabinfo(struct kmem_cache_iter *skel)
31+
{
32+
FILE *fp;
33+
int map_fd;
34+
char name[SLAB_NAME_MAX];
35+
unsigned long objsize;
36+
char rest_of_line[1000];
37+
struct kmem_cache_result r;
38+
int seen = 0;
39+
40+
fp = fopen("/proc/slabinfo", "r");
41+
if (fp == NULL) {
42+
/* CONFIG_SLUB_DEBUG is not enabled */
43+
return;
44+
}
45+
46+
map_fd = bpf_map__fd(skel->maps.slab_result);
47+
48+
/* Ignore first two lines for header */
49+
fscanf(fp, "slabinfo - version: %*d.%*d\n");
50+
fscanf(fp, "# %*s %*s %*s %*s %*s %*s : %[^\n]\n", rest_of_line);
51+
52+
/* Compare name and objsize only - others can be changes frequently */
53+
while (fscanf(fp, "%s %*u %*u %lu %*u %*u : %[^\n]\n",
54+
name, &objsize, rest_of_line) == 3) {
55+
int ret = bpf_map_lookup_elem(map_fd, &seen, &r);
56+
57+
if (!ASSERT_OK(ret, "kmem_cache_lookup"))
58+
break;
59+
60+
ASSERT_STREQ(r.name, name, "kmem_cache_name");
61+
ASSERT_EQ(r.obj_size, objsize, "kmem_cache_objsize");
62+
63+
seen++;
64+
}
65+
66+
ASSERT_EQ(skel->bss->kmem_cache_seen, seen, "kmem_cache_seen_eq");
67+
68+
fclose(fp);
69+
}
70+
71+
void test_kmem_cache_iter(void)
72+
{
73+
DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
74+
struct kmem_cache_iter *skel = NULL;
75+
union bpf_iter_link_info linfo = {};
76+
struct bpf_link *link;
77+
char buf[256];
78+
int iter_fd;
79+
80+
skel = kmem_cache_iter__open_and_load();
81+
if (!ASSERT_OK_PTR(skel, "kmem_cache_iter__open_and_load"))
82+
return;
83+
84+
opts.link_info = &linfo;
85+
opts.link_info_len = sizeof(linfo);
86+
87+
link = bpf_program__attach_iter(skel->progs.slab_info_collector, &opts);
88+
if (!ASSERT_OK_PTR(link, "attach_iter"))
89+
goto destroy;
90+
91+
iter_fd = bpf_iter_create(bpf_link__fd(link));
92+
if (!ASSERT_GE(iter_fd, 0, "iter_create"))
93+
goto free_link;
94+
95+
memset(buf, 0, sizeof(buf));
96+
while (read(iter_fd, buf, sizeof(buf) > 0)) {
97+
/* Read out all contents */
98+
printf("%s", buf);
99+
}
100+
101+
/* Next reads should return 0 */
102+
ASSERT_EQ(read(iter_fd, buf, sizeof(buf)), 0, "read");
103+
104+
if (test__start_subtest("check_task_struct"))
105+
subtest_kmem_cache_iter_check_task_struct(skel);
106+
if (test__start_subtest("check_slabinfo"))
107+
subtest_kmem_cache_iter_check_slabinfo(skel);
108+
109+
close(iter_fd);
110+
111+
free_link:
112+
bpf_link__destroy(link);
113+
destroy:
114+
kmem_cache_iter__destroy(skel);
115+
}

tools/testing/selftests/bpf/progs/bpf_iter.h

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,7 @@
2424
#define BTF_F_PTR_RAW BTF_F_PTR_RAW___not_used
2525
#define BTF_F_ZERO BTF_F_ZERO___not_used
2626
#define bpf_iter__ksym bpf_iter__ksym___not_used
27+
#define bpf_iter__kmem_cache bpf_iter__kmem_cache___not_used
2728
#include "vmlinux.h"
2829
#undef bpf_iter_meta
2930
#undef bpf_iter__bpf_map
@@ -48,6 +49,7 @@
4849
#undef BTF_F_PTR_RAW
4950
#undef BTF_F_ZERO
5051
#undef bpf_iter__ksym
52+
#undef bpf_iter__kmem_cache
5153

5254
struct bpf_iter_meta {
5355
struct seq_file *seq;
@@ -165,3 +167,8 @@ struct bpf_iter__ksym {
165167
struct bpf_iter_meta *meta;
166168
struct kallsym_iter *ksym;
167169
};
170+
171+
/* Context passed to iter/kmem_cache programs; preserve_access_index
 * makes field accesses CO-RE relocatable against the running kernel.
 */
struct bpf_iter__kmem_cache {
	struct bpf_iter_meta *meta;
	struct kmem_cache *s;
} __attribute__((preserve_access_index));
Lines changed: 87 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,87 @@
1+
// SPDX-License-Identifier: GPL-2.0
2+
/* Copyright (c) 2024 Google */
3+
4+
#include "bpf_iter.h"
5+
#include <bpf/bpf_helpers.h>
6+
#include <bpf/bpf_tracing.h>
7+
8+
char _license[] SEC("license") = "GPL";
9+
10+
#define SLAB_NAME_MAX 32
11+
12+
/* One slab cache's name and object size; must stay in sync with the
 * struct of the same name on the userspace side.
 */
struct kmem_cache_result {
	char name[SLAB_NAME_MAX];
	long obj_size;
};
16+
17+
/* kmem_cache pointer -> cache name.  The iterator only inserts the
 * "task_struct" cache, hence max_entries == 1.
 */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(key_size, sizeof(void *));
	__uint(value_size, SLAB_NAME_MAX);
	__uint(max_entries, 1);
} slab_hash SEC(".maps");
23+
24+
/* Per-cache results in iteration order, indexed by kmem_cache_seen;
 * userspace compares these entries against /proc/slabinfo.
 */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(key_size, sizeof(int));
	__uint(value_size, sizeof(struct kmem_cache_result));
	__uint(max_entries, 1024);
} slab_result SEC(".maps");
30+
31+
/* Kernel kfunc: resolve an arbitrary address to its kmem_cache, or NULL
 * if the address is not a slab-allocated object.
 */
extern struct kmem_cache *bpf_get_kmem_cache(u64 addr) __ksym;

/* Result, will be checked by userspace */
int task_struct_found; /* 1 = found, negative values encode the failure mode */
int kmem_cache_seen; /* number of slab caches the iterator recorded */
36+
37+
/* Iterator program: called once per slab cache (and once with s == NULL
 * at the end of iteration).  Records each cache's name/size into
 * slab_result and remembers the "task_struct" cache in slab_hash.
 */
SEC("iter/kmem_cache")
int slab_info_collector(struct bpf_iter__kmem_cache *ctx)
{
	struct seq_file *seq = ctx->meta->seq;
	struct kmem_cache *s = ctx->s;
	struct kmem_cache_result *r;
	int idx;

	if (s) {
		/* To make sure the slab iterator implements the seq
		 * interface properly; it's also useful for debugging.
		 */
		BPF_SEQ_PRINTF(seq, "%s: %u\n", s->name, s->size);

		idx = kmem_cache_seen;
		r = bpf_map_lookup_elem(&slab_result, &idx);
		if (r == NULL)
			/* more caches than slab_result's max_entries */
			return 0;

		kmem_cache_seen++;

		/* Save name and size to match /proc/slabinfo */
		bpf_probe_read_kernel_str(r->name, sizeof(r->name), s->name);
		r->obj_size = s->size;

		/* Remember the task_struct cache pointer so check_task_struct
		 * can recognize it later; 11 == strlen("task_struct").
		 */
		if (!bpf_strncmp(r->name, 11, "task_struct"))
			bpf_map_update_elem(&slab_hash, &s, r->name, BPF_NOEXIST);
	}

	return 0;
}
68+
69+
/* Triggered via bpf_prog_test_run_opts() from userspace: checks that the
 * current task pointer resolves to the "task_struct" slab cache recorded
 * by the iterator.  Writes the outcome into task_struct_found.
 */
SEC("raw_tp/bpf_test_finish")
int BPF_PROG(check_task_struct)
{
	u64 curr = bpf_get_current_task();
	struct kmem_cache *s;
	char *name;

	/* Resolve the task pointer to its owning slab cache. */
	s = bpf_get_kmem_cache(curr);
	if (s == NULL) {
		/* -1: the task pointer is not a slab object at all */
		task_struct_found = -1;
		return 0;
	}
	/* slab_hash was populated by the iterator for "task_struct" only;
	 * 11 == strlen("task_struct").
	 */
	name = bpf_map_lookup_elem(&slab_hash, &s);
	if (name && !bpf_strncmp(name, 11, "task_struct"))
		task_struct_found = 1;
	else
		/* -2: a slab cache, but not the task_struct one */
		task_struct_found = -2;
	return 0;
}

0 commit comments

Comments
 (0)