
Commit 384452f

laoar authored and Kernel Patches Daemon committed
selftests/bpf: Add selftests for cgroup1 local storage
Expand the test coverage from cgroup2 to include cgroup1. The results are as follows:

Already existing test cases for cgroup2:
  #48/1  cgrp_local_storage/tp_btf:OK
  #48/2  cgrp_local_storage/attach_cgroup:OK
  #48/3  cgrp_local_storage/recursion:OK
  #48/4  cgrp_local_storage/negative:OK
  #48/5  cgrp_local_storage/cgroup_iter_sleepable:OK
  #48/6  cgrp_local_storage/yes_rcu_lock:OK
  #48/7  cgrp_local_storage/no_rcu_lock:OK

Expanded test cases for cgroup1:
  #48/8  cgrp_local_storage/cgrp1_tp_btf:OK
  #48/9  cgrp_local_storage/cgrp1_recursion:OK
  #48/10 cgrp_local_storage/cgrp1_negative:OK
  #48/11 cgrp_local_storage/cgrp1_iter_sleepable:OK
  #48/12 cgrp_local_storage/cgrp1_yes_rcu_lock:OK
  #48/13 cgrp_local_storage/cgrp1_no_rcu_lock:OK

Summary:
  #48 cgrp_local_storage:OK
  Summary: 1/13 PASSED, 0 SKIPPED, 0 FAILED

Signed-off-by: Yafang Shao <[email protected]>
1 parent 779070a commit 384452f
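
For orientation before the diffs: every cgroup1 path added by this patch follows the same pattern around the bpf_task_get_cgroup1() kfunc (acquire the task's cgroup in the target cgroup1 hierarchy, use it, release it with bpf_cgroup_release()), while the existing cgroup2 paths keep dereferencing task->cgroups->dfl_cgrp directly. The sketch below condenses that pattern from cgrp_ls_recursion.c; it is illustrative only — the map definition and includes are the usual selftest boilerplate, and the program name on_enter_sketch is made up here, not part of the patch.

        /* Minimal sketch of the cgroup1/cgroup2 selection pattern used by this patch. */
        #include "vmlinux.h"
        #include <bpf/bpf_helpers.h>
        #include <bpf/bpf_tracing.h>

        char _license[] SEC("license") = "GPL";

        struct {
                __uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
                __uint(map_flags, BPF_F_NO_PREALLOC);
                __type(key, int);
                __type(value, long);
        } map_a SEC(".maps");

        int target_hid;         /* cgroup1 hierarchy id, set from userspace via skel->bss */
        bool is_cgroup1;        /* selects the cgroup1 path */

        /* kfuncs used by the cgroup1 path (declared the same way in the patch) */
        struct cgroup *bpf_task_get_cgroup1(struct task_struct *task, int hierarchy_id) __ksym;
        void bpf_cgroup_release(struct cgroup *cgrp) __ksym;

        static void __touch(struct cgroup *cgrp)
        {
                long *ptr;

                ptr = bpf_cgrp_storage_get(&map_a, cgrp, 0, BPF_LOCAL_STORAGE_GET_F_CREATE);
                if (ptr)
                        *ptr += 1;
        }

        SEC("tp_btf/sys_enter")
        int BPF_PROG(on_enter_sketch, struct pt_regs *regs, long id)
        {
                struct task_struct *task = bpf_get_current_task_btf();
                struct cgroup *cgrp;

                if (is_cgroup1) {
                        /* acquired reference: must be released before returning */
                        cgrp = bpf_task_get_cgroup1(task, target_hid);
                        if (!cgrp)
                                return 0;

                        __touch(cgrp);
                        bpf_cgroup_release(cgrp);
                        return 0;
                }

                /* cgroup2: dfl_cgrp is not an acquired reference, so no release is needed */
                __touch(task->cgroups->dfl_cgrp);
                return 0;
        }

Keeping the acquire and the release inside the same is_cgroup1 branch, and moving the shared storage logic into small helpers as the patch does, keeps the lifetime of the acquired cgroup reference obvious to the verifier on every program path.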

File tree

4 files changed: +278 -47 lines changed

tools/testing/selftests/bpf/prog_tests/cgrp_local_storage.c

Lines changed: 91 additions & 1 deletion
@@ -19,6 +19,9 @@ struct socket_cookie {
         __u64 cookie_value;
 };
 
+bool is_cgroup1;
+int target_hid;
+
 static void test_tp_btf(int cgroup_fd)
 {
         struct cgrp_ls_tp_btf *skel;
@@ -29,6 +32,9 @@ static void test_tp_btf(int cgroup_fd)
         if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
                 return;
 
+        skel->bss->is_cgroup1 = is_cgroup1;
+        skel->bss->target_hid = target_hid;
+
         /* populate a value in map_b */
         err = bpf_map_update_elem(bpf_map__fd(skel->maps.map_b), &cgroup_fd, &val1, BPF_ANY);
         if (!ASSERT_OK(err, "map_update_elem"))
@@ -130,6 +136,9 @@ static void test_recursion(int cgroup_fd)
         if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
                 return;
 
+        skel->bss->target_hid = target_hid;
+        skel->bss->is_cgroup1 = is_cgroup1;
+
         err = cgrp_ls_recursion__attach(skel);
         if (!ASSERT_OK(err, "skel_attach"))
                 goto out;
@@ -165,6 +174,9 @@ static void test_cgroup_iter_sleepable(int cgroup_fd, __u64 cgroup_id)
         if (!ASSERT_OK_PTR(skel, "skel_open"))
                 return;
 
+        skel->bss->target_hid = target_hid;
+        skel->bss->is_cgroup1 = is_cgroup1;
+
         bpf_program__set_autoload(skel->progs.cgroup_iter, true);
         err = cgrp_ls_sleepable__load(skel);
         if (!ASSERT_OK(err, "skel_load"))
@@ -202,6 +214,8 @@ static void test_yes_rcu_lock(__u64 cgroup_id)
         if (!ASSERT_OK_PTR(skel, "skel_open"))
                 return;
 
+        skel->bss->target_hid = target_hid;
+        skel->bss->is_cgroup1 = is_cgroup1;
         skel->bss->target_pid = syscall(SYS_gettid);
 
         bpf_program__set_autoload(skel->progs.yes_rcu_lock, true);
@@ -229,14 +243,36 @@ static void test_no_rcu_lock(void)
         if (!ASSERT_OK_PTR(skel, "skel_open"))
                 return;
 
+        skel->bss->target_hid = target_hid;
+        skel->bss->is_cgroup1 = is_cgroup1;
+
         bpf_program__set_autoload(skel->progs.no_rcu_lock, true);
         err = cgrp_ls_sleepable__load(skel);
         ASSERT_ERR(err, "skel_load");
 
         cgrp_ls_sleepable__destroy(skel);
 }
 
-void test_cgrp_local_storage(void)
+static void test_cgrp1_no_rcu_lock(void)
+{
+        struct cgrp_ls_sleepable *skel;
+        int err;
+
+        skel = cgrp_ls_sleepable__open();
+        if (!ASSERT_OK_PTR(skel, "skel_open"))
+                return;
+
+        skel->bss->target_hid = target_hid;
+        skel->bss->is_cgroup1 = is_cgroup1;
+
+        bpf_program__set_autoload(skel->progs.cgrp1_no_rcu_lock, true);
+        err = cgrp_ls_sleepable__load(skel);
+        ASSERT_OK(err, "skel_load");
+
+        cgrp_ls_sleepable__destroy(skel);
+}
+
+void cgrp2_local_storage(void)
 {
         __u64 cgroup_id;
         int cgroup_fd;
@@ -245,6 +281,8 @@ void test_cgrp_local_storage(void)
         if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup /cgrp_local_storage"))
                 return;
 
+        is_cgroup1 = 0;
+        target_hid = -1;
         cgroup_id = get_cgroup_id("/cgrp_local_storage");
         if (test__start_subtest("tp_btf"))
                 test_tp_btf(cgroup_fd);
@@ -263,3 +301,55 @@ void test_cgrp_local_storage(void)
 
         close(cgroup_fd);
 }
+
+void cgrp1_local_storage(void)
+{
+        int cgrp1_fd, cgrp1_hid, cgrp1_id, err;
+
+        /* Setup cgroup1 hierarchy */
+        err = setup_classid_environment();
+        if (!ASSERT_OK(err, "setup_classid_environment"))
+                return;
+
+        err = join_classid();
+        if (!ASSERT_OK(err, "join_cgroup1"))
+                goto cleanup;
+
+        cgrp1_fd = open_classid();
+        if (!ASSERT_GE(cgrp1_fd, 0, "cgroup1 fd"))
+                goto cleanup;
+
+        cgrp1_id = get_classid_cgroup_id();
+        if (!ASSERT_GE(cgrp1_id, 0, "cgroup1 id"))
+                goto close_fd;
+
+        cgrp1_hid = get_cgroup1_hierarchy_id("net_cls");
+        if (!ASSERT_GE(cgrp1_hid, 0, "cgroup1 hid"))
+                goto close_fd;
+        target_hid = cgrp1_hid;
+        is_cgroup1 = 1;
+
+        if (test__start_subtest("cgrp1_tp_btf"))
+                test_tp_btf(cgrp1_fd);
+        if (test__start_subtest("cgrp1_recursion"))
+                test_recursion(cgrp1_fd);
+        if (test__start_subtest("cgrp1_negative"))
+                test_negative();
+        if (test__start_subtest("cgrp1_iter_sleepable"))
+                test_cgroup_iter_sleepable(cgrp1_fd, cgrp1_id);
+        if (test__start_subtest("cgrp1_yes_rcu_lock"))
+                test_yes_rcu_lock(cgrp1_id);
+        if (test__start_subtest("cgrp1_no_rcu_lock"))
+                test_cgrp1_no_rcu_lock();
+
+close_fd:
+        close(cgrp1_fd);
+cleanup:
+        cleanup_classid_environment();
+}
+
+void test_cgrp_local_storage(void)
+{
+        cgrp2_local_storage();
+        cgrp1_local_storage();
+}

tools/testing/selftests/bpf/progs/cgrp_ls_recursion.c

Lines changed: 67 additions & 17 deletions
@@ -21,50 +21,100 @@ struct {
         __type(value, long);
 } map_b SEC(".maps");
 
+int target_hid = 0;
+bool is_cgroup1 = 0;
+
+struct cgroup *bpf_task_get_cgroup1(struct task_struct *task, int hierarchy_id) __ksym;
+void bpf_cgroup_release(struct cgroup *cgrp) __ksym;
+
+static void __on_lookup(struct cgroup *cgrp)
+{
+        bpf_cgrp_storage_delete(&map_a, cgrp);
+        bpf_cgrp_storage_delete(&map_b, cgrp);
+}
+
 SEC("fentry/bpf_local_storage_lookup")
 int BPF_PROG(on_lookup)
 {
         struct task_struct *task = bpf_get_current_task_btf();
+        struct cgroup *cgrp;
+
+        if (is_cgroup1) {
+                cgrp = bpf_task_get_cgroup1(task, target_hid);
+                if (!cgrp)
+                        return 0;
 
-        bpf_cgrp_storage_delete(&map_a, task->cgroups->dfl_cgrp);
-        bpf_cgrp_storage_delete(&map_b, task->cgroups->dfl_cgrp);
+                __on_lookup(cgrp);
+                bpf_cgroup_release(cgrp);
+                return 0;
+        }
+
+        __on_lookup(task->cgroups->dfl_cgrp);
         return 0;
 }
 
-SEC("fentry/bpf_local_storage_update")
-int BPF_PROG(on_update)
+static void __on_update(struct cgroup *cgrp)
 {
-        struct task_struct *task = bpf_get_current_task_btf();
         long *ptr;
 
-        ptr = bpf_cgrp_storage_get(&map_a, task->cgroups->dfl_cgrp, 0,
-                                   BPF_LOCAL_STORAGE_GET_F_CREATE);
+        ptr = bpf_cgrp_storage_get(&map_a, cgrp, 0, BPF_LOCAL_STORAGE_GET_F_CREATE);
         if (ptr)
                 *ptr += 1;
 
-        ptr = bpf_cgrp_storage_get(&map_b, task->cgroups->dfl_cgrp, 0,
-                                   BPF_LOCAL_STORAGE_GET_F_CREATE);
+        ptr = bpf_cgrp_storage_get(&map_b, cgrp, 0, BPF_LOCAL_STORAGE_GET_F_CREATE);
         if (ptr)
                 *ptr += 1;
+}
 
+SEC("fentry/bpf_local_storage_update")
+int BPF_PROG(on_update)
+{
+        struct task_struct *task = bpf_get_current_task_btf();
+        struct cgroup *cgrp;
+
+        if (is_cgroup1) {
+                cgrp = bpf_task_get_cgroup1(task, target_hid);
+                if (!cgrp)
+                        return 0;
+
+                __on_update(cgrp);
+                bpf_cgroup_release(cgrp);
+                return 0;
+        }
+
+        __on_update(task->cgroups->dfl_cgrp);
         return 0;
 }
 
-SEC("tp_btf/sys_enter")
-int BPF_PROG(on_enter, struct pt_regs *regs, long id)
+static void __on_enter(struct pt_regs *regs, long id, struct cgroup *cgrp)
 {
-        struct task_struct *task;
         long *ptr;
 
-        task = bpf_get_current_task_btf();
-        ptr = bpf_cgrp_storage_get(&map_a, task->cgroups->dfl_cgrp, 0,
-                                   BPF_LOCAL_STORAGE_GET_F_CREATE);
+        ptr = bpf_cgrp_storage_get(&map_a, cgrp, 0, BPF_LOCAL_STORAGE_GET_F_CREATE);
         if (ptr)
                 *ptr = 200;
 
-        ptr = bpf_cgrp_storage_get(&map_b, task->cgroups->dfl_cgrp, 0,
-                                   BPF_LOCAL_STORAGE_GET_F_CREATE);
+        ptr = bpf_cgrp_storage_get(&map_b, cgrp, 0, BPF_LOCAL_STORAGE_GET_F_CREATE);
         if (ptr)
                 *ptr = 100;
+}
+
+SEC("tp_btf/sys_enter")
+int BPF_PROG(on_enter, struct pt_regs *regs, long id)
+{
+        struct task_struct *task = bpf_get_current_task_btf();
+        struct cgroup *cgrp;
+
+        if (is_cgroup1) {
+                cgrp = bpf_task_get_cgroup1(task, target_hid);
+                if (!cgrp)
+                        return 0;
+
+                __on_enter(regs, id, cgrp);
+                bpf_cgroup_release(cgrp);
+                return 0;
+        }
+
+        __on_enter(regs, id, task->cgroups->dfl_cgrp);
         return 0;
 }

tools/testing/selftests/bpf/progs/cgrp_ls_sleepable.c

Lines changed: 60 additions & 7 deletions
@@ -17,7 +17,11 @@ struct {
 
 __u32 target_pid;
 __u64 cgroup_id;
+int target_hid;
+bool is_cgroup1;
 
+struct cgroup *bpf_task_get_cgroup1(struct task_struct *task, int hierarchy_id) __ksym;
+void bpf_cgroup_release(struct cgroup *cgrp) __ksym;
 void bpf_rcu_read_lock(void) __ksym;
 void bpf_rcu_read_unlock(void) __ksym;
 
@@ -37,23 +41,56 @@ int cgroup_iter(struct bpf_iter__cgroup *ctx)
         return 0;
 }
 
+static void __no_rcu_lock(struct cgroup *cgrp)
+{
+        long *ptr;
+
+        /* Note that trace rcu is held in sleepable prog, so we can use
+         * bpf_cgrp_storage_get() in sleepable prog.
+         */
+        ptr = bpf_cgrp_storage_get(&map_a, cgrp, 0,
+                                   BPF_LOCAL_STORAGE_GET_F_CREATE);
+        if (ptr)
+                cgroup_id = cgrp->kn->id;
+}
+
 SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
-int no_rcu_lock(void *ctx)
+int cgrp1_no_rcu_lock(void *ctx)
 {
         struct task_struct *task;
         struct cgroup *cgrp;
-        long *ptr;
+
+        if (!is_cgroup1)
+                return 0;
+
+        task = bpf_get_current_task_btf();
+        if (task->pid != target_pid)
+                return 0;
+
+        /* bpf_task_get_cgroup1 can work in sleepable prog */
+        cgrp = bpf_task_get_cgroup1(task, target_hid);
+        if (!cgrp)
+                return 0;
+
+        __no_rcu_lock(cgrp);
+        bpf_cgroup_release(cgrp);
+        return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int no_rcu_lock(void *ctx)
+{
+        struct task_struct *task;
+
+        if (is_cgroup1)
+                return 0;
 
         task = bpf_get_current_task_btf();
         if (task->pid != target_pid)
                 return 0;
 
         /* task->cgroups is untrusted in sleepable prog outside of RCU CS */
-        cgrp = task->cgroups->dfl_cgrp;
-        ptr = bpf_cgrp_storage_get(&map_a, cgrp, 0,
-                                   BPF_LOCAL_STORAGE_GET_F_CREATE);
-        if (ptr)
-                cgroup_id = cgrp->kn->id;
+        __no_rcu_lock(task->cgroups->dfl_cgrp);
         return 0;
 }
 
@@ -68,6 +105,22 @@ int yes_rcu_lock(void *ctx)
         if (task->pid != target_pid)
                 return 0;
 
+        if (is_cgroup1) {
+                bpf_rcu_read_lock();
+                cgrp = bpf_task_get_cgroup1(task, target_hid);
+                if (!cgrp) {
+                        bpf_rcu_read_unlock();
+                        return 0;
+                }
+
+                ptr = bpf_cgrp_storage_get(&map_a, cgrp, 0, BPF_LOCAL_STORAGE_GET_F_CREATE);
+                if (ptr)
+                        cgroup_id = cgrp->kn->id;
+                bpf_cgroup_release(cgrp);
+                bpf_rcu_read_unlock();
+                return 0;
+        }
+
         bpf_rcu_read_lock();
         cgrp = task->cgroups->dfl_cgrp;
         /* cgrp is trusted under RCU CS */
