
Commit fb7dd8b

anakryiko authored and borkmann committed
bpf: Refactor BPF_PROG_RUN into a function
Turn BPF_PROG_RUN into a proper always-inlined function. No functional or performance changes are intended, but it makes it much easier to understand how BPF programs actually get executed. It is more obvious what types and callbacks are expected. The extra () around input parameters can also be dropped, as well as the `__` variable prefixes used to avoid naming collisions, which makes the code simpler to read and write.

This refactoring also highlighted one extra issue: BPF_PROG_RUN is both a macro and an enum value (BPF_PROG_RUN == BPF_PROG_TEST_RUN), so turning BPF_PROG_RUN into a function causes a naming-conflict compilation error. Therefore, rename BPF_PROG_RUN to the lower-case bpf_prog_run(), similar to bpf_prog_run_xdp(), bpf_prog_run_pin_on_cpu(), etc. All existing callers of BPF_PROG_RUN, the macro, are switched to bpf_prog_run() explicitly.

Signed-off-by: Andrii Nakryiko <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
Acked-by: Yonghong Song <[email protected]>
Link: https://lore.kernel.org/bpf/[email protected]
1 parent 1bda52f commit fb7dd8b
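
To make the caller-side impact concrete, here is a minimal sketch of how a call site changes; the prog/ctx pair below is a placeholder for illustration and is not taken from this commit:

#include <linux/filter.h>

/* Hypothetical helper: "prog" and "ctx" are placeholder names. */
static u32 run_prog_example(const struct bpf_prog *prog, const void *ctx)
{
	/* Before this commit: u32 ret = BPF_PROG_RUN(prog, ctx);  (macro) */
	/* After: same semantics, but a real __always_inline function.    */
	return bpf_prog_run(prog, ctx);
}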

20 files changed: 73 additions & 60 deletions

Documentation/networking/filter.rst

Lines changed: 2 additions & 2 deletions
@@ -638,8 +638,8 @@ extension, PTP dissector/classifier, and much more. They are all internally
 converted by the kernel into the new instruction set representation and run
 in the eBPF interpreter. For in-kernel handlers, this all works transparently
 by using bpf_prog_create() for setting up the filter, resp.
-bpf_prog_destroy() for destroying it. The macro
-BPF_PROG_RUN(filter, ctx) transparently invokes eBPF interpreter or JITed
+bpf_prog_destroy() for destroying it. The function
+bpf_prog_run(filter, ctx) transparently invokes eBPF interpreter or JITed
 code to run the filter. 'filter' is a pointer to struct bpf_prog that we
 got from bpf_prog_create(), and 'ctx' the given context (e.g.
 skb pointer). All constraints and restrictions from bpf_check_classic() apply
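
As the documentation above describes, an in-kernel user sets the filter up with bpf_prog_create(), runs it with bpf_prog_run(filter, ctx), and tears it down with bpf_prog_destroy(). A minimal sketch of that flow, assuming a made-up accept-all classic filter (the program and helper below are illustrative, not part of this commit); since bpf_prog_run() expects migration to already be disabled, the sketch wraps the call in migrate_disable()/migrate_enable():

#include <linux/filter.h>
#include <linux/skbuff.h>

/* Hypothetical accept-all classic BPF filter (return a large length). */
static struct sock_filter example_insns[] = {
	BPF_STMT(BPF_RET | BPF_K, 0xffff),
};

static struct sock_fprog_kern example_fprog = {
	.len	= ARRAY_SIZE(example_insns),
	.filter	= example_insns,
};

static int example_filter_skb(struct sk_buff *skb)
{
	struct bpf_prog *filter;
	u32 res;
	int err;

	err = bpf_prog_create(&filter, &example_fprog);	/* set up */
	if (err)
		return err;

	migrate_disable();
	res = bpf_prog_run(filter, skb);	/* interpreter or JITed code */
	migrate_enable();

	bpf_prog_destroy(filter);		/* tear down */
	return res ? 0 : -EPERM;
}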

drivers/media/rc/bpf-lirc.c

Lines changed: 1 addition & 1 deletion
@@ -217,7 +217,7 @@ void lirc_bpf_run(struct rc_dev *rcdev, u32 sample)
 	raw->bpf_sample = sample;
 
 	if (raw->progs)
-		BPF_PROG_RUN_ARRAY(raw->progs, &raw->bpf_sample, BPF_PROG_RUN);
+		BPF_PROG_RUN_ARRAY(raw->progs, &raw->bpf_sample, bpf_prog_run);
 }
 
 /*

drivers/net/ppp/ppp_generic.c

Lines changed: 4 additions & 4 deletions
@@ -1744,7 +1744,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
 	   a four-byte PPP header on each packet */
 	*(u8 *)skb_push(skb, 2) = 1;
 	if (ppp->pass_filter &&
-	    BPF_PROG_RUN(ppp->pass_filter, skb) == 0) {
+	    bpf_prog_run(ppp->pass_filter, skb) == 0) {
 		if (ppp->debug & 1)
 			netdev_printk(KERN_DEBUG, ppp->dev,
 				      "PPP: outbound frame "
@@ -1754,7 +1754,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
 	}
 	/* if this packet passes the active filter, record the time */
 	if (!(ppp->active_filter &&
-	      BPF_PROG_RUN(ppp->active_filter, skb) == 0))
+	      bpf_prog_run(ppp->active_filter, skb) == 0))
 		ppp->last_xmit = jiffies;
 	skb_pull(skb, 2);
 #else
@@ -2468,7 +2468,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
 
 			*(u8 *)skb_push(skb, 2) = 0;
 			if (ppp->pass_filter &&
-			    BPF_PROG_RUN(ppp->pass_filter, skb) == 0) {
+			    bpf_prog_run(ppp->pass_filter, skb) == 0) {
 				if (ppp->debug & 1)
 					netdev_printk(KERN_DEBUG, ppp->dev,
 						      "PPP: inbound frame "
@@ -2477,7 +2477,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
 				return;
 			}
 			if (!(ppp->active_filter &&
-			      BPF_PROG_RUN(ppp->active_filter, skb) == 0))
+			      bpf_prog_run(ppp->active_filter, skb) == 0))
 				ppp->last_recv = jiffies;
 			__skb_pull(skb, 2);
 		} else

drivers/net/team/team_mode_loadbalance.c

Lines changed: 1 addition & 1 deletion
@@ -197,7 +197,7 @@ static unsigned int lb_get_skb_hash(struct lb_priv *lb_priv,
 	fp = rcu_dereference_bh(lb_priv->fp);
 	if (unlikely(!fp))
 		return 0;
-	lhash = BPF_PROG_RUN(fp, skb);
+	lhash = bpf_prog_run(fp, skb);
 	c = (char *) &lhash;
 	return c[0] ^ c[1] ^ c[2] ^ c[3];
 }

include/linux/bpf.h

Lines changed: 1 addition & 1 deletion
@@ -1103,7 +1103,7 @@ u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
 /* an array of programs to be executed under rcu_lock.
  *
  * Typical usage:
- * ret = BPF_PROG_RUN_ARRAY(&bpf_prog_array, ctx, BPF_PROG_RUN);
+ * ret = BPF_PROG_RUN_ARRAY(&bpf_prog_array, ctx, bpf_prog_run);
  *
  * the structure returned by bpf_prog_array_alloc() should be populated
  * with program pointers and the last pointer must be NULL.
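
The "Typical usage" line updated here mirrors the call pattern in the cgroup hooks later in this commit: pass bpf_prog_run as the per-program runner and treat a result of 1 as success. A hypothetical wrapper for illustration only; the array and context names are invented, not from this commit:

#include <linux/bpf.h>
#include <linux/filter.h>

/* Sketch only: "effective" and "ctx" are placeholder names. */
static int example_run_prog_array(struct bpf_prog_array __rcu *effective,
				  void *ctx)
{
	u32 ret;

	ret = BPF_PROG_RUN_ARRAY(effective, ctx, bpf_prog_run);

	return ret == 1 ? 0 : -EPERM;
}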

include/linux/filter.h

Lines changed: 37 additions & 24 deletions
@@ -600,25 +600,38 @@ struct sk_filter {
 
 DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
 
-#define __BPF_PROG_RUN(prog, ctx, dfunc)	({			\
-	u32 __ret;							\
-	cant_migrate();							\
-	if (static_branch_unlikely(&bpf_stats_enabled_key)) {		\
-		struct bpf_prog_stats *__stats;				\
-		u64 __start = sched_clock();				\
-		__ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func);	\
-		__stats = this_cpu_ptr(prog->stats);			\
-		u64_stats_update_begin(&__stats->syncp);		\
-		__stats->cnt++;						\
-		__stats->nsecs += sched_clock() - __start;		\
-		u64_stats_update_end(&__stats->syncp);			\
-	} else {							\
-		__ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func);	\
-	}								\
-	__ret; })
-
-#define BPF_PROG_RUN(prog, ctx)						\
-	__BPF_PROG_RUN(prog, ctx, bpf_dispatcher_nop_func)
+typedef unsigned int (*bpf_dispatcher_fn)(const void *ctx,
+					  const struct bpf_insn *insnsi,
+					  unsigned int (*bpf_func)(const void *,
+								   const struct bpf_insn *));
+
+static __always_inline u32 __bpf_prog_run(const struct bpf_prog *prog,
+					  const void *ctx,
+					  bpf_dispatcher_fn dfunc)
+{
+	u32 ret;
+
+	cant_migrate();
+	if (static_branch_unlikely(&bpf_stats_enabled_key)) {
+		struct bpf_prog_stats *stats;
+		u64 start = sched_clock();
+
+		ret = dfunc(ctx, prog->insnsi, prog->bpf_func);
+		stats = this_cpu_ptr(prog->stats);
+		u64_stats_update_begin(&stats->syncp);
+		stats->cnt++;
+		stats->nsecs += sched_clock() - start;
+		u64_stats_update_end(&stats->syncp);
+	} else {
+		ret = dfunc(ctx, prog->insnsi, prog->bpf_func);
+	}
+	return ret;
+}
+
+static __always_inline u32 bpf_prog_run(const struct bpf_prog *prog, const void *ctx)
+{
+	return __bpf_prog_run(prog, ctx, bpf_dispatcher_nop_func);
+}
 
 /*
  * Use in preemptible and therefore migratable context to make sure that
@@ -637,7 +650,7 @@ static inline u32 bpf_prog_run_pin_on_cpu(const struct bpf_prog *prog,
 	u32 ret;
 
 	migrate_disable();
-	ret = __BPF_PROG_RUN(prog, ctx, bpf_dispatcher_nop_func);
+	ret = bpf_prog_run(prog, ctx);
 	migrate_enable();
 	return ret;
 }
@@ -742,7 +755,7 @@ static inline u32 __bpf_prog_run_save_cb(const struct bpf_prog *prog,
 		memset(cb_data, 0, sizeof(cb_saved));
 	}
 
-	res = BPF_PROG_RUN(prog, skb);
+	res = bpf_prog_run(prog, skb);
 
 	if (unlikely(prog->cb_access))
 		memcpy(cb_data, cb_saved, sizeof(cb_saved));
@@ -787,7 +800,7 @@ static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
 	 * under local_bh_disable(), which provides the needed RCU protection
 	 * for accessing map entries.
 	 */
-	u32 act = __BPF_PROG_RUN(prog, xdp, BPF_DISPATCHER_FUNC(xdp));
+	u32 act = __bpf_prog_run(prog, xdp, BPF_DISPATCHER_FUNC(xdp));
 
 	if (static_branch_unlikely(&bpf_master_redirect_enabled_key)) {
 		if (act == XDP_TX && netif_is_bond_slave(xdp->rxq->dev))
@@ -1440,7 +1453,7 @@ static inline bool bpf_sk_lookup_run_v4(struct net *net, int protocol,
 	};
 	u32 act;
 
-	act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, BPF_PROG_RUN);
+	act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, bpf_prog_run);
 	if (act == SK_PASS) {
 		selected_sk = ctx.selected_sk;
 		no_reuseport = ctx.no_reuseport;
@@ -1478,7 +1491,7 @@ static inline bool bpf_sk_lookup_run_v6(struct net *net, int protocol,
 	};
 	u32 act;
 
-	act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, BPF_PROG_RUN);
+	act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, bpf_prog_run);
 	if (act == SK_PASS) {
 		selected_sk = ctx.selected_sk;
 		no_reuseport = ctx.no_reuseport;
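
One practical note on the two entry points defined in the hunks above: bpf_prog_run() asserts, via cant_migrate(), that its caller already cannot migrate, while bpf_prog_run_pin_on_cpu() (whose body is updated in the -637,7/+650,7 hunk) wraps the run in migrate_disable()/migrate_enable() for preemptible callers. A hedged sketch of choosing between them, with placeholder names not taken from this commit:

#include <linux/filter.h>

/* Caller already in a non-migratable section (e.g. a BH-disabled or
 * migrate_disable() region): call bpf_prog_run() directly.
 */
static u32 example_run_nonmigratable(const struct bpf_prog *prog, const void *ctx)
{
	return bpf_prog_run(prog, ctx);
}

/* Preemptible process context: let the helper pin execution to one CPU
 * for the duration of the program run.
 */
static u32 example_run_preemptible(const struct bpf_prog *prog, const void *ctx)
{
	return bpf_prog_run_pin_on_cpu(prog, ctx);
}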

kernel/bpf/bpf_iter.c

Lines changed: 1 addition & 1 deletion
@@ -686,7 +686,7 @@ int bpf_iter_run_prog(struct bpf_prog *prog, void *ctx)
 
 	rcu_read_lock();
 	migrate_disable();
-	ret = BPF_PROG_RUN(prog, ctx);
+	ret = bpf_prog_run(prog, ctx);
 	migrate_enable();
 	rcu_read_unlock();
 
kernel/bpf/cgroup.c

Lines changed: 8 additions & 8 deletions
@@ -1043,7 +1043,7 @@ int __cgroup_bpf_run_filter_sk(struct sock *sk,
 	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
 	int ret;
 
-	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], sk, BPF_PROG_RUN);
+	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], sk, bpf_prog_run);
 	return ret == 1 ? 0 : -EPERM;
 }
 EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk);
@@ -1091,7 +1091,7 @@ int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
 
 	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
 	ret = BPF_PROG_RUN_ARRAY_FLAGS(cgrp->bpf.effective[type], &ctx,
-				       BPF_PROG_RUN, flags);
+				       bpf_prog_run, flags);
 
 	return ret == 1 ? 0 : -EPERM;
 }
@@ -1121,7 +1121,7 @@ int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
 	int ret;
 
 	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], sock_ops,
-				 BPF_PROG_RUN);
+				 bpf_prog_run);
 	return ret == 1 ? 0 : -EPERM;
 }
 EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_ops);
@@ -1140,7 +1140,7 @@ int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
 	rcu_read_lock();
 	cgrp = task_dfl_cgroup(current);
 	allow = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx,
-				   BPF_PROG_RUN);
+				   bpf_prog_run);
 	rcu_read_unlock();
 
 	return !allow;
@@ -1271,7 +1271,7 @@ int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
 
 	rcu_read_lock();
 	cgrp = task_dfl_cgroup(current);
-	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx, BPF_PROG_RUN);
+	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx, bpf_prog_run);
 	rcu_read_unlock();
 
 	kfree(ctx.cur_val);
@@ -1386,7 +1386,7 @@ int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
 
 	lock_sock(sk);
 	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[BPF_CGROUP_SETSOCKOPT],
-				 &ctx, BPF_PROG_RUN);
+				 &ctx, bpf_prog_run);
 	release_sock(sk);
 
 	if (!ret) {
@@ -1496,7 +1496,7 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
 
 	lock_sock(sk);
 	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[BPF_CGROUP_GETSOCKOPT],
-				 &ctx, BPF_PROG_RUN);
+				 &ctx, bpf_prog_run);
 	release_sock(sk);
 
 	if (!ret) {
@@ -1557,7 +1557,7 @@ int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level,
 	 */
 
 	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[BPF_CGROUP_GETSOCKOPT],
-				 &ctx, BPF_PROG_RUN);
+				 &ctx, bpf_prog_run);
 	if (!ret)
 		return -EPERM;
 
kernel/bpf/core.c

Lines changed: 1 addition & 1 deletion
@@ -1879,7 +1879,7 @@ static void bpf_prog_select_func(struct bpf_prog *fp)
  * @err: pointer to error variable
  *
  * Try to JIT eBPF program, if JIT is not available, use interpreter.
- * The BPF program will be executed via BPF_PROG_RUN() macro.
+ * The BPF program will be executed via bpf_prog_run() function.
  *
  * Return: the &fp argument along with &err set to 0 for success or
  * a negative errno code on failure

kernel/bpf/trampoline.c

Lines changed: 1 addition & 1 deletion
@@ -548,7 +548,7 @@ static void notrace inc_misses_counter(struct bpf_prog *prog)
 	u64_stats_update_end(&stats->syncp);
 }
 
-/* The logic is similar to BPF_PROG_RUN, but with an explicit
+/* The logic is similar to bpf_prog_run(), but with an explicit
  * rcu_read_lock() and migrate_disable() which are required
  * for the trampoline. The macro is split into
  * call __bpf_prog_enter
