Skip to content

Commit 56b4a17

Browse files
Benjamin Tissoires authored and Alexei Starovoitov committed
bpf: replace bpf_timer_init with a generic helper
No code change except for the new flags argument being stored in the local data struct.

Signed-off-by: Benjamin Tissoires <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Alexei Starovoitov <[email protected]>
1 parent be2749b commit 56b4a17

File tree

1 file changed

+63
-28
lines changed

1 file changed

+63
-28
lines changed

kernel/bpf/helpers.c

Lines changed: 63 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -1111,14 +1111,21 @@ struct bpf_hrtimer {
11111111

11121112
/* the actual struct hidden inside uapi struct bpf_timer */
11131113
struct bpf_async_kern {
1114-
struct bpf_hrtimer *timer;
1114+
union {
1115+
struct bpf_async_cb *cb;
1116+
struct bpf_hrtimer *timer;
1117+
};
11151118
/* bpf_spin_lock is used here instead of spinlock_t to make
11161119
* sure that it always fits into space reserved by struct bpf_timer
11171120
* regardless of LOCKDEP and spinlock debug flags.
11181121
*/
11191122
struct bpf_spin_lock lock;
11201123
} __attribute__((aligned(8)));
11211124

1125+
enum bpf_async_type {
1126+
BPF_ASYNC_TYPE_TIMER = 0,
1127+
};
1128+
11221129
static DEFINE_PER_CPU(struct bpf_hrtimer *, hrtimer_running);
11231130

11241131
static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer)
@@ -1160,46 +1167,55 @@ static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer)
11601167
return HRTIMER_NORESTART;
11611168
}
11621169

1163-
BPF_CALL_3(bpf_timer_init, struct bpf_async_kern *, timer, struct bpf_map *, map,
1164-
u64, flags)
1170+
static int __bpf_async_init(struct bpf_async_kern *async, struct bpf_map *map, u64 flags,
1171+
enum bpf_async_type type)
11651172
{
1166-
clockid_t clockid = flags & (MAX_CLOCKS - 1);
1173+
struct bpf_async_cb *cb;
11671174
struct bpf_hrtimer *t;
1175+
clockid_t clockid;
1176+
size_t size;
11681177
int ret = 0;
11691178

1170-
BUILD_BUG_ON(MAX_CLOCKS != 16);
1171-
BUILD_BUG_ON(sizeof(struct bpf_async_kern) > sizeof(struct bpf_timer));
1172-
BUILD_BUG_ON(__alignof__(struct bpf_async_kern) != __alignof__(struct bpf_timer));
1173-
11741179
if (in_nmi())
11751180
return -EOPNOTSUPP;
11761181

1177-
if (flags >= MAX_CLOCKS ||
1178-
/* similar to timerfd except _ALARM variants are not supported */
1179-
(clockid != CLOCK_MONOTONIC &&
1180-
clockid != CLOCK_REALTIME &&
1181-
clockid != CLOCK_BOOTTIME))
1182+
switch (type) {
1183+
case BPF_ASYNC_TYPE_TIMER:
1184+
size = sizeof(struct bpf_hrtimer);
1185+
break;
1186+
default:
11821187
return -EINVAL;
1183-
__bpf_spin_lock_irqsave(&timer->lock);
1184-
t = timer->timer;
1188+
}
1189+
1190+
__bpf_spin_lock_irqsave(&async->lock);
1191+
t = async->timer;
11851192
if (t) {
11861193
ret = -EBUSY;
11871194
goto out;
11881195
}
1196+
11891197
/* allocate hrtimer via map_kmalloc to use memcg accounting */
1190-
t = bpf_map_kmalloc_node(map, sizeof(*t), GFP_ATOMIC, map->numa_node);
1191-
if (!t) {
1198+
cb = bpf_map_kmalloc_node(map, size, GFP_ATOMIC, map->numa_node);
1199+
if (!cb) {
11921200
ret = -ENOMEM;
11931201
goto out;
11941202
}
1195-
t->cb.value = (void *)timer - map->record->timer_off;
1196-
t->cb.map = map;
1197-
t->cb.prog = NULL;
1198-
rcu_assign_pointer(t->cb.callback_fn, NULL);
1199-
hrtimer_init(&t->timer, clockid, HRTIMER_MODE_REL_SOFT);
1200-
t->timer.function = bpf_timer_cb;
1201-
WRITE_ONCE(timer->timer, t);
1202-
/* Guarantee the order between timer->timer and map->usercnt. So
1203+
1204+
if (type == BPF_ASYNC_TYPE_TIMER) {
1205+
clockid = flags & (MAX_CLOCKS - 1);
1206+
t = (struct bpf_hrtimer *)cb;
1207+
1208+
hrtimer_init(&t->timer, clockid, HRTIMER_MODE_REL_SOFT);
1209+
t->timer.function = bpf_timer_cb;
1210+
cb->value = (void *)async - map->record->timer_off;
1211+
}
1212+
cb->map = map;
1213+
cb->prog = NULL;
1214+
cb->flags = flags;
1215+
rcu_assign_pointer(cb->callback_fn, NULL);
1216+
1217+
WRITE_ONCE(async->cb, cb);
1218+
/* Guarantee the order between async->cb and map->usercnt. So
12031219
* when there are concurrent uref release and bpf timer init, either
12041220
* bpf_timer_cancel_and_free() called by uref release reads a no-NULL
12051221
* timer or atomic64_read() below returns a zero usercnt.
@@ -1209,15 +1225,34 @@ BPF_CALL_3(bpf_timer_init, struct bpf_async_kern *, timer, struct bpf_map *, map
12091225
/* maps with timers must be either held by user space
12101226
* or pinned in bpffs.
12111227
*/
1212-
WRITE_ONCE(timer->timer, NULL);
1213-
kfree(t);
1228+
WRITE_ONCE(async->cb, NULL);
1229+
kfree(cb);
12141230
ret = -EPERM;
12151231
}
12161232
out:
1217-
__bpf_spin_unlock_irqrestore(&timer->lock);
1233+
__bpf_spin_unlock_irqrestore(&async->lock);
12181234
return ret;
12191235
}
12201236

1237+
BPF_CALL_3(bpf_timer_init, struct bpf_async_kern *, timer, struct bpf_map *, map,
1238+
u64, flags)
1239+
{
1240+
clockid_t clockid = flags & (MAX_CLOCKS - 1);
1241+
1242+
BUILD_BUG_ON(MAX_CLOCKS != 16);
1243+
BUILD_BUG_ON(sizeof(struct bpf_async_kern) > sizeof(struct bpf_timer));
1244+
BUILD_BUG_ON(__alignof__(struct bpf_async_kern) != __alignof__(struct bpf_timer));
1245+
1246+
if (flags >= MAX_CLOCKS ||
1247+
/* similar to timerfd except _ALARM variants are not supported */
1248+
(clockid != CLOCK_MONOTONIC &&
1249+
clockid != CLOCK_REALTIME &&
1250+
clockid != CLOCK_BOOTTIME))
1251+
return -EINVAL;
1252+
1253+
return __bpf_async_init(timer, map, flags, BPF_ASYNC_TYPE_TIMER);
1254+
}
1255+
12211256
static const struct bpf_func_proto bpf_timer_init_proto = {
12221257
.func = bpf_timer_init,
12231258
.gpl_only = true,

0 commit comments

Comments
 (0)