@@ -1146,67 +1146,116 @@ struct bpf_run_ctx {};
 
 struct bpf_cg_run_ctx {
 	struct bpf_run_ctx run_ctx;
-	struct bpf_prog_array_item *prog_item;
+	const struct bpf_prog_array_item *prog_item;
 };
 
+static inline struct bpf_run_ctx *bpf_set_run_ctx(struct bpf_run_ctx *new_ctx)
+{
+	struct bpf_run_ctx *old_ctx = NULL;
+
+#ifdef CONFIG_BPF_SYSCALL
+	old_ctx = current->bpf_ctx;
+	current->bpf_ctx = new_ctx;
+#endif
+	return old_ctx;
+}
+
+static inline void bpf_reset_run_ctx(struct bpf_run_ctx *old_ctx)
+{
+#ifdef CONFIG_BPF_SYSCALL
+	current->bpf_ctx = old_ctx;
+#endif
+}
+
 /* BPF program asks to bypass CAP_NET_BIND_SERVICE in bind. */
 #define BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE	(1 << 0)
 /* BPF program asks to set CN on the packet. */
 #define BPF_RET_SET_CN				(1 << 0)
 
-#define BPF_PROG_RUN_ARRAY_FLAGS(array, ctx, func, ret_flags)		\
-	({								\
-		struct bpf_prog_array_item *_item;			\
-		struct bpf_prog *_prog;					\
-		struct bpf_prog_array *_array;				\
-		struct bpf_run_ctx *old_run_ctx;			\
-		struct bpf_cg_run_ctx run_ctx;				\
-		u32 _ret = 1;						\
-		u32 func_ret;						\
-		migrate_disable();					\
-		rcu_read_lock();					\
-		_array = rcu_dereference(array);			\
-		_item = &_array->items[0];				\
-		old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);	\
-		while ((_prog = READ_ONCE(_item->prog))) {		\
-			run_ctx.prog_item = _item;			\
-			func_ret = func(_prog, ctx);			\
-			_ret &= (func_ret & 1);				\
-			*(ret_flags) |= (func_ret >> 1);		\
-			_item++;					\
-		}							\
-		bpf_reset_run_ctx(old_run_ctx);				\
-		rcu_read_unlock();					\
-		migrate_enable();					\
-		_ret;							\
-	})
-
-#define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null, set_cg_storage)	\
-	({						\
-		struct bpf_prog_array_item *_item;	\
-		struct bpf_prog *_prog;			\
-		struct bpf_prog_array *_array;		\
-		struct bpf_run_ctx *old_run_ctx;	\
-		struct bpf_cg_run_ctx run_ctx;		\
-		u32 _ret = 1;				\
-		migrate_disable();			\
-		rcu_read_lock();			\
-		_array = rcu_dereference(array);	\
-		if (unlikely(check_non_null && !_array))\
-			goto _out;			\
-		_item = &_array->items[0];		\
-		old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);\
-		while ((_prog = READ_ONCE(_item->prog))) {	\
-			run_ctx.prog_item = _item;	\
-			_ret &= func(_prog, ctx);	\
-			_item++;			\
-		}					\
-		bpf_reset_run_ctx(old_run_ctx);		\
-_out:							\
-		rcu_read_unlock();			\
-		migrate_enable();			\
-		_ret;					\
-	})
+typedef u32 (*bpf_prog_run_fn)(const struct bpf_prog *prog, const void *ctx);
+
+static __always_inline u32
+BPF_PROG_RUN_ARRAY_CG_FLAGS(const struct bpf_prog_array __rcu *array_rcu,
+			    const void *ctx, bpf_prog_run_fn run_prog,
+			    u32 *ret_flags)
+{
+	const struct bpf_prog_array_item *item;
+	const struct bpf_prog *prog;
+	const struct bpf_prog_array *array;
+	struct bpf_run_ctx *old_run_ctx;
+	struct bpf_cg_run_ctx run_ctx;
+	u32 ret = 1;
+	u32 func_ret;
+
+	migrate_disable();
+	rcu_read_lock();
+	array = rcu_dereference(array_rcu);
+	item = &array->items[0];
+	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
+	while ((prog = READ_ONCE(item->prog))) {
+		run_ctx.prog_item = item;
+		func_ret = run_prog(prog, ctx);
+		ret &= (func_ret & 1);
+		*(ret_flags) |= (func_ret >> 1);
+		item++;
+	}
+	bpf_reset_run_ctx(old_run_ctx);
+	rcu_read_unlock();
+	migrate_enable();
+	return ret;
+}
+
+static __always_inline u32
+BPF_PROG_RUN_ARRAY_CG(const struct bpf_prog_array __rcu *array_rcu,
+		      const void *ctx, bpf_prog_run_fn run_prog)
+{
+	const struct bpf_prog_array_item *item;
+	const struct bpf_prog *prog;
+	const struct bpf_prog_array *array;
+	struct bpf_run_ctx *old_run_ctx;
+	struct bpf_cg_run_ctx run_ctx;
+	u32 ret = 1;
+
+	migrate_disable();
+	rcu_read_lock();
+	array = rcu_dereference(array_rcu);
+	item = &array->items[0];
+	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
+	while ((prog = READ_ONCE(item->prog))) {
+		run_ctx.prog_item = item;
+		ret &= run_prog(prog, ctx);
+		item++;
+	}
+	bpf_reset_run_ctx(old_run_ctx);
+	rcu_read_unlock();
+	migrate_enable();
+	return ret;
+}
+
+static __always_inline u32
+BPF_PROG_RUN_ARRAY(const struct bpf_prog_array __rcu *array_rcu,
+		   const void *ctx, bpf_prog_run_fn run_prog)
+{
+	const struct bpf_prog_array_item *item;
+	const struct bpf_prog *prog;
+	const struct bpf_prog_array *array;
+	u32 ret = 1;
+
+	migrate_disable();
+	rcu_read_lock();
+	array = rcu_dereference(array_rcu);
+	if (unlikely(!array))
+		goto out;
+	item = &array->items[0];
+	while ((prog = READ_ONCE(item->prog))) {
+		ret &= run_prog(prog, ctx);
+		item++;
+	}
+out:
+	rcu_read_unlock();
+	migrate_enable();
+	return ret;
+}
 
 /* To be used by __cgroup_bpf_run_filter_skb for EGRESS BPF progs
  * so BPF programs can request cwr for TCP packets.
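
The new bpf_set_run_ctx()/bpf_reset_run_ctx() pair stashes a pointer to the on-stack bpf_cg_run_ctx in current->bpf_ctx for the duration of the array walk, so code executing under the array (BPF helpers, for instance) can discover which prog_item is currently running. A minimal consumer sketch, assuming only what the hunk above shows (the function name is hypothetical; container_of() recovers the wrapper from the embedded run_ctx):

static const struct bpf_prog_array_item *current_cg_prog_item(void)
{
	struct bpf_cg_run_ctx *cg_run_ctx;

	/* Only valid while a BPF_PROG_RUN_ARRAY_CG*() walk is live on
	 * this task, i.e. between the set and reset of current->bpf_ctx.
	 */
	cg_run_ctx = container_of(current->bpf_ctx,
				  struct bpf_cg_run_ctx, run_ctx);
	return cg_run_ctx->prog_item;
}
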
@@ -1235,7 +1284,7 @@ _out:		\
 		u32 _flags = 0;				\
 		bool _cn;				\
 		u32 _ret;				\
-		_ret = BPF_PROG_RUN_ARRAY_FLAGS(array, ctx, func, &_flags); \
+		_ret = BPF_PROG_RUN_ARRAY_CG_FLAGS(array, ctx, func, &_flags); \
 		_cn = _flags & BPF_RET_SET_CN;		\
 		if (_ret)				\
 			_ret = (_cn ? NET_XMIT_CN : NET_XMIT_SUCCESS);	\
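
The _CG_FLAGS variant splits each program's 32-bit return value: bit 0 carries the allow/deny verdict that is AND-ed into the final result, and the remaining bits are shifted down and OR-ed into *ret_flags, which is how the egress macro above picks up BPF_RET_SET_CN. A sketch of a runner honoring that convention (demo_run_prog is illustrative, not part of the patch):

static u32 demo_run_prog(const struct bpf_prog *prog, const void *ctx)
{
	u32 verdict = 1;		/* bit 0: 1 = allow, 0 = deny */
	u32 flags = BPF_RET_SET_CN;	/* lands in *ret_flags after >> 1 */

	return (flags << 1) | (verdict & 1);
}
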
@@ -1244,12 +1293,6 @@ _out:		\
 		_ret;					\
 	})
 
-#define BPF_PROG_RUN_ARRAY(array, ctx, func)		\
-	__BPF_PROG_RUN_ARRAY(array, ctx, func, false, true)
-
-#define BPF_PROG_RUN_ARRAY_CHECK(array, ctx, func)	\
-	__BPF_PROG_RUN_ARRAY(array, ctx, func, true, false)
-
 #ifdef CONFIG_BPF_SYSCALL
 DECLARE_PER_CPU(int, bpf_prog_active);
 extern struct mutex bpf_stats_enabled_mutex;
@@ -1284,20 +1327,6 @@ static inline void bpf_enable_instrumentation(void)
 	migrate_enable();
 }
 
-static inline struct bpf_run_ctx *bpf_set_run_ctx(struct bpf_run_ctx *new_ctx)
-{
-	struct bpf_run_ctx *old_ctx;
-
-	old_ctx = current->bpf_ctx;
-	current->bpf_ctx = new_ctx;
-	return old_ctx;
-}
-
-static inline void bpf_reset_run_ctx(struct bpf_run_ctx *old_ctx)
-{
-	current->bpf_ctx = old_ctx;
-}
-
 extern const struct file_operations bpf_map_fops;
 extern const struct file_operations bpf_prog_fops;
 extern const struct file_operations bpf_iter_fops;
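
With the macros turned into __always_inline functions, call sites get real type checking on the runner callback instead of token-pasting. A hedged sketch of what a caller could look like after this change (effective_array, run_one, and example() are stand-ins, not code from this commit):

static u32 run_one(const struct bpf_prog *prog, const void *ctx)
{
	/* would dispatch into the program here */
	return 1;
}

static int example(struct bpf_prog_array __rcu *effective_array, void *ctx)
{
	u32 allow;

	/* a runner whose signature doesn't match bpf_prog_run_fn now
	 * fails at compile time instead of expanding inside a macro
	 */
	allow = BPF_PROG_RUN_ARRAY_CG(effective_array, ctx, run_one);
	return allow ? 0 : -EPERM;
}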