125125 * are set to NOT_INIT to indicate that they are no longer readable.
126126 */
127127
/* max number of maps accessed by one eBPF program */
#define MAX_USED_MAPS 64

/* single container for all structs
 * one verifier_env per bpf_check() call
 */
struct verifier_env {
	struct bpf_prog *prog;		/* eBPF program being verified */
	struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */
	u32 used_map_cnt;		/* number of used maps */
};
133138
134139/* verbose verifier prints what it's seeing
@@ -300,6 +305,115 @@ static void print_bpf_insn(struct bpf_insn *insn)
300305 }
301306}
302307
308+ /* return the map pointer stored inside BPF_LD_IMM64 instruction */
309+ static struct bpf_map * ld_imm64_to_map_ptr (struct bpf_insn * insn )
310+ {
311+ u64 imm64 = ((u64 ) (u32 ) insn [0 ].imm ) | ((u64 ) (u32 ) insn [1 ].imm ) << 32 ;
312+
313+ return (struct bpf_map * ) (unsigned long ) imm64 ;
314+ }
315+
/* look for pseudo eBPF instructions that access map FDs and
 * replace them with actual map pointers
 *
 * Walks every instruction of env->prog.  For each BPF_LD_IMM64 whose
 * src_reg is BPF_PSEUDO_MAP_FD, the user-supplied map fd in insn->imm is
 * resolved to a 'struct bpf_map *', the pointer is written back into the
 * two imm fields of the 16-byte instruction, and the map is recorded in
 * env->used_maps[] (deduplicated, refcount taken once per distinct map).
 *
 * Returns 0 on success, -EINVAL on a malformed ld_imm64, -E2BIG when more
 * than MAX_USED_MAPS distinct maps are referenced, or the PTR_ERR of a
 * bad fd lookup.
 */
static int replace_map_fd_with_map_ptr(struct verifier_env *env)
{
	struct bpf_insn *insn = env->prog->insnsi;
	int insn_cnt = env->prog->len;
	int i, j;

	for (i = 0; i < insn_cnt; i++, insn++) {
		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
			struct bpf_map *map;
			struct fd f;

			/* BPF_LD_IMM64 spans two insn slots: the second slot
			 * must exist and must be zero except for its imm
			 * (which holds the upper 32 bits of the immediate)
			 */
			if (i == insn_cnt - 1 || insn[1].code != 0 ||
			    insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
			    insn[1].off != 0) {
				verbose("invalid bpf_ld_imm64 insn\n");
				return -EINVAL;
			}

			if (insn->src_reg == 0)
				/* valid generic load 64-bit imm */
				goto next_insn;

			/* only src_reg == BPF_PSEUDO_MAP_FD marks a map fd;
			 * any other non-zero src_reg encoding is unknown
			 */
			if (insn->src_reg != BPF_PSEUDO_MAP_FD) {
				verbose("unrecognized bpf_ld_imm64 insn\n");
				return -EINVAL;
			}

			/* for a pseudo insn the imm field carries the fd */
			f = fdget(insn->imm);

			map = bpf_map_get(f);
			if (IS_ERR(map)) {
				verbose("fd %d is not pointing to valid bpf_map\n",
					insn->imm);
				fdput(f);
				return PTR_ERR(map);
			}

			/* store map pointer inside BPF_LD_IMM64 instruction */
			insn[0].imm = (u32)(unsigned long)map;
			insn[1].imm = ((u64)(unsigned long)map) >> 32;

			/* check whether we recorded this map already */
			for (j = 0; j < env->used_map_cnt; j++)
				if (env->used_maps[j] == map) {
					/* already held a reference; drop only
					 * the fd reference from fdget()
					 */
					fdput(f);
					goto next_insn;
				}

			if (env->used_map_cnt >= MAX_USED_MAPS) {
				fdput(f);
				return -E2BIG;
			}

			/* remember this map */
			env->used_maps[env->used_map_cnt++] = map;

			/* hold the map. If the program is rejected by verifier,
			 * the map will be released by release_maps() or it
			 * will be used by the valid program until it's unloaded
			 * and all maps are released in free_bpf_prog_info()
			 */
			atomic_inc(&map->refcnt);

			fdput(f);
next_insn:
			/* skip the second half of the 16-byte ld_imm64; the
			 * loop header's i++/insn++ then advances past it
			 */
			insn++;
			i++;
		}
	}

	/* now all pseudo BPF_LD_IMM64 instructions load valid
	 * 'struct bpf_map *' into a register instead of user map_fd.
	 * These pointers will be used later by verifier to validate map access.
	 */
	return 0;
}
395+
396+ /* drop refcnt of maps used by the rejected program */
397+ static void release_maps (struct verifier_env * env )
398+ {
399+ int i ;
400+
401+ for (i = 0 ; i < env -> used_map_cnt ; i ++ )
402+ bpf_map_put (env -> used_maps [i ]);
403+ }
404+
405+ /* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
406+ static void convert_pseudo_ld_imm64 (struct verifier_env * env )
407+ {
408+ struct bpf_insn * insn = env -> prog -> insnsi ;
409+ int insn_cnt = env -> prog -> len ;
410+ int i ;
411+
412+ for (i = 0 ; i < insn_cnt ; i ++ , insn ++ )
413+ if (insn -> code == (BPF_LD | BPF_IMM | BPF_DW ))
414+ insn -> src_reg = 0 ;
415+ }
416+
303417int bpf_check (struct bpf_prog * prog , union bpf_attr * attr )
304418{
305419 char __user * log_ubuf = NULL ;
@@ -316,6 +430,8 @@ int bpf_check(struct bpf_prog *prog, union bpf_attr *attr)
316430 if (!env )
317431 return - ENOMEM ;
318432
433+ env -> prog = prog ;
434+
319435 /* grab the mutex to protect few globals used by verifier */
320436 mutex_lock (& bpf_verifier_lock );
321437
@@ -342,8 +458,14 @@ int bpf_check(struct bpf_prog *prog, union bpf_attr *attr)
342458 log_level = 0 ;
343459 }
344460
461+ ret = replace_map_fd_with_map_ptr (env );
462+ if (ret < 0 )
463+ goto skip_full_check ;
464+
345465 /* ret = do_check(env); */
346466
467+ skip_full_check :
468+
347469 if (log_level && log_len >= log_size - 1 ) {
348470 BUG_ON (log_len >= log_size );
349471 /* verifier log exceeded user supplied buffer */
@@ -357,11 +479,36 @@ int bpf_check(struct bpf_prog *prog, union bpf_attr *attr)
357479 goto free_log_buf ;
358480 }
359481
482+ if (ret == 0 && env -> used_map_cnt ) {
483+ /* if program passed verifier, update used_maps in bpf_prog_info */
484+ prog -> aux -> used_maps = kmalloc_array (env -> used_map_cnt ,
485+ sizeof (env -> used_maps [0 ]),
486+ GFP_KERNEL );
487+
488+ if (!prog -> aux -> used_maps ) {
489+ ret = - ENOMEM ;
490+ goto free_log_buf ;
491+ }
492+
493+ memcpy (prog -> aux -> used_maps , env -> used_maps ,
494+ sizeof (env -> used_maps [0 ]) * env -> used_map_cnt );
495+ prog -> aux -> used_map_cnt = env -> used_map_cnt ;
496+
497+ /* program is valid. Convert pseudo bpf_ld_imm64 into generic
498+ * bpf_ld_imm64 instructions
499+ */
500+ convert_pseudo_ld_imm64 (env );
501+ }
360502
361503free_log_buf :
362504 if (log_level )
363505 vfree (log_buf );
364506free_env :
507+ if (!prog -> aux -> used_maps )
508+ /* if we didn't copy map pointers into bpf_prog_info, release
509+ * them now. Otherwise free_bpf_prog_info() will release them.
510+ */
511+ release_maps (env );
365512 kfree (env );
366513 mutex_unlock (& bpf_verifier_lock );
367514 return ret ;
0 commit comments