@@ -259,6 +259,7 @@ struct bpf_call_arg_meta {
 	u32 ret_btf_id;
 	u32 subprogno;
 	struct bpf_map_value_off_desc *kptr_off_desc;
+	u8 uninit_dynptr_regno;
 };
 
 struct btf *btf_vmlinux;
@@ -581,6 +582,7 @@ static char slot_type_char[] = {
 	[STACK_SPILL]	= 'r',
 	[STACK_MISC]	= 'm',
 	[STACK_ZERO]	= '0',
+	[STACK_DYNPTR]	= 'd',
 };
 
 static void print_liveness(struct bpf_verifier_env *env,
@@ -596,6 +598,25 @@ static void print_liveness(struct bpf_verifier_env *env,
 		verbose(env, "D");
 }
 
+static int get_spi(s32 off)
+{
+	return (-off - 1) / BPF_REG_SIZE;
+}
+
+static bool is_spi_bounds_valid(struct bpf_func_state *state, int spi, int nr_slots)
+{
+	int allocated_slots = state->allocated_stack / BPF_REG_SIZE;
+
+	/* We need to check that slots between [spi - nr_slots + 1, spi] are
+	 * within [0, allocated_stack).
+	 *
+	 * Please note that the spi grows downwards. For example, a dynptr
+	 * takes the size of two stack slots; the first slot will be at
+	 * spi and the second slot will be at spi - 1.
+	 */
+	return spi - nr_slots + 1 >= 0 && spi < allocated_slots;
+}
+
 static struct bpf_func_state *func(struct bpf_verifier_env *env,
 				   const struct bpf_reg_state *reg)
 {
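For context, a minimal userspace sketch of the index math above (an illustration only, assuming BPF_REG_SIZE is 8 as in the kernel): a register pointing at fp-16 lands on spi 1, so a two-slot dynptr occupies spi 1 (fp-16..fp-9) and spi 0 (fp-8..fp-1).

#include <assert.h>

#define BPF_REG_SIZE		8	/* one stack slot is 8 bytes */
#define BPF_DYNPTR_NR_SLOTS	2	/* a dynptr spans two slots */

static int get_spi(int off)
{
	return (-off - 1) / BPF_REG_SIZE;
}

int main(void)
{
	/* fp-16 -> spi (16 - 1) / 8 = 1; slots 1 and 0 hold the dynptr */
	assert(get_spi(-16) == 1);
	/* fp-8 -> spi 0; spi - nr_slots + 1 = -1, so is_spi_bounds_valid()
	 * would reject a dynptr here: there is no room for slot spi - 1
	 */
	assert(get_spi(-8) == 0);
	return 0;
}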
@@ -647,6 +668,108 @@ static void mark_verifier_state_scratched(struct bpf_verifier_env *env)
 	env->scratched_stack_slots = ~0ULL;
 }
 
+static enum bpf_dynptr_type arg_to_dynptr_type(enum bpf_arg_type arg_type)
+{
+	switch (arg_type & DYNPTR_TYPE_FLAG_MASK) {
+	case DYNPTR_TYPE_LOCAL:
+		return BPF_DYNPTR_TYPE_LOCAL;
+	default:
+		return BPF_DYNPTR_TYPE_INVALID;
+	}
+}
+
+static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
+				   enum bpf_arg_type arg_type, int insn_idx)
+{
+	struct bpf_func_state *state = func(env, reg);
+	enum bpf_dynptr_type type;
+	int spi, i;
+
+	spi = get_spi(reg->off);
+
+	if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS))
+		return -EINVAL;
+
+	for (i = 0; i < BPF_REG_SIZE; i++) {
+		state->stack[spi].slot_type[i] = STACK_DYNPTR;
+		state->stack[spi - 1].slot_type[i] = STACK_DYNPTR;
+	}
+
+	type = arg_to_dynptr_type(arg_type);
+	if (type == BPF_DYNPTR_TYPE_INVALID)
+		return -EINVAL;
+
+	state->stack[spi].spilled_ptr.dynptr.first_slot = true;
+	state->stack[spi].spilled_ptr.dynptr.type = type;
+	state->stack[spi - 1].spilled_ptr.dynptr.type = type;
+
+	return 0;
+}
+
+static int unmark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
+{
+	struct bpf_func_state *state = func(env, reg);
+	int spi, i;
+
+	spi = get_spi(reg->off);
+
+	if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS))
+		return -EINVAL;
+
+	for (i = 0; i < BPF_REG_SIZE; i++) {
+		state->stack[spi].slot_type[i] = STACK_INVALID;
+		state->stack[spi - 1].slot_type[i] = STACK_INVALID;
+	}
+
+	state->stack[spi].spilled_ptr.dynptr.first_slot = false;
+	state->stack[spi].spilled_ptr.dynptr.type = 0;
+	state->stack[spi - 1].spilled_ptr.dynptr.type = 0;
+
+	return 0;
+}
+
+static bool is_dynptr_reg_valid_uninit(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
+{
+	struct bpf_func_state *state = func(env, reg);
+	int spi = get_spi(reg->off);
+	int i;
+
+	if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS))
+		return true;
+
+	for (i = 0; i < BPF_REG_SIZE; i++) {
+		if (state->stack[spi].slot_type[i] == STACK_DYNPTR ||
+		    state->stack[spi - 1].slot_type[i] == STACK_DYNPTR)
+			return false;
+	}
+
+	return true;
+}
+
+static bool is_dynptr_reg_valid_init(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
+				     enum bpf_arg_type arg_type)
+{
+	struct bpf_func_state *state = func(env, reg);
+	int spi = get_spi(reg->off);
+	int i;
+
+	if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS) ||
+	    !state->stack[spi].spilled_ptr.dynptr.first_slot)
+		return false;
+
+	for (i = 0; i < BPF_REG_SIZE; i++) {
+		if (state->stack[spi].slot_type[i] != STACK_DYNPTR ||
+		    state->stack[spi - 1].slot_type[i] != STACK_DYNPTR)
+			return false;
+	}
+
+	/* ARG_PTR_TO_DYNPTR takes any type of dynptr */
+	if (arg_type == ARG_PTR_TO_DYNPTR)
+		return true;
+
+	return state->stack[spi].spilled_ptr.dynptr.type == arg_to_dynptr_type(arg_type);
+}
+
 /* The reg state of a pointer or a bounded scalar was saved when
  * it was spilled to the stack.
  */
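Taken together, the four helpers above implement a small state machine over the pair of stack slots backing a dynptr. A hypothetical userspace model of the invariants the verifier enforces (names and layout here are illustrative, not kernel code):

#include <assert.h>
#include <stdbool.h>

enum slot { STACK_INVALID, STACK_DYNPTR };

struct two_slots {
	enum slot type[2];	/* [1] = first slot (spi), [0] = second (spi - 1) */
	bool first_slot;	/* set only on the slot reg->off points at */
};

/* the uninit check passes only while neither slot is marked */
static bool valid_uninit(struct two_slots *s)
{
	return s->type[0] != STACK_DYNPTR && s->type[1] != STACK_DYNPTR;
}

/* the init check needs both slots marked AND the reg pointing at the
 * first slot; first_slot is what rejects a pointer into the middle of
 * a dynptr
 */
static bool valid_init(struct two_slots *s)
{
	return s->first_slot &&
	       s->type[0] == STACK_DYNPTR && s->type[1] == STACK_DYNPTR;
}

int main(void)
{
	struct two_slots s = {0};

	assert(valid_uninit(&s) && !valid_init(&s));	/* fresh stack */
	s.type[0] = s.type[1] = STACK_DYNPTR;		/* mark_stack_slots_dynptr() */
	s.first_slot = true;
	assert(!valid_uninit(&s) && valid_init(&s));	/* usable dynptr */
	s = (struct two_slots){0};			/* unmark_stack_slots_dynptr() */
	assert(valid_uninit(&s));			/* slots reusable again */
	return 0;
}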
@@ -5400,6 +5523,11 @@ static bool arg_type_is_release(enum bpf_arg_type type)
 	return type & OBJ_RELEASE;
 }
 
+static bool arg_type_is_dynptr(enum bpf_arg_type type)
+{
+	return base_type(type) == ARG_PTR_TO_DYNPTR;
+}
+
 static int int_ptr_type_to_size(enum bpf_arg_type type)
 {
 	if (type == ARG_PTR_TO_INT)
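Because arg_type_is_dynptr() compares only base_type(), modifier flags on the argument survive the match. A sketch of the declaration convention (hypothetical proto; the real dynptr helpers land later in this series):

static const struct bpf_func_proto bpf_example_dynptr_proto = {
	.func		= bpf_example_dynptr,	/* hypothetical helper */
	.ret_type	= RET_INTEGER,
	/* base_type() strips DYNPTR_TYPE_LOCAL and MEM_UNINIT, so
	 * arg_type_is_dynptr() still matches this argument
	 */
	.arg1_type	= ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_LOCAL | MEM_UNINIT,
};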
@@ -5539,6 +5667,7 @@ static const struct bpf_reg_types *compatible_reg_types[__BPF_ARG_TYPE_MAX] = {
 	[ARG_PTR_TO_CONST_STR]	= &const_str_ptr_types,
 	[ARG_PTR_TO_TIMER]	= &timer_types,
 	[ARG_PTR_TO_KPTR]	= &kptr_types,
+	[ARG_PTR_TO_DYNPTR]	= &stack_ptr_types,
 };
 
 static int check_reg_type(struct bpf_verifier_env *env, u32 regno,
@@ -5628,8 +5757,13 @@ int check_func_arg_reg_off(struct bpf_verifier_env *env,
 	bool fixed_off_ok = false;
 
 	switch ((u32)type) {
-	case SCALAR_VALUE:
 	/* Pointer types where reg offset is explicitly allowed: */
+	case PTR_TO_STACK:
+		if (arg_type_is_dynptr(arg_type) && reg->off % BPF_REG_SIZE) {
+			verbose(env, "cannot pass in dynptr at an offset\n");
+			return -EINVAL;
+		}
+		fallthrough;
 	case PTR_TO_PACKET:
 	case PTR_TO_PACKET_META:
 	case PTR_TO_MAP_KEY:
@@ -5639,7 +5773,7 @@ int check_func_arg_reg_off(struct bpf_verifier_env *env,
 	case PTR_TO_MEM | MEM_ALLOC:
 	case PTR_TO_BUF:
 	case PTR_TO_BUF | MEM_RDONLY:
-	case PTR_TO_STACK:
+	case SCALAR_VALUE:
 		/* Some of the argument types nevertheless require a
 		 * zero register offset.
 		 */
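The new PTR_TO_STACK arm only rejects misaligned dynptrs: reg->off % BPF_REG_SIZE is nonzero for any offset that does not start on a slot boundary, negative offsets included. A quick illustration (values are examples, not kernel code):

#include <stdio.h>

#define BPF_REG_SIZE 8

int main(void)
{
	/* -16 % 8 == 0: fp-16 starts on a slot boundary, accepted */
	printf("fp-16 -> %d\n", -16 % BPF_REG_SIZE);
	/* -12 % 8 == -4: nonzero, so check_func_arg_reg_off() would log
	 * "cannot pass in dynptr at an offset" and return -EINVAL
	 */
	printf("fp-12 -> %d\n", -12 % BPF_REG_SIZE);
	return 0;
}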
@@ -5837,6 +5971,36 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
 		bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO);
 
 		err = check_mem_size_reg(env, reg, regno, zero_size_allowed, meta);
+	} else if (arg_type_is_dynptr(arg_type)) {
+		if (arg_type & MEM_UNINIT) {
+			if (!is_dynptr_reg_valid_uninit(env, reg)) {
+				verbose(env, "Dynptr has to be an uninitialized dynptr\n");
+				return -EINVAL;
+			}
+
+			/* We only support one dynptr being uninitialized at the moment,
+			 * which is sufficient for the helper functions we have right now.
+			 */
+			if (meta->uninit_dynptr_regno) {
+				verbose(env, "verifier internal error: multiple uninitialized dynptr args\n");
+				return -EFAULT;
+			}
+
+			meta->uninit_dynptr_regno = regno;
+		} else if (!is_dynptr_reg_valid_init(env, reg, arg_type)) {
+			const char *err_extra = "";
+
+			switch (arg_type & DYNPTR_TYPE_FLAG_MASK) {
+			case DYNPTR_TYPE_LOCAL:
+				err_extra = "local ";
+				break;
+			default:
+				break;
+			}
+			verbose(env, "Expected an initialized %sdynptr as arg #%d\n",
+				err_extra, arg + 1);
+			return -EINVAL;
+		}
 	} else if (arg_type_is_alloc_size(arg_type)) {
 		if (!tnum_is_const(reg->var_off)) {
 			verbose(env, "R%d is not a known constant'\n",
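From the program side, these branches decide whether a dynptr argument verifies. A sketch of a BPF C fragment, assuming the bpf_dynptr_from_mem() and bpf_dynptr_write() helpers added later in this patch series (their exact signatures are an assumption here):

char buf[64];
struct bpf_dynptr ptr;

/* &ptr is the MEM_UNINIT dynptr arg: the stack slots under it must not
 * already hold a dynptr, or the verifier reports
 * "Dynptr has to be an uninitialized dynptr"
 */
bpf_dynptr_from_mem(buf, sizeof(buf), 0, &ptr);

/* after the call the slots are STACK_DYNPTR, so initialized-dynptr args
 * accept &ptr; reordering these two calls would instead produce
 * "Expected an initialized local dynptr as arg #1"
 */
bpf_dynptr_write(&ptr, 0, buf, 16);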
@@ -6970,9 +7134,27 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
 
 	regs = cur_regs(env);
 
+	if (meta.uninit_dynptr_regno) {
+		/* we write BPF_DW bits (8 bytes) at a time */
+		for (i = 0; i < BPF_DYNPTR_SIZE; i += 8) {
+			err = check_mem_access(env, insn_idx, meta.uninit_dynptr_regno,
+					       i, BPF_DW, BPF_WRITE, -1, false);
+			if (err)
+				return err;
+		}
+
+		err = mark_stack_slots_dynptr(env, &regs[meta.uninit_dynptr_regno],
+					      fn->arg_type[meta.uninit_dynptr_regno - BPF_REG_1],
+					      insn_idx);
+		if (err)
+			return err;
+	}
+
 	if (meta.release_regno) {
 		err = -EINVAL;
-		if (meta.ref_obj_id)
+		if (arg_type_is_dynptr(fn->arg_type[meta.release_regno - BPF_REG_1]))
+			err = unmark_stack_slots_dynptr(env, &regs[meta.release_regno]);
+		else if (meta.ref_obj_id)
 			err = release_reference(env, meta.ref_obj_id);
 		/* meta.ref_obj_id can only be 0 if register that is meant to be
 		 * released is NULL, which must be > R0.
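BPF_DYNPTR_SIZE is sizeof(struct bpf_dynptr_kern), 16 bytes in this series, so the loop above issues two BPF_DW probe writes; that both checks the stack region is writable and marks it written before mark_stack_slots_dynptr() converts the slots to STACK_DYNPTR. For reference, the definitions that drive the loop (declared elsewhere in this patch, reproduced here for context):

struct bpf_dynptr_kern {
	void *data;
	u32 size;	/* upper bits additionally encode the dynptr type */
	u32 offset;
} __aligned(8);

#define BPF_DYNPTR_SIZE		sizeof(struct bpf_dynptr_kern)		/* 16 bytes */
#define BPF_DYNPTR_NR_SLOTS	(BPF_DYNPTR_SIZE / BPF_REG_SIZE)	/* 2 slots */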