@@ -70,10 +70,8 @@ int array_map_alloc_check(union bpf_attr *attr)
 	    attr->map_flags & BPF_F_PRESERVE_ELEMS)
 		return -EINVAL;
 
-	if (attr->value_size > KMALLOC_MAX_SIZE)
-		/* if value_size is bigger, the user space won't be able to
-		 * access the elements.
-		 */
+	/* avoid overflow on round_up(map->value_size) */
+	if (attr->value_size > INT_MAX)
 		return -E2BIG;
 
 	return 0;
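Why INT_MAX rather than a larger cap: round_up() works in the width of its u32 argument, so a value_size near UINT_MAX wraps when rounded to the next multiple of 8. Capping at INT_MAX leaves headroom for the rounding and for the 64-bit offset math below. A minimal standalone sketch of the wrap, using illustrative values rather than anything from the kernel:

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as the kernel's round_up(x, 8) for a u32 x */
static uint32_t round_up_u32(uint32_t x)
{
	return (x + 7) & ~(uint32_t)7;	/* x + 7 can wrap in 32 bits */
}

int main(void)
{
	/* a value_size above INT_MAX: the rounding wraps to 0 */
	printf("%u\n", round_up_u32(4294967289u));	/* prints 0 */
	/* at the new INT_MAX bound the result still fits in u32 */
	printf("%u\n", round_up_u32(2147483647u));	/* prints 2147483648 */
	return 0;
}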
@@ -156,6 +154,11 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 	return &array->map;
 }
 
+static void *array_map_elem_ptr(struct bpf_array *array, u32 index)
+{
+	return array->value + (u64)array->elem_size * index;
+}
+
 /* Called from syscall or from eBPF program */
 static void *array_map_lookup_elem(struct bpf_map *map, void *key)
 {
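The new helper centralizes the element-address computation and forces the scaling multiply into 64 bits. It reads array->elem_size, which this patch presumably caches on struct bpf_array in a hunk not shown here (include/linux/bpf.h). A self-contained userspace model of that scheme, with toy_* names standing in for the kernel types:

#include <stdint.h>

/* Userspace model of the cached-size scheme; the real field lives on
 * struct bpf_array and would be set once in array_map_alloc() as
 * round_up(attr->value_size, 8). toy_* names are illustrative.
 */
struct toy_array {
	uint32_t elem_size;	/* cached round_up(value_size, 8) */
	char value[];		/* element storage, elem_size apart */
};

static void *toy_elem_ptr(struct toy_array *array, uint32_t index)
{
	/* 64-bit multiply, mirroring array_map_elem_ptr() above */
	return array->value + (uint64_t)array->elem_size * index;
}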
@@ -165,7 +168,7 @@ static void *array_map_lookup_elem(struct bpf_map *map, void *key)
 	if (unlikely(index >= array->map.max_entries))
 		return NULL;
 
-	return array->value + array->elem_size * (index & array->index_mask);
+	return array->value + (u64)array->elem_size * (index & array->index_mask);
 }
 
 static int array_map_direct_value_addr(const struct bpf_map *map, u64 *imm,
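The (u64) cast is the substance of this hunk: elem_size and the masked index are both u32, and a u32-by-u32 multiply wraps in 32 bits before it ever reaches the pointer addition. With value_size now allowed up to INT_MAX, that wrap is reachable. A standalone demonstration with illustrative sizes:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t elem_size = 1u << 20;	/* a 1 MiB (rounded) value_size */
	uint32_t index = 1u << 13;

	uint64_t wrapped = elem_size * index;		/* u32 multiply: 2^33 wraps to 0 */
	uint64_t correct = (uint64_t)elem_size * index;	/* as in the patched lookup */

	printf("wrapped=%llu correct=%llu\n",
	       (unsigned long long)wrapped, (unsigned long long)correct);
	return 0;
}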
@@ -203,7 +206,7 @@ static int array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
 {
 	struct bpf_array *array = container_of(map, struct bpf_array, map);
 	struct bpf_insn *insn = insn_buf;
-	u32 elem_size = round_up(map->value_size, 8);
+	u32 elem_size = array->elem_size;
 	const int ret = BPF_REG_0;
 	const int map_ptr = BPF_REG_1;
 	const int index = BPF_REG_2;
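Using the cached array->elem_size keeps the inline lookup consistent with array_map_elem_ptr() instead of re-deriving the rounding. For context, recalled from the surrounding file rather than from this hunk (treat as an assumption), the value feeds the scaling instruction emitted a few lines further down:

/* Recalled context, not part of this hunk: elem_size scales the
 * bounds-checked index into a byte offset, with a shift when the
 * element size is a power of two:
 *
 *	if (is_power_of_2(elem_size))
 *		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
 *	else
 *		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
 */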
@@ -272,7 +275,7 @@ int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
 	 * access 'value_size' of them, so copying rounded areas
 	 * will not leak any kernel data
 	 */
-	size = round_up(map->value_size, 8);
+	size = array->elem_size;
 	rcu_read_lock();
 	pptr = array->pptrs[index & array->index_mask];
 	for_each_possible_cpu(cpu) {
@@ -339,7 +342,7 @@ static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
 		       value, map->value_size);
 	} else {
 		val = array->value +
-			array->elem_size * (index & array->index_mask);
+			(u64)array->elem_size * (index & array->index_mask);
 		if (map_flags & BPF_F_LOCK)
 			copy_map_value_locked(map, val, value, false);
 		else
@@ -376,7 +379,7 @@ int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
 	 * returned or zeros which were zero-filled by percpu_alloc,
 	 * so no kernel data leaks possible
 	 */
-	size = round_up(map->value_size, 8);
+	size = array->elem_size;
 	rcu_read_lock();
 	pptr = array->pptrs[index & array->index_mask];
 	for_each_possible_cpu(cpu) {
@@ -408,8 +411,7 @@ static void array_map_free_timers(struct bpf_map *map)
 		return;
 
 	for (i = 0; i < array->map.max_entries; i++)
-		bpf_timer_cancel_and_free(array->value + array->elem_size * i +
-					  map->timer_off);
+		bpf_timer_cancel_and_free(array_map_elem_ptr(array, i) + map->timer_off);
 }
 
 /* Called when map->refcnt goes to zero, either from workqueue or from syscall */
@@ -420,7 +422,7 @@ static void array_map_free(struct bpf_map *map)
 
 	if (map_value_has_kptrs(map)) {
 		for (i = 0; i < array->map.max_entries; i++)
-			bpf_map_free_kptrs(map, array->value + array->elem_size * i);
+			bpf_map_free_kptrs(map, array_map_elem_ptr(array, i));
 		bpf_map_free_kptr_off_tab(map);
 	}
 
@@ -556,7 +558,7 @@ static void *bpf_array_map_seq_start(struct seq_file *seq, loff_t *pos)
 	index = info->index & array->index_mask;
 	if (info->percpu_value_buf)
 		return array->pptrs[index];
-	return array->value + array->elem_size * index;
+	return array_map_elem_ptr(array, index);
 }
 
 static void *bpf_array_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
@@ -575,14 +577,15 @@ static void *bpf_array_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 	index = info->index & array->index_mask;
 	if (info->percpu_value_buf)
 		return array->pptrs[index];
-	return array->value + array->elem_size * index;
+	return array_map_elem_ptr(array, index);
 }
 
 static int __bpf_array_map_seq_show(struct seq_file *seq, void *v)
 {
 	struct bpf_iter_seq_array_map_info *info = seq->private;
 	struct bpf_iter__bpf_map_elem ctx = {};
 	struct bpf_map *map = info->map;
+	struct bpf_array *array = container_of(map, struct bpf_array, map);
 	struct bpf_iter_meta meta;
 	struct bpf_prog *prog;
 	int off = 0, cpu = 0;
@@ -603,7 +606,7 @@ static int __bpf_array_map_seq_show(struct seq_file *seq, void *v)
 		ctx.value = v;
 	} else {
 		pptr = v;
-		size = round_up(map->value_size, 8);
+		size = array->elem_size;
 		for_each_possible_cpu(cpu) {
 			bpf_long_memcpy(info->percpu_value_buf + off,
 					per_cpu_ptr(pptr, cpu),
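This is the flattening that the "copying rounded areas will not leak any kernel data" comment earlier refers to: each possible CPU's value occupies one elem_size slot in the preallocated buffer. The same layout is what userspace must provide when reading a per-CPU map through the syscall; a sketch via libbpf's bpf_map_lookup_elem(), where read_percpu_value() and its shape are mine, not libbpf's:

#include <bpf/bpf.h>
#include <stdlib.h>

/* Hypothetical helper: read one key of a BPF_MAP_TYPE_PERCPU_ARRAY.
 * The kernel fills one round_up(value_size, 8) slot per possible CPU,
 * the same elem_size stride used in the loop above.
 */
int read_percpu_value(int map_fd, __u32 key, __u32 value_size, int nr_cpus)
{
	size_t slot = (value_size + 7) & ~(size_t)7;	/* kernel's rounding */
	void *buf = calloc(nr_cpus, slot);
	int err;

	if (!buf)
		return -1;
	err = bpf_map_lookup_elem(map_fd, &key, buf);
	/* on success, CPU c's value is at buf + c * slot */
	free(buf);
	return err;
}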
@@ -633,11 +636,12 @@ static int bpf_iter_init_array_map(void *priv_data,
 {
 	struct bpf_iter_seq_array_map_info *seq_info = priv_data;
 	struct bpf_map *map = aux->map;
+	struct bpf_array *array = container_of(map, struct bpf_array, map);
 	void *value_buf;
 	u32 buf_size;
 
 	if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
-		buf_size = round_up(map->value_size, 8) * num_possible_cpus();
+		buf_size = array->elem_size * num_possible_cpus();
 		value_buf = kmalloc(buf_size, GFP_USER | __GFP_NOWARN);
 		if (!value_buf)
 			return -ENOMEM;
@@ -690,7 +694,7 @@ static int bpf_for_each_array_elem(struct bpf_map *map, bpf_callback_t callback_
 		if (is_percpu)
 			val = this_cpu_ptr(array->pptrs[i]);
 		else
-			val = array->value + array->elem_size * i;
+			val = array_map_elem_ptr(array, i);
 		num_elems++;
 		key = i;
 		ret = callback_fn((u64)(long)map, (u64)(long)&key,
@@ -1322,7 +1326,7 @@ static int array_of_map_gen_lookup(struct bpf_map *map,
 				   struct bpf_insn *insn_buf)
 {
 	struct bpf_array *array = container_of(map, struct bpf_array, map);
-	u32 elem_size = round_up(map->value_size, 8);
+	u32 elem_size = array->elem_size;
 	struct bpf_insn *insn = insn_buf;
 	const int ret = BPF_REG_0;
 	const int map_ptr = BPF_REG_1;