@@ -175,20 +175,25 @@ static bool htab_is_percpu(const struct bpf_htab *htab)
 		htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
 }
 
+static inline void *htab_elem_value(struct htab_elem *l, u32 key_size)
+{
+	return l->key + round_up(key_size, 8);
+}
+
 static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size,
 				     void __percpu *pptr)
 {
-	*(void __percpu **)(l->key + roundup(key_size, 8)) = pptr;
+	*(void __percpu **)htab_elem_value(l, key_size) = pptr;
 }
 
 static inline void __percpu *htab_elem_get_ptr(struct htab_elem *l, u32 key_size)
 {
-	return *(void __percpu **)(l->key + roundup(key_size, 8));
+	return *(void __percpu **)htab_elem_value(l, key_size);
 }
 
 static void *fd_htab_map_get_ptr(const struct bpf_map *map, struct htab_elem *l)
 {
-	return *(void **)(l->key + roundup(map->key_size, 8));
+	return *(void **)htab_elem_value(l, map->key_size);
 }
 
 static struct htab_elem *get_htab_elem(struct bpf_htab *htab, int i)
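
For context: the new htab_elem_value() helper centralizes the element layout rule that the value (or, for per-cpu maps, the pointer to the per-cpu value area) is stored right after the key, with the key size rounded up to 8 bytes so the value starts 8-byte aligned. The user-space sketch below only illustrates that offset calculation; the simplified struct htab_elem and the ROUND_UP macro are illustrative stand-ins, not the kernel's actual definitions.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for the kernel's round_up(); the real struct htab_elem also
 * carries hash-list/LRU bookkeeping fields ahead of the key. */
#define ROUND_UP(x, a) (((x) + (a) - 1) / (a) * (a))

struct htab_elem {
	uint32_t hash;
	char key[];	/* key bytes; the value follows at an 8-byte-rounded offset */
};

/* Same rule as the kernel helper: value area = key + round_up(key_size, 8). */
static void *htab_elem_value(struct htab_elem *l, uint32_t key_size)
{
	return l->key + ROUND_UP(key_size, 8);
}

int main(void)
{
	uint32_t key_size = 13, value_size = 8;	/* a 13-byte key is padded to 16 */
	struct htab_elem *l = calloc(1, sizeof(*l) + ROUND_UP(key_size, 8) + value_size);

	memcpy(htab_elem_value(l, key_size), "value!!", 8);
	printf("value offset within key[]: %u -> \"%s\"\n",
	       (unsigned)ROUND_UP(key_size, 8), (char *)htab_elem_value(l, key_size));
	free(l);
	return 0;
}

Compiled with any C99 compiler, this prints a value offset of 16 for the 13-byte key.
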
@@ -215,10 +220,10 @@ static void htab_free_prealloced_timers_and_wq(struct bpf_htab *htab)
 		elem = get_htab_elem(htab, i);
 		if (btf_record_has_field(htab->map.record, BPF_TIMER))
 			bpf_obj_free_timer(htab->map.record,
-					   elem->key + round_up(htab->map.key_size, 8));
+					   htab_elem_value(elem, htab->map.key_size));
 		if (btf_record_has_field(htab->map.record, BPF_WORKQUEUE))
 			bpf_obj_free_workqueue(htab->map.record,
-					       elem->key + round_up(htab->map.key_size, 8));
+					       htab_elem_value(elem, htab->map.key_size));
 		cond_resched();
 	}
 }
@@ -245,7 +250,8 @@ static void htab_free_prealloced_fields(struct bpf_htab *htab)
 				cond_resched();
 			}
 		} else {
-			bpf_obj_free_fields(htab->map.record, elem->key + round_up(htab->map.key_size, 8));
+			bpf_obj_free_fields(htab->map.record,
+					    htab_elem_value(elem, htab->map.key_size));
 			cond_resched();
 		}
 		cond_resched();
@@ -670,7 +676,7 @@ static void *htab_map_lookup_elem(struct bpf_map *map, void *key)
 	struct htab_elem *l = __htab_map_lookup_elem(map, key);
 
 	if (l)
-		return l->key + round_up(map->key_size, 8);
+		return htab_elem_value(l, map->key_size);
 
 	return NULL;
 }
@@ -709,7 +715,7 @@ static __always_inline void *__htab_lru_map_lookup_elem(struct bpf_map *map,
 	if (l) {
 		if (mark)
 			bpf_lru_node_set_ref(&l->lru_node);
-		return l->key + round_up(map->key_size, 8);
+		return htab_elem_value(l, map->key_size);
 	}
 
 	return NULL;
@@ -763,7 +769,7 @@ static void check_and_free_fields(struct bpf_htab *htab,
 		for_each_possible_cpu(cpu)
 			bpf_obj_free_fields(htab->map.record, per_cpu_ptr(pptr, cpu));
 	} else {
-		void *map_value = elem->key + round_up(htab->map.key_size, 8);
+		void *map_value = htab_elem_value(elem, htab->map.key_size);
 
 		bpf_obj_free_fields(htab->map.record, map_value);
 	}
@@ -1039,11 +1045,9 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
 		htab_elem_set_ptr(l_new, key_size, pptr);
 	} else if (fd_htab_map_needs_adjust(htab)) {
 		size = round_up(size, 8);
-		memcpy(l_new->key + round_up(key_size, 8), value, size);
+		memcpy(htab_elem_value(l_new, key_size), value, size);
 	} else {
-		copy_map_value(&htab->map,
-			       l_new->key + round_up(key_size, 8),
-			       value);
+		copy_map_value(&htab->map, htab_elem_value(l_new, key_size), value);
 	}
 
 	l_new->hash = hash;
@@ -1106,7 +1110,7 @@ static long htab_map_update_elem(struct bpf_map *map, void *key, void *value,
 		if (l_old) {
 			/* grab the element lock and update value in place */
 			copy_map_value_locked(map,
-					      l_old->key + round_up(key_size, 8),
+					      htab_elem_value(l_old, key_size),
 					      value, false);
 			return 0;
 		}
@@ -1134,7 +1138,7 @@ static long htab_map_update_elem(struct bpf_map *map, void *key, void *value,
 		 * and update element in place
 		 */
 		copy_map_value_locked(map,
-				      l_old->key + round_up(key_size, 8),
+				      htab_elem_value(l_old, key_size),
 				      value, false);
 		ret = 0;
 		goto err;
@@ -1220,8 +1224,7 @@ static long htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value
 	l_new = prealloc_lru_pop(htab, key, hash);
 	if (!l_new)
 		return -ENOMEM;
-	copy_map_value(&htab->map,
-		       l_new->key + round_up(map->key_size, 8), value);
+	copy_map_value(&htab->map, htab_elem_value(l_new, map->key_size), value);
 
 	ret = htab_lock_bucket(b, &flags);
 	if (ret)
@@ -1500,10 +1503,10 @@ static void htab_free_malloced_timers_and_wq(struct bpf_htab *htab)
 			/* We only free timer on uref dropping to zero */
 			if (btf_record_has_field(htab->map.record, BPF_TIMER))
 				bpf_obj_free_timer(htab->map.record,
-						   l->key + round_up(htab->map.key_size, 8));
+						   htab_elem_value(l, htab->map.key_size));
 			if (btf_record_has_field(htab->map.record, BPF_WORKQUEUE))
 				bpf_obj_free_workqueue(htab->map.record,
-						       l->key + round_up(htab->map.key_size, 8));
+						       htab_elem_value(l, htab->map.key_size));
 		}
 		cond_resched_rcu();
 	}
@@ -1615,15 +1618,12 @@ static int __htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
 			off += roundup_value_size;
 		}
 	} else {
-		u32 roundup_key_size = round_up(map->key_size, 8);
+		void *src = htab_elem_value(l, map->key_size);
 
 		if (flags & BPF_F_LOCK)
-			copy_map_value_locked(map, value, l->key +
-					      roundup_key_size,
-					      true);
+			copy_map_value_locked(map, value, src, true);
 		else
-			copy_map_value(map, value, l->key +
-				       roundup_key_size);
+			copy_map_value(map, value, src);
 		/* Zeroing special fields in the temp buffer */
 		check_and_init_map_value(map, value);
 	}
@@ -1680,12 +1680,12 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map,
 				   bool is_percpu)
 {
 	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
-	u32 bucket_cnt, total, key_size, value_size, roundup_key_size;
 	void *keys = NULL, *values = NULL, *value, *dst_key, *dst_val;
 	void __user *uvalues = u64_to_user_ptr(attr->batch.values);
 	void __user *ukeys = u64_to_user_ptr(attr->batch.keys);
 	void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch);
 	u32 batch, max_count, size, bucket_size, map_id;
+	u32 bucket_cnt, total, key_size, value_size;
 	struct htab_elem *node_to_free = NULL;
 	u64 elem_map_flags, map_flags;
 	struct hlist_nulls_head *head;
@@ -1720,7 +1720,6 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map,
 		return -ENOENT;
 
 	key_size = htab->map.key_size;
-	roundup_key_size = round_up(htab->map.key_size, 8);
 	value_size = htab->map.value_size;
 	size = round_up(value_size, 8);
 	if (is_percpu)
@@ -1812,7 +1811,7 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map,
 				off += size;
 			}
 		} else {
-			value = l->key + roundup_key_size;
+			value = htab_elem_value(l, key_size);
 			if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
 				struct bpf_map **inner_map = value;
 
@@ -2063,11 +2062,11 @@ static void *bpf_hash_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 static int __bpf_hash_map_seq_show(struct seq_file *seq, struct htab_elem *elem)
 {
 	struct bpf_iter_seq_hash_map_info *info = seq->private;
-	u32 roundup_key_size, roundup_value_size;
 	struct bpf_iter__bpf_map_elem ctx = {};
 	struct bpf_map *map = info->map;
 	struct bpf_iter_meta meta;
 	int ret = 0, off = 0, cpu;
+	u32 roundup_value_size;
 	struct bpf_prog *prog;
 	void __percpu *pptr;
 
@@ -2077,10 +2076,9 @@ static int __bpf_hash_map_seq_show(struct seq_file *seq, struct htab_elem *elem)
 	ctx.meta = &meta;
 	ctx.map = info->map;
 	if (elem) {
-		roundup_key_size = round_up(map->key_size, 8);
 		ctx.key = elem->key;
 		if (!info->percpu_value_buf) {
-			ctx.value = elem->key + roundup_key_size;
+			ctx.value = htab_elem_value(elem, map->key_size);
 		} else {
 			roundup_value_size = round_up(map->value_size, 8);
 			pptr = htab_elem_get_ptr(elem, map->key_size);
@@ -2165,7 +2163,6 @@ static long bpf_for_each_hash_elem(struct bpf_map *map, bpf_callback_t callback_
 	struct hlist_nulls_head *head;
 	struct hlist_nulls_node *n;
 	struct htab_elem *elem;
-	u32 roundup_key_size;
 	int i, num_elems = 0;
 	void __percpu *pptr;
 	struct bucket *b;
@@ -2180,7 +2177,6 @@ static long bpf_for_each_hash_elem(struct bpf_map *map, bpf_callback_t callback_
 
 	is_percpu = htab_is_percpu(htab);
 
-	roundup_key_size = round_up(map->key_size, 8);
 	/* migration has been disabled, so percpu value prepared here will be
 	 * the same as the one seen by the bpf program with
 	 * bpf_map_lookup_elem().
@@ -2196,7 +2192,7 @@ static long bpf_for_each_hash_elem(struct bpf_map *map, bpf_callback_t callback_
 			pptr = htab_elem_get_ptr(elem, map->key_size);
 			val = this_cpu_ptr(pptr);
 		} else {
-			val = elem->key + roundup_key_size;
+			val = htab_elem_value(elem, map->key_size);
 		}
 		num_elems++;
 		ret = callback_fn((u64)(long)map, (u64)(long)key,