@@ -206,9 +206,13 @@ static struct htab_elem *get_htab_elem(struct bpf_htab *htab, int i)
 	return (struct htab_elem *)(htab->elems + i * (u64)htab->elem_size);
 }

+/* Both percpu and fd htab support in-place update, so no need for
+ * extra elem. LRU itself can remove the least used element, so
+ * there is no need for an extra elem during map_update.
+ */
 static bool htab_has_extra_elems(struct bpf_htab *htab)
 {
-	return !htab_is_percpu(htab) && !htab_is_lru(htab);
+	return !htab_is_percpu(htab) && !htab_is_lru(htab) && !is_fd_htab(htab);
 }

 static void htab_free_prealloced_timers_and_wq(struct bpf_htab *htab)
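
The new predicate simply adds the fd-htab case to the existing percpu/LRU exclusions. Below is a minimal userspace sketch of the same decision; the enum values and helper name are illustrative stand-ins rather than the kernel's internal API, under the assumption (per the comment above) that only a plain preallocated hash keeps per-CPU spare elements for updates of existing keys.

```c
/* Illustrative stand-ins only; not the kernel's internal types. */
#include <stdbool.h>
#include <stdio.h>

enum demo_map_type {
	DEMO_HASH,		/* plain preallocated hash */
	DEMO_PERCPU_HASH,	/* value itself is per-CPU */
	DEMO_LRU_HASH,		/* LRU evicts to find a free element */
	DEMO_LRU_PERCPU_HASH,
	DEMO_HASH_OF_MAPS,	/* fd htab: value is an inner map pointer */
};

/* Mirrors htab_has_extra_elems() after this change: percpu, LRU and fd
 * htabs all resolve an update of an existing key without a spare
 * element (in-place value/pointer update, or the LRU free list), so
 * only the plain hash keeps per-CPU extra elements.
 */
static bool demo_has_extra_elems(enum demo_map_type t)
{
	bool is_percpu = (t == DEMO_PERCPU_HASH || t == DEMO_LRU_PERCPU_HASH);
	bool is_lru = (t == DEMO_LRU_HASH || t == DEMO_LRU_PERCPU_HASH);
	bool is_fd = (t == DEMO_HASH_OF_MAPS);

	return !is_percpu && !is_lru && !is_fd;
}

int main(void)
{
	printf("plain hash needs extra elems: %d\n",
	       demo_has_extra_elems(DEMO_HASH));		/* 1 */
	printf("hash of maps needs extra elems: %d\n",
	       demo_has_extra_elems(DEMO_HASH_OF_MAPS));	/* 0 */
	return 0;
}
```
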
@@ -464,8 +468,6 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 {
 	bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
 		       attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
-	bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH ||
-		    attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
 	/* percpu_lru means each cpu has its own LRU list.
 	 * it is different from BPF_MAP_TYPE_PERCPU_HASH where
 	 * the map's value itself is percpu.  percpu_lru has
@@ -560,10 +562,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 	if (err)
 		goto free_map_locked;

-	if (!percpu && !lru) {
-		/* lru itself can remove the least used element, so
-		 * there is no need for an extra elem during map_update.
-		 */
+	if (htab_has_extra_elems(htab)) {
 		err = alloc_extra_elems(htab);
 		if (err)
 			goto free_prealloc;
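
With the helper doing the filtering, htab_map_alloc() only allocates the per-CPU spare elements when they will actually be used. The sketch below is a simplified, single-threaded model of why a plain preallocated hash wants that spare at all: on update of an existing key, the new value is prepared in the spare element, the spare is linked in place of the old element, and the old element becomes the next spare. All names here are hypothetical; the real code in kernel/bpf/hashtab.c handles hashing, locking and per-CPU state that this model omits.

```c
/* Simplified, single-bucket, single-"CPU" model; names are hypothetical. */
#include <stdio.h>

struct demo_elem {
	int key;
	long value;
};

static struct demo_elem slot_a, slot_b;	/* two preallocated elements */
static struct demo_elem *bucket;	/* element currently linked for the key */
static struct demo_elem *extra;		/* the spare ("extra") element */

/* Update an existing key: build the new element in the spare, publish it,
 * and recycle the old element as the next spare, so readers always see a
 * fully written element (in the kernel the link swap is RCU-protected).
 */
static void demo_update(int key, long value)
{
	struct demo_elem *l_new = extra;
	struct demo_elem *l_old = bucket;

	l_new->key = key;
	l_new->value = value;
	bucket = l_new;
	extra = l_old;
}

int main(void)
{
	bucket = &slot_a;
	extra = &slot_b;
	bucket->key = 1;
	bucket->value = 100;

	demo_update(1, 200);
	printf("key 1 -> %ld (served from %s)\n", bucket->value,
	       bucket == &slot_b ? "recycled spare" : "original slot");
	return 0;
}
```
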