Skip to content

Commit c85d691

Browse files
rgushchin authored
Alexei Starovoitov committed
bpf: move memory size checks to bpf_map_charge_init()
Most bpf map types doing similar checks and bytes to pages conversion during memory allocation and charging. Let's unify these checks by moving them into bpf_map_charge_init().

Signed-off-by: Roman Gushchin <[email protected]>
Acked-by: Song Liu <[email protected]>
Signed-off-by: Alexei Starovoitov <[email protected]>
1 parent b936ca6 commit c85d691

File tree

14 files changed

+20
-67
lines changed

14 files changed

+20
-67
lines changed

include/linux/bpf.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -652,7 +652,7 @@ void bpf_map_put_with_uref(struct bpf_map *map);
652652
void bpf_map_put(struct bpf_map *map);
653653
int bpf_map_charge_memlock(struct bpf_map *map, u32 pages);
654654
void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages);
655-
int bpf_map_charge_init(struct bpf_map_memory *mem, u32 pages);
655+
int bpf_map_charge_init(struct bpf_map_memory *mem, size_t size);
656656
void bpf_map_charge_finish(struct bpf_map_memory *mem);
657657
void bpf_map_charge_move(struct bpf_map_memory *dst,
658658
struct bpf_map_memory *src);

kernel/bpf/arraymap.c

Lines changed: 1 addition & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -117,14 +117,8 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
117117

118118
/* make sure there is no u32 overflow later in round_up() */
119119
cost = array_size;
120-
if (cost >= U32_MAX - PAGE_SIZE)
121-
return ERR_PTR(-ENOMEM);
122-
if (percpu) {
120+
if (percpu)
123121
cost += (u64)attr->max_entries * elem_size * num_possible_cpus();
124-
if (cost >= U32_MAX - PAGE_SIZE)
125-
return ERR_PTR(-ENOMEM);
126-
}
127-
cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
128122

129123
ret = bpf_map_charge_init(&mem, cost);
130124
if (ret < 0)

kernel/bpf/cpumap.c

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -106,12 +106,9 @@ static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
106106
/* make sure page count doesn't overflow */
107107
cost = (u64) cmap->map.max_entries * sizeof(struct bpf_cpu_map_entry *);
108108
cost += cpu_map_bitmap_size(attr) * num_possible_cpus();
109-
if (cost >= U32_MAX - PAGE_SIZE)
110-
goto free_cmap;
111109

112110
/* Notice returns -EPERM on if map size is larger than memlock limit */
113-
ret = bpf_map_charge_init(&cmap->map.memory,
114-
round_up(cost, PAGE_SIZE) >> PAGE_SHIFT);
111+
ret = bpf_map_charge_init(&cmap->map.memory, cost);
115112
if (ret) {
116113
err = ret;
117114
goto free_cmap;

kernel/bpf/devmap.c

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -108,12 +108,9 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
108108
/* make sure page count doesn't overflow */
109109
cost = (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *);
110110
cost += dev_map_bitmap_size(attr) * num_possible_cpus();
111-
if (cost >= U32_MAX - PAGE_SIZE)
112-
goto free_dtab;
113111

114112
/* if map size is larger than memlock limit, reject it */
115-
err = bpf_map_charge_init(&dtab->map.memory,
116-
round_up(cost, PAGE_SIZE) >> PAGE_SHIFT);
113+
err = bpf_map_charge_init(&dtab->map.memory, cost);
117114
if (err)
118115
goto free_dtab;
119116

kernel/bpf/hashtab.c

Lines changed: 1 addition & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -360,13 +360,8 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
360360
else
361361
cost += (u64) htab->elem_size * num_possible_cpus();
362362

363-
if (cost >= U32_MAX - PAGE_SIZE)
364-
/* make sure page count doesn't overflow */
365-
goto free_htab;
366-
367363
/* if map size is larger than memlock limit, reject it */
368-
err = bpf_map_charge_init(&htab->map.memory,
369-
round_up(cost, PAGE_SIZE) >> PAGE_SHIFT);
364+
err = bpf_map_charge_init(&htab->map.memory, cost);
370365
if (err)
371366
goto free_htab;
372367

kernel/bpf/local_storage.c

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -273,7 +273,6 @@ static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr)
273273
int numa_node = bpf_map_attr_numa_node(attr);
274274
struct bpf_cgroup_storage_map *map;
275275
struct bpf_map_memory mem;
276-
u32 pages;
277276
int ret;
278277

279278
if (attr->key_size != sizeof(struct bpf_cgroup_storage_key))
@@ -293,9 +292,7 @@ static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr)
293292
/* max_entries is not used and enforced to be 0 */
294293
return ERR_PTR(-EINVAL);
295294

296-
pages = round_up(sizeof(struct bpf_cgroup_storage_map), PAGE_SIZE) >>
297-
PAGE_SHIFT;
298-
ret = bpf_map_charge_init(&mem, pages);
295+
ret = bpf_map_charge_init(&mem, sizeof(struct bpf_cgroup_storage_map));
299296
if (ret < 0)
300297
return ERR_PTR(ret);
301298

kernel/bpf/lpm_trie.c

Lines changed: 1 addition & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -573,13 +573,8 @@ static struct bpf_map *trie_alloc(union bpf_attr *attr)
573573
cost_per_node = sizeof(struct lpm_trie_node) +
574574
attr->value_size + trie->data_size;
575575
cost += (u64) attr->max_entries * cost_per_node;
576-
if (cost >= U32_MAX - PAGE_SIZE) {
577-
ret = -E2BIG;
578-
goto out_err;
579-
}
580576

581-
ret = bpf_map_charge_init(&trie->map.memory,
582-
round_up(cost, PAGE_SIZE) >> PAGE_SHIFT);
577+
ret = bpf_map_charge_init(&trie->map.memory, cost);
583578
if (ret)
584579
goto out_err;
585580

kernel/bpf/queue_stack_maps.c

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -73,10 +73,6 @@ static struct bpf_map *queue_stack_map_alloc(union bpf_attr *attr)
7373

7474
size = (u64) attr->max_entries + 1;
7575
cost = queue_size = sizeof(*qs) + size * attr->value_size;
76-
if (cost >= U32_MAX - PAGE_SIZE)
77-
return ERR_PTR(-E2BIG);
78-
79-
cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
8076

8177
ret = bpf_map_charge_init(&mem, cost);
8278
if (ret < 0)

kernel/bpf/reuseport_array.c

Lines changed: 2 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -152,21 +152,15 @@ static struct bpf_map *reuseport_array_alloc(union bpf_attr *attr)
152152
int err, numa_node = bpf_map_attr_numa_node(attr);
153153
struct reuseport_array *array;
154154
struct bpf_map_memory mem;
155-
u64 cost, array_size;
155+
u64 array_size;
156156

157157
if (!capable(CAP_SYS_ADMIN))
158158
return ERR_PTR(-EPERM);
159159

160160
array_size = sizeof(*array);
161161
array_size += (u64)attr->max_entries * sizeof(struct sock *);
162162

163-
/* make sure there is no u32 overflow later in round_up() */
164-
cost = array_size;
165-
if (cost >= U32_MAX - PAGE_SIZE)
166-
return ERR_PTR(-ENOMEM);
167-
cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
168-
169-
err = bpf_map_charge_init(&mem, cost);
163+
err = bpf_map_charge_init(&mem, array_size);
170164
if (err)
171165
return ERR_PTR(err);
172166

kernel/bpf/stackmap.c

Lines changed: 1 addition & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -117,14 +117,8 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
117117
n_buckets = roundup_pow_of_two(attr->max_entries);
118118

119119
cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap);
120-
if (cost >= U32_MAX - PAGE_SIZE)
121-
return ERR_PTR(-E2BIG);
122120
cost += n_buckets * (value_size + sizeof(struct stack_map_bucket));
123-
if (cost >= U32_MAX - PAGE_SIZE)
124-
return ERR_PTR(-E2BIG);
125-
126-
err = bpf_map_charge_init(&mem,
127-
round_up(cost, PAGE_SIZE) >> PAGE_SHIFT);
121+
err = bpf_map_charge_init(&mem, cost);
128122
if (err)
129123
return ERR_PTR(err);
130124

0 commit comments

Comments (0)