
Commit d937bc3

anakryiko authored and Alexei Starovoitov committed
bpf: make uniform use of array->elem_size everywhere in arraymap.c
BPF_MAP_TYPE_ARRAY is rounding value_size to the closest multiple of 8 and stores that as array->elem_size for various memory allocations and accesses. But the code tends to re-calculate round_up(map->value_size, 8) in multiple places instead of using array->elem_size. Clean this up, making sure we always use array->elem_size, to avoid duplicating this (admittedly simple) logic, for consistency.

Signed-off-by: Andrii Nakryiko <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Alexei Starovoitov <[email protected]>
1 parent 87ac0d6 · commit d937bc3
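The cleanup replaces a recomputation pattern with a read of the field that BPF_MAP_TYPE_ARRAY already caches at map creation time. As a minimal illustration of the before/after shape (struct layout abbreviated from include/linux/bpf.h, and elem_size_old/elem_size_new are hypothetical helper names used only for this sketch):

/* Abbreviated: struct bpf_array embeds the generic struct bpf_map, and
 * elem_size caches round_up(value_size, 8) computed at map creation. */
struct bpf_array {
	struct bpf_map map;
	u32 elem_size;
	/* ... remaining fields omitted ... */
};

static u32 elem_size_old(struct bpf_map *map)
{
	/* Before: every call site re-derived the rounded size. */
	return round_up(map->value_size, 8);
}

static u32 elem_size_new(struct bpf_map *map)
{
	/* After: recover the container and read the cached field. */
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	return array->elem_size;
}

Both forms yield the same value; the patch below simply standardizes every call site in arraymap.c on the cached field.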

File tree

1 file changed: +8 −6 lines

kernel/bpf/arraymap.c

Lines changed: 8 additions & 6 deletions
@@ -208,7 +208,7 @@ static int array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
 {
 	struct bpf_array *array = container_of(map, struct bpf_array, map);
 	struct bpf_insn *insn = insn_buf;
-	u32 elem_size = round_up(map->value_size, 8);
+	u32 elem_size = array->elem_size;
 	const int ret = BPF_REG_0;
 	const int map_ptr = BPF_REG_1;
 	const int index = BPF_REG_2;
@@ -277,7 +277,7 @@ int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
 	 * access 'value_size' of them, so copying rounded areas
 	 * will not leak any kernel data
 	 */
-	size = round_up(map->value_size, 8);
+	size = array->elem_size;
 	rcu_read_lock();
 	pptr = array->pptrs[index & array->index_mask];
 	for_each_possible_cpu(cpu) {
@@ -381,7 +381,7 @@ int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
 	 * returned or zeros which were zero-filled by percpu_alloc,
 	 * so no kernel data leaks possible
 	 */
-	size = round_up(map->value_size, 8);
+	size = array->elem_size;
 	rcu_read_lock();
 	pptr = array->pptrs[index & array->index_mask];
 	for_each_possible_cpu(cpu) {
@@ -587,6 +587,7 @@ static int __bpf_array_map_seq_show(struct seq_file *seq, void *v)
 	struct bpf_iter_seq_array_map_info *info = seq->private;
 	struct bpf_iter__bpf_map_elem ctx = {};
 	struct bpf_map *map = info->map;
+	struct bpf_array *array = container_of(map, struct bpf_array, map);
 	struct bpf_iter_meta meta;
 	struct bpf_prog *prog;
 	int off = 0, cpu = 0;
@@ -607,7 +608,7 @@ static int __bpf_array_map_seq_show(struct seq_file *seq, void *v)
 		ctx.value = v;
 	} else {
 		pptr = v;
-		size = round_up(map->value_size, 8);
+		size = array->elem_size;
 		for_each_possible_cpu(cpu) {
 			bpf_long_memcpy(info->percpu_value_buf + off,
 					per_cpu_ptr(pptr, cpu),
@@ -637,11 +638,12 @@ static int bpf_iter_init_array_map(void *priv_data,
 {
 	struct bpf_iter_seq_array_map_info *seq_info = priv_data;
 	struct bpf_map *map = aux->map;
+	struct bpf_array *array = container_of(map, struct bpf_array, map);
 	void *value_buf;
 	u32 buf_size;
 
 	if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
-		buf_size = round_up(map->value_size, 8) * num_possible_cpus();
+		buf_size = array->elem_size * num_possible_cpus();
 		value_buf = kmalloc(buf_size, GFP_USER | __GFP_NOWARN);
 		if (!value_buf)
 			return -ENOMEM;
@@ -1326,7 +1328,7 @@ static int array_of_map_gen_lookup(struct bpf_map *map,
 					   struct bpf_insn *insn_buf)
 {
 	struct bpf_array *array = container_of(map, struct bpf_array, map);
-	u32 elem_size = round_up(map->value_size, 8);
+	u32 elem_size = array->elem_size;
 	struct bpf_insn *insn = insn_buf;
 	const int ret = BPF_REG_0;
 	const int map_ptr = BPF_REG_1;
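For readers outside the kernel tree, the rounding itself is simple alignment arithmetic: round_up(x, 8) bumps x to the next multiple of 8 (e.g. a 12-byte value occupies a 16-byte slot), which is why the per-CPU copy paths above can copy elem_size bytes per CPU without leaking data, as the comments in the diff note. A self-contained user-space sketch of the cache-once pattern (toy_array and this round_up macro are stand-ins for illustration, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's round_up(); valid when y is a power of two. */
#define round_up(x, y) (((x) + (y) - 1) & ~((uint32_t)(y) - 1))

struct toy_array {
	uint32_t value_size;	/* user-visible value size */
	uint32_t elem_size;	/* cached round_up(value_size, 8) */
};

int main(void)
{
	struct toy_array a = { .value_size = 12 };

	/* Compute the rounded element size once, at "map creation"... */
	a.elem_size = round_up(a.value_size, 8);

	/* ...then every consumer reads the cached field instead of
	 * re-deriving round_up(a.value_size, 8) at each call site. */
	printf("value_size=%u elem_size=%u\n", a.value_size, a.elem_size);	/* 12, 16 */
	return 0;
}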
