@@ -6051,42 +6051,26 @@ static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
6051
6051
6052
6052
/* check access to 'struct bpf_context' fields. Supports fixed offsets only */
6053
6053
static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
6054
- enum bpf_access_type t, enum bpf_reg_type *reg_type,
6055
- struct btf **btf, u32 *btf_id, bool *is_retval, bool is_ldsx,
6056
- u32 *ref_obj_id)
6054
+ enum bpf_access_type t, struct bpf_insn_access_aux *info)
6057
6055
{
6058
- struct bpf_insn_access_aux info = {
6059
- .reg_type = *reg_type,
6060
- .log = &env->log,
6061
- .is_retval = false,
6062
- .is_ldsx = is_ldsx,
6063
- };
6064
-
6065
6056
if (env->ops->is_valid_access &&
6066
- 	    env->ops->is_valid_access(off, size, t, env->prog, &info)) {
6057
+ env->ops->is_valid_access(off, size, t, env->prog, info)) {
6067
6058
/* A non zero info.ctx_field_size indicates that this field is a
6068
6059
* candidate for later verifier transformation to load the whole
6069
6060
* field and then apply a mask when accessed with a narrower
6070
6061
* access than actual ctx access size. A zero info.ctx_field_size
6071
6062
* will only allow for whole field access and rejects any other
6072
6063
* type of narrower access.
6073
6064
*/
6074
- *reg_type = info.reg_type;
6075
- *is_retval = info.is_retval;
6076
-
6077
- if (base_type(*reg_type) == PTR_TO_BTF_ID) {
6078
- if (info.ref_obj_id &&
6079
- !find_reference_state(env->cur_state, info.ref_obj_id)) {
6065
+ if (base_type(info->reg_type) == PTR_TO_BTF_ID) {
6066
+ if (info->ref_obj_id &&
6067
+ !find_reference_state(env->cur_state, info->ref_obj_id)) {
6080
6068
verbose(env, "invalid bpf_context access off=%d. Reference may already be released\n",
6081
6069
off);
6082
6070
return -EACCES;
6083
6071
}
6084
-
6085
- *btf = info.btf;
6086
- *btf_id = info.btf_id;
6087
- *ref_obj_id = info.ref_obj_id;
6088
6072
} else {
6089
- 		env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
6073
+ 		env->insn_aux_data[insn_idx].ctx_field_size = info->ctx_field_size;
6090
6074
}
6091
6075
/* remember the offset of last byte accessed in ctx */
6092
6076
if (env->prog->aux->max_ctx_offset < off + size)
@@ -7443,11 +7427,12 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
7443
7427
if (!err && value_regno >= 0 && (t == BPF_READ || rdonly_mem))
7444
7428
mark_reg_unknown(env, regs, value_regno);
7445
7429
} else if (reg->type == PTR_TO_CTX) {
7446
- bool is_retval = false;
7447
7430
struct bpf_retval_range range;
7448
- enum bpf_reg_type reg_type = SCALAR_VALUE;
7449
- struct btf *btf = NULL;
7450
- u32 btf_id = 0, ref_obj_id = 0;
7431
+ struct bpf_insn_access_aux info = {
7432
+ .reg_type = SCALAR_VALUE,
7433
+ .is_ldsx = is_ldsx,
7434
+ .log = &env->log,
7435
+ };
7451
7436
7452
7437
if (t == BPF_WRITE && value_regno >= 0 &&
7453
7438
is_pointer_value(env, value_regno)) {
@@ -7459,17 +7444,16 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
7459
7444
if (err < 0)
7460
7445
return err;
7461
7446
7462
- 		err = check_ctx_access(env, insn_idx, off, size, t, &reg_type, &btf,
7463
- &btf_id, &is_retval, is_ldsx, &ref_obj_id);
7447
+ err = check_ctx_access(env, insn_idx, off, size, t, &info);
7464
7448
if (err)
7465
7449
verbose_linfo(env, insn_idx, "; ");
7466
7450
if (!err && t == BPF_READ && value_regno >= 0) {
7467
7451
/* ctx access returns either a scalar, or a
7468
7452
* PTR_TO_PACKET[_META,_END]. In the latter
7469
7453
* case, we know the offset is zero.
7470
7454
*/
7471
- if (reg_type == SCALAR_VALUE) {
7472
- if (is_retval && get_func_retval_range(env->prog, &range)) {
7455
+ 			if (info.reg_type == SCALAR_VALUE) {
7456
+ 				if (info.is_retval && get_func_retval_range(env->prog, &range)) {
7473
7457
err = __mark_reg_s32_range(env, regs, value_regno,
7474
7458
range.minval, range.maxval);
7475
7459
if (err)
@@ -7480,21 +7464,21 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
7480
7464
} else {
7481
7465
mark_reg_known_zero(env, regs,
7482
7466
value_regno);
7483
- if (type_may_be_null(reg_type))
7467
+ 				if (type_may_be_null(info.reg_type))
7484
7468
regs[value_regno].id = ++env->id_gen;
7485
7469
/* A load of ctx field could have different
7486
7470
* actual load size with the one encoded in the
7487
7471
* insn. When the dst is PTR, it is for sure not
7488
7472
* a sub-register.
7489
7473
*/
7490
7474
regs[value_regno].subreg_def = DEF_NOT_SUBREG;
7491
- if (base_type(reg_type) == PTR_TO_BTF_ID) {
7492
- regs[value_regno].btf = btf;
7493
- regs[value_regno].btf_id = btf_id;
7494
- regs[value_regno].ref_obj_id = ref_obj_id;
7475
+ 				if (base_type(info.reg_type) == PTR_TO_BTF_ID) {
7476
+ 					regs[value_regno].btf = info.btf;
7477
+ 					regs[value_regno].btf_id = info.btf_id;
7478
+ 					regs[value_regno].ref_obj_id = info.ref_obj_id;
7495
7479
}
7496
7480
}
7497
- regs[value_regno].type = reg_type;
7481
+ 			regs[value_regno].type = info.reg_type;
7498
7482
}
7499
7483
7500
7484
} else if (reg->type == PTR_TO_STACK) {
0 commit comments