bpf, arm64: support for timed may_goto #9439

Open · wants to merge 2 commits into bpf-next_base
arch/arm64/net/Makefile (2 changes: 1 addition & 1 deletion)
@@ -2,4 +2,4 @@
#
# ARM64 networking code
#
-obj-$(CONFIG_BPF_JIT) += bpf_jit_comp.o
+obj-$(CONFIG_BPF_JIT) += bpf_jit_comp.o bpf_timed_may_goto.o
arch/arm64/net/bpf_jit_comp.c (13 changes: 12 additions & 1 deletion)
@@ -1558,7 +1558,13 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
if (ret < 0)
return ret;
emit_call(func_addr, ctx);
-emit(A64_MOV(1, r0, A64_R(0)), ctx);
+/*
+ * The call to arch_bpf_timed_may_goto() is emitted by the verifier
+ * and uses a custom calling convention: the first argument and the
+ * return value are passed in BPF_REG_AX (x9).
+ */
+if (func_addr != (u64)arch_bpf_timed_may_goto)
+	emit(A64_MOV(1, r0, A64_R(0)), ctx);
break;
}
/* tail call */
@@ -3038,6 +3044,11 @@ bool bpf_jit_bypass_spec_v4(void)
return true;
}

+bool bpf_jit_supports_timed_may_goto(void)
+{
+	return true;
+}
+
bool bpf_jit_inlines_helper_call(s32 imm)
{
switch (imm) {
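For context on the x9 convention above: the offset passed in BPF_REG_AX selects a pair of u64 stack slots that the verifier reserves for the loop budget. A sketch of their layout, matching struct bpf_timed_may_goto as consumed by bpf_check_timed_may_goto() (the field comments are annotations, not part of the patch):

struct bpf_timed_may_goto {
	u64 count;	/* iterations left before the clock is checked again */
	u64 timestamp;	/* 0 until the first refresh, then the slice start time */
};

In the selftest expectations below, count lives at r10 -24 and timestamp at r10 -16.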
arch/arm64/net/bpf_timed_may_goto.S (40 changes: 40 additions & 0 deletions)
@@ -0,0 +1,40 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2025 Puranjay Mohan <[email protected]> */

#include <linux/linkage.h>

SYM_FUNC_START(arch_bpf_timed_may_goto)
/* Allocate stack space and set up the frame record */
stp x29, x30, [sp, #-64]!
mov x29, sp

/* Save BPF registers R0 - R5 (x7, x0-x4) */
stp x7, x0, [sp, #16]
stp x1, x2, [sp, #32]
stp x3, x4, [sp, #48]

/*
 * The (negative) stack offset of the count/timestamp pair was passed
 * in BPF_REG_AX (x9); add it to BPF_FP (x25) to form the pointer to
 * the pair and pass it as the first argument in x0.
 *
 * Before generating the call to arch_bpf_timed_may_goto, the verifier
 * emits a load through FP, i.e. REG_AX = *(u64 *)(FP - stack_off_cnt),
 * so BPF_REG_FP (x25) is guaranteed to have been set up by the arm64
 * JIT in this case.
 */
add x0, x9, x25
bl bpf_check_timed_may_goto
/* The caller stores BPF_REG_AX (x9) back into count, so move the return value into it. */
mov x9, x0

/* Restore BPF registers R0 - R5 (x7, x0-x4) */
ldp x7, x0, [sp, #16]
ldp x1, x2, [sp, #32]
ldp x3, x4, [sp, #48]

/* Restore FP and LR */
ldp x29, x30, [sp], #64

ret
SYM_FUNC_END(arch_bpf_timed_may_goto)
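The bl above lands in the generic helper bpf_check_timed_may_goto(), shared with the x86 implementation. Roughly, it behaves like the sketch below (the real code lives in kernel/bpf/core.c; treat constants and field handling here as an approximation, not a verbatim copy):

u64 bpf_check_timed_may_goto(struct bpf_timed_may_goto *p)
{
	u64 time = ktime_get_mono_fast_ns();

	/* First check in this stack frame: record the start time and
	 * hand out a full iteration budget.
	 */
	if (!p->timestamp) {
		p->timestamp = time;
		return BPF_MAX_TIMED_LOOPS;
	}
	/* Time slice exhausted: return 0 so the expanded may_goto takes
	 * its exit branch on the next check.
	 */
	if (time - p->timestamp >= (NSEC_PER_SEC / 4))
		return 0;
	/* Otherwise refresh the budget and keep looping. */
	return BPF_MAX_TIMED_LOOPS;
}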
tools/testing/selftests/bpf/prog_tests/stream.c (2 changes: 1 addition & 1 deletion)
@@ -77,7 +77,7 @@ void test_stream_errors(void)
ASSERT_OK(ret, "ret");
ASSERT_OK(opts.retval, "retval");

-#if !defined(__x86_64__)
+#if !defined(__x86_64__) && !defined(__aarch64__)
ASSERT_TRUE(1, "Timed may_goto unsupported, skip.");
if (i == 0) {
ret = bpf_prog_stream_read(prog_fd, 2, buf, sizeof(buf), &ropts);
tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c (27 changes: 16 additions & 11 deletions)
@@ -660,19 +660,24 @@ __naked void may_goto_interaction_x86_64(void)

SEC("raw_tp")
__arch_arm64
__log_level(4) __msg("stack depth 16")
/* may_goto counter at -16 */
__xlated("0: *(u64 *)(r10 -16) =")
__xlated("1: r1 = 1")
__xlated("2: call bpf_get_smp_processor_id")
__log_level(4) __msg("stack depth 24")
/* may_goto counter at -24 */
__xlated("0: *(u64 *)(r10 -24) =")
/* may_goto timestamp at -16 */
__xlated("1: *(u64 *)(r10 -16) =")
__xlated("2: r1 = 1")
__xlated("3: call bpf_get_smp_processor_id")
/* may_goto expansion starts */
__xlated("3: r11 = *(u64 *)(r10 -16)")
__xlated("4: if r11 == 0x0 goto pc+3")
__xlated("5: r11 -= 1")
__xlated("6: *(u64 *)(r10 -16) = r11")
__xlated("4: r11 = *(u64 *)(r10 -24)")
__xlated("5: if r11 == 0x0 goto pc+6")
__xlated("6: r11 -= 1")
__xlated("7: if r11 != 0x0 goto pc+2")
__xlated("8: r11 = -24")
__xlated("9: call unknown")
__xlated("10: *(u64 *)(r10 -24) = r11")
/* may_goto expansion ends */
__xlated("7: *(u64 *)(r10 -8) = r1")
__xlated("8: exit")
__xlated("11: *(u64 *)(r10 -8) = r1")
__xlated("12: exit")
__success
__naked void may_goto_interaction_arm64(void)
{
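Spelled out, the timed may_goto expansion these __xlated lines assert looks like this (r11 is BPF_REG_AX; "call unknown" is how the dump renders the call to arch_bpf_timed_may_goto; the comments are annotations, not part of the test):

r11 = *(u64 *)(r10 -24)		/* load the remaining count */
if r11 == 0x0 goto pc+6		/* budget spent: take the may_goto exit */
r11 -= 1
if r11 != 0x0 goto pc+2		/* common case: skip the kernel call */
r11 = -24			/* stack offset of the count/timestamp pair */
call arch_bpf_timed_may_goto	/* returns the refreshed count (or 0) in r11 */
*(u64 *)(r10 -24) = r11		/* store the updated count */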
tools/testing/selftests/bpf/progs/verifier_may_goto_1.c (34 changes: 6 additions & 28 deletions)
@@ -9,6 +9,7 @@
SEC("raw_tp")
__description("may_goto 0")
__arch_x86_64
+__arch_arm64
__xlated("0: r0 = 1")
__xlated("1: exit")
__success
@@ -27,6 +28,7 @@ __naked void may_goto_simple(void)
SEC("raw_tp")
__description("batch 2 of may_goto 0")
__arch_x86_64
+__arch_arm64
__xlated("0: r0 = 1")
__xlated("1: exit")
__success
@@ -47,6 +49,7 @@ __naked void may_goto_batch_0(void)
SEC("raw_tp")
__description("may_goto batch with offsets 2/1/0")
__arch_x86_64
+__arch_arm64
__xlated("0: r0 = 1")
__xlated("1: exit")
__success
@@ -69,8 +72,9 @@ __naked void may_goto_batch_1(void)
}

SEC("raw_tp")
__description("may_goto batch with offsets 2/0 - x86_64")
__description("may_goto batch with offsets 2/0")
__arch_x86_64
__arch_arm64
__xlated("0: *(u64 *)(r10 -16) = 65535")
__xlated("1: *(u64 *)(r10 -8) = 0")
__xlated("2: r11 = *(u64 *)(r10 -16)")
@@ -84,33 +88,7 @@ __xlated("9: r0 = 1")
__xlated("10: r0 = 2")
__xlated("11: exit")
__success
-__naked void may_goto_batch_2_x86_64(void)
-{
-asm volatile (
-".8byte %[may_goto1];"
-".8byte %[may_goto3];"
-"r0 = 1;"
-"r0 = 2;"
-"exit;"
-:
-: __imm_insn(may_goto1, BPF_RAW_INSN(BPF_JMP | BPF_JCOND, 0, 0, 2 /* offset */, 0)),
-__imm_insn(may_goto3, BPF_RAW_INSN(BPF_JMP | BPF_JCOND, 0, 0, 0 /* offset */, 0))
-: __clobber_all);
-}
-
-SEC("raw_tp")
-__description("may_goto batch with offsets 2/0 - arm64")
-__arch_arm64
-__xlated("0: *(u64 *)(r10 -8) = 8388608")
-__xlated("1: r11 = *(u64 *)(r10 -8)")
-__xlated("2: if r11 == 0x0 goto pc+3")
-__xlated("3: r11 -= 1")
-__xlated("4: *(u64 *)(r10 -8) = r11")
-__xlated("5: r0 = 1")
-__xlated("6: r0 = 2")
-__xlated("7: exit")
-__success
-__naked void may_goto_batch_2_arm64(void)
+__naked void may_goto_batch_2(void)
{
asm volatile (
".8byte %[may_goto1];"