@@ -647,6 +647,81 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
 	return 0;
 }
 
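+/*
+ * JIT a BPF acquire load (BPF_LOAD_ACQ) or release store (BPF_STORE_REL):
+ * compute the effective address, then emit the matching LDARx/STLRx.
+ */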
+static int emit_atomic_ld_st(const struct bpf_insn *insn, struct jit_ctx *ctx)
+{
+	const s32 imm = insn->imm;
+	const s16 off = insn->off;
+	const u8 code = insn->code;
+	const bool arena = BPF_MODE(code) == BPF_PROBE_ATOMIC;
+	const u8 arena_vm_base = bpf2a64[ARENA_VM_START];
+	const u8 dst = bpf2a64[insn->dst_reg];
+	const u8 src = bpf2a64[insn->src_reg];
+	const u8 tmp = bpf2a64[TMP_REG_1];
+	u8 reg;
+
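+	/* Address register: src for acquire loads, dst for release stores. */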
+	switch (imm) {
+	case BPF_LOAD_ACQ:
+		reg = src;
+		break;
+	case BPF_STORE_REL:
+		reg = dst;
+		break;
+	default:
+		pr_err_once("unknown atomic load/store op code %02x\n", imm);
+		return -EINVAL;
+	}
+
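+	/* Fold the immediate offset and, for arena accesses, the arena base into tmp. */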
+	if (off) {
+		emit_a64_add_i(1, tmp, reg, tmp, off, ctx);
+		reg = tmp;
+	}
+	if (arena) {
+		emit(A64_ADD(1, tmp, reg, arena_vm_base), ctx);
+		reg = tmp;
+	}
+
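+	/* Emit the size-matched load-acquire or store-release instruction. */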
+	switch (imm) {
+	case BPF_LOAD_ACQ:
+		switch (BPF_SIZE(code)) {
+		case BPF_B:
+			emit(A64_LDARB(dst, reg), ctx);
+			break;
+		case BPF_H:
+			emit(A64_LDARH(dst, reg), ctx);
+			break;
+		case BPF_W:
+			emit(A64_LDAR32(dst, reg), ctx);
+			break;
+		case BPF_DW:
+			emit(A64_LDAR64(dst, reg), ctx);
+			break;
+		}
+		break;
+	case BPF_STORE_REL:
+		switch (BPF_SIZE(code)) {
+		case BPF_B:
+			emit(A64_STLRB(src, reg), ctx);
+			break;
+		case BPF_H:
+			emit(A64_STLRH(src, reg), ctx);
+			break;
+		case BPF_W:
+			emit(A64_STLR32(src, reg), ctx);
+			break;
+		case BPF_DW:
+			emit(A64_STLR64(src, reg), ctx);
+			break;
+		}
+		break;
+	default:
+		pr_err_once("unexpected atomic load/store op code %02x\n",
+			    imm);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 #ifdef CONFIG_ARM64_LSE_ATOMICS
 static int emit_lse_atomic(const struct bpf_insn *insn, struct jit_ctx *ctx)
 {
@@ -1641,11 +1716,17 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 			return ret;
 		break;
 
+	case BPF_STX | BPF_ATOMIC | BPF_B:
+	case BPF_STX | BPF_ATOMIC | BPF_H:
 	case BPF_STX | BPF_ATOMIC | BPF_W:
 	case BPF_STX | BPF_ATOMIC | BPF_DW:
+	case BPF_STX | BPF_PROBE_ATOMIC | BPF_B:
+	case BPF_STX | BPF_PROBE_ATOMIC | BPF_H:
 	case BPF_STX | BPF_PROBE_ATOMIC | BPF_W:
 	case BPF_STX | BPF_PROBE_ATOMIC | BPF_DW:
-		if (cpus_have_cap(ARM64_HAS_LSE_ATOMICS))
+		if (bpf_atomic_is_load_store(insn))
+			ret = emit_atomic_ld_st(insn, ctx);
+		else if (cpus_have_cap(ARM64_HAS_LSE_ATOMICS))
 			ret = emit_lse_atomic(insn, ctx);
 		else
 			ret = emit_ll_sc_atomic(insn, ctx);
@@ -2667,13 +2748,10 @@ bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena)
 	if (!in_arena)
 		return true;
 	switch (insn->code) {
-	case BPF_STX | BPF_ATOMIC | BPF_B:
-	case BPF_STX | BPF_ATOMIC | BPF_H:
 	case BPF_STX | BPF_ATOMIC | BPF_W:
 	case BPF_STX | BPF_ATOMIC | BPF_DW:
-		if (bpf_atomic_is_load_store(insn))
-			return false;
-		if (!cpus_have_cap(ARM64_HAS_LSE_ATOMICS))
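+		/*
+		 * Acquire/release loads and stores never need LSE; only the
+		 * RMW atomics still depend on ARM64_HAS_LSE_ATOMICS here.
+		 */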
+		if (!bpf_atomic_is_load_store(insn) &&
+		    !cpus_have_cap(ARM64_HAS_LSE_ATOMICS))
 			return false;
 	}
 	return true;