
Commit 1e82dfa

hbathini authored and mpe committed
powerpc/bpf/64: Add instructions for atomic_[cmp]xchg
This adds two atomic opcodes BPF_XCHG and BPF_CMPXCHG on ppc64, both of which include the BPF_FETCH flag. The kernel's atomic_cmpxchg operation fundamentally has 3 operands, but we only have two register fields. Therefore the operand we compare against (the kernel's API calls it 'old') is hard-coded to be BPF_REG_R0. Also, the kernel's atomic_cmpxchg returns the previous value at dst_reg + off. JIT the same for BPF too, with the return value put in BPF_REG_0.

    BPF_REG_R0 = atomic_cmpxchg(dst_reg + off, BPF_REG_R0, src_reg);

Signed-off-by: Hari Bathini <hbathini@linux.ibm.com>
Tested-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com> (ppc64le)
Reviewed-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20220610155552.25892-4-hbathini@linux.ibm.com
1 parent dbe6e24 commit 1e82dfa
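
For reference, a minimal sketch of how a BPF program could exercise the new BPF_CMPXCHG opcode, written with the kernel's insn-builder macros from include/linux/filter.h. The register roles and the assumption that r1 already points at a writable u64 (e.g. a map value) are illustrative and not taken from this commit:

#include <linux/filter.h>	/* BPF_MOV64_IMM, BPF_ATOMIC_OP, BPF_EXIT_INSN */

/*
 * Illustrative fragment only; register choices and the r1 pointer
 * assumption are made up for the example.
 */
static const struct bpf_insn cmpxchg_example[] = {
	/* r0 = 'old' value we expect to find at r1 + 0 */
	BPF_MOV64_IMM(BPF_REG_0, 42),
	/* r2 = new value to store if the comparison succeeds */
	BPF_MOV64_IMM(BPF_REG_2, 13),
	/* r0 = atomic_cmpxchg(r1 + 0, r0, r2); previous value lands in r0 */
	BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_1, BPF_REG_2, 0),
	BPF_EXIT_INSN(),
};

Whether or not the store happens, r0 afterwards holds what was in memory before the operation, matching the contract spelled out above.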

File tree

1 file changed (+34, -5 lines)


arch/powerpc/net/bpf_jit_comp64.c

Lines changed: 34 additions & 5 deletions
@@ -360,6 +360,7 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
 		u32 size = BPF_SIZE(code);
 		u32 tmp1_reg = bpf_to_ppc(TMP_REG_1);
 		u32 tmp2_reg = bpf_to_ppc(TMP_REG_2);
+		u32 save_reg, ret_reg;
 		s16 off = insn[i].off;
 		s32 imm = insn[i].imm;
 		bool func_addr_fixed;
@@ -778,6 +779,9 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
 		 */
 		case BPF_STX | BPF_ATOMIC | BPF_W:
 		case BPF_STX | BPF_ATOMIC | BPF_DW:
+			save_reg = tmp2_reg;
+			ret_reg = src_reg;
+
 			/* Get offset into TMP_REG_1 */
 			EMIT(PPC_RAW_LI(tmp1_reg, off));
 			tmp_idx = ctx->idx * 4;
@@ -808,6 +812,24 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
 			case BPF_XOR | BPF_FETCH:
 				EMIT(PPC_RAW_XOR(tmp2_reg, tmp2_reg, src_reg));
 				break;
+			case BPF_CMPXCHG:
+				/*
+				 * Return old value in BPF_REG_0 for BPF_CMPXCHG &
+				 * in src_reg for other cases.
+				 */
+				ret_reg = bpf_to_ppc(BPF_REG_0);
+
+				/* Compare with old value in BPF_R0 */
+				if (size == BPF_DW)
+					EMIT(PPC_RAW_CMPD(bpf_to_ppc(BPF_REG_0), tmp2_reg));
+				else
+					EMIT(PPC_RAW_CMPW(bpf_to_ppc(BPF_REG_0), tmp2_reg));
+				/* Don't set if different from old value */
+				PPC_BCC_SHORT(COND_NE, (ctx->idx + 3) * 4);
+				fallthrough;
+			case BPF_XCHG:
+				save_reg = src_reg;
+				break;
 			default:
 				pr_err_ratelimited(
 					"eBPF filter atomic op code %02x (@%d) unsupported\n",
@@ -817,15 +839,22 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
 
 			/* store new value */
 			if (size == BPF_DW)
-				EMIT(PPC_RAW_STDCX(tmp2_reg, tmp1_reg, dst_reg));
+				EMIT(PPC_RAW_STDCX(save_reg, tmp1_reg, dst_reg));
 			else
-				EMIT(PPC_RAW_STWCX(tmp2_reg, tmp1_reg, dst_reg));
+				EMIT(PPC_RAW_STWCX(save_reg, tmp1_reg, dst_reg));
 			/* we're done if this succeeded */
 			PPC_BCC_SHORT(COND_NE, tmp_idx);
 
-			/* For the BPF_FETCH variant, get old value into src_reg */
-			if (imm & BPF_FETCH)
-				EMIT(PPC_RAW_MR(src_reg, _R0));
+			if (imm & BPF_FETCH) {
+				EMIT(PPC_RAW_MR(ret_reg, _R0));
+				/*
+				 * Skip unnecessary zero-extension for 32-bit cmpxchg.
+				 * For context, see commit 39491867ace5.
+				 */
+				if (size != BPF_DW && imm == BPF_CMPXCHG &&
+				    insn_is_zext(&insn[i + 1]))
+					addrs[++i] = ctx->idx * 4;
+			}
 			break;
 
 		/*
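
For completeness, a similar illustrative fragment for the other new opcode, BPF_XCHG, where the old value comes back in src_reg rather than r0 since BPF_XCHG includes the BPF_FETCH flag. It reuses the same insn-builder macros from include/linux/filter.h as the earlier sketch, and the register choices and the r1 pointer are again assumptions for the example:

/*
 * Illustrative only: swap r2 into *(u64 *)(r1 + 0) and get the previous
 * contents back in r2 (the src_reg).
 */
static const struct bpf_insn xchg_example[] = {
	/* r2 = new value to swap in */
	BPF_MOV64_IMM(BPF_REG_2, 7),
	/* r2 = xchg(r1 + 0, r2) */
	BPF_ATOMIC_OP(BPF_DW, BPF_XCHG, BPF_REG_1, BPF_REG_2, 0),
	/* return the old value in r0 */
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_EXIT_INSN(),
};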
