Merge branch 'bpf,x64: Use BMI2 for shifts'
Jie Meng says:

====================

With the baseline x64 instruction set, the shift count can only be an
immediate or in %cl. The implicit dependency on %cl makes it necessary
to shuffle registers around and/or add push/pop operations.

BMI2 provides shift instructions that can use any general register as
the shift count, saving us instructions and a few bytes in most cases.
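
As a concrete sketch (illustrative bytes, not captured from the JIT;
dst_reg assumed in %rdi and the shift count in %rsi):

	/* baseline: stage the count in %cl, 9 bytes total */
	static const unsigned char shl_baseline[] = {
		0x51,              /* push %rcx      - save the clobbered register */
		0x48, 0x89, 0xf1,  /* mov  %rsi,%rcx - count must live in %cl */
		0x48, 0xd3, 0xe7,  /* shl  %cl,%rdi  - baseline shift */
		0x59,              /* pop  %rcx      - restore %rcx */
	};
	/* BMI2: the count register is encoded in the instruction, 5 bytes */
	static const unsigned char shl_bmi2[] = {
		0xc4, 0xe2, 0xc9, 0xf7, 0xff,  /* shlx %rsi,%rdi,%rdi */
	};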

Suboptimal codegen when %ecx is the source and/or destination is also
addressed, and unnecessary instructions are removed.

test_progs: Summary: 267/1340 PASSED, 25 SKIPPED, 0 FAILED
test_progs-no_alu32: Summary: 267/1333 PASSED, 26 SKIPPED, 0 FAILED
test_verifier: Summary: 1367 PASSED, 636 SKIPPED, 0 FAILED (same result
with or without BMI2)
test_maps: OK, 0 SKIPPED
lib/test_bpf:
  test_bpf: Summary: 1026 PASSED, 0 FAILED, [1014/1014 JIT'ed]
  test_bpf: test_tail_calls: Summary: 10 PASSED, 0 FAILED, [10/10 JIT'ed]
  test_bpf: test_skb_segment: Summary: 2 PASSED, 0 FAILED
---
v4 -> v5:
- More comments regarding instruction encoding
v3 -> v4:
- Fixed a regression when BMI2 isn't available
====================

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Alexei Starovoitov committed Oct 19, 2022
2 parents 7d8d535 + 8662de2 commit 04a8f9d
Showing 2 changed files with 118 additions and 12 deletions.
106 changes: 94 additions & 12 deletions arch/x86/net/bpf_jit_comp.c
@@ -891,6 +891,65 @@ static void emit_nops(u8 **pprog, int len)
 	*pprog = prog;
 }
 
+/* emit the 3-byte VEX prefix
+ *
+ * r: same as rex.r, extra bit for ModRM reg field
+ * x: same as rex.x, extra bit for SIB index field
+ * b: same as rex.b, extra bit for ModRM r/m, or SIB base
+ * m: opcode map select, encoding escape bytes e.g. 0x0f38
+ * w: same as rex.w (32 bit or 64 bit) or opcode specific
+ * src_reg2: additional source reg (encoded as BPF reg)
+ * l: vector length (128 bit or 256 bit) or reserved
+ * pp: opcode prefix (none, 0x66, 0xf2 or 0xf3)
+ */
+static void emit_3vex(u8 **pprog, bool r, bool x, bool b, u8 m,
+		      bool w, u8 src_reg2, bool l, u8 pp)
+{
+	u8 *prog = *pprog;
+	const u8 b0 = 0xc4; /* first byte of 3-byte VEX prefix */
+	u8 b1, b2;
+	u8 vvvv = reg2hex[src_reg2];
+
+	/* reg2hex gives only the lower 3 bit of vvvv */
+	if (is_ereg(src_reg2))
+		vvvv |= 1 << 3;
+
+	/*
+	 * 2nd byte of 3-byte VEX prefix
+	 * ~ means bit inverted encoding
+	 *
+	 *    7                           0
+	 *  +---+---+---+---+---+---+---+---+
+	 *  |~R |~X |~B |         m         |
+	 *  +---+---+---+---+---+---+---+---+
+	 */
+	b1 = (!r << 7) | (!x << 6) | (!b << 5) | (m & 0x1f);
+	/*
+	 * 3rd byte of 3-byte VEX prefix
+	 *
+	 *    7                           0
+	 *  +---+---+---+---+---+---+---+---+
+	 *  | W |     ~vvvv     | L |   pp  |
+	 *  +---+---+---+---+---+---+---+---+
+	 */
+	b2 = (w << 7) | ((~vvvv & 0xf) << 3) | (l << 2) | (pp & 3);
+
+	EMIT3(b0, b1, b2);
+	*pprog = prog;
+}
+
+/* emit BMI2 shift instruction */
+static void emit_shiftx(u8 **pprog, u32 dst_reg, u8 src_reg, bool is64, u8 op)
+{
+	u8 *prog = *pprog;
+	bool r = is_ereg(dst_reg);
+	u8 m = 2; /* escape code 0f38 */
+
+	emit_3vex(&prog, r, false, r, m, is64, src_reg, false, op);
+	EMIT2(0xf7, add_2reg(0xC0, dst_reg, dst_reg));
+	*pprog = prog;
+}
+
 #define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp)))
 
 static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image,
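
To make the bit twiddling above concrete, here is a minimal standalone
sketch (a hypothetical userspace program, not kernel code) that redoes
the emit_3vex()/emit_shiftx() math for a 64-bit BPF_LSH with dst_reg =
BPF_REG_1 (%rdi, x86 encoding 7) and src_reg = BPF_REG_2 (%rsi, x86
encoding 6):

	#include <stdio.h>

	int main(void)
	{
		unsigned r = 0, x = 0, b = 0; /* %rdi/%rsi need no extension bits */
		unsigned m = 2;               /* opcode map 0x0f38 */
		unsigned w = 1;               /* 64-bit operand size */
		unsigned vvvv = 6;            /* %rsi holds the shift count */
		unsigned l = 0, pp = 1;       /* pp = 1 selects prefix 0x66, i.e. shlx */

		unsigned b1 = (!r << 7) | (!x << 6) | (!b << 5) | (m & 0x1f);
		unsigned b2 = (w << 7) | ((~vvvv & 0xf) << 3) | (l << 2) | (pp & 3);
		unsigned modrm = 0xc0 | (7 << 3) | 7; /* add_2reg(0xC0, rdi, rdi) */

		/* prints "c4 e2 c9 f7 ff", which disassembles to shlx %rsi,%rdi,%rdi */
		printf("c4 %02x %02x f7 %02x\n", b1, b2, modrm);
		return 0;
	}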
@@ -1137,17 +1196,38 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image
 		case BPF_ALU64 | BPF_LSH | BPF_X:
 		case BPF_ALU64 | BPF_RSH | BPF_X:
 		case BPF_ALU64 | BPF_ARSH | BPF_X:
+			/* BMI2 shifts aren't better when shift count is already in rcx */
+			if (boot_cpu_has(X86_FEATURE_BMI2) && src_reg != BPF_REG_4) {
+				/* shrx/sarx/shlx dst_reg, dst_reg, src_reg */
+				bool w = (BPF_CLASS(insn->code) == BPF_ALU64);
+				u8 op;
+
+				switch (BPF_OP(insn->code)) {
+				case BPF_LSH:
+					op = 1; /* prefix 0x66 */
+					break;
+				case BPF_RSH:
+					op = 3; /* prefix 0xf2 */
+					break;
+				case BPF_ARSH:
+					op = 2; /* prefix 0xf3 */
+					break;
+				}
+
-			/* Check for bad case when dst_reg == rcx */
-			if (dst_reg == BPF_REG_4) {
-				/* mov r11, dst_reg */
-				EMIT_mov(AUX_REG, dst_reg);
-				dst_reg = AUX_REG;
+				emit_shiftx(&prog, dst_reg, src_reg, w, op);
+
+				break;
 			}
 
 			if (src_reg != BPF_REG_4) { /* common case */
-				EMIT1(0x51); /* push rcx */
-
+				/* Check for bad case when dst_reg == rcx */
+				if (dst_reg == BPF_REG_4) {
+					/* mov r11, dst_reg */
+					EMIT_mov(AUX_REG, dst_reg);
+					dst_reg = AUX_REG;
+				} else {
+					EMIT1(0x51); /* push rcx */
+				}
 				/* mov rcx, src_reg */
 				EMIT_mov(BPF_REG_4, src_reg);
 			}
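
Why the new path bails out when src_reg is BPF_REG_4 (%rcx): with the
count already in %rcx, the legacy D3-form shift needs no push/pop or
mov at all, so it beats the 5-byte VEX encoding. Illustrative bytes,
assuming dst_reg in %rdi:

	/* legacy path, count already in %cl: 3 bytes */
	0x48, 0xd3, 0xe7,              /* shl  %cl,%rdi */
	/* hypothetical BMI2 encoding of the same operation: 5 bytes */
	0xc4, 0xe2, 0xf1, 0xf7, 0xff,  /* shlx %rcx,%rdi,%rdi */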
@@ -1159,12 +1239,14 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image
 			b3 = simple_alu_opcodes[BPF_OP(insn->code)];
 			EMIT2(0xD3, add_1reg(b3, dst_reg));
 
-			if (src_reg != BPF_REG_4)
-				EMIT1(0x59); /* pop rcx */
+			if (src_reg != BPF_REG_4) {
+				if (insn->dst_reg == BPF_REG_4)
+					/* mov dst_reg, r11 */
+					EMIT_mov(insn->dst_reg, AUX_REG);
+				else
+					EMIT1(0x59); /* pop rcx */
+			}
 
-			if (insn->dst_reg == BPF_REG_4)
-				/* mov dst_reg, r11 */
-				EMIT_mov(insn->dst_reg, AUX_REG);
 			break;
 
 		case BPF_ALU | BPF_END | BPF_FROM_BE:
24 changes: 24 additions & 0 deletions tools/testing/selftests/bpf/verifier/jit.c
@@ -20,6 +20,30 @@
 	.result = ACCEPT,
 	.retval = 2,
 },
+{
+	"jit: lsh, rsh, arsh by reg",
+	.insns = {
+	BPF_MOV64_IMM(BPF_REG_0, 1),
+	BPF_MOV64_IMM(BPF_REG_4, 1),
+	BPF_MOV64_IMM(BPF_REG_1, 0xff),
+	BPF_ALU64_REG(BPF_LSH, BPF_REG_1, BPF_REG_0),
+	BPF_ALU32_REG(BPF_LSH, BPF_REG_1, BPF_REG_4),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x3fc, 1),
+	BPF_EXIT_INSN(),
+	BPF_ALU64_REG(BPF_RSH, BPF_REG_1, BPF_REG_4),
+	BPF_MOV64_REG(BPF_REG_4, BPF_REG_1),
+	BPF_ALU32_REG(BPF_RSH, BPF_REG_4, BPF_REG_0),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 0xff, 1),
+	BPF_EXIT_INSN(),
+	BPF_ALU64_REG(BPF_ARSH, BPF_REG_4, BPF_REG_4),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 0, 1),
+	BPF_EXIT_INSN(),
+	BPF_MOV64_IMM(BPF_REG_0, 2),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 2,
+},
 {
 	"jit: mov32 for ldimm64, 1",
 	.insns = {
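/* Not part of the commit - a quick trace of the new test, assuming the
 * x86 shift semantics the JIT must preserve (count masked to width - 1):
 *   r1 = 0xff << 1 = 0x1fe; 32-bit << 1 -> 0x3fc           (first check)
 *   r1 = 0x3fc >> 1 = 0x1fe; r4 = r1; 32-bit >> 1 -> 0xff  (second check)
 *   arsh64 r4 by r4: count 0xff masked to 63 -> 0          (third check)
 * so the program reaches the final mov and returns 2.
 */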
