Adds explicit sign extension of results in SBPF-v2.
Lichtso committed Dec 18, 2023
1 parent 2e48d19 commit ac39e9e
Showing 5 changed files with 182 additions and 34 deletions.
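
A sketch of the semantic change, for orientation (illustrative code, not part of this commit): under SBPF v1 the 32-bit result of add32/sub32 is implicitly sign-extended into the 64-bit destination register, while under SBPF v2 it is zero-extended. The names widen_alu32_result and is_v1 below are stand-ins for the sign_extension helper and the implicit_sign_extension_of_results() check added in src/interpreter.rs and src/program.rs.

fn widen_alu32_result(value: i32, is_v1: bool) -> u64 {
    if is_v1 {
        value as i64 as u64 // SBPF v1: implicit sign extension
    } else {
        value as u32 as u64 // SBPF v2: zero extension
    }
}

fn main() {
    // e.g. `add32 r0, -1` with r0 = 0 yields the i32 result -1
    assert_eq!(widen_alu32_result(-1, true), 0xffff_ffff_ffff_ffff); // v1
    assert_eq!(widen_alu32_result(-1, false), 0x0000_0000_ffff_ffff); // v2
}
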
44 changes: 30 additions & 14 deletions src/interpreter.rs
@@ -152,6 +152,18 @@ impl<'a, 'b, C: ContextObject> Interpreter<'a, 'b, C> {
true
}

fn sign_extension(&self, value: i32) -> u64 {
if self
.executable
.get_sbpf_version()
.implicit_sign_extension_of_results()
{
value as i64 as u64
} else {
value as u32 as u64
}
}

/// Advances the interpreter state by one instruction
///
/// Returns false if the program terminated or threw an error.
@@ -245,14 +257,14 @@ impl<'a, 'b, C: ContextObject> Interpreter<'a, 'b, C> {
},

// BPF_ALU class
ebpf::ADD32_IMM => self.reg[dst] = (self.reg[dst] as i32).wrapping_add(insn.imm as i32) as u64,
ebpf::ADD32_REG => self.reg[dst] = (self.reg[dst] as i32).wrapping_add(self.reg[src] as i32) as u64,
ebpf::ADD32_IMM => self.reg[dst] = self.sign_extension((self.reg[dst] as i32).wrapping_add(insn.imm as i32)),
ebpf::ADD32_REG => self.reg[dst] = self.sign_extension((self.reg[dst] as i32).wrapping_add(self.reg[src] as i32)),
ebpf::SUB32_IMM => if self.executable.get_sbpf_version().swap_sub_reg_imm_operands() {
self.reg[dst] = (insn.imm as i32).wrapping_sub(self.reg[dst] as i32) as u64
self.reg[dst] = self.sign_extension((insn.imm as i32).wrapping_sub(self.reg[dst] as i32))
} else {
self.reg[dst] = (self.reg[dst] as i32).wrapping_sub(insn.imm as i32) as u64
self.reg[dst] = self.sign_extension((self.reg[dst] as i32).wrapping_sub(insn.imm as i32))
},
ebpf::SUB32_REG => self.reg[dst] = (self.reg[dst] as i32).wrapping_sub(self.reg[src] as i32) as u64,
ebpf::SUB32_REG => self.reg[dst] = self.sign_extension((self.reg[dst] as i32).wrapping_sub(self.reg[src] as i32)),
ebpf::MUL32_IMM if !self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = (self.reg[dst] as i32).wrapping_mul(insn.imm as i32) as u64,
ebpf::MUL32_REG if !self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = (self.reg[dst] as i32).wrapping_mul(self.reg[src] as i32) as u64,
ebpf::DIV32_IMM if !self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = (self.reg[dst] as u32 / insn.imm as u32) as u64,
@@ -277,9 +289,13 @@ impl<'a, 'b, C: ContextObject> Interpreter<'a, 'b, C> {
ebpf::XOR32_IMM => self.reg[dst] = (self.reg[dst] as u32 ^ insn.imm as u32) as u64,
ebpf::XOR32_REG => self.reg[dst] = (self.reg[dst] as u32 ^ self.reg[src] as u32) as u64,
ebpf::MOV32_IMM => self.reg[dst] = insn.imm as u32 as u64,
ebpf::MOV32_REG => self.reg[dst] = (self.reg[src] as u32) as u64,
ebpf::ARSH32_IMM => self.reg[dst] = (self.reg[dst] as i32).wrapping_shr(insn.imm as u32) as u64 & (u32::MAX as u64),
ebpf::ARSH32_REG => self.reg[dst] = (self.reg[dst] as i32).wrapping_shr(self.reg[src] as u32) as u64 & (u32::MAX as u64),
ebpf::MOV32_REG => self.reg[dst] = if self.executable.get_sbpf_version().implicit_sign_extension_of_results() {
self.reg[src] as u32 as u64
} else {
self.reg[src] as i32 as i64 as u64
},
ebpf::ARSH32_IMM => self.reg[dst] = (self.reg[dst] as i32).wrapping_shr(insn.imm as u32) as u32 as u64,
ebpf::ARSH32_REG => self.reg[dst] = (self.reg[dst] as i32).wrapping_shr(self.reg[src] as u32) as u32 as u64,
ebpf::LE if self.executable.get_sbpf_version().enable_le() => {
self.reg[dst] = match insn.imm {
16 => (self.reg[dst] as u16).to_le() as u64,
@@ -342,8 +358,8 @@ impl<'a, 'b, C: ContextObject> Interpreter<'a, 'b, C> {
}

// BPF_PQR class
ebpf::LMUL32_IMM if self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = (self.reg[dst] as i32).wrapping_mul(insn.imm as i32) as u64,
ebpf::LMUL32_REG if self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = (self.reg[dst] as i32).wrapping_mul(self.reg[src] as i32) as u64,
ebpf::LMUL32_IMM if self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = (self.reg[dst] as i32).wrapping_mul(insn.imm as i32) as u32 as u64,
ebpf::LMUL32_REG if self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = (self.reg[dst] as i32).wrapping_mul(self.reg[src] as i32) as u32 as u64,
ebpf::LMUL64_IMM if self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = self.reg[dst].wrapping_mul(insn.imm as u64),
ebpf::LMUL64_REG if self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = self.reg[dst].wrapping_mul(self.reg[src]),
ebpf::UHMUL64_IMM if self.executable.get_sbpf_version().enable_pqr() => self.reg[dst] = (self.reg[dst] as u128).wrapping_mul(insn.imm as u64 as u128).wrapping_shr(64) as u64,
@@ -380,12 +396,12 @@ impl<'a, 'b, C: ContextObject> Interpreter<'a, 'b, C> {
},
ebpf::SDIV32_IMM if self.executable.get_sbpf_version().enable_pqr() => {
throw_error!(DivideOverflow; self, insn.imm, self.reg[dst], i32);
self.reg[dst] = (self.reg[dst] as i32 / insn.imm as i32) as u64;
self.reg[dst] = (self.reg[dst] as i32 / insn.imm as i32) as u32 as u64;
}
ebpf::SDIV32_REG if self.executable.get_sbpf_version().enable_pqr() => {
throw_error!(DivideByZero; self, self.reg[src], i32);
throw_error!(DivideOverflow; self, self.reg[src], self.reg[dst], i32);
self.reg[dst] = (self.reg[dst] as i32 / self.reg[src] as i32) as u64;
self.reg[dst] = (self.reg[dst] as i32 / self.reg[src] as i32) as u32 as u64;
},
ebpf::SDIV64_IMM if self.executable.get_sbpf_version().enable_pqr() => {
throw_error!(DivideOverflow; self, insn.imm, self.reg[dst], i64);
@@ -398,12 +414,12 @@ impl<'a, 'b, C: ContextObject> Interpreter<'a, 'b, C> {
},
ebpf::SREM32_IMM if self.executable.get_sbpf_version().enable_pqr() => {
throw_error!(DivideOverflow; self, insn.imm, self.reg[dst], i32);
self.reg[dst] = (self.reg[dst] as i32 % insn.imm as i32) as u64;
self.reg[dst] = (self.reg[dst] as i32 % insn.imm as i32) as u32 as u64;
}
ebpf::SREM32_REG if self.executable.get_sbpf_version().enable_pqr() => {
throw_error!(DivideByZero; self, self.reg[src], i32);
throw_error!(DivideOverflow; self, self.reg[src], self.reg[dst], i32);
self.reg[dst] = (self.reg[dst] as i32 % self.reg[src] as i32) as u64;
self.reg[dst] = (self.reg[dst] as i32 % self.reg[src] as i32) as u32 as u64;
},
ebpf::SREM64_IMM if self.executable.get_sbpf_version().enable_pqr() => {
throw_error!(DivideOverflow; self, insn.imm, self.reg[dst], i64);
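
Note that MOV32_REG flips the other way: with implicit sign extension (v1) the move keeps only the low 32 bits of the source, while under v2 it sign-extends them. A standalone sketch of that rule (illustrative, not code from this commit; is_v1 stands in for implicit_sign_extension_of_results()):

fn mov32_reg(src: u64, is_v1: bool) -> u64 {
    if is_v1 {
        src as u32 as u64               // v1: truncate / zero-extend
    } else {
        src as u32 as i32 as i64 as u64 // v2: sign-extend the low 32 bits
    }
}

fn main() {
    let r1 = u64::MAX; // mov64 r1, -1
    assert_eq!(mov32_reg(r1, true), 0xffff_ffff);            // cf. test_mov32_reg_truncating below
    assert_eq!(mov32_reg(r1, false), 0xffff_ffff_ffff_ffff); // cf. test_mov32_reg below
}
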
26 changes: 20 additions & 6 deletions src/jit.rs
@@ -460,11 +460,15 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> {
// BPF_ALU class
ebpf::ADD32_IMM => {
self.emit_sanitized_alu(OperandSize::S32, 0x01, 0, dst, insn.imm);
self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x63, dst, dst, 0, None)); // sign extend i32 to i64
if self.executable.get_sbpf_version().implicit_sign_extension_of_results() {
self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x63, dst, dst, 0, None)); // sign extend i32 to i64
}
},
ebpf::ADD32_REG => {
self.emit_ins(X86Instruction::alu(OperandSize::S32, 0x01, src, dst, 0, None));
self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x63, dst, dst, 0, None)); // sign extend i32 to i64
if self.executable.get_sbpf_version().implicit_sign_extension_of_results() {
self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x63, dst, dst, 0, None)); // sign extend i32 to i64
}
},
ebpf::SUB32_IMM => {
if self.executable.get_sbpf_version().swap_sub_reg_imm_operands() {
@@ -475,11 +479,15 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> {
} else {
self.emit_sanitized_alu(OperandSize::S32, 0x29, 5, dst, insn.imm);
}
self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x63, dst, dst, 0, None)); // sign extend i32 to i64
if self.executable.get_sbpf_version().implicit_sign_extension_of_results() {
self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x63, dst, dst, 0, None)); // sign extend i32 to i64
}
},
ebpf::SUB32_REG => {
self.emit_ins(X86Instruction::alu(OperandSize::S32, 0x29, src, dst, 0, None));
self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x63, dst, dst, 0, None)); // sign extend i32 to i64
if self.executable.get_sbpf_version().implicit_sign_extension_of_results() {
self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x63, dst, dst, 0, None)); // sign extend i32 to i64
}
},
ebpf::MUL32_IMM | ebpf::DIV32_IMM | ebpf::MOD32_IMM if !self.executable.get_sbpf_version().enable_pqr() =>
self.emit_product_quotient_remainder(OperandSize::S32, (insn.opc & ebpf::BPF_ALU_OP_MASK) == ebpf::BPF_MOD, (insn.opc & ebpf::BPF_ALU_OP_MASK) != ebpf::BPF_MUL, (insn.opc & ebpf::BPF_ALU_OP_MASK) == ebpf::BPF_MUL, dst, dst, Some(insn.imm)),
Expand All @@ -503,7 +511,13 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> {
self.emit_ins(X86Instruction::load_immediate(OperandSize::S32, dst, insn.imm));
}
}
ebpf::MOV32_REG => self.emit_ins(X86Instruction::mov(OperandSize::S32, src, dst)),
ebpf::MOV32_REG => {
if self.executable.get_sbpf_version().implicit_sign_extension_of_results() {
self.emit_ins(X86Instruction::mov(OperandSize::S32, src, dst));
} else {
self.emit_ins(X86Instruction::mov_with_sign_extension(OperandSize::S64, src, dst));
}
}
ebpf::ARSH32_IMM => self.emit_shift(OperandSize::S32, 7, REGISTER_SCRATCH, dst, Some(insn.imm)),
ebpf::ARSH32_REG => self.emit_shift(OperandSize::S32, 7, src, dst, None),
ebpf::LE if self.executable.get_sbpf_version().enable_le() => {
@@ -1262,7 +1276,7 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> {
self.emit_ins(X86Instruction::pop(RAX));
}
if let OperandSize::S32 = size {
if signed {
if signed && self.executable.get_sbpf_version().implicit_sign_extension_of_results() {
self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x63, dst, dst, 0, None)); // sign extend i32 to i64
}
}
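
The JIT changes lean on an x86-64 property: a 32-bit ALU operation already clears the upper 32 bits of its destination register, which is exactly the zero-extended SBPF v2 result, so the sign-extending instruction after 32-bit add/sub (and in the signed product/quotient/remainder path) is only emitted when the version requires implicit sign extension. A rough model of the resulting add32 behaviour (illustrative only, not code from this commit):

fn jit_add32_effect(dst: u64, src: u64, is_v1: bool) -> u64 {
    // A 32-bit x86 `add` clears the upper 32 bits of the destination register.
    let low = (dst as u32).wrapping_add(src as u32);
    if is_v1 {
        low as i32 as i64 as u64 // effect of the conditionally emitted sign-extending move
    } else {
        low as u64 // v2: the zero-extended 32-bit result is already correct
    }
}

fn main() {
    assert_eq!(jit_add32_effect(0, u64::MAX, true), u64::MAX);     // v1: -1 as u64
    assert_eq!(jit_add32_effect(0, u64::MAX, false), 0xffff_ffff); // v2
}
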
5 changes: 5 additions & 0 deletions src/program.rs
@@ -20,6 +20,11 @@ pub enum SBPFVersion {
}

impl SBPFVersion {
/// Implicitly perform sign extension of results
pub fn implicit_sign_extension_of_results(&self) -> bool {
self == &SBPFVersion::V1
}

/// Enable the little-endian byte swap instructions
pub fn enable_le(&self) -> bool {
self == &SBPFVersion::V1
13 changes: 13 additions & 0 deletions src/x86.rs
@@ -218,6 +218,19 @@ impl X86Instruction {
}
}

/// Move source to destination, sign-extending the 32-bit source into the 64-bit destination
#[inline]
pub const fn mov_with_sign_extension(size: OperandSize, source: u8, destination: u8) -> Self {
exclude_operand_sizes!(size, OperandSize::S0 | OperandSize::S8 | OperandSize::S16);
Self {
size,
opcode: 0x63,
first_operand: destination,
second_operand: source,
..Self::DEFAULT
}
}

/// Conditionally move source to destination
#[inline]
pub const fn cmov(size: OperandSize, condition: u8, source: u8, destination: u8) -> Self {
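
For reference, opcode 0x63 in 64-bit mode is MOVSXD, a move that sign-extends a 32-bit source into a 64-bit destination, which is why the constructor excludes the 8- and 16-bit operand sizes. A sketch of what the emitted instruction computes (illustrative only):

fn movsxd(src: u64) -> u64 {
    // Sign-extend bits 0..31 of the source into the full 64-bit destination.
    src as u32 as i32 as i64 as u64
}

fn main() {
    assert_eq!(movsxd(0x0000_0000_8000_0000), 0xffff_ffff_8000_0000);
    assert_eq!(movsxd(0x1234_5678_7fff_ffff), 0x0000_0000_7fff_ffff);
}
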
128 changes: 114 additions & 14 deletions tests/execution.rs
@@ -184,7 +184,29 @@ macro_rules! test_interpreter_and_jit_elf {
// BPF_ALU : Arithmetic and Logic

#[test]
fn test_mov() {
fn test_mov32_imm() {
test_interpreter_and_jit_asm!(
"
mov32 r0, 1
exit",
[],
(),
TestContextObject::new(2),
ProgramResult::Ok(1),
);
test_interpreter_and_jit_asm!(
"
mov32 r0, -1
exit",
[],
(),
TestContextObject::new(2),
ProgramResult::Ok(0xffffffff),
);
}

#[test]
fn test_mov32_reg() {
test_interpreter_and_jit_asm!(
"
mov32 r1, 1
@@ -195,32 +217,61 @@ fn test_mov() {
TestContextObject::new(3),
ProgramResult::Ok(0x1),
);
test_interpreter_and_jit_asm!(
"
mov32 r1, -1
mov32 r0, r1
exit",
[],
(),
TestContextObject::new(3),
ProgramResult::Ok(0xffffffffffffffff),
);
}

#[test]
fn test_mov32_imm_large() {
fn test_mov64_imm() {
test_interpreter_and_jit_asm!(
"
mov32 r0, -1
mov64 r0, 1
exit",
[],
(),
TestContextObject::new(2),
ProgramResult::Ok(0xffffffff),
ProgramResult::Ok(1),
);
test_interpreter_and_jit_asm!(
"
mov64 r0, -1
exit",
[],
(),
TestContextObject::new(2),
ProgramResult::Ok(0xffffffffffffffff),
);
}

#[test]
fn test_mov_large() {
fn test_mov64_reg() {
test_interpreter_and_jit_asm!(
"
mov32 r1, -1
mov32 r0, r1
mov64 r1, 1
mov64 r0, r1
exit",
[],
(),
TestContextObject::new(3),
ProgramResult::Ok(0xffffffff),
ProgramResult::Ok(0x1),
);
test_interpreter_and_jit_asm!(
"
mov64 r1, -1
mov64 r0, r1
exit",
[],
(),
TestContextObject::new(3),
ProgramResult::Ok(0xffffffffffffffff),
);
}

@@ -658,26 +709,56 @@ fn test_pqr() {
(ebpf::SDIV64_IMM, 13i64 as u64, 4i64 as u64, 3i64 as u64),
(ebpf::SREM32_IMM, 13i64 as u64, 4i32 as u64, 1i64 as u64),
(ebpf::SREM64_IMM, 13i64 as u64, 4i64 as u64, 1i64 as u64),
(ebpf::LMUL32_IMM, 13i64 as u64, -4i32 as u64, -52i32 as u64),
(
ebpf::LMUL32_IMM,
13i64 as u64,
-4i32 as u64,
-52i32 as u32 as u64,
),
(ebpf::LMUL64_IMM, 13i64 as u64, -4i64 as u64, -52i64 as u64),
(ebpf::SHMUL64_IMM, 13i64 as u64, -4i64 as u64, -1i64 as u64),
(ebpf::SDIV32_IMM, 13i64 as u64, -4i32 as u64, -3i32 as u64),
(
ebpf::SDIV32_IMM,
13i64 as u64,
-4i32 as u64,
-3i32 as u32 as u64,
),
(ebpf::SDIV64_IMM, 13i64 as u64, -4i64 as u64, -3i64 as u64),
(ebpf::SREM32_IMM, 13i64 as u64, -4i32 as u64, 1i64 as u64),
(ebpf::SREM64_IMM, 13i64 as u64, -4i64 as u64, 1i64 as u64),
(ebpf::LMUL32_IMM, -13i64 as u64, 4i32 as u64, -52i32 as u64),
(
ebpf::LMUL32_IMM,
-13i64 as u64,
4i32 as u64,
-52i32 as u32 as u64,
),
(ebpf::LMUL64_IMM, -13i64 as u64, 4i64 as u64, -52i64 as u64),
(ebpf::SHMUL64_IMM, -13i64 as u64, 4i64 as u64, -1i64 as u64),
(ebpf::SDIV32_IMM, -13i64 as u64, 4i32 as u64, -3i32 as u64),
(
ebpf::SDIV32_IMM,
-13i64 as u64,
4i32 as u64,
-3i32 as u32 as u64,
),
(ebpf::SDIV64_IMM, -13i64 as u64, 4i64 as u64, -3i64 as u64),
(ebpf::SREM32_IMM, -13i64 as u64, 4i32 as u64, -1i64 as u64),
(
ebpf::SREM32_IMM,
-13i64 as u64,
4i32 as u64,
-1i32 as u32 as u64,
),
(ebpf::SREM64_IMM, -13i64 as u64, 4i64 as u64, -1i64 as u64),
(ebpf::LMUL32_IMM, -13i64 as u64, -4i32 as u64, 52i32 as u64),
(ebpf::LMUL64_IMM, -13i64 as u64, -4i64 as u64, 52i64 as u64),
(ebpf::SHMUL64_IMM, -13i64 as u64, -4i64 as u64, 0i64 as u64),
(ebpf::SDIV32_IMM, -13i64 as u64, -4i32 as u64, 3i32 as u64),
(ebpf::SDIV64_IMM, -13i64 as u64, -4i64 as u64, 3i64 as u64),
(ebpf::SREM32_IMM, -13i64 as u64, -4i32 as u64, -1i64 as u64),
(
ebpf::SREM32_IMM,
-13i64 as u64,
-4i32 as u64,
-1i32 as u32 as u64,
),
(ebpf::SREM64_IMM, -13i64 as u64, -4i64 as u64, -1i64 as u64),
] {
LittleEndian::write_u32(&mut prog[4..], dst as u32);
@@ -3407,6 +3488,25 @@ fn test_err_fixed_stack_out_of_bound() {
);
}

#[test]
fn test_mov32_reg_truncating() {
let config = Config {
enable_sbpf_v2: false,
..Config::default()
};
test_interpreter_and_jit_asm!(
"
mov64 r1, -1
mov32 r0, r1
exit",
config,
[],
(),
TestContextObject::new(3),
ProgramResult::Ok(0xffffffff),
);
}

#[test]
fn test_lddw() {
let config = Config {
