diff --git a/external/bpf_conformance b/external/bpf_conformance
index 0eed408b..3566334b 160000
--- a/external/bpf_conformance
+++ b/external/bpf_conformance
@@ -1 +1 @@
-Subproject commit 0eed408b10ba22848886d31bfc2a77fa8adfc5ed
+Subproject commit 3566334b7dd99c305eb45f161a40b94441451f4e
diff --git a/vm/ubpf_vm.c b/vm/ubpf_vm.c
index 270ad73b..e2d71ae0 100644
--- a/vm/ubpf_vm.c
+++ b/vm/ubpf_vm.c
@@ -293,6 +293,17 @@ i32(uint64_t x)
     return x;
 }
 
+/**
+ * @brief Sign extend immediate value to a signed 64-bit value.
+ *
+ * @param[in] immediate The signed 32-bit immediate value to sign extend.
+ * @return The sign extended 64-bit value.
+ */
+static int64_t i64(int32_t immediate) {
+    return (int64_t)immediate;
+
+}
+
 #define IS_ALIGNED(x, a) (((uintptr_t)(x) & ((a)-1)) == 0)
 
 inline static uint64_t
@@ -685,7 +696,7 @@ ubpf_exec(const struct ubpf_vm* vm, void* mem, size_t mem_len, uint64_t* bpf_ret
             pc += inst.offset;
             break;
         case EBPF_OP_JEQ_IMM:
-            if (reg[inst.dst] == inst.imm) {
+            if (reg[inst.dst] == (uint64_t)i64(inst.imm)) {
                 pc += inst.offset;
             }
             break;
@@ -705,7 +716,7 @@ ubpf_exec(const struct ubpf_vm* vm, void* mem, size_t mem_len, uint64_t* bpf_ret
             }
             break;
         case EBPF_OP_JGT_IMM:
-            if (reg[inst.dst] > u32(inst.imm)) {
+            if (reg[inst.dst] > (uint64_t)i64(inst.imm)) {
                 pc += inst.offset;
             }
             break;
@@ -725,7 +736,7 @@ ubpf_exec(const struct ubpf_vm* vm, void* mem, size_t mem_len, uint64_t* bpf_ret
             }
             break;
         case EBPF_OP_JGE_IMM:
-            if (reg[inst.dst] >= u32(inst.imm)) {
+            if (reg[inst.dst] >= (uint64_t)i64(inst.imm)) {
                 pc += inst.offset;
             }
             break;
@@ -745,7 +756,7 @@ ubpf_exec(const struct ubpf_vm* vm, void* mem, size_t mem_len, uint64_t* bpf_ret
             }
             break;
        case EBPF_OP_JLT_IMM:
-            if (reg[inst.dst] < u32(inst.imm)) {
+            if (reg[inst.dst] < (uint64_t)i64(inst.imm)) {
                 pc += inst.offset;
             }
             break;
@@ -765,7 +776,7 @@ ubpf_exec(const struct ubpf_vm* vm, void* mem, size_t mem_len, uint64_t* bpf_ret
             }
             break;
         case EBPF_OP_JLE_IMM:
-            if (reg[inst.dst] <= u32(inst.imm)) {
+            if (reg[inst.dst] <= (uint64_t)i64(inst.imm)) {
                 pc += inst.offset;
             }
             break;
@@ -785,7 +796,7 @@ ubpf_exec(const struct ubpf_vm* vm, void* mem, size_t mem_len, uint64_t* bpf_ret
             }
             break;
         case EBPF_OP_JSET_IMM:
-            if (reg[inst.dst] & inst.imm) {
+            if (reg[inst.dst] & (uint64_t)i64(inst.imm)) {
                 pc += inst.offset;
             }
             break;
@@ -805,7 +816,7 @@ ubpf_exec(const struct ubpf_vm* vm, void* mem, size_t mem_len, uint64_t* bpf_ret
             }
             break;
         case EBPF_OP_JNE_IMM:
-            if (reg[inst.dst] != inst.imm) {
+            if (reg[inst.dst] != (uint64_t)i64(inst.imm)) {
                 pc += inst.offset;
             }
             break;
@@ -825,7 +836,7 @@ ubpf_exec(const struct ubpf_vm* vm, void* mem, size_t mem_len, uint64_t* bpf_ret
             }
             break;
         case EBPF_OP_JSGT_IMM:
-            if ((int64_t)reg[inst.dst] > inst.imm) {
+            if ((int64_t)reg[inst.dst] > i64(inst.imm)) {
                 pc += inst.offset;
             }
             break;
@@ -845,7 +856,7 @@ ubpf_exec(const struct ubpf_vm* vm, void* mem, size_t mem_len, uint64_t* bpf_ret
             }
             break;
         case EBPF_OP_JSGE_IMM:
-            if ((int64_t)reg[inst.dst] >= inst.imm) {
+            if ((int64_t)reg[inst.dst] >= i64(inst.imm)) {
                 pc += inst.offset;
             }
             break;
@@ -865,7 +876,7 @@ ubpf_exec(const struct ubpf_vm* vm, void* mem, size_t mem_len, uint64_t* bpf_ret
             }
             break;
         case EBPF_OP_JSLT_IMM:
-            if ((int64_t)reg[inst.dst] < inst.imm) {
+            if ((int64_t)reg[inst.dst] < i64(inst.imm)) {
                 pc += inst.offset;
             }
             break;
@@ -885,7 +896,7 @@ ubpf_exec(const struct ubpf_vm* vm, void* mem, size_t mem_len, uint64_t* bpf_ret
             }
             break;
         case EBPF_OP_JSLE_IMM:
-            if ((int64_t)reg[inst.dst] <= inst.imm) {
+            if ((int64_t)reg[inst.dst] <= i64(inst.imm)) {
                 pc += inst.offset;
             }
             break;