allow using binary.NativeEndian #1384

Closed
wants to merge 1 commit into from

9 changes: 7 additions & 2 deletions asm/instruction.go
@@ -935,14 +935,19 @@ func (iter *InstructionIterator) Next() bool {
type bpfRegisters uint8

func newBPFRegisters(dst, src Register, bo binary.ByteOrder) (bpfRegisters, error) {
buf := make([]byte, 2)
val := uint16(dst&0x0F)<<8 | uint16(src&0x0F)
switch bo {
case binary.LittleEndian:
Contributor

why not replace this whole switch with bo.PutUint16 directly?

Collaborator @ti-mo Mar 22, 2024

That's the annoying bit: calling an interface method cannot be inlined, and passing a pointer to an interface method causes it to escape. This function is called a ton, so it starts adding up.

We have some discovery to do: now that binary.NativeEndian is a thing, we can probably eliminate these kinds of switches/branches and replace them with binary.NativeEndian.PutUint16, since marshaling bpf insns is usually done before loading into the host kernel, and we'd almost always do that in native endianness.

Instruction.Marshal() is exported API, though, so we should have a look at public callers on Sourcegraph to see if anyone depends on marshaling asm.Instructions in arbitrary endiannesses (e.g. for converting ELFs from LE to BE).

Contributor

ah yeah, that's too bad

Collaborator Author

> Instruction.Marshal() is exported API, though, so we should have a look at public callers on Sourcegraph to see if anyone depends on marshaling asm.Instructions in arbitrary endiannesses

I'd rather not get into the habit of hardcoding NativeEndian; it leads to not thinking about endianness at call sites, and that tends to cause subtle bugs. Compared to that, the type switch isn't bad at all, imo.
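
An aside on the exchange above (the asm/instruction.go hunk continues right below): a minimal, standalone sketch of the inlining/escape trade-off ti-mo describes. The names putViaInterface and putViaSwitch are invented for the example; only encoding/binary and testing from the standard library are used, and the exact allocation counts depend on the compiler's inlining and devirtualization decisions.

```go
// Standalone sketch (not part of the PR) of the trade-off described above.
package main

import (
	"encoding/binary"
	"fmt"
	"testing"
)

// putViaInterface writes v through the binary.ByteOrder interface. The
// dynamic call is generally not inlined, and buf is passed to a method the
// compiler can't see through, so it tends to escape to the heap.
func putViaInterface(bo binary.ByteOrder, v uint16) byte {
	buf := make([]byte, 2)
	bo.PutUint16(buf, v)
	return buf[0]
}

// putViaSwitch dispatches on the concrete byte order, so each PutUint16
// call is a static, inlinable call and buf can stay on the stack.
// (Unrecognized orders are ignored here for brevity.)
func putViaSwitch(bo binary.ByteOrder, v uint16) byte {
	buf := make([]byte, 2)
	switch bo {
	case binary.LittleEndian:
		binary.LittleEndian.PutUint16(buf, v)
	case binary.BigEndian:
		binary.BigEndian.PutUint16(buf, v)
	case binary.NativeEndian: // requires Go 1.21+
		binary.NativeEndian.PutUint16(buf, v)
	}
	return buf[0]
}

func main() {
	// On most toolchains the interface path shows ~1 allocation per call and
	// the switch path shows 0; newer compilers may devirtualize the former.
	fmt.Println("via interface:", testing.AllocsPerRun(100, func() { putViaInterface(binary.BigEndian, 0x0102) }))
	fmt.Println("via switch:   ", testing.AllocsPerRun(100, func() { putViaSwitch(binary.BigEndian, 0x0102) }))
}
```

Run it with go run on Go 1.21 or later, since binary.NativeEndian was only added in Go 1.21.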

return bpfRegisters((src << 4) | (dst & 0xF)), nil
binary.LittleEndian.PutUint16(buf, val)
case binary.BigEndian:
return bpfRegisters((dst << 4) | (src & 0xF)), nil
binary.BigEndian.PutUint16(buf, val)
case binary.NativeEndian:
binary.NativeEndian.PutUint16(buf, val)
default:
return 0, fmt.Errorf("unrecognized ByteOrder %T", bo)
}
return bpfRegisters(buf[0]<<4 | buf[1]), nil
}

// IsUnreferencedSymbol returns true if err was caused by
31 changes: 31 additions & 0 deletions asm/instruction_test.go
@@ -431,3 +431,34 @@ func TestLongJumpPatching(t *testing.T) {
t.Errorf("Expected offset to be 3, got %d", insns[1].Constant)
}
}

func TestNewBPFRegisters(t *testing.T) {
for _, test := range []struct {
src, dst Register
order binary.ByteOrder
result bpfRegisters
}{
{
R1, R2,
binary.LittleEndian,
0x12,
},
{
RFP, R2,
binary.BigEndian,
0x2a,
},
} {
t.Run(fmt.Sprintf("%s %s:%s", test.order, test.src, test.dst), func(t *testing.T) {
have, err := newBPFRegisters(test.dst, test.src, test.order)
qt.Assert(t, qt.IsNil(err))
qt.Assert(t, qt.Equals(have, test.result))
})
}

allocs := testing.AllocsPerRun(1, func() {
newBPFRegisters(R1, RFP, binary.NativeEndian)
})

qt.Assert(t, qt.Equals(allocs, 0))
}
2 changes: 1 addition & 1 deletion btf/btf.go
@@ -679,7 +679,7 @@ func (s *Spec) TypeByName(name string, typ interface{}) error {
// Types from base are used to resolve references in the split BTF.
// The returned Spec only contains types from the split BTF, not from the base.
func LoadSplitSpecFromReader(r io.ReaderAt, base *Spec) (*Spec, error) {
return loadRawSpec(r, internal.NativeEndian, base)
return loadRawSpec(r, binary.NativeEndian, base)
}

// TypesIterator iterates over types of a given spec.
3 changes: 2 additions & 1 deletion btf/core.go
@@ -10,6 +10,7 @@ import (
"strings"

"github.com/cilium/ebpf/asm"
"github.com/cilium/ebpf/internal"
)

// Code in this file is derived from libbpf, which is available under a BSD
@@ -211,7 +212,7 @@ func CORERelocate(relos []*CORERelocation, targets []*Spec, bo binary.ByteOrder,
}

for _, target := range targets {
if bo != target.imm.byteOrder {
if !internal.ByteOrderEqual(bo, target.imm.byteOrder) {
return nil, fmt.Errorf("can't relocate %s against %s", bo, target.imm.byteOrder)
}
}
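
The comparison above goes through internal.ByteOrderEqual, and the test diffs below use internal.IsNativeEndian; neither definition is shown in this excerpt. The sketch below is an assumption about what such helpers could look like, not the PR's actual code — the underlying point being that binary.NativeEndian is a distinct type, so a plain == against binary.LittleEndian or binary.BigEndian no longer works:

```go
// Hypothetical sketch only: the internal helpers referenced in the diffs are
// not shown in this excerpt, and the real implementations may differ.
package internal

import "encoding/binary"

// concrete maps any ByteOrder (including binary.NativeEndian) onto the
// equivalent binary.LittleEndian or binary.BigEndian value by probing how
// it lays out a known 16-bit pattern.
func concrete(bo binary.ByteOrder) binary.ByteOrder {
	var buf [2]byte
	bo.PutUint16(buf[:], 0x0102)
	if buf[0] == 0x01 {
		return binary.BigEndian
	}
	return binary.LittleEndian
}

// ByteOrderEqual reports whether a and b encode data identically, so that
// binary.NativeEndian compares equal to the host's concrete byte order.
func ByteOrderEqual(a, b binary.ByteOrder) bool {
	return concrete(a) == concrete(b)
}

// IsNativeEndian reports whether bo matches the host byte order.
func IsNativeEndian(bo binary.ByteOrder) bool {
	return ByteOrderEqual(bo, binary.NativeEndian)
}
```

Probing with PutUint16 avoids a per-architecture build-tag table; the real helpers may simply compare against the package's existing NativeEndian variable instead.
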
6 changes: 3 additions & 3 deletions btf/core_reloc_test.go
@@ -26,7 +26,7 @@ func TestCORERelocationLoad(t *testing.T) {
t.Fatal(err)
}

if spec.ByteOrder != internal.NativeEndian {
if !internal.IsNativeEndian(spec.ByteOrder) {
return
}

@@ -76,7 +76,7 @@ func TestCORERelocationRead(t *testing.T) {
t.Fatal(err)
}

if spec.ByteOrder != internal.NativeEndian {
if !internal.IsNativeEndian(spec.ByteOrder) {
return
}

@@ -126,7 +126,7 @@ func TestLD64IMMReloc(t *testing.T) {
t.Fatal(err)
}

if spec.ByteOrder != internal.NativeEndian {
if !internal.IsNativeEndian(spec.ByteOrder) {
return
}

8 changes: 4 additions & 4 deletions btf/core_test.go
@@ -1,6 +1,7 @@
package btf

import (
"encoding/binary"
"errors"
"fmt"
"os"
@@ -10,7 +11,6 @@ import (

"github.com/google/go-cmp/cmp"

"github.com/cilium/ebpf/internal"
"github.com/cilium/ebpf/internal/testutils"

"github.com/go-quicktest/qt"
@@ -637,7 +637,7 @@ func TestCOREReloFieldSigned(t *testing.T) {
relo := &CORERelocation{
typ, coreAccessor{0}, reloFieldSigned, 0,
}
fixup, err := coreCalculateFixup(relo, &Void{}, 0, internal.NativeEndian)
fixup, err := coreCalculateFixup(relo, &Void{}, 0, binary.NativeEndian)
qt.Assert(t, qt.IsTrue(fixup.poison))
qt.Assert(t, qt.IsNil(err))
})
@@ -647,7 +647,7 @@ func TestCOREReloFieldSigned(t *testing.T) {
relo := &CORERelocation{
&Array{}, coreAccessor{0}, reloFieldSigned, 0,
}
_, err := coreCalculateFixup(relo, &Array{}, 0, internal.NativeEndian)
_, err := coreCalculateFixup(relo, &Array{}, 0, binary.NativeEndian)
qt.Assert(t, qt.ErrorIs(err, errNoSignedness))
})
}
@@ -664,7 +664,7 @@ func TestCOREReloFieldShiftU64(t *testing.T) {
{typ, coreAccessor{0, 0}, reloFieldLShiftU64, 1},
} {
t.Run(relo.kind.String(), func(t *testing.T) {
_, err := coreCalculateFixup(relo, typ, 1, internal.NativeEndian)
_, err := coreCalculateFixup(relo, typ, 1, binary.NativeEndian)
qt.Assert(t, qt.ErrorIs(err, errUnsizedType))
})
}
12 changes: 6 additions & 6 deletions btf/ext_info.go
@@ -408,8 +408,8 @@ func (fi *funcInfo) marshal(w *bytes.Buffer, b *Builder) error {
TypeID: id,
}
buf := make([]byte, FuncInfoSize)
internal.NativeEndian.PutUint32(buf, bfi.InsnOff)
internal.NativeEndian.PutUint32(buf[4:], uint32(bfi.TypeID))
binary.NativeEndian.PutUint32(buf, bfi.InsnOff)
binary.NativeEndian.PutUint32(buf[4:], uint32(bfi.TypeID))
_, err = w.Write(buf)
return err
}
@@ -617,10 +617,10 @@ func (li *lineInfo) marshal(w *bytes.Buffer, b *Builder) error {
}

buf := make([]byte, LineInfoSize)
internal.NativeEndian.PutUint32(buf, bli.InsnOff)
internal.NativeEndian.PutUint32(buf[4:], bli.FileNameOff)
internal.NativeEndian.PutUint32(buf[8:], bli.LineOff)
internal.NativeEndian.PutUint32(buf[12:], bli.LineCol)
binary.NativeEndian.PutUint32(buf, bli.InsnOff)
binary.NativeEndian.PutUint32(buf[4:], bli.FileNameOff)
binary.NativeEndian.PutUint32(buf[8:], bli.LineOff)
binary.NativeEndian.PutUint32(buf[12:], bli.LineCol)
_, err = w.Write(buf)
return err
}
7 changes: 3 additions & 4 deletions btf/ext_info_test.go
@@ -2,10 +2,9 @@ package btf

import (
"bytes"
"encoding/binary"
"strings"
"testing"

"github.com/cilium/ebpf/internal"
)

func TestParseExtInfoBigRecordSize(t *testing.T) {
@@ -15,11 +14,11 @@ func TestParseExtInfoBigRecordSize(t *testing.T) {
t.Fatal(err)
}

if _, err := parseFuncInfos(rd, internal.NativeEndian, table); err == nil {
if _, err := parseFuncInfos(rd, binary.NativeEndian, table); err == nil {
t.Error("Parsing func info with large record size doesn't return an error")
}

if _, err := parseLineInfos(rd, internal.NativeEndian, table); err == nil {
if _, err := parseLineInfos(rd, binary.NativeEndian, table); err == nil {
t.Error("Parsing line info with large record size doesn't return an error")
}
}
10 changes: 4 additions & 6 deletions btf/fuzz_test.go
@@ -6,13 +6,11 @@ import (
"fmt"
"io"
"testing"

"github.com/cilium/ebpf/internal"
)

func FuzzSpec(f *testing.F) {
var buf bytes.Buffer
err := binary.Write(&buf, internal.NativeEndian, &btfHeader{
err := binary.Write(&buf, binary.NativeEndian, &btfHeader{
Magic: btfMagic,
Version: 1,
HdrLen: uint32(binary.Size(btfHeader{})),
@@ -26,7 +24,7 @@
t.Skip("data is too short")
}

spec, err := loadRawSpec(bytes.NewReader(data), internal.NativeEndian, nil)
spec, err := loadRawSpec(bytes.NewReader(data), binary.NativeEndian, nil)
if err != nil {
if spec != nil {
t.Fatal("spec is not nil")
@@ -47,7 +45,7 @@

func FuzzExtInfo(f *testing.F) {
var buf bytes.Buffer
err := binary.Write(&buf, internal.NativeEndian, &btfExtHeader{
err := binary.Write(&buf, binary.NativeEndian, &btfExtHeader{
Magic: btfMagic,
Version: 1,
HdrLen: uint32(binary.Size(btfExtHeader{})),
@@ -70,7 +68,7 @@
emptySpec := specFromTypes(t, nil)
emptySpec.strings = table

info, err := loadExtInfos(bytes.NewReader(data), internal.NativeEndian, emptySpec)
info, err := loadExtInfos(bytes.NewReader(data), binary.NativeEndian, emptySpec)
if err != nil {
if info != nil {
t.Fatal("info is not nil")
3 changes: 2 additions & 1 deletion btf/handle.go
@@ -2,6 +2,7 @@ package btf

import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"math"
@@ -113,7 +114,7 @@ func (h *Handle) Spec(base *Spec) (*Spec, error) {
return nil, fmt.Errorf("missing base types")
}

return loadRawSpec(bytes.NewReader(btfBuffer), internal.NativeEndian, base)
return loadRawSpec(bytes.NewReader(btfBuffer), binary.NativeEndian, base)
}

// Close destroys the handle.
5 changes: 3 additions & 2 deletions btf/kernel.go
@@ -1,6 +1,7 @@
package btf

import (
"encoding/binary"
"errors"
"fmt"
"os"
@@ -98,7 +99,7 @@ func loadKernelSpec() (_ *Spec, fallback bool, _ error) {
if err == nil {
defer fh.Close()

spec, err := loadRawSpec(fh, internal.NativeEndian, nil)
spec, err := loadRawSpec(fh, binary.NativeEndian, nil)
return spec, false, err
}

@@ -124,7 +125,7 @@ func loadKernelModuleSpec(module string, base *Spec) (*Spec, error) {
}
defer fh.Close()

return loadRawSpec(fh, internal.NativeEndian, base)
return loadRawSpec(fh, binary.NativeEndian, base)
}

// findVMLinux scans multiple well-known paths for vmlinux kernel images.
4 changes: 2 additions & 2 deletions btf/marshal.go
@@ -24,7 +24,7 @@ type MarshalOptions struct {
// KernelMarshalOptions will generate BTF suitable for the current kernel.
func KernelMarshalOptions() *MarshalOptions {
return &MarshalOptions{
Order: internal.NativeEndian,
Order: binary.NativeEndian,
StripFuncLinkage: haveFuncLinkage() != nil,
ReplaceEnum64: haveEnum64() != nil,
}
@@ -149,7 +149,7 @@ func (b *Builder) Marshal(buf []byte, opts *MarshalOptions) ([]byte, error) {
}

if opts == nil {
opts = &MarshalOptions{Order: internal.NativeEndian}
opts = &MarshalOptions{Order: binary.NativeEndian}
}

// Reserve space for the BTF header.
11 changes: 5 additions & 6 deletions btf/marshal_test.go
@@ -9,7 +9,6 @@ import (
"github.com/go-quicktest/qt"
"github.com/google/go-cmp/cmp"

"github.com/cilium/ebpf/internal"
"github.com/cilium/ebpf/internal/testutils"
)

@@ -31,11 +30,11 @@ func TestBuilderMarshal(t *testing.T) {
qt.Assert(t, qt.IsNil(err))

cpy := *b
buf, err := b.Marshal(nil, &MarshalOptions{Order: internal.NativeEndian})
buf, err := b.Marshal(nil, &MarshalOptions{Order: binary.NativeEndian})
qt.Assert(t, qt.IsNil(err))
qt.Assert(t, qt.CmpEquals(b, &cpy, cmp.AllowUnexported(*b)), qt.Commentf("Marshaling should not change Builder state"))

have, err := loadRawSpec(bytes.NewReader(buf), internal.NativeEndian, nil)
have, err := loadRawSpec(bytes.NewReader(buf), binary.NativeEndian, nil)
qt.Assert(t, qt.IsNil(err), qt.Commentf("Couldn't parse BTF"))
qt.Assert(t, qt.DeepEquals(have.imm.types, want))
}
@@ -129,12 +128,12 @@ func TestMarshalEnum64(t *testing.T) {
b, err := NewBuilder([]Type{enum})
qt.Assert(t, qt.IsNil(err))
buf, err := b.Marshal(nil, &MarshalOptions{
Order: internal.NativeEndian,
Order: binary.NativeEndian,
ReplaceEnum64: true,
})
qt.Assert(t, qt.IsNil(err))

spec, err := loadRawSpec(bytes.NewReader(buf), internal.NativeEndian, nil)
spec, err := loadRawSpec(bytes.NewReader(buf), binary.NativeEndian, nil)
qt.Assert(t, qt.IsNil(err))

var have *Union
@@ -196,7 +195,7 @@ func specFromTypes(tb testing.TB, types []Type) *Spec {
tb.Helper()

btf := marshalNativeEndian(tb, types)
spec, err := loadRawSpec(bytes.NewReader(btf), internal.NativeEndian, nil)
spec, err := loadRawSpec(bytes.NewReader(btf), binary.NativeEndian, nil)
qt.Assert(tb, qt.IsNil(err))

return spec
2 changes: 1 addition & 1 deletion collection.go
@@ -623,7 +623,7 @@ func resolveKconfig(m *MapSpec) error {
if err != nil {
return fmt.Errorf("getting kernel version: %w", err)
}
internal.NativeEndian.PutUint32(data[vsi.Offset:], kv.Kernel())
binary.NativeEndian.PutUint32(data[vsi.Offset:], kv.Kernel())

case "LINUX_HAS_SYSCALL_WRAPPER":
integer, ok := v.Type.(*btf.Int)