Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add CO-RE support for kernel modules #1300

Merged
merged 8 commits into from
Feb 23, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
101 changes: 0 additions & 101 deletions btf/btf.go
Original file line number Diff line number Diff line change
Expand Up @@ -396,107 +396,6 @@ func indexTypes(types []Type, firstTypeID TypeID) (map[Type]TypeID, map[essentia
return typeIDs, typesByName
}

// LoadKernelSpec returns the current kernel's BTF information.
//
// Defaults to /sys/kernel/btf/vmlinux and falls back to scanning the file system
// for vmlinux ELFs. Returns an error wrapping ErrNotSupported if BTF is not enabled.
func LoadKernelSpec() (*Spec, error) {
	cached, _, err := kernelSpec()
	if err != nil {
		return nil, err
	}

	// Hand out a copy so callers can't mutate the shared cached Spec.
	return cached.Copy(), nil
}

// kernelBTF caches the result of loading the kernel's BTF so repeated lookups
// don't re-parse it. Access is guarded by the embedded RWMutex; the cache is
// cleared by FlushKernelSpec.
var kernelBTF struct {
	sync.RWMutex
	spec *Spec
	// True if the spec was read from an ELF instead of raw BTF in /sys.
	fallback bool
}

// FlushKernelSpec removes any cached kernel type information.
func FlushKernelSpec() {
	kernelBTF.Lock()
	kernelBTF.spec = nil
	kernelBTF.fallback = false
	kernelBTF.Unlock()
}

// kernelSpec returns the cached kernel Spec, loading and caching it on first
// use. The boolean reports whether the spec came from an ELF fallback rather
// than raw BTF in /sys (see loadKernelSpec).
func kernelSpec() (*Spec, bool, error) {
	// Fast path: read the cache under the shared read lock.
	kernelBTF.RLock()
	spec, fallback := kernelBTF.spec, kernelBTF.fallback
	kernelBTF.RUnlock()

	if spec == nil {
		// Slow path: take the write lock and re-read, since another
		// goroutine may have populated the cache between RUnlock and
		// Lock. The deferred unlock keeps the lock held across the
		// load below, so the BTF is parsed at most once.
		kernelBTF.Lock()
		defer kernelBTF.Unlock()

		spec, fallback = kernelBTF.spec, kernelBTF.fallback
	}

	if spec != nil {
		return spec, fallback, nil
	}

	spec, fallback, err := loadKernelSpec()
	if err != nil {
		return nil, false, err
	}

	// Still holding the write lock from the slow path above.
	kernelBTF.spec, kernelBTF.fallback = spec, fallback
	return spec, fallback, nil
}

// loadKernelSpec parses the running kernel's BTF, preferring the raw BTF
// exposed at /sys/kernel/btf/vmlinux and otherwise falling back to a vmlinux
// ELF found on disk. fallback is true when the ELF path was used.
func loadKernelSpec() (_ *Spec, fallback bool, _ error) {
	if fh, err := os.Open("/sys/kernel/btf/vmlinux"); err == nil {
		defer fh.Close()

		spec, err := loadRawSpec(fh, internal.NativeEndian, nil)
		return spec, false, err
	}

	elf, err := findVMLinux()
	if err != nil {
		return nil, false, err
	}
	defer elf.Close()

	spec, err := LoadSpecFromReader(elf)
	return spec, true, err
}

// findVMLinux scans multiple well-known paths for vmlinux kernel images.
func findVMLinux() (*os.File, error) {
	release, err := internal.KernelRelease()
	if err != nil {
		return nil, err
	}

	// Same candidate locations as libbpf:
	// https://github.com/libbpf/libbpf/blob/9a3a42608dbe3731256a5682a125ac1e23bced8f/src/btf.c#L3114-L3122
	for _, pattern := range []string{
		"/boot/vmlinux-%s",
		"/lib/modules/%s/vmlinux-%[1]s",
		"/lib/modules/%s/build/vmlinux",
		"/usr/lib/modules/%s/kernel/vmlinux",
		"/usr/lib/debug/boot/vmlinux-%s",
		"/usr/lib/debug/boot/vmlinux-%s.debug",
		"/usr/lib/debug/lib/modules/%s/vmlinux",
	} {
		file, err := os.Open(fmt.Sprintf(pattern, release))
		if errors.Is(err, os.ErrNotExist) {
			continue
		}
		// Either an open file or an unexpected error (e.g. permission
		// denied); stop scanning in both cases.
		return file, err
	}

	return nil, fmt.Errorf("no BTF found for kernel version %s: %w", release, internal.ErrNotSupported)
}

func guessRawBTFByteOrder(r io.ReaderAt) binary.ByteOrder {
buf := new(bufio.Reader)
for _, bo := range []binary.ByteOrder{
Expand Down
23 changes: 6 additions & 17 deletions btf/btf_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@ import (
"errors"
"fmt"
"io"
"io/fs"
"os"
"runtime"
"sync"
Expand All @@ -23,16 +24,15 @@ func vmlinuxSpec(tb testing.TB) *Spec {

// /sys/kernel/btf was introduced in 341dfcf8d78e ("btf: expose BTF info
// through sysfs"), which shipped in Linux 5.4.
testutils.SkipOnOldKernel(tb, "5.4", "vmlinux BTF in sysfs")
if _, err := os.Stat("/sys/kernel/btf/vmlinux"); errors.Is(err, fs.ErrNotExist) {
tb.Skip("No /sys/kernel/btf/vmlinux")
}

spec, fallback, err := kernelSpec()
spec, err := LoadKernelSpec()
if err != nil {
tb.Fatal(err)
}
if fallback {
tb.Fatal("/sys/kernel/btf/vmlinux is not available")
}
return spec.Copy()
return spec
}

type specAndRawBTF struct {
Expand Down Expand Up @@ -320,17 +320,6 @@ func TestVerifierError(t *testing.T) {
}
}

// TestLoadKernelSpec is a smoke test that the running kernel's BTF can be
// loaded when it is exposed via sysfs.
func TestLoadKernelSpec(t *testing.T) {
	// errors.Is with os.ErrNotExist is preferred over os.IsNotExist: it is
	// the modern idiom and also matches wrapped errors.
	if _, err := os.Stat("/sys/kernel/btf/vmlinux"); errors.Is(err, os.ErrNotExist) {
		t.Skip("/sys/kernel/btf/vmlinux not present")
	}

	_, err := LoadKernelSpec()
	if err != nil {
		t.Fatal("Can't load kernel spec:", err)
	}
}

func TestGuessBTFByteOrder(t *testing.T) {
bo := guessRawBTFByteOrder(vmlinuxTestdataReader(t))
if bo != binary.LittleEndian {
Expand Down
51 changes: 39 additions & 12 deletions btf/core.go
Original file line number Diff line number Diff line change
Expand Up @@ -164,6 +164,33 @@ func (k coreKind) String() string {
}
}

// mergedSpec presents several Specs (e.g. vmlinux plus kernel modules) as a
// single type-lookup target for CO-RE relocation.
type mergedSpec []*Spec

// TypeByID returns the type with the given ID from the first Spec that
// contains it. Returns an error wrapping ErrNotFound if no Spec has the ID.
func (s mergedSpec) TypeByID(id TypeID) (Type, error) {
	for _, sp := range s {
		t, err := sp.TypeByID(id)
		if err != nil {
			if errors.Is(err, ErrNotFound) {
				continue
			}
			return nil, err
		}
		return t, nil
	}
	// Guard against an empty merged spec: indexing s[0] below would panic.
	if len(s) == 0 {
		return nil, fmt.Errorf("look up type with ID %d: %w", id, ErrNotFound)
	}
	return nil, fmt.Errorf("look up type with ID %d (first ID is %d): %w", id, s[0].imm.firstTypeID, ErrNotFound)
}

// NamedTypes collects the IDs of all types matching the essential name across
// every Spec in the merged set, in Spec order.
func (s mergedSpec) NamedTypes(name essentialName) []TypeID {
	var ids []TypeID
	for _, sp := range s {
		// Appending an empty slice is a no-op, so no length check needed.
		ids = append(ids, sp.imm.namedTypes[name]...)
	}
	return ids
}

// CORERelocate calculates changes needed to adjust eBPF instructions for differences
// in types.
//
Expand All @@ -177,17 +204,16 @@ func (k coreKind) String() string {
//
// Fixups are returned in the order of relos, e.g. fixup[i] is the solution
// for relos[i].
func CORERelocate(relos []*CORERelocation, target *Spec, bo binary.ByteOrder, resolveLocalTypeID func(Type) (TypeID, error)) ([]COREFixup, error) {
if target == nil {
var err error
target, _, err = kernelSpec()
if err != nil {
return nil, fmt.Errorf("load kernel spec: %w", err)
}
func CORERelocate(relos []*CORERelocation, targets []*Spec, bo binary.ByteOrder, resolveLocalTypeID func(Type) (TypeID, error)) ([]COREFixup, error) {
if len(targets) == 0 {
lmb marked this conversation as resolved.
Show resolved Hide resolved
// Explicitly check for nil here since the argument used to be optional.
return nil, fmt.Errorf("targets must be provided")
}

if bo != target.imm.byteOrder {
return nil, fmt.Errorf("can't relocate %s against %s", bo, target.imm.byteOrder)
for _, target := range targets {
if bo != target.imm.byteOrder {
return nil, fmt.Errorf("can't relocate %s against %s", bo, target.imm.byteOrder)
}
}

type reloGroup struct {
Expand Down Expand Up @@ -229,14 +255,15 @@ func CORERelocate(relos []*CORERelocation, target *Spec, bo binary.ByteOrder, re
group.indices = append(group.indices, i)
}

mergeTarget := mergedSpec(targets)
for localType, group := range relosByType {
localTypeName := localType.TypeName()
if localTypeName == "" {
return nil, fmt.Errorf("relocate unnamed or anonymous type %s: %w", localType, ErrNotSupported)
}

targets := target.imm.namedTypes[newEssentialName(localTypeName)]
fixups, err := coreCalculateFixups(group.relos, target, targets, bo)
targets := mergeTarget.NamedTypes(newEssentialName(localTypeName))
fixups, err := coreCalculateFixups(group.relos, &mergeTarget, targets, bo)
if err != nil {
return nil, fmt.Errorf("relocate %s: %w", localType, err)
}
Expand All @@ -259,7 +286,7 @@ var errIncompatibleTypes = errors.New("incompatible types")
//
// The best target is determined by scoring: the less poisoning we have to do
// the better the target is.
func coreCalculateFixups(relos []*CORERelocation, targetSpec *Spec, targets []TypeID, bo binary.ByteOrder) ([]COREFixup, error) {
func coreCalculateFixups(relos []*CORERelocation, targetSpec *mergedSpec, targets []TypeID, bo binary.ByteOrder) ([]COREFixup, error) {
bestScore := len(relos)
var bestFixups []COREFixup
for _, targetID := range targets {
Expand Down
4 changes: 2 additions & 2 deletions btf/core_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -592,7 +592,7 @@ func TestCORERelocation(t *testing.T) {
relos = append(relos, reloInfo.relo)
}

fixups, err := CORERelocate(relos, spec, spec.imm.byteOrder, spec.TypeID)
fixups, err := CORERelocate(relos, []*Spec{spec}, spec.imm.byteOrder, spec.TypeID)
if want := errs[name]; want != nil {
if !errors.Is(err, want) {
t.Fatal("Expected", want, "got", err)
Expand Down Expand Up @@ -744,7 +744,7 @@ func BenchmarkCORESkBuff(b *testing.B) {
b.ReportAllocs()

for i := 0; i < b.N; i++ {
_, err = CORERelocate([]*CORERelocation{relo}, spec, spec.imm.byteOrder, spec.TypeID)
_, err = CORERelocate([]*CORERelocation{relo}, []*Spec{spec}, spec.imm.byteOrder, spec.TypeID)
if err != nil {
b.Fatal(err)
}
Expand Down