From 1ca4828035ef72c2db79ae6efaa273a24e4eb5d2 Mon Sep 17 00:00:00 2001 From: Anastasios Papagiannis Date: Mon, 6 Feb 2023 17:53:12 +0000 Subject: [PATCH 1/8] matchBinaries fixes There are some issues regarding matchBinaries. In this patch we still support up to 4 values in matchBinaries. Increasing this will be a followup. For matchBinaries, we use names_map that has binary names to id translations. During exec events we check if the binary name exists in this map and if that is true we keep that id in the execve_map_value struct. Now we write in the matchBinaries selectors the value 1 everywhere. To fix that we introduce a single global variable that get a new unique ID for each binary specified. We cannot use a separate names_map for each kprobe as they should also be shared with the execve kprobe. We keep a single names_map for all kprobes. Signed-off-by: Anastasios Papagiannis --- bpf/process/bpf_execve_event.c | 3 ++- pkg/selectors/kernel.go | 18 ++++++++----- pkg/selectors/selectors.go | 28 ++++++++++++++++++++ pkg/sensors/tracing/binaryentry.go | 20 +++------------ pkg/sensors/tracing/generickprobe.go | 38 ++++++++++------------------ 5 files changed, 58 insertions(+), 49 deletions(-) diff --git a/bpf/process/bpf_execve_event.c b/bpf/process/bpf_execve_event.c index 82311f511d..b87d0ac742 100644 --- a/bpf/process/bpf_execve_event.c +++ b/bpf/process/bpf_execve_event.c @@ -135,7 +135,8 @@ event_filename_builder(void *ctx, struct msg_process *curr, __u32 curr_pid, if (!heap) return bin; - probe_read_str(heap->pathname, 255, filename); + memset(heap->pathname, 0, 256); + probe_read_str(heap->pathname, size, filename); value = map_lookup_elem(&names_map, heap->pathname); if (value) return *value; diff --git a/pkg/selectors/kernel.go b/pkg/selectors/kernel.go index 28cb4d6a7f..90a554b401 100644 --- a/pkg/selectors/kernel.go +++ b/pkg/selectors/kernel.go @@ -6,6 +6,7 @@ package selectors import ( "encoding/binary" "fmt" + "math" "strconv" "strings" @@ -632,16 +633,21 @@ func parseMatchCapabilityChanges(k *KernelSelectorState, actions []v1alpha1.Capa return nil } -func parseMatchBinary(k *KernelSelectorState, index uint32, b *v1alpha1.BinarySelector) error { +func parseMatchBinary(k *KernelSelectorState, b *v1alpha1.BinarySelector) error { op, err := selectorOp(b.Operator) if err != nil { return fmt.Errorf("matchpid error: %w", err) } WriteSelectorUint32(k, op) - WriteSelectorUint32(k, index) - WriteSelectorUint32(k, index) - WriteSelectorUint32(k, index) - WriteSelectorUint32(k, index) + if len(b.Values) > 4 { + return fmt.Errorf("Only support up to 4 values in MatchBinary") + } + for _, s := range b.Values { + WriteSelectorUint32(k, k.AddBinaryName(s)) + } + for l := len(b.Values); l < 4; l++ { + WriteSelectorUint32(k, math.MaxUint32) + } return nil } @@ -658,7 +664,7 @@ func parseMatchBinaries(k *KernelSelectorState, binarys []v1alpha1.BinarySelecto WriteSelectorUint32(k, 0) WriteSelectorUint32(k, 0) } else { - if err := parseMatchBinary(k, 1, &binarys[0]); err != nil { + if err := parseMatchBinary(k, &binarys[0]); err != nil { return err } } diff --git a/pkg/selectors/selectors.go b/pkg/selectors/selectors.go index 4ddb4cfba8..9a87356392 100644 --- a/pkg/selectors/selectors.go +++ b/pkg/selectors/selectors.go @@ -5,6 +5,14 @@ package selectors import ( "encoding/binary" + "sync" +) + +// as we use a single names_map for all kprobes, so we have to use +// a global variable to assign values to binary names +var ( + binMu sync.Mutex + binIdx uint32 = 1 ) type KernelSelectorState struct { 
@@ -13,6 +21,26 @@ type KernelSelectorState struct { // valueMaps are used to populate value maps for InMap and NotInMap operators valueMaps []map[[8]byte]struct{} + + // matchBinaries mappings + binVals map[uint32]string +} + +func (k *KernelSelectorState) AddBinaryName(binary string) uint32 { + if k.binVals == nil { + k.binVals = make(map[uint32]string) + } + + binMu.Lock() + defer binMu.Unlock() + idx := binIdx + binIdx++ + k.binVals[idx] = binary + return idx +} + +func (k *KernelSelectorState) GetBinaryMappings() map[uint32]string { + return k.binVals } func (k *KernelSelectorState) Buffer() [4096]byte { diff --git a/pkg/sensors/tracing/binaryentry.go b/pkg/sensors/tracing/binaryentry.go index 6eb534458f..f303037489 100644 --- a/pkg/sensors/tracing/binaryentry.go +++ b/pkg/sensors/tracing/binaryentry.go @@ -3,31 +3,18 @@ package tracing import ( - "fmt" - "unsafe" - - "github.com/cilium/tetragon/pkg/bpf" + "github.com/cilium/ebpf" ) type BinaryMapKey struct { PathName [256]byte } -func (k *BinaryMapKey) String() string { return fmt.Sprintf("pathname: %s", string(k.PathName[:])) } -func (k *BinaryMapKey) NewValue() bpf.MapValue { return &BinaryMapValue{} } -func (k *BinaryMapKey) GetKeyPtr() unsafe.Pointer { return unsafe.Pointer(k) } -func (k *BinaryMapKey) DeepCopyMapKey() bpf.MapKey { return &BinaryMapKey{} } - type BinaryMapValue struct { Id uint32 } -func (v *BinaryMapValue) String() string { return fmt.Sprintf("ID: %d", v.Id) } -func (v *BinaryMapValue) NewValue() bpf.MapValue { return &BinaryMapValue{} } -func (v *BinaryMapValue) GetValuePtr() unsafe.Pointer { return unsafe.Pointer(v) } -func (v *BinaryMapValue) DeepCopyMapValue() bpf.MapValue { return &BinaryMapValue{} } - -func writeBinaryMap(id int, path string, m *bpf.Map) error { +func writeBinaryMap(m *ebpf.Map, id uint32, path string) error { p := [256]byte{0} copy(p[:], path) @@ -37,6 +24,5 @@ func writeBinaryMap(id int, path string, m *bpf.Map) error { v := &BinaryMapValue{ Id: uint32(id), } - err := m.Update(k, v) - return err + return m.Update(k, v, ebpf.UpdateAny) } diff --git a/pkg/sensors/tracing/generickprobe.go b/pkg/sensors/tracing/generickprobe.go index 6da1aad301..3b8b07a51d 100644 --- a/pkg/sensors/tracing/generickprobe.go +++ b/pkg/sensors/tracing/generickprobe.go @@ -186,17 +186,6 @@ func getMetaValue(arg *v1alpha1.KProbeArg) (int, error) { return meta, nil } -var binaryNames []v1alpha1.BinarySelector - -func initBinaryNames(spec *v1alpha1.KProbeSpec) error { - for _, s := range spec.Selectors { - for _, b := range s.MatchBinaries { - binaryNames = append(binaryNames, b) - } - } - return nil -} - func createMultiKprobeSensor(sensorPath string, multiIDs, multiRetIDs []idtable.EntryID) ([]*program.Program, []*program.Map) { var progs []*program.Program var maps []*program.Map @@ -392,11 +381,6 @@ func createGenericKprobeSensor(name string, kprobes []v1alpha1.KProbeSpec) (*sen } } - // Parse Binary Name into kernel data structures - if err := initBinaryNames(f); err != nil { - return nil, err - } - hasOverride := selectors.HasOverride(f) if hasOverride && !bpf.HasOverrideHelper() { return nil, fmt.Errorf("Error override_return bpf helper not available") @@ -614,14 +598,14 @@ func loadSingleKprobeSensor(id idtable.EntryID, bpfDir, mapDir string, load *pro return err } - m, err := bpf.OpenMap(filepath.Join(mapDir, base.NamesMap.Name)) + m, err := ebpf.LoadPinnedMap(filepath.Join(mapDir, base.NamesMap.Name), nil) if err != nil { return err } - for i, b := range binaryNames { - for _, path := range b.Values { - 
writeBinaryMap(i+1, path, m) - } + defer m.Close() + + for i, path := range gk.loadArgs.selectors.GetBinaryMappings() { + writeBinaryMap(m, i, path) } return err @@ -662,13 +646,17 @@ func loadMultiKprobeSensor(ids []idtable.EntryID, bpfDir, mapDir string, load *p return err } - m, err := bpf.OpenMap(filepath.Join(mapDir, base.NamesMap.Name)) + m, err := ebpf.LoadPinnedMap(filepath.Join(mapDir, base.NamesMap.Name), nil) if err != nil { return err } - for i, b := range binaryNames { - for _, path := range b.Values { - writeBinaryMap(i+1, path, m) + defer m.Close() + + for _, id := range ids { + if gk, err := genericKprobeTableGet(id); err == nil { + for i, path := range gk.loadArgs.selectors.GetBinaryMappings() { + writeBinaryMap(m, i, path) + } } } From 87f94dbee3b256c304717d247414e35b626bd97f Mon Sep 17 00:00:00 2001 From: Anastasios Papagiannis Date: Wed, 8 Feb 2023 11:18:35 +0000 Subject: [PATCH 2/8] Increase the number of values in matchBinaries Before that we had a limit to 4 values in the matchBinaries selector. This patch uses a map per kprobe (sel_names_map) to remove this limitation. The current limit is 256 values (the size of the map) and should be enough for all cases. As a follow-up we can also clear entries from the (shared) names_map when we remove kprobes. For now we also increase the size of that map to 256 entries. This means that we can define up to 256 unique binary names among all matchBinaries selectors. Signed-off-by: Anastasios Papagiannis --- bpf/lib/generic.h | 2 +- bpf/process/types/basic.h | 65 +++++++++++------------- pkg/selectors/kernel.go | 34 ++++--------- pkg/selectors/kernel_test.go | 29 +++-------- pkg/selectors/selectors.go | 45 ++++++++++++---- pkg/sensors/tracing/generickprobe.go | 10 +++- pkg/sensors/tracing/generictracepoint.go | 3 ++ pkg/sensors/tracing/selectors.go | 35 +++++++++++++ 8 files changed, 130 insertions(+), 93 deletions(-) diff --git a/bpf/lib/generic.h b/bpf/lib/generic.h index 4eed059900..67839bb0c8 100644 --- a/bpf/lib/generic.h +++ b/bpf/lib/generic.h @@ -70,7 +70,7 @@ struct names_map_key { struct { __uint(type, BPF_MAP_TYPE_HASH); - __uint(max_entries, 64); + __uint(max_entries, 256); /* maximum number of binary names for all matchBinary selectors */ __type(key, struct names_map_key); __type(value, __u32); } names_map SEC(".maps"); diff --git a/bpf/process/types/basic.h b/bpf/process/types/basic.h index 2950e81b49..d30e3a190a 100644 --- a/bpf/process/types/basic.h +++ b/bpf/process/types/basic.h @@ -83,12 +83,6 @@ struct selector_action { __u32 act[]; }; -struct selector_binary_filter { - __u32 arglen; - __u32 op; - __u32 index[4]; -}; - struct selector_arg_filter { __u32 arglen; __u32 index; @@ -930,14 +924,33 @@ static inline __attribute__((always_inline)) size_t type_to_min_size(int type, #define INDEX_MASK 0x3ff +/* + * For matchBinaries we use two maps: + * 1. names_map: global (for all sensors) keeps a mapping from names -> ids + * 2. sel_names_map: per-sensor: keeps a mapping from id -> selector val + * + * At exec time, we check names_map and set ->binary in execve_map equal to + * the id stored in names_map. Assuming the binary name exists in the map, + * otherwise binary is 0. + * + * When we check the selectors, use ->binary to index sel_names_map and decide + * whether the selector matches or not. 
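+ *
+ * Worked example (the path and ids below are illustrative only): a selector
+ * with matchBinaries In ["/usr/bin/cat"] adds "/usr/bin/cat" -> 1 to
+ * names_map and writes 1 -> 1 plus UINT32_MAX -> op_filter_in into this
+ * sensor's sel_names_map. When /usr/bin/cat is exec'ed, execve_map->binary
+ * becomes 1, and the selector check looks up key 1 here to decide the match.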
+ */ +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 256); + __type(key, __u32); + __type(value, __u32); +} sel_names_map SEC(".maps"); + static inline __attribute__((always_inline)) int selector_arg_offset(__u8 *f, struct msg_generic_kprobe *e, __u32 selidx) { struct selector_arg_filter *filter; - struct selector_binary_filter *binary; long seloff, argoff, pass; - __u32 index; + __u32 index, *op; char *args; + __u32 max = 0xffffffff; // UINT32_MAX seloff = 4; /* start of the relative offsets */ seloff += (selidx * 4); /* relative offset for this selector */ @@ -958,42 +971,26 @@ selector_arg_offset(__u8 *f, struct msg_generic_kprobe *e, __u32 selidx) /* skip the matchCapabilityChanges by reading its length */ seloff += *(__u32 *)((__u64)f + (seloff & INDEX_MASK)); - /* seloff must leave space for verifier to walk strings - * so we set inside 4k maximum. Advance to binary matches. - */ - seloff &= INDEX_MASK; - binary = (struct selector_binary_filter *)&f[seloff]; - - /* Run binary name filters - */ - if (binary->op == op_filter_in) { + op = map_lookup_elem(&sel_names_map, &max); + if (op && *op == op_filter_in) { struct execve_map_value *execve; bool walker = 0; - __u32 ppid; + __u32 ppid, bin_key, *bin_val; execve = event_find_curr(&ppid, &walker); if (!execve) return 0; - if (binary->index[0] != execve->binary && - binary->index[1] != execve->binary && - binary->index[2] != execve->binary && - binary->index[3] != execve->binary) - return 0; - } - /* Advance to matchArgs we use fixed size binary filters for now. It helps - * the verifier and its still unclear how many entries are needed. At any - * rate each entry is a uint32 now and we should really be able to pack - * an entry into a byte which would give us 4x more entries. - */ - seloff += sizeof(struct selector_binary_filter); - if (seloff > 3800) { - return 0; + bin_key = execve->binary; + bin_val = map_lookup_elem(&sel_names_map, &bin_key); + if (!bin_val) + return 0; + if (*bin_val != 1) + return 0; } /* Making binary selectors fixes size helps on some kernels */ - asm volatile("%[seloff] &= 0xfff;\n" ::[seloff] "+r"(seloff) - :); + seloff &= INDEX_MASK; filter = (struct selector_arg_filter *)&f[seloff]; if (filter->arglen <= 4) // no filters diff --git a/pkg/selectors/kernel.go b/pkg/selectors/kernel.go index 90a554b401..c03de6fd62 100644 --- a/pkg/selectors/kernel.go +++ b/pkg/selectors/kernel.go @@ -6,7 +6,6 @@ package selectors import ( "encoding/binary" "fmt" - "math" "strconv" "strings" @@ -636,39 +635,30 @@ func parseMatchCapabilityChanges(k *KernelSelectorState, actions []v1alpha1.Capa func parseMatchBinary(k *KernelSelectorState, b *v1alpha1.BinarySelector) error { op, err := selectorOp(b.Operator) if err != nil { - return fmt.Errorf("matchpid error: %w", err) + return fmt.Errorf("matchBinary error: %w", err) } - WriteSelectorUint32(k, op) - if len(b.Values) > 4 { - return fmt.Errorf("Only support up to 4 values in MatchBinary") + if op != selectorOpIn { + return fmt.Errorf("matchBinary error: Only In operator is supported") } + k.SetBinaryOp(op) for _, s := range b.Values { - WriteSelectorUint32(k, k.AddBinaryName(s)) - } - for l := len(b.Values); l < 4; l++ { - WriteSelectorUint32(k, math.MaxUint32) + if len(s) > 255 { + return fmt.Errorf("matchBinary error: Binary names > 255 chars do not supported") + } + k.AddBinaryName(s) } return nil } func parseMatchBinaries(k *KernelSelectorState, binarys []v1alpha1.BinarySelector) error { - loff := AdvanceSelectorLength(k) if len(binarys) > 1 { return 
fmt.Errorf("Only support single binary selector") - } else if len(binarys) == 0 { - // To aid verifier we always zero in binary fields to allow - // BPF to assume the values exist. - WriteSelectorUint32(k, 0) - WriteSelectorUint32(k, 0) - WriteSelectorUint32(k, 0) - WriteSelectorUint32(k, 0) - WriteSelectorUint32(k, 0) - } else { - if err := parseMatchBinary(k, &binarys[0]); err != nil { + } + for _, s := range binarys { + if err := parseMatchBinary(k, &s); err != nil { return err } } - WriteSelectorLength(k, loff) return nil } @@ -721,7 +711,6 @@ func parseSelector( // [matchCapabilities] // [matchNamespaceChanges] // [matchCapabilityChanges] -// [matchBinaries] // [matchArgs] // [matchActions] // @@ -730,7 +719,6 @@ func parseSelector( // matchCapabilities := [length][CAx][CAy]...[CAn] // matchNamespaceChanges := [length][NCx][NCy]...[NCn] // matchCapabilityChanges := [length][CAx][CAy]...[CAn] -// matchBinaries := [length][op][Index]...[Index] // matchArgs := [length][ARGx][ARGy]...[ARGn] // PIDn := [op][flags][nValues][v1]...[vn] // Argn := [index][op][valueGen] diff --git a/pkg/selectors/kernel_test.go b/pkg/selectors/kernel_test.go index 19d1465103..6737ff3322 100644 --- a/pkg/selectors/kernel_test.go +++ b/pkg/selectors/kernel_test.go @@ -397,11 +397,11 @@ func TestMultipleSelectorsExample(t *testing.T) { expectedLen += 4 } - // vaue absolute offset explanation + // value absolute offset explanation expU32Push(2) // off: 0 number of selectors expU32Push(8) // off: 4 relative ofset of 1st selector (4 + 8 = 12) - expU32Push(104) // off: 8 relative ofset of 2nd selector (8 + 104 = 112) - expU32Push(100) // off: 12 selector1: length (100 + 12 = 112) + expU32Push(80) // off: 8 relative ofset of 2nd selector (8 + 80 = 88) + expU32Push(76) // off: 12 selector1: length (76 + 12 = 112) expU32Push(24) // off: 16 selector1: MatchPIDs: len expU32Push(selectorOpNotIn) // off: 20 selector1: MatchPIDs[0]: op expU32Push(0) // off: 24 selector1: MatchPIDs[0]: flags @@ -412,12 +412,6 @@ func TestMultipleSelectorsExample(t *testing.T) { expU32Push(4) // off: 44 selector1: MatchCapabilities: len expU32Push(4) // off: 48 selector1: MatchNamespaceChanges: len expU32Push(4) // off: 52 selector1: MatchCapabilityChanges: len - expU32Push(4 + 5*4) // off: 56 selector1: matchBinaries: len - expU32Push(0) // off: 60 selector1: matchBinaries: 0 - expU32Push(0) // off: 64 selector1: matchBinaries: 1 - expU32Push(0) // off: 68 selector1: matchBinaries: 2 - expU32Push(0) // off: 72 selector1: matchBinaries: 3 - expU32Push(0) // off: 76 selector1: matchBinaries: 4 expU32Push(28) // off: 80 selector1: matchArgs: len expU32Push(1) // off: 84 selector1: matchArgs: arg0: index expU32Push(selectorOpEQ) // off: 88 selector1: matchArgs: arg0: operator @@ -426,7 +420,7 @@ func TestMultipleSelectorsExample(t *testing.T) { expU32Push(10) // off: 100 selector1: matchArgs: arg0: val0: 10 expU32Push(20) // off: 104 selector1: matchArgs: arg0: val1: 20 expU32Push(4) // off: 108 selector1: matchActions: length - expU32Push(100) // off: 112 selector2: length + expU32Push(76) // off: 112 selector2: length // ... everything else should be the same as selector1 ... 
if bytes.Equal(expected[:expectedLen], b[:expectedLen]) == false { @@ -443,11 +437,11 @@ func TestInitKernelSelectors(t *testing.T) { } expected_selsize_small := []byte{ - 0xfe, 0x00, 0x00, 0x00, // size = pids + binarys + args + actions + namespaces + capabilities + 4 + 0xe6, 0x00, 0x00, 0x00, // size = pids + args + actions + namespaces + capabilities + 4 } expected_selsize_large := []byte{ - 0x1a, 0x01, 0x00, 0x00, // size = pids + binarys + args + actions + namespaces + namespacesChanges + capabilities + capabilityChanges + 4 + 0x02, 0x01, 0x00, 0x00, // size = pids + args + actions + namespaces + namespacesChanges + capabilities + capabilityChanges + 4 } expected_filters := []byte{ @@ -531,17 +525,6 @@ func TestInitKernelSelectors(t *testing.T) { } expected_last := []byte{ - // binaryNames header - 24, 0x00, 0x00, 0x00, // size = sizeof(uint32) * 4 - - // binaryNames Ids, always has 4 to ease verify complexity and - // zeroes unused entries. - 0x00, 0x00, 0x00, 0x00, // op - 0x00, 0x00, 0x00, 0x00, // index0 - 0x00, 0x00, 0x00, 0x00, // index1 - 0x00, 0x00, 0x00, 0x00, // index2 - 0x00, 0x00, 0x00, 0x00, // index3 - // arg header 54, 0x00, 0x00, 0x00, // size = sizeof(arg2) + sizeof(arg1) + 4 diff --git a/pkg/selectors/selectors.go b/pkg/selectors/selectors.go index 9a87356392..e3071b1d07 100644 --- a/pkg/selectors/selectors.go +++ b/pkg/selectors/selectors.go @@ -13,6 +13,8 @@ import ( var ( binMu sync.Mutex binIdx uint32 = 1 + // contains all entries for the names_map + binVals = make(map[string]uint32) ) type KernelSelectorState struct { @@ -23,24 +25,47 @@ type KernelSelectorState struct { valueMaps []map[[8]byte]struct{} // matchBinaries mappings - binVals map[uint32]string + op uint32 + newBinVals map[uint32]string // these should be added in the names_map + selNamesMap map[uint32]uint32 // these will be used for the sel_names_map + initOnce sync.Once } -func (k *KernelSelectorState) AddBinaryName(binary string) uint32 { - if k.binVals == nil { - k.binVals = make(map[uint32]string) - } +func (k *KernelSelectorState) SetBinaryOp(op uint32) { + k.op = op +} + +func (k *KernelSelectorState) GetBinaryOp() uint32 { + return k.op +} + +func (k *KernelSelectorState) AddBinaryName(binary string) { + k.initOnce.Do(func() { + k.newBinVals = make(map[uint32]string) + k.selNamesMap = make(map[uint32]uint32) + }) binMu.Lock() defer binMu.Unlock() - idx := binIdx + idx, ok := binVals[binary] + if ok { + k.selNamesMap[idx] = 1 + return + } + + idx = binIdx binIdx++ - k.binVals[idx] = binary - return idx + binVals[binary] = idx // global map of all names_map entries + k.newBinVals[idx] = binary // new names_map entries that we should add + k.selNamesMap[idx] = 1 // value in the per-selector names_map +} + +func (k *KernelSelectorState) GetNewBinaryMappings() map[uint32]string { + return k.newBinVals } -func (k *KernelSelectorState) GetBinaryMappings() map[uint32]string { - return k.binVals +func (k *KernelSelectorState) GetBinSelNamesMap() map[uint32]uint32 { + return k.selNamesMap } func (k *KernelSelectorState) Buffer() [4096]byte { diff --git a/pkg/sensors/tracing/generickprobe.go b/pkg/sensors/tracing/generickprobe.go index 3b8b07a51d..860da53556 100644 --- a/pkg/sensors/tracing/generickprobe.go +++ b/pkg/sensors/tracing/generickprobe.go @@ -230,6 +230,9 @@ func createMultiKprobeSensor(sensorPath string, multiIDs, multiRetIDs []idtable. 
callHeap := program.MapBuilderPin("process_call_heap", sensors.PathJoin(pinPath, "process_call_heap"), load) maps = append(maps, callHeap) + selNamesMap := program.MapBuilderPin("sel_names_map", sensors.PathJoin(pinPath, "sel_names_map"), load) + maps = append(maps, selNamesMap) + if len(multiRetIDs) != 0 { loadret := program.Builder( path.Join(option.Config.HubbleLib, loadProgRetName), @@ -491,6 +494,9 @@ func createGenericKprobeSensor(name string, kprobes []v1alpha1.KProbeSpec) (*sen callHeap := program.MapBuilderPin("process_call_heap", sensors.PathJoin(pinPath, "process_call_heap"), load) maps = append(maps, callHeap) + selNamesMap := program.MapBuilderPin("sel_names_map", sensors.PathJoin(pinPath, "sel_names_map"), load) + maps = append(maps, selNamesMap) + if setRetprobe { pinRetProg := sensors.PathJoin(pinPath, fmt.Sprintf("%s_ret_prog", kprobeEntry.funcName)) loadret := program.Builder( @@ -604,7 +610,7 @@ func loadSingleKprobeSensor(id idtable.EntryID, bpfDir, mapDir string, load *pro } defer m.Close() - for i, path := range gk.loadArgs.selectors.GetBinaryMappings() { + for i, path := range gk.loadArgs.selectors.GetNewBinaryMappings() { writeBinaryMap(m, i, path) } @@ -654,7 +660,7 @@ func loadMultiKprobeSensor(ids []idtable.EntryID, bpfDir, mapDir string, load *p for _, id := range ids { if gk, err := genericKprobeTableGet(id); err == nil { - for i, path := range gk.loadArgs.selectors.GetBinaryMappings() { + for i, path := range gk.loadArgs.selectors.GetNewBinaryMappings() { writeBinaryMap(m, i, path) } } diff --git a/pkg/sensors/tracing/generictracepoint.go b/pkg/sensors/tracing/generictracepoint.go index 7a5ea9782e..899a53a0c8 100644 --- a/pkg/sensors/tracing/generictracepoint.go +++ b/pkg/sensors/tracing/generictracepoint.go @@ -365,6 +365,9 @@ func createGenericTracepointSensor(name string, confs []GenericTracepointConf) ( argFilterMaps := program.MapBuilderPin("argfilter_maps", sensors.PathJoin(pinPath, "argfilter_maps"), prog0) maps = append(maps, argFilterMaps) + + selNamesMap := program.MapBuilderPin("sel_names_map", sensors.PathJoin(pinPath, "sel_names_map"), prog0) + maps = append(maps, selNamesMap) } return &sensors.Sensor{ diff --git a/pkg/sensors/tracing/selectors.go b/pkg/sensors/tracing/selectors.go index ac6ec84860..655b4d982a 100644 --- a/pkg/sensors/tracing/selectors.go +++ b/pkg/sensors/tracing/selectors.go @@ -53,6 +53,25 @@ func updateSelectors( return fmt.Errorf("failed to populate argfilter_maps: %w", err) } + selNamesMapName, ok := pinMap["sel_names_map"] + if !ok { + return fmt.Errorf("cannot find pinned sel_names_map") + } + selNamesMapName = filepath.Join(bpf.MapPrefixPath(), selNamesMapName) + selNamesMap, err := ebpf.LoadPinnedMap(selNamesMapName, nil) + if err != nil { + return fmt.Errorf("failed to open sel_names_map map %s: %w", selNamesMapName, err) + } + defer selNamesMap.Close() + if err := selNamesMap.Update(uint32(0xffffffff), ks.GetBinaryOp(), ebpf.UpdateAny); err != nil { + return err + } + for idx, val := range ks.GetBinSelNamesMap() { + if err := selNamesMap.Update(idx, val, ebpf.UpdateAny); err != nil { + return err + } + } + return nil } @@ -71,6 +90,22 @@ func selectorsMaploads(ks *selectors.KernelSelectorState, pinPathPrefix string, Load: func(outerMap *ebpf.Map, index uint32) error { return populateArgFilterMaps(ks, pinPathPrefix, outerMap) }, + }, { + Index: 0, + Name: "sel_names_map", + Load: func(m *ebpf.Map, index uint32) error { + // add a special entry (key == UINT32_MAX) that has as a value the number of matchBinaries entry + 
// if this is zero we don't have any matchBinaries selectors + if err := m.Update(uint32(0xffffffff), ks.GetBinaryOp(), ebpf.UpdateAny); err != nil { + return err + } + for idx, val := range ks.GetBinSelNamesMap() { + if err := m.Update(idx, val, ebpf.UpdateAny); err != nil { + return err + } + } + return nil + }, }, } } From 84e71d2777f43b53fcb8b53caa01431d4640ffa6 Mon Sep 17 00:00:00 2001 From: Anastasios Papagiannis Date: Mon, 20 Feb 2023 12:07:59 +0000 Subject: [PATCH 3/8] Add NotIn operator for matchBinaries For now we only supported In operator in matchBinaries. This patch adds support for the NotIn operator. Signed-off-by: Anastasios Papagiannis --- bpf/process/types/basic.h | 21 ++++++++++++++++----- pkg/selectors/kernel.go | 4 ++-- pkg/selectors/selectors.go | 2 +- 3 files changed, 19 insertions(+), 8 deletions(-) diff --git a/bpf/process/types/basic.h b/bpf/process/types/basic.h index d30e3a190a..3a75dc2858 100644 --- a/bpf/process/types/basic.h +++ b/bpf/process/types/basic.h @@ -972,7 +972,7 @@ selector_arg_offset(__u8 *f, struct msg_generic_kprobe *e, __u32 selidx) seloff += *(__u32 *)((__u64)f + (seloff & INDEX_MASK)); op = map_lookup_elem(&sel_names_map, &max); - if (op && *op == op_filter_in) { + if (op) { struct execve_map_value *execve; bool walker = 0; __u32 ppid, bin_key, *bin_val; @@ -983,10 +983,21 @@ selector_arg_offset(__u8 *f, struct msg_generic_kprobe *e, __u32 selidx) bin_key = execve->binary; bin_val = map_lookup_elem(&sel_names_map, &bin_key); - if (!bin_val) - return 0; - if (*bin_val != 1) - return 0; + + /* + * The following things may happen: + * binary is not part of names_map, execve_map->binary will be `0` and `bin_val` will always be `0` + * binary is part of `names_map`: + * if binary is not part of this selector, bin_val will be`0` + * if binary is part of this selector: `bin_val will be `!0` + */ + if (*op == op_filter_in) { + if (!bin_val) + return 0; + } else if (*op == op_filter_notin) { + if (bin_val) + return 0; + } } /* Making binary selectors fixes size helps on some kernels */ diff --git a/pkg/selectors/kernel.go b/pkg/selectors/kernel.go index c03de6fd62..d841aeb606 100644 --- a/pkg/selectors/kernel.go +++ b/pkg/selectors/kernel.go @@ -637,8 +637,8 @@ func parseMatchBinary(k *KernelSelectorState, b *v1alpha1.BinarySelector) error if err != nil { return fmt.Errorf("matchBinary error: %w", err) } - if op != selectorOpIn { - return fmt.Errorf("matchBinary error: Only In operator is supported") + if op != selectorOpIn && op != selectorOpNotIn { + return fmt.Errorf("matchBinary error: Only In and NotIn operators are supported") } k.SetBinaryOp(op) for _, s := range b.Values { diff --git a/pkg/selectors/selectors.go b/pkg/selectors/selectors.go index e3071b1d07..a9b296e052 100644 --- a/pkg/selectors/selectors.go +++ b/pkg/selectors/selectors.go @@ -57,7 +57,7 @@ func (k *KernelSelectorState) AddBinaryName(binary string) { binIdx++ binVals[binary] = idx // global map of all names_map entries k.newBinVals[idx] = binary // new names_map entries that we should add - k.selNamesMap[idx] = 1 // value in the per-selector names_map + k.selNamesMap[idx] = 1 // value in the per-selector names_map (we ignore the value) } func (k *KernelSelectorState) GetNewBinaryMappings() map[uint32]string { From ceec3ccd6d3da0ff1047aed3d424f373983f168b Mon Sep 17 00:00:00 2001 From: Anastasios Papagiannis Date: Mon, 20 Feb 2023 12:16:20 +0000 Subject: [PATCH 4/8] Fix names_map update in generic tracepoints After loading a tracepoint program we should update names_map with 
new enties in a similar way to kprobes. Signed-off-by: Anastasios Papagiannis --- pkg/sensors/tracing/generictracepoint.go | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/pkg/sensors/tracing/generictracepoint.go b/pkg/sensors/tracing/generictracepoint.go index 899a53a0c8..7f45eef63f 100644 --- a/pkg/sensors/tracing/generictracepoint.go +++ b/pkg/sensors/tracing/generictracepoint.go @@ -9,6 +9,7 @@ import ( "errors" "fmt" "path" + "path/filepath" "reflect" "sync" "sync/atomic" @@ -26,6 +27,7 @@ import ( "github.com/cilium/tetragon/pkg/option" "github.com/cilium/tetragon/pkg/selectors" "github.com/cilium/tetragon/pkg/sensors" + "github.com/cilium/tetragon/pkg/sensors/base" "github.com/cilium/tetragon/pkg/sensors/program" "github.com/cilium/tetragon/pkg/tracepoint" "github.com/sirupsen/logrus" @@ -536,7 +538,23 @@ func LoadGenericTracepointSensor(bpfDir, mapDir string, load *program.Program, v } load.MapLoad = append(load.MapLoad, cfg) - return program.LoadTracepointProgram(bpfDir, mapDir, load, verbose) + if err := program.LoadTracepointProgram(bpfDir, mapDir, load, verbose); err == nil { + logger.GetLogger().Infof("Loaded generic tracepoint program: %s -> %s", load.Name, load.Attach) + } else { + return err + } + + m, err := ebpf.LoadPinnedMap(filepath.Join(mapDir, base.NamesMap.Name), nil) + if err != nil { + return err + } + defer m.Close() + + for i, path := range kernelSelectors.GetNewBinaryMappings() { + writeBinaryMap(m, i, path) + } + + return err } func handleGenericTracepoint(r *bytes.Reader) ([]observer.Event, error) { From b5d2ef4bc1f846a30ba1e14cc0eb1bdd8518494c Mon Sep 17 00:00:00 2001 From: Anastasios Papagiannis Date: Tue, 21 Feb 2023 09:09:47 +0000 Subject: [PATCH 5/8] Add tests for matchBinaries Signed-off-by: Anastasios Papagiannis --- pkg/sensors/tracing/kprobe_test.go | 98 ++++++++++++++++++++++++++++++ 1 file changed, 98 insertions(+) diff --git a/pkg/sensors/tracing/kprobe_test.go b/pkg/sensors/tracing/kprobe_test.go index 96d8cc3c6b..e20823774f 100644 --- a/pkg/sensors/tracing/kprobe_test.go +++ b/pkg/sensors/tracing/kprobe_test.go @@ -8,6 +8,7 @@ import ( "errors" "fmt" "os" + "os/exec" "path/filepath" "strconv" "sync" @@ -2303,6 +2304,103 @@ func TestKprobeMatchArgsFdPrefix(t *testing.T) { assert.NoError(t, err) } +func getMatchBinariesCrd(opStr string, vals []string) string { + configHook := `apiVersion: cilium.io/v1alpha1 +kind: TracingPolicy +metadata: + name: "testing-file-match-binaries" +spec: + kprobes: + - call: "fd_install" + syscall: false + return: false + args: + - index: 0 + type: int + - index: 1 + type: "file" + selectors: + - matchBinaries: + - operator: "` + opStr + `" + values: ` + for i := 0; i < len(vals); i++ { + configHook += fmt.Sprintf("\n - \"%s\"", vals[i]) + } + return configHook +} + +func createBinariesChecker(binary, filename string) *ec.ProcessKprobeChecker { + kpChecker := ec.NewProcessKprobeChecker(""). + WithProcess(ec.NewProcessChecker().WithBinary(sm.Full(binary))). + WithFunctionName(sm.Full("fd_install")). + WithArgs(ec.NewKprobeArgumentListMatcher(). + WithOperator(lc.Subset). 
+ WithValues( + ec.NewKprobeArgumentChecker().WithFileArg(ec.NewKprobeFileChecker().WithPath(sm.Full(filename))), + )) + return kpChecker +} + +func TestKprobeMatchBinariesIn(t *testing.T) { + var doneWG, readyWG sync.WaitGroup + defer doneWG.Wait() + + ctx, cancel := context.WithTimeout(context.Background(), tus.Conf().CmdWaitTime) + defer cancel() + + createCrdFile(t, getMatchBinariesCrd("In", []string{"/usr/bin/cat"})) + + obs, err := observer.GetDefaultObserverWithFile(t, ctx, testConfigFile, tus.Conf().TetragonLib) + if err != nil { + t.Fatalf("GetDefaultObserverWithFile error: %s", err) + } + observer.LoopEvents(ctx, t, &doneWG, &readyWG, obs) + readyWG.Wait() + + if err := exec.Command("/usr/bin/cat", "/etc/passwd").Run(); err != nil { + t.Fatalf("failed to run cat /etc/passwd: %s", err) + } + + if err := exec.Command("/usr/bin/head", "/etc/passwd").Run(); err != nil { + t.Fatalf("failed to run head /etc/passwd: %s", err) + } + + kpChecker := createBinariesChecker("/usr/bin/cat", "/etc/passwd") + checker := ec.NewUnorderedEventChecker(kpChecker) + err = jsonchecker.JsonTestCheck(t, checker) + assert.NoError(t, err) +} + +func TestKprobeMatchBinariesNotIn(t *testing.T) { + var doneWG, readyWG sync.WaitGroup + defer doneWG.Wait() + + ctx, cancel := context.WithTimeout(context.Background(), tus.Conf().CmdWaitTime) + defer cancel() + + createCrdFile(t, getMatchBinariesCrd("NotIn", []string{"/usr/bin/tail"})) + + obs, err := observer.GetDefaultObserverWithFile(t, ctx, testConfigFile, tus.Conf().TetragonLib) + if err != nil { + t.Fatalf("GetDefaultObserverWithFile error: %s", err) + } + observer.LoopEvents(ctx, t, &doneWG, &readyWG, obs) + readyWG.Wait() + + if err := exec.Command("/usr/bin/tail", "/etc/passwd").Run(); err != nil { + t.Fatalf("failed to run cat /etc/passwd: %s", err) + } + + if err := exec.Command("/usr/bin/head", "/etc/passwd").Run(); err != nil { + t.Fatalf("failed to run head /etc/passwd: %s", err) + } + + kpChecker := createBinariesChecker("/usr/bin/head", "/etc/passwd") + checker := ec.NewUnorderedEventChecker(kpChecker) + err = jsonchecker.JsonTestCheck(t, checker) + assert.NoError(t, err) +} + func loadTestCrd(t *testing.T) error { testHook := `apiVersion: cilium.io/v1alpha1 kind: TracingPolicy From eb1a47c402da785d477d71ce375b42d105b8157d Mon Sep 17 00:00:00 2001 From: Anastasios Papagiannis Date: Wed, 22 Feb 2023 13:45:56 +0000 Subject: [PATCH 6/8] matchBinaries: Do not match the parent binary Now, if the process binary does not match these that we have in matchBinaries selector, it will also check the parent binary name. This is not the desired behaviour and this patch removed that. 
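A hedged illustration of the old vs. new behaviour (the policy fragment and
binary paths are hypothetical, following the CRD format used by the tests
added earlier in this series):

  selectors:
  - matchBinaries:
    - operator: "In"
      values:
      - "/usr/bin/bash"

Before this change, a process exec'ed by /usr/bin/bash whose own pathname is
not present in names_map (for example /usr/bin/ls) inherited the parent's
binary id and therefore also matched this selector. With this change it no
longer does; only /usr/bin/bash itself matches.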
Signed-off-by: Anastasios Papagiannis --- bpf/process/bpf_execve_event.c | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/bpf/process/bpf_execve_event.c b/bpf/process/bpf_execve_event.c index b87d0ac742..109701dcca 100644 --- a/bpf/process/bpf_execve_event.c +++ b/bpf/process/bpf_execve_event.c @@ -94,8 +94,7 @@ event_args_builder(void *ctx, struct msg_execve_event *event) } static inline __attribute__((always_inline)) uint32_t -event_filename_builder(void *ctx, struct msg_process *curr, __u32 curr_pid, - __u32 flags, __u32 bin, void *filename) +event_filename_builder(void *ctx, struct msg_process *curr, __u32 curr_pid, __u32 flags, void *filename) { struct execve_heap *heap; int64_t size = 0; @@ -133,14 +132,14 @@ event_filename_builder(void *ctx, struct msg_process *curr, __u32 curr_pid, heap = map_lookup_elem(&execve_heap, &zero); if (!heap) - return bin; + return 0; memset(heap->pathname, 0, 256); probe_read_str(heap->pathname, size, filename); value = map_lookup_elem(&names_map, heap->pathname); if (value) return *value; - return bin; + return 0; } __attribute__((section("tracepoint/sys_execve"), used)) int @@ -150,7 +149,6 @@ event_execve(struct sched_execve_args *ctx) struct msg_execve_event *event; struct execve_map_value *parent; struct msg_process *execve; - uint32_t binary = 0; bool walker = 0; __u32 zero = 0; __u32 pid; @@ -173,16 +171,13 @@ event_execve(struct sched_execve_args *ctx) parent = event_find_parent(); if (parent) { event->parent = parent->key; - binary = parent->binary; } else { event_minimal_parent(event, task); } execve = &event->process; fileoff = ctx->filename & 0xFFFF; - binary = event_filename_builder(ctx, execve, pid, EVENT_EXECVE, binary, - (char *)ctx + fileoff); - event->binary = binary; + event->binary = event_filename_builder(ctx, execve, pid, EVENT_EXECVE, (char *)ctx + fileoff); event_args_builder(ctx, event); compiler_barrier(); From 458626177ef1457a491d707e6db8f68239cdc728 Mon Sep 17 00:00:00 2001 From: Anastasios Papagiannis Date: Wed, 22 Feb 2023 16:33:46 +0000 Subject: [PATCH 7/8] matchBinaries: Skip binary check for long binary names In the case where the binary name is > 255 characters we simply skip the test. In order to support that we have to filter using data events that can be a follow-up. Generally, 255 characters for binary names should be enough in most cases. 
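A hedged sketch of the resulting behaviour (the policy fragment and paths
below are hypothetical): a binary whose pathname does not fit in the 256-byte
pathname buffer now ends up with binary == 0 in execve_map, so it is treated
as if it were not listed anywhere — it can never satisfy an In selector and
it always passes a NotIn selector like the following.

  selectors:
  - matchBinaries:
    - operator: "NotIn"
      values:
      - "/usr/bin/cat"

This is consistent with the bin_val comment added to
bpf/process/types/basic.h earlier in the series; on the userspace side,
parseMatchBinary already rejects values longer than 255 characters, since
names_map keys are fixed 256-byte pathnames.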
Signed-off-by: Anastasios Papagiannis --- bpf/process/bpf_execve_event.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/bpf/process/bpf_execve_event.c b/bpf/process/bpf_execve_event.c index 109701dcca..3d279264ab 100644 --- a/bpf/process/bpf_execve_event.c +++ b/bpf/process/bpf_execve_event.c @@ -130,6 +130,10 @@ event_filename_builder(void *ctx, struct msg_process *curr, __u32 curr_pid, __u3 curr->ktime = ktime_get_ns(); curr->size = size + offsetof(struct msg_process, args); + // skip binaries check for long (> 255) filenames for now + if (flags & EVENT_DATA_FILENAME) + return 0; + heap = map_lookup_elem(&execve_heap, &zero); if (!heap) return 0; From 2fc6811a9fb345b78416235abb12e5d52be8cd8c Mon Sep 17 00:00:00 2001 From: Anastasios Papagiannis Date: Fri, 24 Feb 2023 09:59:49 +0000 Subject: [PATCH 8/8] Convert max binary size to a define Signed-off-by: Anastasios Papagiannis --- bpf/lib/process.h | 3 ++- bpf/process/bpf_execve_event.c | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/bpf/lib/process.h b/bpf/lib/process.h index 91bade42e6..164262163d 100644 --- a/bpf/lib/process.h +++ b/bpf/lib/process.h @@ -104,6 +104,7 @@ #define __ASM_ARGSBUFFER 976 #define ARGSBUFFERMASK (ARGSBUFFER - 1) #define MAXARGMASK (MAXARG - 1) +#define PATHNAME_SIZE 256 /* Task flags */ #ifndef PF_KTHREAD @@ -316,7 +317,7 @@ struct { struct execve_heap { union { - char pathname[256]; + char pathname[PATHNAME_SIZE]; char maxpath[4096]; }; }; diff --git a/bpf/process/bpf_execve_event.c b/bpf/process/bpf_execve_event.c index 3d279264ab..7d5f40f777 100644 --- a/bpf/process/bpf_execve_event.c +++ b/bpf/process/bpf_execve_event.c @@ -138,7 +138,7 @@ event_filename_builder(void *ctx, struct msg_process *curr, __u32 curr_pid, __u3 if (!heap) return 0; - memset(heap->pathname, 0, 256); + memset(heap->pathname, 0, PATHNAME_SIZE); probe_read_str(heap->pathname, size, filename); value = map_lookup_elem(&names_map, heap->pathname); if (value)