diff --git a/btf/btf.go b/btf/btf.go
index 80f64d78a..6a3e53d67 100644
--- a/btf/btf.go
+++ b/btf/btf.go
@@ -29,9 +29,8 @@ var (
 // ID represents the unique ID of a BTF object.
 type ID = sys.BTFID
 
-// Spec allows querying a set of Types and loading the set into the
-// kernel.
-type Spec struct {
+// immutableTypes is a set of types which mustn't be changed.
+type immutableTypes struct {
 	// All types contained by the spec, not including types from the base in
 	// case the spec was parsed from split BTF.
 	types []Type
@@ -44,13 +43,132 @@ type Spec struct {
 
 	// Types indexed by essential name.
 	// Includes all struct flavors and types with the same name.
-	namedTypes map[essentialName][]Type
+	namedTypes map[essentialName][]TypeID
+
+	// Byte order of the types. This affects things like struct member order
+	// when using bitfields.
+	byteOrder binary.ByteOrder
+}
+
+func (s *immutableTypes) typeByID(id TypeID) (Type, bool) {
+	if id < s.firstTypeID {
+		return nil, false
+	}
+
+	index := int(id - s.firstTypeID)
+	if index >= len(s.types) {
+		return nil, false
+	}
+
+	return s.types[index], true
+}
+
+// mutableTypes is a set of types which may be changed.
+type mutableTypes struct {
+	imm           immutableTypes
+	copies        map[Type]Type   // map[orig]copy
+	copiedTypeIDs map[Type]TypeID // map[copy]origID
+}
+
+// add a type to the set of mutable types.
+//
+// Copies type and all of its children once. Repeated calls with the same type
+// do not copy again.
+func (mt *mutableTypes) add(typ Type, typeIDs map[Type]TypeID) Type {
+	return modifyGraphPreorder(typ, func(t Type) (Type, bool) {
+		cpy, ok := mt.copies[t]
+		if ok {
+			// This has been copied previously, no need to continue.
+			return cpy, false
+		}
+
+		cpy = t.copy()
+		mt.copies[t] = cpy
+
+		if id, ok := typeIDs[t]; ok {
+			mt.copiedTypeIDs[cpy] = id
+		}
+
+		// This is a new copy, keep copying children.
+		return cpy, true
+	})
+}
+
+// copy a set of mutable types.
+func (mt *mutableTypes) copy() mutableTypes {
+	mtCopy := mutableTypes{
+		mt.imm,
+		make(map[Type]Type, len(mt.copies)),
+		make(map[Type]TypeID, len(mt.copiedTypeIDs)),
+	}
+
+	copies := make(map[Type]Type, len(mt.copies))
+	for orig, copy := range mt.copies {
+		// NB: We make a copy of copy, not orig, so that changes to mutable types
+		// are preserved.
+		copyOfCopy := mtCopy.add(copy, mt.copiedTypeIDs)
+		copies[orig] = copyOfCopy
+	}
+
+	// mtCopy.copies is currently map[copy]copyOfCopy, replace it with
+	// map[orig]copyOfCopy.
+	mtCopy.copies = copies
+	return mtCopy
+}
+
+func (mt *mutableTypes) typeID(typ Type) (TypeID, error) {
+	if _, ok := typ.(*Void); ok {
+		// Equality is weird for void, since it is a zero sized type.
+		return 0, nil
+	}
+
+	id, ok := mt.copiedTypeIDs[typ]
+	if !ok {
+		return 0, fmt.Errorf("no ID for type %s: %w", typ, ErrNotFound)
+	}
+
+	return id, nil
+}
+
+func (mt *mutableTypes) typeByID(id TypeID) (Type, bool) {
+	immT, ok := mt.imm.typeByID(id)
+	if !ok {
+		return nil, false
+	}
+
+	return mt.add(immT, mt.imm.typeIDs), true
+}
+
+func (mt *mutableTypes) anyTypesByName(name string) ([]Type, error) {
+	immTypes := mt.imm.namedTypes[newEssentialName(name)]
+	if len(immTypes) == 0 {
+		return nil, fmt.Errorf("type name %s: %w", name, ErrNotFound)
+	}
+
+	// Return a copy to prevent changes to namedTypes.
+ result := make([]Type, 0, len(immTypes)) + for _, id := range immTypes { + immT, ok := mt.imm.typeByID(id) + if !ok { + return nil, fmt.Errorf("no type with ID %d", id) + } + + // Match against the full name, not just the essential one + // in case the type being looked up is a struct flavor. + if immT.TypeName() == name { + result = append(result, mt.add(immT, mt.imm.typeIDs)) + } + } + return result, nil +} + +// Spec allows querying a set of Types and loading the set into the +// kernel. +type Spec struct { + mutableTypes // String table from ELF. strings *stringTable - - // Byte order of the ELF we decoded the spec from, may be nil. - byteOrder binary.ByteOrder } // LoadSpec opens file and calls LoadSpecFromReader on it. @@ -181,7 +299,7 @@ func loadSpecFromELF(file *internal.SafeELFFile) (*Spec, error) { return nil, err } - err = fixupDatasec(spec.types, sectionSizes, offsets) + err = fixupDatasec(spec.imm.types, sectionSizes, offsets) if err != nil { return nil, err } @@ -197,7 +315,7 @@ func loadRawSpec(btf io.ReaderAt, bo binary.ByteOrder, base *Spec) (*Spec, error ) if base != nil { - if base.firstTypeID != 0 { + if base.imm.firstTypeID != 0 { return nil, fmt.Errorf("can't use split BTF as base") } @@ -217,16 +335,22 @@ func loadRawSpec(btf io.ReaderAt, bo binary.ByteOrder, base *Spec) (*Spec, error typeIDs, typesByName := indexTypes(types, firstTypeID) return &Spec{ - namedTypes: typesByName, - typeIDs: typeIDs, - types: types, - firstTypeID: firstTypeID, - strings: rawStrings, - byteOrder: bo, + mutableTypes{ + immutableTypes{ + types, + typeIDs, + firstTypeID, + typesByName, + bo, + }, + make(map[Type]Type), + make(map[Type]TypeID), + }, + rawStrings, }, nil } -func indexTypes(types []Type, firstTypeID TypeID) (map[Type]TypeID, map[essentialName][]Type) { +func indexTypes(types []Type, firstTypeID TypeID) (map[Type]TypeID, map[essentialName][]TypeID) { namedTypes := 0 for _, typ := range types { if typ.TypeName() != "" { @@ -238,13 +362,15 @@ func indexTypes(types []Type, firstTypeID TypeID) (map[Type]TypeID, map[essentia } typeIDs := make(map[Type]TypeID, len(types)) - typesByName := make(map[essentialName][]Type, namedTypes) + typesByName := make(map[essentialName][]TypeID, namedTypes) for i, typ := range types { + id := firstTypeID + TypeID(i) + typeIDs[typ] = id + if name := newEssentialName(typ.TypeName()); name != "" { - typesByName[name] = append(typesByName[name], typ) + typesByName[name] = append(typesByName[name], id) } - typeIDs[typ] = firstTypeID + TypeID(i) } return typeIDs, typesByName @@ -492,17 +618,9 @@ func fixupDatasecLayout(ds *Datasec) error { // Copy creates a copy of Spec. func (s *Spec) Copy() *Spec { - types := copyTypes(s.types, nil) - typeIDs, typesByName := indexTypes(types, s.firstTypeID) - - // NB: Other parts of spec are not copied since they are immutable. return &Spec{ - types, - typeIDs, - s.firstTypeID, - typesByName, + s.mutableTypes.copy(), s.strings, - s.byteOrder, } } @@ -519,8 +637,8 @@ func (sw sliceWriter) Write(p []byte) (int, error) { // nextTypeID returns the next unallocated type ID or an error if there are no // more type IDs. 
func (s *Spec) nextTypeID() (TypeID, error) { - id := s.firstTypeID + TypeID(len(s.types)) - if id < s.firstTypeID { + id := s.imm.firstTypeID + TypeID(len(s.imm.types)) + if id < s.imm.firstTypeID { return 0, fmt.Errorf("no more type IDs") } return id, nil @@ -531,33 +649,19 @@ func (s *Spec) nextTypeID() (TypeID, error) { // Returns an error wrapping ErrNotFound if a Type with the given ID // does not exist in the Spec. func (s *Spec) TypeByID(id TypeID) (Type, error) { - if id < s.firstTypeID { - return nil, fmt.Errorf("look up type with ID %d (first ID is %d): %w", id, s.firstTypeID, ErrNotFound) - } - - index := int(id - s.firstTypeID) - if index >= len(s.types) { - return nil, fmt.Errorf("look up type with ID %d: %w", id, ErrNotFound) + typ, ok := s.typeByID(id) + if !ok { + return nil, fmt.Errorf("look up type with ID %d (first ID is %d): %w", id, s.imm.firstTypeID, ErrNotFound) } - return s.types[index], nil + return typ, nil } // TypeID returns the ID for a given Type. // // Returns an error wrapping ErrNoFound if the type isn't part of the Spec. func (s *Spec) TypeID(typ Type) (TypeID, error) { - if _, ok := typ.(*Void); ok { - // Equality is weird for void, since it is a zero sized type. - return 0, nil - } - - id, ok := s.typeIDs[typ] - if !ok { - return 0, fmt.Errorf("no ID for type %s: %w", typ, ErrNotFound) - } - - return id, nil + return s.mutableTypes.typeID(typ) } // AnyTypesByName returns a list of BTF Types with the given name. @@ -568,21 +672,7 @@ func (s *Spec) TypeID(typ Type) (TypeID, error) { // // Returns an error wrapping ErrNotFound if no matching Type exists in the Spec. func (s *Spec) AnyTypesByName(name string) ([]Type, error) { - types := s.namedTypes[newEssentialName(name)] - if len(types) == 0 { - return nil, fmt.Errorf("type name %s: %w", name, ErrNotFound) - } - - // Return a copy to prevent changes to namedTypes. - result := make([]Type, 0, len(types)) - for _, t := range types { - // Match against the full name, not just the essential one - // in case the type being looked up is a struct flavor. - if t.TypeName() == name { - result = append(result, t) - } - } - return result, nil + return s.mutableTypes.anyTypesByName(name) } // AnyTypeByName returns a Type with the given name. @@ -671,26 +761,27 @@ func LoadSplitSpecFromReader(r io.ReaderAt, base *Spec) (*Spec, error) { // TypesIterator iterates over types of a given spec. type TypesIterator struct { - types []Type - index int + spec *Spec + id TypeID + done bool // The last visited type in the spec. Type Type } // Iterate returns the types iterator. func (s *Spec) Iterate() *TypesIterator { - // We share the backing array of types with the Spec. This is safe since - // we don't allow deletion or shuffling of types. - return &TypesIterator{types: s.types, index: 0} + return &TypesIterator{spec: s, id: s.imm.firstTypeID} } // Next returns true as long as there are any remaining types. 
func (iter *TypesIterator) Next() bool { - if len(iter.types) <= iter.index { + if iter.done { return false } - iter.Type = iter.types[iter.index] - iter.index++ - return true + var ok bool + iter.Type, ok = iter.spec.typeByID(iter.id) + iter.id++ + iter.done = !ok + return !iter.done } diff --git a/btf/btf_test.go b/btf/btf_test.go index fd8790daf..5c72d26e9 100644 --- a/btf/btf_test.go +++ b/btf/btf_test.go @@ -226,7 +226,7 @@ func BenchmarkParseVmlinux(b *testing.B) { func TestParseCurrentKernelBTF(t *testing.T) { spec := vmlinuxSpec(t) - if len(spec.namedTypes) == 0 { + if len(spec.imm.namedTypes) == 0 { t.Fatal("Empty kernel BTF") } @@ -257,7 +257,7 @@ func TestFindVMLinux(t *testing.T) { t.Fatal("Can't load BTF:", err) } - if len(spec.namedTypes) == 0 { + if len(spec.imm.namedTypes) == 0 { t.Fatal("Empty kernel BTF") } } @@ -339,10 +339,10 @@ func TestSpecCopy(t *testing.T) { spec := parseELFBTF(t, "../testdata/loader-el.elf") cpy := spec.Copy() - have := typesFromSpec(t, spec) - qt.Assert(t, qt.IsTrue(len(spec.types) > 0)) + have := typesFromSpec(spec) + qt.Assert(t, qt.IsTrue(len(have) > 0)) - want := typesFromSpec(t, cpy) + want := typesFromSpec(cpy) qt.Assert(t, qt.HasLen(want, len(have))) for i := range want { @@ -359,6 +359,28 @@ func TestSpecCopy(t *testing.T) { } } +func TestSpecCopyModifications(t *testing.T) { + spec := specFromTypes(t, []Type{&Int{Name: "a", Size: 4}}) + + typ, err := spec.TypeByID(1) + qt.Assert(t, qt.IsNil(err)) + + i := typ.(*Int) + i.Name = "b" + i.Size = 2 + + cpy := spec.Copy() + typ2, err := cpy.TypeByID(1) + qt.Assert(t, qt.IsNil(err)) + i2 := typ2.(*Int) + + qt.Assert(t, qt.Not(qt.Equals(i2, i)), qt.Commentf("Types are distinct")) + qt.Assert(t, qt.DeepEquals(i2, i), qt.Commentf("Modifications are preserved")) + + i.Name = "bar" + qt.Assert(t, qt.Equals(i2.Name, "b")) +} + func TestSpecTypeByID(t *testing.T) { spec := specFromTypes(t, nil) @@ -459,10 +481,8 @@ func TestLoadSplitSpecFromReader(t *testing.T) { t.Fatal("'int' is not supposed to be found in the split BTF") } - if fnProto.Return != intType { - t.Fatalf("Return type of 'bpf_testmod_init()' (%s) does not match 'int' type (%s)", - fnProto.Return, intType) - } + qt.Assert(t, qt.Not(qt.Equals(fnProto.Return, intType)), + qt.Commentf("types found in base of split spec should be copies")) // Check that copied split-BTF's spec has correct type indexing splitSpecCopy := splitSpec.Copy() @@ -512,3 +532,16 @@ func BenchmarkSpecCopy(b *testing.B) { spec.Copy() } } + +func BenchmarkSpecTypeByID(b *testing.B) { + spec := vmlinuxTestdataSpec(b) + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := spec.TypeByID(1) + if err != nil { + b.Fatal(err) + } + } +} diff --git a/btf/core.go b/btf/core.go index 720309a1b..724fae4b8 100644 --- a/btf/core.go +++ b/btf/core.go @@ -178,8 +178,8 @@ func CORERelocate(relos []*CORERelocation, target *Spec, bo binary.ByteOrder, re } } - if bo != target.byteOrder { - return nil, fmt.Errorf("can't relocate %s against %s", bo, target.byteOrder) + if bo != target.imm.byteOrder { + return nil, fmt.Errorf("can't relocate %s against %s", bo, target.imm.byteOrder) } type reloGroup struct { @@ -227,7 +227,7 @@ func CORERelocate(relos []*CORERelocation, target *Spec, bo binary.ByteOrder, re return nil, fmt.Errorf("relocate unnamed or anonymous type %s: %w", localType, ErrNotSupported) } - targets := target.namedTypes[newEssentialName(localTypeName)] + targets := target.imm.namedTypes[newEssentialName(localTypeName)] fixups, err := 
coreCalculateFixups(group.relos, target, targets, bo) if err != nil { return nil, fmt.Errorf("relocate %s: %w", localType, err) @@ -251,13 +251,13 @@ var errIncompatibleTypes = errors.New("incompatible types") // // The best target is determined by scoring: the less poisoning we have to do // the better the target is. -func coreCalculateFixups(relos []*CORERelocation, targetSpec *Spec, targets []Type, bo binary.ByteOrder) ([]COREFixup, error) { +func coreCalculateFixups(relos []*CORERelocation, targetSpec *Spec, targets []TypeID, bo binary.ByteOrder) ([]COREFixup, error) { bestScore := len(relos) var bestFixups []COREFixup - for _, target := range targets { - targetID, err := targetSpec.TypeID(target) + for _, targetID := range targets { + target, err := targetSpec.TypeByID(targetID) if err != nil { - return nil, fmt.Errorf("target type ID: %w", err) + return nil, fmt.Errorf("look up target: %w", err) } score := 0 // lower is better diff --git a/btf/core_test.go b/btf/core_test.go index a7c2c39f1..4ba42cd0c 100644 --- a/btf/core_test.go +++ b/btf/core_test.go @@ -592,7 +592,7 @@ func TestCORERelocation(t *testing.T) { relos = append(relos, reloInfo.relo) } - fixups, err := CORERelocate(relos, spec, spec.byteOrder, spec.TypeID) + fixups, err := CORERelocate(relos, spec, spec.imm.byteOrder, spec.TypeID) if want := errs[name]; want != nil { if !errors.Is(err, want) { t.Fatal("Expected", want, "got", err) @@ -744,7 +744,7 @@ func BenchmarkCORESkBuff(b *testing.B) { b.ReportAllocs() for i := 0; i < b.N; i++ { - _, err = CORERelocate([]*CORERelocation{relo}, spec, spec.byteOrder, spec.TypeID) + _, err = CORERelocate([]*CORERelocation{relo}, spec, spec.imm.byteOrder, spec.TypeID) if err != nil { b.Fatal(err) } diff --git a/btf/fuzz_test.go b/btf/fuzz_test.go index 804c8bf59..64ebe1098 100644 --- a/btf/fuzz_test.go +++ b/btf/fuzz_test.go @@ -38,8 +38,9 @@ func FuzzSpec(f *testing.F) { t.Fatal("spec is nil") } - for _, typ := range spec.types { - fmt.Fprintf(io.Discard, "%+10v", typ) + iter := spec.Iterate() + for iter.Next() { + fmt.Fprintf(io.Discard, "%+10v", iter.Type) } }) } diff --git a/btf/marshal_test.go b/btf/marshal_test.go index c09aae04a..9cbfc0b5e 100644 --- a/btf/marshal_test.go +++ b/btf/marshal_test.go @@ -37,7 +37,7 @@ func TestBuilderMarshal(t *testing.T) { have, err := loadRawSpec(bytes.NewReader(buf), internal.NativeEndian, nil) qt.Assert(t, qt.IsNil(err), qt.Commentf("Couldn't parse BTF")) - qt.Assert(t, qt.DeepEquals(have.types, want)) + qt.Assert(t, qt.DeepEquals(have.imm.types, want)) } func TestBuilderAdd(t *testing.T) { @@ -71,7 +71,7 @@ func TestBuilderAdd(t *testing.T) { } func TestRoundtripVMlinux(t *testing.T) { - types := typesFromSpec(t, vmlinuxSpec(t)) + types := typesFromSpec(vmlinuxSpec(t)) // Randomize the order to force different permutations of walking the type // graph. Keep Void at index 0. 
@@ -105,7 +105,7 @@ limitTypes: rebuilt, err := loadRawSpec(bytes.NewReader(buf), binary.LittleEndian, nil) qt.Assert(t, qt.IsNil(err), qt.Commentf("round tripping BTF failed")) - if n := len(rebuilt.types); n > math.MaxUint16 { + if n := len(rebuilt.imm.types); n > math.MaxUint16 { t.Logf("Rebuilt BTF contains %d types which exceeds uint16, test may fail on older kernels", n) } @@ -153,7 +153,7 @@ func TestMarshalEnum64(t *testing.T) { } func BenchmarkMarshaler(b *testing.B) { - types := typesFromSpec(b, vmlinuxTestdataSpec(b))[:100] + types := typesFromSpec(vmlinuxTestdataSpec(b))[:100] b.ReportAllocs() b.ResetTimer() @@ -168,7 +168,7 @@ func BenchmarkMarshaler(b *testing.B) { } func BenchmarkBuildVmlinux(b *testing.B) { - types := typesFromSpec(b, vmlinuxTestdataSpec(b)) + types := typesFromSpec(vmlinuxTestdataSpec(b)) b.ReportAllocs() b.ResetTimer() @@ -202,9 +202,7 @@ func specFromTypes(tb testing.TB, types []Type) *Spec { return spec } -func typesFromSpec(tb testing.TB, spec *Spec) []Type { - tb.Helper() - +func typesFromSpec(spec *Spec) []Type { var types []Type iter := spec.Iterate() for iter.Next() { diff --git a/btf/traversal.go b/btf/traversal.go index a3a9dec94..5a7387b06 100644 --- a/btf/traversal.go +++ b/btf/traversal.go @@ -87,6 +87,43 @@ func (po *postorderIterator) Next() bool { return po.Type != nil } +// modifyGraphPreorder allows modifying every Type in a graph. +// +// fn is invoked in preorder for every unique Type in a graph. See [Type] for the definition +// of equality. Every occurrence of node is substituted with its replacement. +// +// If cont is true, fn is invoked for every child of replacement. Otherwise +// traversal stops. +// +// Returns the substitution of the root node. +func modifyGraphPreorder(root Type, fn func(node Type) (replacement Type, cont bool)) Type { + sub, cont := fn(root) + replacements := map[Type]Type{root: sub} + + // This is a preorder traversal. + var walk func(*Type) + walk = func(node *Type) { + sub, visited := replacements[*node] + if visited { + *node = sub + return + } + + sub, cont := fn(*node) + replacements[*node] = sub + *node = sub + + if cont { + walkType(*node, walk) + } + } + + if cont { + walkType(sub, walk) + } + return sub +} + // walkType calls fn on each child of typ. 
func walkType(typ Type, fn func(*Type)) { // Explicitly type switch on the most common types to allow the inliner to diff --git a/btf/traversal_test.go b/btf/traversal_test.go index c8919f814..2b87ccacc 100644 --- a/btf/traversal_test.go +++ b/btf/traversal_test.go @@ -65,6 +65,41 @@ func TestPostorderTraversalVmlinux(t *testing.T) { } } +func TestModifyGraph(t *testing.T) { + a := &Int{} + b := &Int{} + skipped := &Int{} + c := &Pointer{skipped} + root := &Struct{ + Members: []Member{ + {Type: a}, + {Type: a}, + {Type: b}, + {Type: c}, + }, + } + + counts := make(map[Type]int) + modifyGraphPreorder(root, func(node Type) (Type, bool) { + counts[node]++ + if node == c { + return nil, false + } + return node, true + }) + + qt.Assert(t, qt.Equals(counts[root], 1)) + qt.Assert(t, qt.Equals(counts[a], 1)) + qt.Assert(t, qt.Equals(counts[b], 1)) + qt.Assert(t, qt.Equals(counts[c], 1)) + qt.Assert(t, qt.Equals(counts[skipped], 0)) + + qt.Assert(t, qt.Equals[Type](root.Members[0].Type, a)) + qt.Assert(t, qt.Equals[Type](root.Members[1].Type, a)) + qt.Assert(t, qt.Equals[Type](root.Members[2].Type, b)) + qt.Assert(t, qt.IsNil(root.Members[3].Type)) +} + func BenchmarkPostorderTraversal(b *testing.B) { spec := vmlinuxTestdataSpec(b) @@ -95,3 +130,32 @@ func BenchmarkPostorderTraversal(b *testing.B) { }) } } + +func BenchmarkPreorderTraversal(b *testing.B) { + spec := vmlinuxTestdataSpec(b) + + var fn *Func + err := spec.TypeByName("gov_update_cpu_data", &fn) + if err != nil { + b.Fatal(err) + } + + for _, test := range []struct { + name string + typ Type + }{ + {"single type", &Int{}}, + {"cycle(1)", newCyclicalType(1)}, + {"cycle(10)", newCyclicalType(10)}, + {"gov_update_cpu_data", fn}, + } { + b.Logf("%10v", test.typ) + + b.Run(test.name, func(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + modifyGraphPreorder(test.typ, func(t Type) (Type, bool) { return t, true }) + } + }) + } +} diff --git a/btf/types.go b/btf/types.go index 8bd7fc77f..8bac8018c 100644 --- a/btf/types.go +++ b/btf/types.go @@ -678,52 +678,31 @@ type Transformer func(Type) Type // typ may form a cycle. If transform is not nil, it is called with the // to be copied type, and the returned value is copied instead. func Copy(typ Type, transform Transformer) Type { - copies := copier{copies: make(map[Type]Type)} - copies.copy(&typ, transform) - return typ + copies := make(copier) + return copies.copy(typ, transform) } -// copy a slice of Types recursively. -// -// See Copy for the semantics. -func copyTypes(types []Type, transform Transformer) []Type { - result := make([]Type, len(types)) - copy(result, types) - - copies := copier{copies: make(map[Type]Type, len(types))} - for i := range result { - copies.copy(&result[i], transform) - } - - return result -} - -type copier struct { - copies map[Type]Type - work typeDeque -} +// A map of a type to its copy. +type copier map[Type]Type -func (c *copier) copy(typ *Type, transform Transformer) { - for t := typ; t != nil; t = c.work.Pop() { - // *t is the identity of the type. - if cpy := c.copies[*t]; cpy != nil { - *t = cpy - continue +func (c copier) copy(typ Type, transform Transformer) Type { + return modifyGraphPreorder(typ, func(t Type) (Type, bool) { + cpy, ok := c[t] + if ok { + // This has been copied previously, no need to continue. 
+			return cpy, false
 		}
 
-		var cpy Type
 		if transform != nil {
-			cpy = transform(*t).copy()
+			cpy = transform(t).copy()
 		} else {
-			cpy = (*t).copy()
+			cpy = t.copy()
 		}
+		c[t] = cpy
 
-		c.copies[*t] = cpy
-		*t = cpy
-
-		// Mark any nested types for copying.
-		walkType(cpy, c.work.Push)
-	}
+		// This is a new copy, keep copying children.
+		return cpy, true
+	})
 }
 
 type typeDeque = internal.Deque[*Type]
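
The notes below are not part of the patch; they sketch how the reworked Spec behaves from a caller's point of view. With types split into an immutable set and per-Spec copies, lookups such as TypeByID hand out private copies, and Spec.Copy only duplicates types that have already been materialized, which is presumably what keeps copying a vmlinux Spec cheap. A minimal sketch mirroring TestSpecCopyModifications, assuming a one-type Spec can be built through the exported NewBuilder/Marshal path and that LoadSpecFromReader accepts a plain BTF blob:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"

	"github.com/cilium/ebpf/btf"
)

func main() {
	// Build a Spec containing a single Int. Void occupies ID 0, so the Int
	// ends up with ID 1, as in TestSpecCopyModifications.
	b, err := btf.NewBuilder([]btf.Type{&btf.Int{Name: "a", Size: 4}})
	if err != nil {
		panic(err)
	}
	raw, err := b.Marshal(nil, &btf.MarshalOptions{Order: binary.LittleEndian})
	if err != nil {
		panic(err)
	}
	spec, err := btf.LoadSpecFromReader(bytes.NewReader(raw))
	if err != nil {
		panic(err)
	}

	// TypeByID hands out a mutable copy; the immutable types behind the Spec
	// stay untouched.
	typ, err := spec.TypeByID(1)
	if err != nil {
		panic(err)
	}
	typ.(*btf.Int).Name = "b"

	// Copy preserves modifications made so far...
	cpy := spec.Copy()
	cpyTyp, _ := cpy.TypeByID(1)
	fmt.Println(cpyTyp.(*btf.Int).Name) // b

	// ...but later changes to the original no longer leak into the copy.
	typ.(*btf.Int).Name = "c"
	again, _ := cpy.TypeByID(1)
	fmt.Println(again.(*btf.Int).Name) // still b
}

The same copy-on-write path backs AnyTypesByName, so types returned by name lookups can also be mutated without affecting other Specs or the shared immutable set.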
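
Iterate no longer shares the Spec's backing slice: Next resolves the current ID through the same typeByID path, so every yielded type is already a private copy that is safe to modify, at the cost of one copy per previously unvisited type. A small, hypothetical helper showing the unchanged calling pattern:

package main

import (
	"fmt"

	"github.com/cilium/ebpf/btf"
)

// countNamedTypes walks a Spec with the reworked iterator. Every type yielded
// via iter.Type is already a private copy, so callers may modify it without
// affecting other copies of the Spec. Hypothetical helper, not from the patch.
func countNamedTypes(spec *btf.Spec) int {
	n := 0
	iter := spec.Iterate()
	for iter.Next() {
		if iter.Type.TypeName() != "" {
			n++
		}
	}
	return n
}

func main() {
	// LoadKernelSpec needs a kernel with BTF enabled; it is used here only to
	// have something to iterate over.
	spec, err := btf.LoadKernelSpec()
	if err != nil {
		panic(err)
	}
	fmt.Println(countNamedTypes(spec))
}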
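
modifyGraphPreorder is unexported, but the exported Copy is now a thin wrapper over it: the callback runs once per distinct type, in preorder, and every occurrence of a shared node is substituted with the same copy. A sketch of that contract; the example types and the counting transform are made up for illustration:

package main

import (
	"fmt"

	"github.com/cilium/ebpf/btf"
)

func main() {
	// A struct whose two members share the same *Int. Copy visits the shared
	// node once and substitutes the same copy in both places.
	shared := &btf.Int{Name: "shared", Size: 4}
	root := &btf.Struct{
		Name: "s",
		Size: 8,
		Members: []btf.Member{
			{Name: "a", Type: shared, Offset: 0},
			{Name: "b", Type: shared, Offset: 32},
		},
	}

	visits := 0
	cpy := btf.Copy(root, func(t btf.Type) btf.Type {
		visits++ // called once per distinct type: root and shared
		return t
	}).(*btf.Struct)

	fmt.Println(visits)                                     // 2
	fmt.Println(cpy != root)                                // true: a deep copy
	fmt.Println(cpy.Members[0].Type == cpy.Members[1].Type) // true: sharing preserved
}

Returning false from the callback, as mutableTypes.add does for already-copied types, stops descent into that subtree; this is what keeps repeated lookups from re-copying the whole graph.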