From 5500cbf0e420a6d643835ec05f35abb170e3e443 Mon Sep 17 00:00:00 2001 From: Jes Cok Date: Thu, 25 Sep 2025 04:41:12 +0000 Subject: [PATCH 001/152] debug/elf: prevent offset overflow When applying relocations, a malformed ELF file can provide an offset that, when added to the relocation size, overflows. This wrapped-around value could then incorrectly pass the bounds check, leading to a panic when the slice is accessed with the original large offset. This change eliminates the manual bounds and overflow checks and writes a relocation to slice by calling putUint. The putUint helper function centralizes the logic for validating slice access, correctly handling both out-of-bounds and integer overflow conditions. This simplifies the relocation code and improves robustness when parsing malformed ELF files. Fixes #75516 Change-Id: I00d806bf5501a9bf70200585ba4fd0475d7b2ddc GitHub-Last-Rev: 49144311d31fecc63cb81b6c31bf9a206acb0596 GitHub-Pull-Request: golang/go#75522 Reviewed-on: https://go-review.googlesource.com/c/go/+/705075 Reviewed-by: Florian Lehner LUCI-TryBot-Result: Go LUCI Reviewed-by: Junyang Shao Auto-Submit: Ian Lance Taylor Reviewed-by: Michael Knyszek Reviewed-by: Ian Lance Taylor Commit-Queue: Ian Lance Taylor --- src/debug/elf/file.go | 160 +++++++++++++++--------------------------- 1 file changed, 57 insertions(+), 103 deletions(-) diff --git a/src/debug/elf/file.go b/src/debug/elf/file.go index 50452b5bef45f4..1d56a06c3fb221 100644 --- a/src/debug/elf/file.go +++ b/src/debug/elf/file.go @@ -25,6 +25,7 @@ import ( "internal/saferio" "internal/zstd" "io" + "math" "os" "strings" "unsafe" @@ -830,17 +831,9 @@ func (f *File) applyRelocationsAMD64(dst []byte, rels []byte) error { switch t { case R_X86_64_64: - if rela.Off+8 >= uint64(len(dst)) || rela.Addend < 0 { - continue - } - val64 := sym.Value + uint64(rela.Addend) - f.ByteOrder.PutUint64(dst[rela.Off:rela.Off+8], val64) + putUint(f.ByteOrder, dst, rela.Off, 8, sym.Value, rela.Addend, false) case R_X86_64_32: - if rela.Off+4 >= uint64(len(dst)) || rela.Addend < 0 { - continue - } - val32 := uint32(sym.Value) + uint32(rela.Addend) - f.ByteOrder.PutUint32(dst[rela.Off:rela.Off+4], val32) + putUint(f.ByteOrder, dst, rela.Off, 4, sym.Value, rela.Addend, false) } } @@ -872,12 +865,7 @@ func (f *File) applyRelocations386(dst []byte, rels []byte) error { sym := &symbols[symNo-1] if t == R_386_32 { - if rel.Off+4 >= uint32(len(dst)) { - continue - } - val := f.ByteOrder.Uint32(dst[rel.Off : rel.Off+4]) - val += uint32(sym.Value) - f.ByteOrder.PutUint32(dst[rel.Off:rel.Off+4], val) + putUint(f.ByteOrder, dst, uint64(rel.Off), 4, sym.Value, 0, true) } } @@ -910,12 +898,7 @@ func (f *File) applyRelocationsARM(dst []byte, rels []byte) error { switch t { case R_ARM_ABS32: - if rel.Off+4 >= uint32(len(dst)) { - continue - } - val := f.ByteOrder.Uint32(dst[rel.Off : rel.Off+4]) - val += uint32(sym.Value) - f.ByteOrder.PutUint32(dst[rel.Off:rel.Off+4], val) + putUint(f.ByteOrder, dst, uint64(rel.Off), 4, sym.Value, 0, true) } } @@ -955,17 +938,9 @@ func (f *File) applyRelocationsARM64(dst []byte, rels []byte) error { switch t { case R_AARCH64_ABS64: - if rela.Off+8 >= uint64(len(dst)) || rela.Addend < 0 { - continue - } - val64 := sym.Value + uint64(rela.Addend) - f.ByteOrder.PutUint64(dst[rela.Off:rela.Off+8], val64) + putUint(f.ByteOrder, dst, rela.Off, 8, sym.Value, rela.Addend, false) case R_AARCH64_ABS32: - if rela.Off+4 >= uint64(len(dst)) || rela.Addend < 0 { - continue - } - val32 := uint32(sym.Value) + uint32(rela.Addend) - 
f.ByteOrder.PutUint32(dst[rela.Off:rela.Off+4], val32) + putUint(f.ByteOrder, dst, rela.Off, 4, sym.Value, rela.Addend, false) } } @@ -1001,11 +976,7 @@ func (f *File) applyRelocationsPPC(dst []byte, rels []byte) error { switch t { case R_PPC_ADDR32: - if rela.Off+4 >= uint32(len(dst)) || rela.Addend < 0 { - continue - } - val32 := uint32(sym.Value) + uint32(rela.Addend) - f.ByteOrder.PutUint32(dst[rela.Off:rela.Off+4], val32) + putUint(f.ByteOrder, dst, uint64(rela.Off), 4, sym.Value, 0, false) } } @@ -1041,17 +1012,9 @@ func (f *File) applyRelocationsPPC64(dst []byte, rels []byte) error { switch t { case R_PPC64_ADDR64: - if rela.Off+8 >= uint64(len(dst)) || rela.Addend < 0 { - continue - } - val64 := sym.Value + uint64(rela.Addend) - f.ByteOrder.PutUint64(dst[rela.Off:rela.Off+8], val64) + putUint(f.ByteOrder, dst, rela.Off, 8, sym.Value, rela.Addend, false) case R_PPC64_ADDR32: - if rela.Off+4 >= uint64(len(dst)) || rela.Addend < 0 { - continue - } - val32 := uint32(sym.Value) + uint32(rela.Addend) - f.ByteOrder.PutUint32(dst[rela.Off:rela.Off+4], val32) + putUint(f.ByteOrder, dst, rela.Off, 4, sym.Value, rela.Addend, false) } } @@ -1084,12 +1047,7 @@ func (f *File) applyRelocationsMIPS(dst []byte, rels []byte) error { switch t { case R_MIPS_32: - if rel.Off+4 >= uint32(len(dst)) { - continue - } - val := f.ByteOrder.Uint32(dst[rel.Off : rel.Off+4]) - val += uint32(sym.Value) - f.ByteOrder.PutUint32(dst[rel.Off:rel.Off+4], val) + putUint(f.ByteOrder, dst, uint64(rel.Off), 4, sym.Value, 0, true) } } @@ -1132,17 +1090,9 @@ func (f *File) applyRelocationsMIPS64(dst []byte, rels []byte) error { switch t { case R_MIPS_64: - if rela.Off+8 >= uint64(len(dst)) || rela.Addend < 0 { - continue - } - val64 := sym.Value + uint64(rela.Addend) - f.ByteOrder.PutUint64(dst[rela.Off:rela.Off+8], val64) + putUint(f.ByteOrder, dst, rela.Off, 8, sym.Value, rela.Addend, false) case R_MIPS_32: - if rela.Off+4 >= uint64(len(dst)) || rela.Addend < 0 { - continue - } - val32 := uint32(sym.Value) + uint32(rela.Addend) - f.ByteOrder.PutUint32(dst[rela.Off:rela.Off+4], val32) + putUint(f.ByteOrder, dst, rela.Off, 4, sym.Value, rela.Addend, false) } } @@ -1180,17 +1130,9 @@ func (f *File) applyRelocationsLOONG64(dst []byte, rels []byte) error { switch t { case R_LARCH_64: - if rela.Off+8 >= uint64(len(dst)) || rela.Addend < 0 { - continue - } - val64 := sym.Value + uint64(rela.Addend) - f.ByteOrder.PutUint64(dst[rela.Off:rela.Off+8], val64) + putUint(f.ByteOrder, dst, rela.Off, 8, sym.Value, rela.Addend, false) case R_LARCH_32: - if rela.Off+4 >= uint64(len(dst)) || rela.Addend < 0 { - continue - } - val32 := uint32(sym.Value) + uint32(rela.Addend) - f.ByteOrder.PutUint32(dst[rela.Off:rela.Off+4], val32) + putUint(f.ByteOrder, dst, rela.Off, 4, sym.Value, rela.Addend, false) } } @@ -1226,17 +1168,9 @@ func (f *File) applyRelocationsRISCV64(dst []byte, rels []byte) error { switch t { case R_RISCV_64: - if rela.Off+8 >= uint64(len(dst)) || rela.Addend < 0 { - continue - } - val64 := sym.Value + uint64(rela.Addend) - f.ByteOrder.PutUint64(dst[rela.Off:rela.Off+8], val64) + putUint(f.ByteOrder, dst, rela.Off, 8, sym.Value, rela.Addend, false) case R_RISCV_32: - if rela.Off+4 >= uint64(len(dst)) || rela.Addend < 0 { - continue - } - val32 := uint32(sym.Value) + uint32(rela.Addend) - f.ByteOrder.PutUint32(dst[rela.Off:rela.Off+4], val32) + putUint(f.ByteOrder, dst, rela.Off, 4, sym.Value, rela.Addend, false) } } @@ -1272,17 +1206,9 @@ func (f *File) applyRelocationss390x(dst []byte, rels []byte) error { switch t { case 
R_390_64: - if rela.Off+8 >= uint64(len(dst)) || rela.Addend < 0 { - continue - } - val64 := sym.Value + uint64(rela.Addend) - f.ByteOrder.PutUint64(dst[rela.Off:rela.Off+8], val64) + putUint(f.ByteOrder, dst, rela.Off, 8, sym.Value, rela.Addend, false) case R_390_32: - if rela.Off+4 >= uint64(len(dst)) || rela.Addend < 0 { - continue - } - val32 := uint32(sym.Value) + uint32(rela.Addend) - f.ByteOrder.PutUint32(dst[rela.Off:rela.Off+4], val32) + putUint(f.ByteOrder, dst, rela.Off, 4, sym.Value, rela.Addend, false) } } @@ -1318,17 +1244,10 @@ func (f *File) applyRelocationsSPARC64(dst []byte, rels []byte) error { switch t { case R_SPARC_64, R_SPARC_UA64: - if rela.Off+8 >= uint64(len(dst)) || rela.Addend < 0 { - continue - } - val64 := sym.Value + uint64(rela.Addend) - f.ByteOrder.PutUint64(dst[rela.Off:rela.Off+8], val64) + putUint(f.ByteOrder, dst, rela.Off, 8, sym.Value, rela.Addend, false) + case R_SPARC_32, R_SPARC_UA32: - if rela.Off+4 >= uint64(len(dst)) || rela.Addend < 0 { - continue - } - val32 := uint32(sym.Value) + uint32(rela.Addend) - f.ByteOrder.PutUint32(dst[rela.Off:rela.Off+4], val32) + putUint(f.ByteOrder, dst, rela.Off, 4, sym.Value, rela.Addend, false) } } @@ -1903,3 +1822,38 @@ type nobitsSectionReader struct{} func (*nobitsSectionReader) ReadAt(p []byte, off int64) (n int, err error) { return 0, errors.New("unexpected read from SHT_NOBITS section") } + +// putUint writes a relocation to slice +// at offset start of length length (4 or 8 bytes), +// adding sym+addend to the existing value if readUint is true, +// or just writing sym+addend if readUint is false. +// If the write would extend beyond the end of slice, putUint does nothing. +// If the addend is negative, putUint does nothing. +// If the addition would overflow, putUint does nothing. +func putUint(byteOrder binary.ByteOrder, slice []byte, start, length, sym uint64, addend int64, readUint bool) { + if start+length > uint64(len(slice)) || math.MaxUint64-start < length { + return + } + if addend < 0 { + return + } + + s := slice[start : start+length] + + switch length { + case 4: + ae := uint32(addend) + if readUint { + ae += byteOrder.Uint32(s) + } + byteOrder.PutUint32(s, uint32(sym)+ae) + case 8: + ae := uint64(addend) + if readUint { + ae += byteOrder.Uint64(s) + } + byteOrder.PutUint64(s, sym+ae) + default: + panic("can't happen") + } +} From 6d51f932575284d6e78baa5e98f47f737a9f5b19 Mon Sep 17 00:00:00 2001 From: qmuntal Date: Mon, 29 Sep 2025 14:14:28 +0200 Subject: [PATCH 002/152] runtime: jump instead of branch in netbsd/arm64 entry point CL 706175 removed the NOFRAME directive from _rt0_arm64_netbsd but did not change the BL instruction to a JMP instruction. This causes the frame pointer to be stored on the stack, this making direct load from RSP to be off by 8 bytes. 
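Stepping back to the debug/elf change above: a minimal, standalone sketch of the combined bounds and overflow guard that putUint centralizes. The slice length and relocation offsets below are hypothetical, chosen only to show how a near-2^64 offset wraps past a plain bounds check.

	package main

	import (
		"fmt"
		"math"
	)

	// ok mirrors the shape of putUint's guard: the write must fit inside the
	// slice, and start+length must not wrap around uint64.
	func ok(sliceLen, start, length uint64) bool {
		return start+length <= sliceLen && math.MaxUint64-start >= length
	}

	func main() {
		const sliceLen = 64 // hypothetical section size

		fmt.Println(ok(sliceLen, 8, 8)) // true: well-formed relocation

		// Malformed offset near 2^64: start+length wraps around to 4, which
		// would slip past a bare "start+length >= len" comparison and then
		// panic when slicing with the huge start; the overflow test rejects it.
		fmt.Println(ok(sliceLen, math.MaxUint64-3, 8)) // false
	}
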
Cq-Include-Trybots: luci.golang.try:gotip-netbsd-arm64 Change-Id: I0c212fbaba74cfce508f961090dc6e66154c3054 Reviewed-on: https://go-review.googlesource.com/c/go/+/707675 LUCI-TryBot-Result: Go LUCI Reviewed-by: Michael Pratt Reviewed-by: Cherry Mui --- src/runtime/rt0_netbsd_arm64.s | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/runtime/rt0_netbsd_arm64.s b/src/runtime/rt0_netbsd_arm64.s index 07fb0a1240aec5..80802209b78f12 100644 --- a/src/runtime/rt0_netbsd_arm64.s +++ b/src/runtime/rt0_netbsd_arm64.s @@ -7,7 +7,7 @@ TEXT _rt0_arm64_netbsd(SB),NOSPLIT,$0 MOVD 0(RSP), R0 // argc ADD $8, RSP, R1 // argv - BL main(SB) + JMP main(SB) // When building with -buildmode=c-shared, this symbol is called when the shared // library is loaded. From d42d56b764f4c8b06aaa2de2dc9c1d2171e79490 Mon Sep 17 00:00:00 2001 From: apocelipes Date: Wed, 24 Sep 2025 03:23:03 +0000 Subject: [PATCH 003/152] encoding/xml: make use of reflect.TypeAssert MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit To make the code more readable and improve performance: goos: darwin goarch: arm64 pkg: encoding/xml cpu: Apple M4 │ old │ new │ │ sec/op │ sec/op vs base │ Marshal-10 1.902µ ± 1% 1.496µ ± 1% -21.37% (p=0.000 n=10) Unmarshal-10 3.877µ ± 1% 3.418µ ± 2% -11.84% (p=0.000 n=10) HTMLAutoClose-10 1.314µ ± 3% 1.333µ ± 1% ~ (p=0.270 n=10) geomean 2.132µ 1.896µ -11.07% │ old │ new │ │ B/op │ B/op vs base │ Marshal-10 5.570Ki ± 0% 5.570Ki ± 0% ~ (p=1.000 n=10) ¹ Unmarshal-10 7.586Ki ± 0% 7.555Ki ± 0% -0.41% (p=0.000 n=10) HTMLAutoClose-10 3.496Ki ± 0% 3.496Ki ± 0% ~ (p=1.000 n=10) ¹ geomean 5.286Ki 5.279Ki -0.14% ¹ all samples are equal │ old │ new │ │ allocs/op │ allocs/op vs base │ Marshal-10 23.00 ± 0% 23.00 ± 0% ~ (p=1.000 n=10) ¹ Unmarshal-10 185.0 ± 0% 184.0 ± 0% -0.54% (p=0.000 n=10) HTMLAutoClose-10 93.00 ± 0% 93.00 ± 0% ~ (p=1.000 n=10) ¹ geomean 73.42 73.28 -0.18% ¹ all samples are equal Updates #62121 Change-Id: Ie458e7458d4358c380374571d380ca3b65ca87bb GitHub-Last-Rev: bb6bb3039328ca1d53ee3d56fd6597109ed76b09 GitHub-Pull-Request: golang/go#75483 Reviewed-on: https://go-review.googlesource.com/c/go/+/704215 LUCI-TryBot-Result: Go LUCI Auto-Submit: Keith Randall Reviewed-by: Keith Randall Reviewed-by: Keith Randall Reviewed-by: Carlos Amedee --- src/encoding/xml/marshal.go | 127 ++++++++++++++++++++---------------- src/encoding/xml/read.go | 77 +++++++++++++--------- 2 files changed, 117 insertions(+), 87 deletions(-) diff --git a/src/encoding/xml/marshal.go b/src/encoding/xml/marshal.go index 133503fa2de41c..13fbeeeedc75ce 100644 --- a/src/encoding/xml/marshal.go +++ b/src/encoding/xml/marshal.go @@ -416,12 +416,6 @@ func (p *printer) popPrefix() { } } -var ( - marshalerType = reflect.TypeFor[Marshaler]() - marshalerAttrType = reflect.TypeFor[MarshalerAttr]() - textMarshalerType = reflect.TypeFor[encoding.TextMarshaler]() -) - // marshalValue writes one or more XML elements representing val. // If val was obtained from a struct field, finfo must have its details. func (p *printer) marshalValue(val reflect.Value, finfo *fieldInfo, startTemplate *StartElement) error { @@ -450,24 +444,32 @@ func (p *printer) marshalValue(val reflect.Value, finfo *fieldInfo, startTemplat typ := val.Type() // Check for marshaler. 
- if val.CanInterface() && typ.Implements(marshalerType) { - return p.marshalInterface(val.Interface().(Marshaler), defaultStart(typ, finfo, startTemplate)) + if val.CanInterface() { + if marshaler, ok := reflect.TypeAssert[Marshaler](val); ok { + return p.marshalInterface(marshaler, defaultStart(typ, finfo, startTemplate)) + } } if val.CanAddr() { pv := val.Addr() - if pv.CanInterface() && pv.Type().Implements(marshalerType) { - return p.marshalInterface(pv.Interface().(Marshaler), defaultStart(pv.Type(), finfo, startTemplate)) + if pv.CanInterface() { + if marshaler, ok := reflect.TypeAssert[Marshaler](pv); ok { + return p.marshalInterface(marshaler, defaultStart(pv.Type(), finfo, startTemplate)) + } } } // Check for text marshaler. - if val.CanInterface() && typ.Implements(textMarshalerType) { - return p.marshalTextInterface(val.Interface().(encoding.TextMarshaler), defaultStart(typ, finfo, startTemplate)) + if val.CanInterface() { + if textMarshaler, ok := reflect.TypeAssert[encoding.TextMarshaler](val); ok { + return p.marshalTextInterface(textMarshaler, defaultStart(typ, finfo, startTemplate)) + } } if val.CanAddr() { pv := val.Addr() - if pv.CanInterface() && pv.Type().Implements(textMarshalerType) { - return p.marshalTextInterface(pv.Interface().(encoding.TextMarshaler), defaultStart(pv.Type(), finfo, startTemplate)) + if pv.CanInterface() { + if textMarshaler, ok := reflect.TypeAssert[encoding.TextMarshaler](pv); ok { + return p.marshalTextInterface(textMarshaler, defaultStart(pv.Type(), finfo, startTemplate)) + } } } @@ -503,7 +505,7 @@ func (p *printer) marshalValue(val reflect.Value, finfo *fieldInfo, startTemplat start.Name.Space, start.Name.Local = xmlname.xmlns, xmlname.name } else { fv := xmlname.value(val, dontInitNilPointers) - if v, ok := fv.Interface().(Name); ok && v.Local != "" { + if v, ok := reflect.TypeAssert[Name](fv); ok && v.Local != "" { start.Name = v } } @@ -580,21 +582,9 @@ func (p *printer) marshalValue(val reflect.Value, finfo *fieldInfo, startTemplat // marshalAttr marshals an attribute with the given name and value, adding to start.Attr. 
func (p *printer) marshalAttr(start *StartElement, name Name, val reflect.Value) error { - if val.CanInterface() && val.Type().Implements(marshalerAttrType) { - attr, err := val.Interface().(MarshalerAttr).MarshalXMLAttr(name) - if err != nil { - return err - } - if attr.Name.Local != "" { - start.Attr = append(start.Attr, attr) - } - return nil - } - - if val.CanAddr() { - pv := val.Addr() - if pv.CanInterface() && pv.Type().Implements(marshalerAttrType) { - attr, err := pv.Interface().(MarshalerAttr).MarshalXMLAttr(name) + if val.CanInterface() { + if marshaler, ok := reflect.TypeAssert[MarshalerAttr](val); ok { + attr, err := marshaler.MarshalXMLAttr(name) if err != nil { return err } @@ -605,19 +595,25 @@ func (p *printer) marshalAttr(start *StartElement, name Name, val reflect.Value) } } - if val.CanInterface() && val.Type().Implements(textMarshalerType) { - text, err := val.Interface().(encoding.TextMarshaler).MarshalText() - if err != nil { - return err + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() { + if marshaler, ok := reflect.TypeAssert[MarshalerAttr](pv); ok { + attr, err := marshaler.MarshalXMLAttr(name) + if err != nil { + return err + } + if attr.Name.Local != "" { + start.Attr = append(start.Attr, attr) + } + return nil + } } - start.Attr = append(start.Attr, Attr{name, string(text)}) - return nil } - if val.CanAddr() { - pv := val.Addr() - if pv.CanInterface() && pv.Type().Implements(textMarshalerType) { - text, err := pv.Interface().(encoding.TextMarshaler).MarshalText() + if val.CanInterface() { + if textMarshaler, ok := reflect.TypeAssert[encoding.TextMarshaler](val); ok { + text, err := textMarshaler.MarshalText() if err != nil { return err } @@ -626,6 +622,20 @@ func (p *printer) marshalAttr(start *StartElement, name Name, val reflect.Value) } } + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() { + if textMarshaler, ok := reflect.TypeAssert[encoding.TextMarshaler](pv); ok { + text, err := textMarshaler.MarshalText() + if err != nil { + return err + } + start.Attr = append(start.Attr, Attr{name, string(text)}) + return nil + } + } + } + // Dereference or skip nil pointer, interface values. 
switch val.Kind() { case reflect.Pointer, reflect.Interface: @@ -647,7 +657,8 @@ func (p *printer) marshalAttr(start *StartElement, name Name, val reflect.Value) } if val.Type() == attrType { - start.Attr = append(start.Attr, val.Interface().(Attr)) + attr, _ := reflect.TypeAssert[Attr](val) + start.Attr = append(start.Attr, attr) return nil } @@ -854,20 +865,9 @@ func (p *printer) marshalStruct(tinfo *typeInfo, val reflect.Value) error { if err := s.trim(finfo.parents); err != nil { return err } - if vf.CanInterface() && vf.Type().Implements(textMarshalerType) { - data, err := vf.Interface().(encoding.TextMarshaler).MarshalText() - if err != nil { - return err - } - if err := emit(p, data); err != nil { - return err - } - continue - } - if vf.CanAddr() { - pv := vf.Addr() - if pv.CanInterface() && pv.Type().Implements(textMarshalerType) { - data, err := pv.Interface().(encoding.TextMarshaler).MarshalText() + if vf.CanInterface() { + if textMarshaler, ok := reflect.TypeAssert[encoding.TextMarshaler](vf); ok { + data, err := textMarshaler.MarshalText() if err != nil { return err } @@ -877,6 +877,21 @@ func (p *printer) marshalStruct(tinfo *typeInfo, val reflect.Value) error { continue } } + if vf.CanAddr() { + pv := vf.Addr() + if pv.CanInterface() { + if textMarshaler, ok := reflect.TypeAssert[encoding.TextMarshaler](pv); ok { + data, err := textMarshaler.MarshalText() + if err != nil { + return err + } + if err := emit(p, data); err != nil { + return err + } + continue + } + } + } var scratch [64]byte vf = indirect(vf) @@ -902,7 +917,7 @@ func (p *printer) marshalStruct(tinfo *typeInfo, val reflect.Value) error { return err } case reflect.Slice: - if elem, ok := vf.Interface().([]byte); ok { + if elem, ok := reflect.TypeAssert[[]byte](vf); ok { if err := emit(p, elem); err != nil { return err } diff --git a/src/encoding/xml/read.go b/src/encoding/xml/read.go index af25c20f0618dc..d3cb74b2c4311a 100644 --- a/src/encoding/xml/read.go +++ b/src/encoding/xml/read.go @@ -255,28 +255,36 @@ func (d *Decoder) unmarshalAttr(val reflect.Value, attr Attr) error { } val = val.Elem() } - if val.CanInterface() && val.Type().Implements(unmarshalerAttrType) { + if val.CanInterface() { // This is an unmarshaler with a non-pointer receiver, // so it's likely to be incorrect, but we do what we're told. - return val.Interface().(UnmarshalerAttr).UnmarshalXMLAttr(attr) + if unmarshaler, ok := reflect.TypeAssert[UnmarshalerAttr](val); ok { + return unmarshaler.UnmarshalXMLAttr(attr) + } } if val.CanAddr() { pv := val.Addr() - if pv.CanInterface() && pv.Type().Implements(unmarshalerAttrType) { - return pv.Interface().(UnmarshalerAttr).UnmarshalXMLAttr(attr) + if pv.CanInterface() { + if unmarshaler, ok := reflect.TypeAssert[UnmarshalerAttr](pv); ok { + return unmarshaler.UnmarshalXMLAttr(attr) + } } } // Not an UnmarshalerAttr; try encoding.TextUnmarshaler. - if val.CanInterface() && val.Type().Implements(textUnmarshalerType) { + if val.CanInterface() { // This is an unmarshaler with a non-pointer receiver, // so it's likely to be incorrect, but we do what we're told. 
- return val.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(attr.Value)) + if textUnmarshaler, ok := reflect.TypeAssert[encoding.TextUnmarshaler](val); ok { + return textUnmarshaler.UnmarshalText([]byte(attr.Value)) + } } if val.CanAddr() { pv := val.Addr() - if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) { - return pv.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(attr.Value)) + if pv.CanInterface() { + if textUnmarshaler, ok := reflect.TypeAssert[encoding.TextUnmarshaler](pv); ok { + return textUnmarshaler.UnmarshalText([]byte(attr.Value)) + } } } @@ -303,12 +311,7 @@ func (d *Decoder) unmarshalAttr(val reflect.Value, attr Attr) error { return copyValue(val, []byte(attr.Value)) } -var ( - attrType = reflect.TypeFor[Attr]() - unmarshalerType = reflect.TypeFor[Unmarshaler]() - unmarshalerAttrType = reflect.TypeFor[UnmarshalerAttr]() - textUnmarshalerType = reflect.TypeFor[encoding.TextUnmarshaler]() -) +var attrType = reflect.TypeFor[Attr]() const ( maxUnmarshalDepth = 10000 @@ -352,27 +355,35 @@ func (d *Decoder) unmarshal(val reflect.Value, start *StartElement, depth int) e val = val.Elem() } - if val.CanInterface() && val.Type().Implements(unmarshalerType) { + if val.CanInterface() { // This is an unmarshaler with a non-pointer receiver, // so it's likely to be incorrect, but we do what we're told. - return d.unmarshalInterface(val.Interface().(Unmarshaler), start) + if unmarshaler, ok := reflect.TypeAssert[Unmarshaler](val); ok { + return d.unmarshalInterface(unmarshaler, start) + } } if val.CanAddr() { pv := val.Addr() - if pv.CanInterface() && pv.Type().Implements(unmarshalerType) { - return d.unmarshalInterface(pv.Interface().(Unmarshaler), start) + if pv.CanInterface() { + if unmarshaler, ok := reflect.TypeAssert[Unmarshaler](pv); ok { + return d.unmarshalInterface(unmarshaler, start) + } } } - if val.CanInterface() && val.Type().Implements(textUnmarshalerType) { - return d.unmarshalTextInterface(val.Interface().(encoding.TextUnmarshaler)) + if val.CanInterface() { + if textUnmarshaler, ok := reflect.TypeAssert[encoding.TextUnmarshaler](val); ok { + return d.unmarshalTextInterface(textUnmarshaler) + } } if val.CanAddr() { pv := val.Addr() - if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) { - return d.unmarshalTextInterface(pv.Interface().(encoding.TextUnmarshaler)) + if pv.CanInterface() { + if textUnmarshaler, ok := reflect.TypeAssert[encoding.TextUnmarshaler](pv); ok { + return d.unmarshalTextInterface(textUnmarshaler) + } } } @@ -453,7 +464,7 @@ func (d *Decoder) unmarshal(val reflect.Value, start *StartElement, depth int) e return UnmarshalError(e) } fv := finfo.value(sv, initNilPointers) - if _, ok := fv.Interface().(Name); ok { + if _, ok := reflect.TypeAssert[Name](fv); ok { fv.Set(reflect.ValueOf(start.Name)) } } @@ -578,20 +589,24 @@ Loop: } } - if saveData.IsValid() && saveData.CanInterface() && saveData.Type().Implements(textUnmarshalerType) { - if err := saveData.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil { - return err + if saveData.IsValid() && saveData.CanInterface() { + if textUnmarshaler, ok := reflect.TypeAssert[encoding.TextUnmarshaler](saveData); ok { + if err := textUnmarshaler.UnmarshalText(data); err != nil { + return err + } + saveData = reflect.Value{} } - saveData = reflect.Value{} } if saveData.IsValid() && saveData.CanAddr() { pv := saveData.Addr() - if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) { - if err := 
pv.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil { - return err + if pv.CanInterface() { + if textUnmarshaler, ok := reflect.TypeAssert[encoding.TextUnmarshaler](pv); ok { + if err := textUnmarshaler.UnmarshalText(data); err != nil { + return err + } + saveData = reflect.Value{} } - saveData = reflect.Value{} } } From fe3ba74b9e6e3385cbf7c2f3a9c0b72baeac4b01 Mon Sep 17 00:00:00 2001 From: Richard Miller Date: Sat, 27 Sep 2025 19:45:36 +0100 Subject: [PATCH 004/152] cmd/link: skip TestFlagW on platforms without DWARF symbol table As with other DWARF tests, don't run TestFlagW on platforms where executables don't have a DWARF symbol table. Fixes #75585 Change-Id: I81014bf59b15e30ac1b2a7d24a52f9647db46c26 Reviewed-on: https://go-review.googlesource.com/c/go/+/706418 Reviewed-by: Cherry Mui LUCI-TryBot-Result: Go LUCI Reviewed-by: Carlos Amedee --- src/cmd/link/dwarf_test.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/cmd/link/dwarf_test.go b/src/cmd/link/dwarf_test.go index 6a60a746a5b48b..4fce358e602de5 100644 --- a/src/cmd/link/dwarf_test.go +++ b/src/cmd/link/dwarf_test.go @@ -364,6 +364,10 @@ func TestFlagW(t *testing.T) { if runtime.GOOS == "aix" { t.Skip("internal/xcoff cannot parse file without symbol table") } + if !platform.ExecutableHasDWARF(runtime.GOOS, runtime.GOARCH) { + t.Skipf("skipping on %s/%s: no DWARF symbol table in executables", runtime.GOOS, runtime.GOARCH) + } + t.Parallel() tmpdir := t.TempDir() From ae8eba071b228dd9e05de0b0c338f3d941a0a43f Mon Sep 17 00:00:00 2001 From: Ian Lance Taylor Date: Sun, 28 Sep 2025 21:25:24 -0700 Subject: [PATCH 005/152] cmd/link: use correct length for pcln.cutab The pcln.cutab slice holds uint32 elements, as can be seen in the runtime.moduledata type. The slice was being created with the len (and cap) set to the size of the slice, which means that the count was four times too large. This patch sets the correct len/cap. This doesn't matter for the runtime because nothing looks at the len of cutab. Since the incorrect len is larger, all valid indexes remain valid. Using the correct length means that more invalid indexes will be caught at run time, but such cases are unlikely. Still, using the correct len is less confusing. While we're here use the simpler sliceSym for pcln.pclntab. 
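For intuition, a minimal sketch (not linker code) of why a []uint32 view over an N-byte symbol needs len N/4 rather than N; the table contents are made up, and the printed values assume a little-endian host.

	package main

	import (
		"fmt"
		"unsafe"
	)

	func main() {
		// A cutab-like table: three uint32 entries backed by 12 bytes.
		raw := []byte{1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0}

		byteSize := uint64(len(raw)) // what the symbol size reports: 12
		elems := byteSize / 4        // correct len/cap for a []uint32 view: 3

		cutab := unsafe.Slice((*uint32)(unsafe.Pointer(unsafe.SliceData(raw))), elems)
		fmt.Println(len(cutab), cutab) // 3 [1 2 3]
	}

With len set to the byte size instead, indexes 3..11 would pass a bounds check even though they point past the real table, which is exactly the confusion the CL removes.
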
Change-Id: I09f680b3287467120d994b171c86c784085e3d27 Reviewed-on: https://go-review.googlesource.com/c/go/+/707595 LUCI-TryBot-Result: Go LUCI Reviewed-by: Cherry Mui Reviewed-by: Carlos Amedee Auto-Submit: Ian Lance Taylor --- src/cmd/link/internal/ld/symtab.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/cmd/link/internal/ld/symtab.go b/src/cmd/link/internal/ld/symtab.go index 759262286d39d8..2c999ccc4e3a19 100644 --- a/src/cmd/link/internal/ld/symtab.go +++ b/src/cmd/link/internal/ld/symtab.go @@ -645,7 +645,7 @@ func (ctxt *Link) symtab(pcln *pclntab) []sym.SymKind { sliceSym(pcln.funcnametab) // The cutab slice - sliceSym(pcln.cutab) + slice(pcln.cutab, uint64(ldr.SymSize(pcln.cutab))/4) // The filetab slice sliceSym(pcln.filetab) @@ -654,7 +654,7 @@ func (ctxt *Link) symtab(pcln *pclntab) []sym.SymKind { sliceSym(pcln.pctab) // The pclntab slice - slice(pcln.pclntab, uint64(ldr.SymSize(pcln.pclntab))) + sliceSym(pcln.pclntab) // The ftab slice slice(pcln.pclntab, uint64(pcln.nfunc+1)) From 047c2ab841e2d2233d0bef420d1b5ecb545a380a Mon Sep 17 00:00:00 2001 From: Cherry Mui Date: Fri, 26 Sep 2025 09:45:08 -0400 Subject: [PATCH 006/152] cmd/link: don't pass -Wl,-S on Solaris Solaris linker's -S has a different meaning. Fixes #75637. Change-Id: I51e641d5bc6d7f64ab5aa280090c70ec787a1fbf Reviewed-on: https://go-review.googlesource.com/c/go/+/707096 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/cmd/link/dwarf_test.go | 2 +- src/cmd/link/internal/ld/lib.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/cmd/link/dwarf_test.go b/src/cmd/link/dwarf_test.go index 4fce358e602de5..56a076002a92d4 100644 --- a/src/cmd/link/dwarf_test.go +++ b/src/cmd/link/dwarf_test.go @@ -386,7 +386,7 @@ func TestFlagW(t *testing.T) { {"-s", false}, // -s implies -w {"-s -w=0", true}, // -w=0 negates the implied -w } - if testenv.HasCGO() { + if testenv.HasCGO() && runtime.GOOS != "solaris" { // Solaris linker doesn't support the -S flag tests = append(tests, testCase{"-w -linkmode=external", false}, testCase{"-s -linkmode=external", false}, diff --git a/src/cmd/link/internal/ld/lib.go b/src/cmd/link/internal/ld/lib.go index 8d2763bb57f31a..c7596d535e0ee5 100644 --- a/src/cmd/link/internal/ld/lib.go +++ b/src/cmd/link/internal/ld/lib.go @@ -1452,7 +1452,7 @@ func (ctxt *Link) hostlink() { argv = append(argv, "-s") } } else if *FlagW { - if !ctxt.IsAIX() { // The AIX linker's -S has different meaning + if !ctxt.IsAIX() && !ctxt.IsSolaris() { // The AIX and Solaris linkers' -S has different meaning argv = append(argv, "-Wl,-S") // suppress debugging symbols } } From 4e9006a716533fe1c7ee08df02dfc73078f7dc19 Mon Sep 17 00:00:00 2001 From: Roland Shoemaker Date: Mon, 29 Sep 2025 10:11:56 -0700 Subject: [PATCH 007/152] crypto/tls: quote protocols in ALPN error message Quote the protocols sent by the client when returning the ALPN negotiation error message. 
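A minimal illustration (not from the patch) of what switching the verb from %s to %q does to a slice of client-supplied protocol names; the names here are made up, including one with an embedded control character.

	package main

	import "fmt"

	func main() {
		protos := []string{"h2", "bad\nproto"}

		// %s copies the attacker-controlled strings verbatim into the error text.
		fmt.Printf("unsupported application protocols (%s)\n", protos)

		// %q quotes and escapes each element, so control characters cannot
		// smuggle extra lines into logs that record the error.
		fmt.Printf("unsupported application protocols (%q)\n", protos)
	}
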
Fixes CVE-2025-58189 Fixes #75652 Change-Id: Ie7b3a1ed0b6efcc1705b71f0f1e8417126661330 Reviewed-on: https://go-review.googlesource.com/c/go/+/707776 Auto-Submit: Roland Shoemaker Reviewed-by: Neal Patel Reviewed-by: Nicholas Husin Auto-Submit: Nicholas Husin Reviewed-by: Nicholas Husin TryBot-Bypass: Roland Shoemaker Reviewed-by: Daniel McCarney --- src/crypto/tls/handshake_server.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/crypto/tls/handshake_server.go b/src/crypto/tls/handshake_server.go index 1e0b5f06672d15..088c66fadb2a44 100644 --- a/src/crypto/tls/handshake_server.go +++ b/src/crypto/tls/handshake_server.go @@ -357,7 +357,7 @@ func negotiateALPN(serverProtos, clientProtos []string, quic bool) (string, erro if http11fallback { return "", nil } - return "", fmt.Errorf("tls: client requested unsupported application protocols (%s)", clientProtos) + return "", fmt.Errorf("tls: client requested unsupported application protocols (%q)", clientProtos) } // supportsECDHE returns whether ECDHE key exchanges can be used with this From 4b7773356515c178f0af859b952b4b3a78f0813d Mon Sep 17 00:00:00 2001 From: qmuntal Date: Mon, 29 Sep 2025 08:37:35 +0200 Subject: [PATCH 008/152] internal/syscall/windows: regenerate GetFileSizeEx GetFileSizeEx was generated before mkwinsyscall was updated to use SyscallN. Regenerate to use the new style. Fixes #75642 Change-Id: Ia473a167633b67fb75b5762d693848ecee425a7e Reviewed-on: https://go-review.googlesource.com/c/go/+/707615 Reviewed-by: Roland Shoemaker Auto-Submit: Dmitri Shuralyov Reviewed-by: Dmitri Shuralyov LUCI-TryBot-Result: Go LUCI Reviewed-by: Dmitri Shuralyov --- src/internal/syscall/windows/zsyscall_windows.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/internal/syscall/windows/zsyscall_windows.go b/src/internal/syscall/windows/zsyscall_windows.go index fb8bc80629a626..70c4d76dff0d0f 100644 --- a/src/internal/syscall/windows/zsyscall_windows.go +++ b/src/internal/syscall/windows/zsyscall_windows.go @@ -328,7 +328,7 @@ func GetFileInformationByHandleEx(handle syscall.Handle, class uint32, info *byt } func GetFileSizeEx(handle syscall.Handle, size *int64) (err error) { - r1, _, e1 := syscall.Syscall(procGetFileSizeEx.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(size)), 0) + r1, _, e1 := syscall.SyscallN(procGetFileSizeEx.Addr(), uintptr(handle), uintptr(unsafe.Pointer(size))) if r1 == 0 { err = errnoErr(e1) } From eaf2345256613dfbda7e8e69e5f845c4209246c6 Mon Sep 17 00:00:00 2001 From: qmuntal Date: Mon, 22 Sep 2025 15:48:36 +0200 Subject: [PATCH 009/152] cmd/link: use a .def file to mark exported symbols on Windows Binutils defaults to exporting all symbols when building a Windows DLL. To avoid that we were marking symbols with __declspec(dllexport) in the cgo-generated headers, which instructs ld to export only those symbols. However, that approach makes the headers hard to reuse when importing the resulting DLL into other projects, as imported symbols should be marked with __declspec(dllimport). A better approach is to generate a .def file listing the symbols to export, which gets the same effect without having to modify the headers. 
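A sketch of the module-definition file that the new peCreateExportFile hands to the external linker; the library name and exported symbols below are illustrative (borrowed from the testcshared example in this patch), not anything the linker hard-codes.

	package main

	import (
		"fmt"
		"sort"
		"strings"
	)

	func main() {
		// Symbols that would come from //export directives in a c-shared build.
		exports := []string{"DidMainRun", "DidInitRun"}
		sort.Strings(exports)

		// Roughly the layout the new code writes: a LIBRARY line, an EXPORTS
		// header, then one symbol per line. ld then exports only these symbols.
		def := fmt.Sprintf("LIBRARY %s\nEXPORTS\n%s\n", "libgo.dll", strings.Join(exports, "\n"))
		fmt.Print(def)
	}
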
Updates #30674 Fixes #56994 Change-Id: I22bd0aa079e2be4ae43b13d893f6b804eaeddabf Reviewed-on: https://go-review.googlesource.com/c/go/+/705776 Reviewed-by: Michael Knyszek Reviewed-by: Junyang Shao Reviewed-by: Than McIntosh Reviewed-by: Cherry Mui LUCI-TryBot-Result: Go LUCI --- .../cgo/internal/testcshared/cshared_test.go | 75 ++++++++----------- src/cmd/cgo/out.go | 6 +- src/cmd/link/internal/ld/lib.go | 9 ++- src/cmd/link/internal/ld/pe.go | 44 +++++++++++ src/cmd/link/internal/ld/xcoff.go | 5 +- src/cmd/link/internal/loader/loader.go | 13 ++++ src/runtime/cgo/gcc_libinit_windows.c | 9 --- src/runtime/cgo/windows.go | 22 ++++++ 8 files changed, 118 insertions(+), 65 deletions(-) create mode 100644 src/runtime/cgo/windows.go diff --git a/src/cmd/cgo/internal/testcshared/cshared_test.go b/src/cmd/cgo/internal/testcshared/cshared_test.go index f1c30f8f9a2b2a..2ce705adba44f3 100644 --- a/src/cmd/cgo/internal/testcshared/cshared_test.go +++ b/src/cmd/cgo/internal/testcshared/cshared_test.go @@ -8,6 +8,7 @@ import ( "bufio" "bytes" "cmd/cgo/internal/cgotest" + "cmp" "debug/elf" "debug/pe" "encoding/binary" @@ -272,7 +273,7 @@ func createHeaders() error { // which results in the linkers output implib getting overwritten at each step. So instead build the // import library the traditional way, using a def file. err = os.WriteFile("libgo.def", - []byte("LIBRARY libgo.dll\nEXPORTS\n\tDidInitRun\n\tDidMainRun\n\tDivu\n\tFromPkg\n\t_cgo_dummy_export\n"), + []byte("LIBRARY libgo.dll\nEXPORTS\n\tDidInitRun\n\tDidMainRun\n\tDivu\n\tFromPkg\n"), 0644) if err != nil { return fmt.Errorf("unable to write def file: %v", err) @@ -375,9 +376,23 @@ func TestExportedSymbols(t *testing.T) { } } -func checkNumberOfExportedFunctionsWindows(t *testing.T, prog string, exportedFunctions int, wantAll bool) { +func checkNumberOfExportedSymbolsWindows(t *testing.T, exportedSymbols int, wantAll bool) { + t.Parallel() tmpdir := t.TempDir() + prog := ` +package main +import "C" +func main() {} +` + + for i := range exportedSymbols { + prog += fmt.Sprintf(` +//export GoFunc%d +func GoFunc%d() {} +`, i, i) + } + srcfile := filepath.Join(tmpdir, "test.go") objfile := filepath.Join(tmpdir, "test.dll") if err := os.WriteFile(srcfile, []byte(prog), 0666); err != nil { @@ -443,18 +458,19 @@ func checkNumberOfExportedFunctionsWindows(t *testing.T, prog string, exportedFu t.Fatalf("binary.Read failed: %v", err) } - // Only the two exported functions and _cgo_dummy_export should be exported. + exportedSymbols = cmp.Or(exportedSymbols, 1) // _cgo_stub_export is exported if there are no other symbols exported + // NumberOfNames is the number of functions exported with a unique name. // NumberOfFunctions can be higher than that because it also counts // functions exported only by ordinal, a unique number asigned by the linker, // and linkers might add an unknown number of their own ordinal-only functions. 
if wantAll { - if e.NumberOfNames <= uint32(exportedFunctions) { - t.Errorf("got %d exported names, want > %d", e.NumberOfNames, exportedFunctions) + if e.NumberOfNames <= uint32(exportedSymbols) { + t.Errorf("got %d exported names, want > %d", e.NumberOfNames, exportedSymbols) } } else { - if e.NumberOfNames > uint32(exportedFunctions) { - t.Errorf("got %d exported names, want <= %d", e.NumberOfNames, exportedFunctions) + if e.NumberOfNames != uint32(exportedSymbols) { + t.Errorf("got %d exported names, want %d", e.NumberOfNames, exportedSymbols) } } } @@ -470,43 +486,14 @@ func TestNumberOfExportedFunctions(t *testing.T) { t.Parallel() - const prog0 = ` -package main - -import "C" - -func main() { -} -` - - const prog2 = ` -package main - -import "C" - -//export GoFunc -func GoFunc() { - println(42) -} - -//export GoFunc2 -func GoFunc2() { - println(24) -} - -func main() { -} -` - // All programs export _cgo_dummy_export, so add 1 to the expected counts. - t.Run("OnlyExported/0", func(t *testing.T) { - checkNumberOfExportedFunctionsWindows(t, prog0, 0+1, false) - }) - t.Run("OnlyExported/2", func(t *testing.T) { - checkNumberOfExportedFunctionsWindows(t, prog2, 2+1, false) - }) - t.Run("All", func(t *testing.T) { - checkNumberOfExportedFunctionsWindows(t, prog2, 2+1, true) - }) + for i := range 3 { + t.Run(fmt.Sprintf("OnlyExported/%d", i), func(t *testing.T) { + checkNumberOfExportedSymbolsWindows(t, i, false) + }) + t.Run(fmt.Sprintf("All/%d", i), func(t *testing.T) { + checkNumberOfExportedSymbolsWindows(t, i, true) + }) + } } // test1: shared library can be dynamically loaded and exported symbols are accessible. diff --git a/src/cmd/cgo/out.go b/src/cmd/cgo/out.go index dfa54e41d33399..622d35ac7b3bab 100644 --- a/src/cmd/cgo/out.go +++ b/src/cmd/cgo/out.go @@ -1005,12 +1005,8 @@ func (p *Package) writeExports(fgo2, fm, fgcc, fgcch io.Writer) { } // Build the wrapper function compiled by gcc. - gccExport := "" - if goos == "windows" { - gccExport = "__declspec(dllexport) " - } var s strings.Builder - fmt.Fprintf(&s, "%s%s %s(", gccExport, gccResult, exp.ExpName) + fmt.Fprintf(&s, "%s %s(", gccResult, exp.ExpName) if fn.Recv != nil { s.WriteString(p.cgoType(fn.Recv.List[0].Type).C.String()) s.WriteString(" recv") diff --git a/src/cmd/link/internal/ld/lib.go b/src/cmd/link/internal/ld/lib.go index c7596d535e0ee5..79d3d37835e9ad 100644 --- a/src/cmd/link/internal/ld/lib.go +++ b/src/cmd/link/internal/ld/lib.go @@ -1772,7 +1772,8 @@ func (ctxt *Link) hostlink() { } // Force global symbols to be exported for dlopen, etc. - if ctxt.IsELF { + switch { + case ctxt.IsELF: if ctxt.DynlinkingGo() || ctxt.BuildMode == BuildModeCShared || !linkerFlagSupported(ctxt.Arch, argv[0], altLinker, "-Wl,--export-dynamic-symbol=main") { argv = append(argv, "-rdynamic") } else { @@ -1783,10 +1784,12 @@ func (ctxt *Link) hostlink() { sort.Strings(exports) argv = append(argv, exports...) 
} - } - if ctxt.HeadType == objabi.Haix { + case ctxt.IsAIX(): fileName := xcoffCreateExportFile(ctxt) argv = append(argv, "-Wl,-bE:"+fileName) + case ctxt.IsWindows() && !slices.Contains(flagExtldflags, "-Wl,--export-all-symbols"): + fileName := peCreateExportFile(ctxt, filepath.Base(outopt)) + argv = append(argv, fileName) } const unusedArguments = "-Qunused-arguments" diff --git a/src/cmd/link/internal/ld/pe.go b/src/cmd/link/internal/ld/pe.go index 5219a98dd47cf4..0f0650e5e149e3 100644 --- a/src/cmd/link/internal/ld/pe.go +++ b/src/cmd/link/internal/ld/pe.go @@ -8,6 +8,7 @@ package ld import ( + "bytes" "cmd/internal/objabi" "cmd/internal/sys" "cmd/link/internal/loader" @@ -17,6 +18,8 @@ import ( "fmt" "internal/buildcfg" "math" + "os" + "path/filepath" "slices" "sort" "strconv" @@ -1748,3 +1751,44 @@ func asmbPe(ctxt *Link) { pewrite(ctxt) } + +// peCreateExportFile creates a file with exported symbols for Windows .def files. +// ld will export all symbols, even those not marked for export, unless a .def file is provided. +func peCreateExportFile(ctxt *Link, libName string) (fname string) { + fname = filepath.Join(*flagTmpdir, "export_file.def") + var buf bytes.Buffer + + fmt.Fprintf(&buf, "LIBRARY %s\n", libName) + buf.WriteString("EXPORTS\n") + + ldr := ctxt.loader + var exports []string + for s := range ldr.ForAllCgoExportStatic() { + extname := ldr.SymExtname(s) + if !strings.HasPrefix(extname, "_cgoexp_") { + continue + } + if ldr.IsFileLocal(s) { + continue // Only export non-static symbols + } + // Retrieve the name of the initial symbol + // exported by cgo. + // The corresponding Go symbol is: + // _cgoexp_hashcode_symname. + name := strings.SplitN(extname, "_", 4)[3] + exports = append(exports, name) + } + if len(exports) == 0 { + // See runtime/cgo/windows.go for details. + exports = append(exports, "_cgo_stub_export") + } + sort.Strings(exports) + buf.WriteString(strings.Join(exports, "\n")) + + err := os.WriteFile(fname, buf.Bytes(), 0666) + if err != nil { + Errorf("WriteFile %s failed: %v", fname, err) + } + + return fname +} diff --git a/src/cmd/link/internal/ld/xcoff.go b/src/cmd/link/internal/ld/xcoff.go index 1bce2cf9b6124d..da728e25455618 100644 --- a/src/cmd/link/internal/ld/xcoff.go +++ b/src/cmd/link/internal/ld/xcoff.go @@ -1779,10 +1779,7 @@ func xcoffCreateExportFile(ctxt *Link) (fname string) { var buf bytes.Buffer ldr := ctxt.loader - for s, nsym := loader.Sym(1), loader.Sym(ldr.NSym()); s < nsym; s++ { - if !ldr.AttrCgoExport(s) { - continue - } + for s := range ldr.ForAllCgoExportStatic() { extname := ldr.SymExtname(s) if !strings.HasPrefix(extname, "._cgoexp_") { continue diff --git a/src/cmd/link/internal/loader/loader.go b/src/cmd/link/internal/loader/loader.go index 9f3ea3e553dd7c..103dad03001263 100644 --- a/src/cmd/link/internal/loader/loader.go +++ b/src/cmd/link/internal/loader/loader.go @@ -16,6 +16,7 @@ import ( "fmt" "internal/abi" "io" + "iter" "log" "math/bits" "os" @@ -1109,6 +1110,18 @@ func (l *Loader) SetAttrCgoExportStatic(i Sym, v bool) { } } +// ForAllCgoExportStatic returns an iterator over all symbols +// marked with the "cgo_export_static" compiler directive. +func (l *Loader) ForAllCgoExportStatic() iter.Seq[Sym] { + return func(yield func(Sym) bool) { + for s := range l.attrCgoExportStatic { + if !yield(s) { + break + } + } + } +} + // IsGeneratedSym returns true if a symbol's been previously marked as a // generator symbol through the SetIsGeneratedSym. The functions for generator // symbols are kept in the Link context. 
diff --git a/src/runtime/cgo/gcc_libinit_windows.c b/src/runtime/cgo/gcc_libinit_windows.c index 926f9168434638..7e7ff3e667266f 100644 --- a/src/runtime/cgo/gcc_libinit_windows.c +++ b/src/runtime/cgo/gcc_libinit_windows.c @@ -15,15 +15,6 @@ #include "libcgo.h" #include "libcgo_windows.h" -// Ensure there's one symbol marked __declspec(dllexport). -// If there are no exported symbols, the unfortunate behavior of -// the binutils linker is to also strip the relocations table, -// resulting in non-PIE binary. The other option is the -// --export-all-symbols flag, but we don't need to export all symbols -// and this may overflow the export table (#40795). -// See https://sourceware.org/bugzilla/show_bug.cgi?id=19011 -__declspec(dllexport) int _cgo_dummy_export; - static volatile LONG runtime_init_once_gate = 0; static volatile LONG runtime_init_once_done = 0; diff --git a/src/runtime/cgo/windows.go b/src/runtime/cgo/windows.go new file mode 100644 index 00000000000000..7ba61753dffda2 --- /dev/null +++ b/src/runtime/cgo/windows.go @@ -0,0 +1,22 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build windows + +package cgo + +import _ "unsafe" // for go:linkname + +// _cgo_stub_export is only used to ensure there's at least one symbol +// in the .def file passed to the external linker. +// If there are no exported symbols, the unfortunate behavior of +// the binutils linker is to also strip the relocations table, +// resulting in non-PIE binary. The other option is the +// --export-all-symbols flag, but we don't need to export all symbols +// and this may overflow the export table (#40795). +// See https://sourceware.org/bugzilla/show_bug.cgi?id=19011 +// +//go:cgo_export_static _cgo_stub_export +//go:linkname _cgo_stub_export _cgo_stub_export +var _cgo_stub_export uintptr From 690fc2fb05e720850a474c72bf3a8a9a6638cef7 Mon Sep 17 00:00:00 2001 From: qmuntal Date: Tue, 16 Sep 2025 10:25:58 +0200 Subject: [PATCH 010/152] internal/poll: remove buf field from operation WSASend and WSARecv functions capture the WSABuf structure before returning, so there is no need to keep a copy of it in the operation structure. Write and Read functions don't need it, they can operate directly on the byte slice. To be on the safe side, pin the input byte slice so that stack-allocated slices don't get moved while overlapped I/O is in progress. 
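The pinning pattern this patch adds around overlapped I/O, as a standalone sketch; submit here is a stand-in for an asynchronous call such as WSASend that may keep using the buffer's address after it returns.

	package main

	import (
		"fmt"
		"runtime"
		"unsafe"
	)

	// submit hands the buffer's address to some asynchronous operation.
	func submit(buf []byte) unsafe.Pointer {
		return unsafe.Pointer(unsafe.SliceData(buf))
	}

	func main() {
		buf := make([]byte, 8) // may end up stack-allocated in real callers

		var p runtime.Pinner
		p.Pin(&buf[0]) // keep the backing array from moving while I/O is in flight
		addr := submit(buf)
		// ... the overlapped operation completes here ...
		p.Unpin()

		fmt.Printf("buffer stayed at %p for the duration of the I/O\n", addr)
	}
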
Cq-Include-Trybots: luci.golang.try:gotip-windows-amd64-longtest,gotip-windows-amd64-race Change-Id: I474bed94e11acafa0bdd8392b5dcf8993d8e1ed5 Reviewed-on: https://go-review.googlesource.com/c/go/+/704155 Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go LUCI Reviewed-by: Damien Neil --- src/internal/poll/fd_windows.go | 170 ++++++++++++++++++-------------- src/net/tcpsock_test.go | 7 +- 2 files changed, 100 insertions(+), 77 deletions(-) diff --git a/src/internal/poll/fd_windows.go b/src/internal/poll/fd_windows.go index dd9845d1b223c3..9323e49eb745d7 100644 --- a/src/internal/poll/fd_windows.go +++ b/src/internal/poll/fd_windows.go @@ -9,6 +9,7 @@ import ( "internal/race" "internal/syscall/windows" "io" + "runtime" "sync" "sync/atomic" "syscall" @@ -75,9 +76,6 @@ type operation struct { // fields used by runtime.netpoll runtimeCtx uintptr mode int32 - - // fields used only by net package - buf syscall.WSABuf } func (o *operation) setEvent() { @@ -107,9 +105,8 @@ func (fd *FD) overlapped(o *operation) *syscall.Overlapped { return &o.o } -func (o *operation) InitBuf(buf []byte) { - o.buf.Len = uint32(len(buf)) - o.buf.Buf = unsafe.SliceData(buf) +func newWsaBuf(b []byte) *syscall.WSABuf { + return &syscall.WSABuf{Buf: unsafe.SliceData(b), Len: uint32(len(b))} } var wsaBufsPool = sync.Pool{ @@ -362,6 +359,9 @@ type FD struct { isBlocking bool disassociated atomic.Bool + + readPinner runtime.Pinner + writePinner runtime.Pinner } // setOffset sets the offset fields of the overlapped object @@ -537,6 +537,11 @@ func (fd *FD) Read(buf []byte) (int, error) { defer fd.readUnlock() } + if len(buf) > 0 && !fd.isBlocking { + fd.readPinner.Pin(&buf[0]) + defer fd.readPinner.Unpin() + } + if len(buf) > maxRW { buf = buf[:maxRW] } @@ -547,10 +552,8 @@ func (fd *FD) Read(buf []byte) (int, error) { case kindConsole: n, err = fd.readConsole(buf) case kindFile, kindPipe: - o := &fd.rop - o.InitBuf(buf) - n, err = fd.execIO(o, func(o *operation) (qty uint32, err error) { - err = syscall.ReadFile(fd.Sysfd, unsafe.Slice(o.buf.Buf, o.buf.Len), &qty, fd.overlapped(o)) + n, err = fd.execIO(&fd.rop, func(o *operation) (qty uint32, err error) { + err = syscall.ReadFile(fd.Sysfd, buf, &qty, fd.overlapped(o)) return qty, err }) fd.addOffset(n) @@ -564,11 +567,9 @@ func (fd *FD) Read(buf []byte) (int, error) { } } case kindNet: - o := &fd.rop - o.InitBuf(buf) - n, err = fd.execIO(o, func(o *operation) (qty uint32, err error) { + n, err = fd.execIO(&fd.rop, func(o *operation) (qty uint32, err error) { var flags uint32 - err = syscall.WSARecv(fd.Sysfd, &o.buf, 1, &qty, &flags, &o.o, nil) + err = syscall.WSARecv(fd.Sysfd, newWsaBuf(buf), 1, &qty, &flags, &o.o, nil) return qty, err }) if race.Enabled { @@ -656,7 +657,7 @@ func (fd *FD) readConsole(b []byte) (int, error) { } // Pread emulates the Unix pread system call. 
-func (fd *FD) Pread(b []byte, off int64) (int, error) { +func (fd *FD) Pread(buf []byte, off int64) (int, error) { if fd.kind == kindPipe { // Pread does not work with pipes return 0, syscall.ESPIPE @@ -667,8 +668,13 @@ func (fd *FD) Pread(b []byte, off int64) (int, error) { } defer fd.readWriteUnlock() - if len(b) > maxRW { - b = b[:maxRW] + if len(buf) > 0 && !fd.isBlocking { + fd.readPinner.Pin(&buf[0]) + defer fd.readPinner.Unpin() + } + + if len(buf) > maxRW { + buf = buf[:maxRW] } if fd.isBlocking { @@ -687,17 +693,15 @@ func (fd *FD) Pread(b []byte, off int64) (int, error) { curoffset := fd.offset defer fd.setOffset(curoffset) } - o := &fd.rop - o.InitBuf(b) fd.setOffset(off) - n, err := fd.execIO(o, func(o *operation) (qty uint32, err error) { - err = syscall.ReadFile(fd.Sysfd, unsafe.Slice(o.buf.Buf, o.buf.Len), &qty, &o.o) + n, err := fd.execIO(&fd.rop, func(o *operation) (qty uint32, err error) { + err = syscall.ReadFile(fd.Sysfd, buf, &qty, &o.o) return qty, err }) if err == syscall.ERROR_HANDLE_EOF { err = io.EOF } - if len(b) != 0 { + if len(buf) != 0 { err = fd.eofError(n, err) } return n, err @@ -715,14 +719,18 @@ func (fd *FD) ReadFrom(buf []byte) (int, syscall.Sockaddr, error) { return 0, nil, err } defer fd.readUnlock() - o := &fd.rop - o.InitBuf(buf) + + if !fd.isBlocking { + fd.readPinner.Pin(&buf[0]) + defer fd.readPinner.Unpin() + } + rsa := wsaRsaPool.Get().(*syscall.RawSockaddrAny) defer wsaRsaPool.Put(rsa) - n, err := fd.execIO(o, func(o *operation) (qty uint32, err error) { + n, err := fd.execIO(&fd.rop, func(o *operation) (qty uint32, err error) { rsan := int32(unsafe.Sizeof(*rsa)) var flags uint32 - err = syscall.WSARecvFrom(fd.Sysfd, &o.buf, 1, &qty, &flags, rsa, &rsan, &o.o, nil) + err = syscall.WSARecvFrom(fd.Sysfd, newWsaBuf(buf), 1, &qty, &flags, rsa, &rsan, &o.o, nil) return qty, err }) err = fd.eofError(n, err) @@ -745,14 +753,18 @@ func (fd *FD) ReadFromInet4(buf []byte, sa4 *syscall.SockaddrInet4) (int, error) return 0, err } defer fd.readUnlock() - o := &fd.rop - o.InitBuf(buf) + + if !fd.isBlocking { + fd.readPinner.Pin(&buf[0]) + defer fd.readPinner.Unpin() + } + rsa := wsaRsaPool.Get().(*syscall.RawSockaddrAny) defer wsaRsaPool.Put(rsa) - n, err := fd.execIO(o, func(o *operation) (qty uint32, err error) { + n, err := fd.execIO(&fd.rop, func(o *operation) (qty uint32, err error) { rsan := int32(unsafe.Sizeof(*rsa)) var flags uint32 - err = syscall.WSARecvFrom(fd.Sysfd, &o.buf, 1, &qty, &flags, rsa, &rsan, &o.o, nil) + err = syscall.WSARecvFrom(fd.Sysfd, newWsaBuf(buf), 1, &qty, &flags, rsa, &rsan, &o.o, nil) return qty, err }) err = fd.eofError(n, err) @@ -775,14 +787,18 @@ func (fd *FD) ReadFromInet6(buf []byte, sa6 *syscall.SockaddrInet6) (int, error) return 0, err } defer fd.readUnlock() - o := &fd.rop - o.InitBuf(buf) + + if !fd.isBlocking { + fd.readPinner.Pin(&buf[0]) + defer fd.readPinner.Unpin() + } + rsa := wsaRsaPool.Get().(*syscall.RawSockaddrAny) defer wsaRsaPool.Put(rsa) - n, err := fd.execIO(o, func(o *operation) (qty uint32, err error) { + n, err := fd.execIO(&fd.rop, func(o *operation) (qty uint32, err error) { rsan := int32(unsafe.Sizeof(*rsa)) var flags uint32 - err = syscall.WSARecvFrom(fd.Sysfd, &o.buf, 1, &qty, &flags, rsa, &rsan, &o.o, nil) + err = syscall.WSARecvFrom(fd.Sysfd, newWsaBuf(buf), 1, &qty, &flags, rsa, &rsan, &o.o, nil) return qty, err }) err = fd.eofError(n, err) @@ -807,6 +823,11 @@ func (fd *FD) Write(buf []byte) (int, error) { defer fd.writeUnlock() } + if len(buf) > 0 && !fd.isBlocking { + 
fd.writePinner.Pin(&buf[0]) + defer fd.writePinner.Unpin() + } + var ntotal int for { max := len(buf) @@ -820,10 +841,8 @@ func (fd *FD) Write(buf []byte) (int, error) { case kindConsole: n, err = fd.writeConsole(b) case kindPipe, kindFile: - o := &fd.wop - o.InitBuf(b) - n, err = fd.execIO(o, func(o *operation) (qty uint32, err error) { - err = syscall.WriteFile(fd.Sysfd, unsafe.Slice(o.buf.Buf, o.buf.Len), &qty, fd.overlapped(o)) + n, err = fd.execIO(&fd.wop, func(o *operation) (qty uint32, err error) { + err = syscall.WriteFile(fd.Sysfd, b, &qty, fd.overlapped(o)) return qty, err }) fd.addOffset(n) @@ -831,10 +850,8 @@ func (fd *FD) Write(buf []byte) (int, error) { if race.Enabled { race.ReleaseMerge(unsafe.Pointer(&ioSync)) } - o := &fd.wop - o.InitBuf(b) - n, err = fd.execIO(o, func(o *operation) (qty uint32, err error) { - err = syscall.WSASend(fd.Sysfd, &o.buf, 1, &qty, 0, &o.o, nil) + n, err = fd.execIO(&fd.wop, func(o *operation) (qty uint32, err error) { + err = syscall.WSASend(fd.Sysfd, newWsaBuf(b), 1, &qty, 0, &o.o, nil) return qty, err }) } @@ -903,6 +920,11 @@ func (fd *FD) Pwrite(buf []byte, off int64) (int, error) { } defer fd.readWriteUnlock() + if len(buf) > 0 && !fd.isBlocking { + fd.writePinner.Pin(&buf[0]) + defer fd.writePinner.Unpin() + } + if fd.isBlocking { curoffset, err := syscall.Seek(fd.Sysfd, 0, io.SeekCurrent) if err != nil { @@ -926,12 +948,9 @@ func (fd *FD) Pwrite(buf []byte, off int64) (int, error) { if max-ntotal > maxRW { max = ntotal + maxRW } - b := buf[ntotal:max] - o := &fd.wop - o.InitBuf(b) fd.setOffset(off + int64(ntotal)) - n, err := fd.execIO(o, func(o *operation) (qty uint32, err error) { - err = syscall.WriteFile(fd.Sysfd, unsafe.Slice(o.buf.Buf, o.buf.Len), &qty, &o.o) + n, err := fd.execIO(&fd.wop, func(o *operation) (qty uint32, err error) { + err = syscall.WriteFile(fd.Sysfd, buf[ntotal:max], &qty, &o.o) return qty, err }) if n > 0 { @@ -978,25 +997,26 @@ func (fd *FD) WriteTo(buf []byte, sa syscall.Sockaddr) (int, error) { if len(buf) == 0 { // handle zero-byte payload - o := &fd.wop - o.InitBuf(buf) - n, err := fd.execIO(o, func(o *operation) (qty uint32, err error) { - err = syscall.WSASendto(fd.Sysfd, &o.buf, 1, &qty, 0, sa, &o.o, nil) + n, err := fd.execIO(&fd.wop, func(o *operation) (qty uint32, err error) { + err = syscall.WSASendto(fd.Sysfd, &syscall.WSABuf{}, 1, &qty, 0, sa, &o.o, nil) return qty, err }) return n, err } + if !fd.isBlocking { + fd.writePinner.Pin(&buf[0]) + defer fd.writePinner.Unpin() + } + ntotal := 0 for len(buf) > 0 { b := buf if len(b) > maxRW { b = b[:maxRW] } - o := &fd.wop - o.InitBuf(b) - n, err := fd.execIO(o, func(o *operation) (qty uint32, err error) { - err = syscall.WSASendto(fd.Sysfd, &o.buf, 1, &qty, 0, sa, &o.o, nil) + n, err := fd.execIO(&fd.wop, func(o *operation) (qty uint32, err error) { + err = syscall.WSASendto(fd.Sysfd, newWsaBuf(b), 1, &qty, 0, sa, &o.o, nil) return qty, err }) ntotal += int(n) @@ -1017,25 +1037,26 @@ func (fd *FD) WriteToInet4(buf []byte, sa4 *syscall.SockaddrInet4) (int, error) if len(buf) == 0 { // handle zero-byte payload - o := &fd.wop - o.InitBuf(buf) - n, err := fd.execIO(o, func(o *operation) (qty uint32, err error) { - err = windows.WSASendtoInet4(fd.Sysfd, &o.buf, 1, &qty, 0, sa4, &o.o, nil) + n, err := fd.execIO(&fd.wop, func(o *operation) (qty uint32, err error) { + err = windows.WSASendtoInet4(fd.Sysfd, &syscall.WSABuf{}, 1, &qty, 0, sa4, &o.o, nil) return qty, err }) return n, err } + if !fd.isBlocking { + fd.writePinner.Pin(&buf[0]) + defer 
fd.writePinner.Unpin() + } + ntotal := 0 for len(buf) > 0 { b := buf if len(b) > maxRW { b = b[:maxRW] } - o := &fd.wop - o.InitBuf(b) - n, err := fd.execIO(o, func(o *operation) (qty uint32, err error) { - err = windows.WSASendtoInet4(fd.Sysfd, &o.buf, 1, &qty, 0, sa4, &o.o, nil) + n, err := fd.execIO(&fd.wop, func(o *operation) (qty uint32, err error) { + err = windows.WSASendtoInet4(fd.Sysfd, newWsaBuf(b), 1, &qty, 0, sa4, &o.o, nil) return qty, err }) ntotal += int(n) @@ -1056,25 +1077,26 @@ func (fd *FD) WriteToInet6(buf []byte, sa6 *syscall.SockaddrInet6) (int, error) if len(buf) == 0 { // handle zero-byte payload - o := &fd.wop - o.InitBuf(buf) - n, err := fd.execIO(o, func(o *operation) (qty uint32, err error) { - err = windows.WSASendtoInet6(fd.Sysfd, &o.buf, 1, &qty, 0, sa6, &o.o, nil) + n, err := fd.execIO(&fd.wop, func(o *operation) (qty uint32, err error) { + err = windows.WSASendtoInet6(fd.Sysfd, &syscall.WSABuf{}, 1, &qty, 0, sa6, &o.o, nil) return qty, err }) return n, err } + if !fd.isBlocking { + fd.writePinner.Pin(&buf[0]) + defer fd.writePinner.Unpin() + } + ntotal := 0 for len(buf) > 0 { b := buf if len(b) > maxRW { b = b[:maxRW] } - o := &fd.wop - o.InitBuf(b) - n, err := fd.execIO(o, func(o *operation) (qty uint32, err error) { - err = windows.WSASendtoInet6(fd.Sysfd, &o.buf, 1, &qty, 0, sa6, &o.o, nil) + n, err := fd.execIO(&fd.wop, func(o *operation) (qty uint32, err error) { + err = windows.WSASendtoInet6(fd.Sysfd, newWsaBuf(b), 1, &qty, 0, sa6, &o.o, nil) return qty, err }) ntotal += int(n) @@ -1264,14 +1286,12 @@ func (fd *FD) RawRead(f func(uintptr) bool) error { // Use a zero-byte read as a way to get notified when this // socket is readable. h/t https://stackoverflow.com/a/42019668/332798 - o := &fd.rop - o.InitBuf(nil) - _, err := fd.execIO(o, func(o *operation) (qty uint32, err error) { + _, err := fd.execIO(&fd.rop, func(o *operation) (qty uint32, err error) { var flags uint32 if !fd.IsStream { flags |= windows.MSG_PEEK } - err = syscall.WSARecv(fd.Sysfd, &o.buf, 1, &qty, &flags, &o.o, nil) + err = syscall.WSARecv(fd.Sysfd, &syscall.WSABuf{}, 1, &qty, &flags, &o.o, nil) return qty, err }) if err == windows.WSAEMSGSIZE { diff --git a/src/net/tcpsock_test.go b/src/net/tcpsock_test.go index 9ed49a925b4b39..085989c7499b60 100644 --- a/src/net/tcpsock_test.go +++ b/src/net/tcpsock_test.go @@ -475,6 +475,9 @@ func TestTCPReadWriteAllocs(t *testing.T) { t.Skipf("not supported on %s", runtime.GOOS) } + // Optimizations are required to remove the allocs. 
+ testenv.SkipIfOptimizationOff(t) + ln, err := Listen("tcp", "127.0.0.1:0") if err != nil { t.Fatal(err) @@ -509,7 +512,7 @@ func TestTCPReadWriteAllocs(t *testing.T) { } }) if allocs > 0 { - t.Fatalf("got %v; want 0", allocs) + t.Errorf("got %v; want 0", allocs) } var bufwrt [128]byte @@ -531,7 +534,7 @@ func TestTCPReadWriteAllocs(t *testing.T) { } }) if allocs > 0 { - t.Fatalf("got %v; want 0", allocs) + t.Errorf("got %v; want 0", allocs) } } From db3cb3fd9a09355a2f239dcb28c480b18bfa7f5e Mon Sep 17 00:00:00 2001 From: Ian Lance Taylor Date: Sun, 28 Sep 2025 21:22:09 -0700 Subject: [PATCH 011/152] runtime: correct reference to getStackMap in comment Change-Id: I9b1fa390434dbda7d49a36b0114c68f942c11d3f Reviewed-on: https://go-review.googlesource.com/c/go/+/707575 Auto-Submit: Ian Lance Taylor Reviewed-by: Michael Knyszek Reviewed-by: Carlos Amedee LUCI-TryBot-Result: Go LUCI --- src/runtime/traceback.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/runtime/traceback.go b/src/runtime/traceback.go index 949d48c79a6df5..53f41bca0b11ff 100644 --- a/src/runtime/traceback.go +++ b/src/runtime/traceback.go @@ -429,7 +429,7 @@ func (u *unwinder) resolveInternal(innermost, isSyscall bool) { // gp._defer for a defer corresponding to this function, but that // is hard to do with defer records on the stack during a stack copy.) // Note: the +1 is to offset the -1 that - // stack.go:getStackMap does to back up a return + // (*stkframe).getStackMap does to back up a return // address make sure the pc is in the CALL instruction. } else { frame.continpc = 0 From db4fade7599d49dc85a7ef670499be0ccd62c58e Mon Sep 17 00:00:00 2001 From: Filippo Valsorda Date: Mon, 22 Sep 2025 14:05:23 +0200 Subject: [PATCH 012/152] crypto/internal/fips140/mlkem: make CAST conditional MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It taks north of 130µs on my machine, which is enough to be worth shaving off at init time. 
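The init-to-lazy move in miniature (a sketch, not the crypto code): sync.OnceFunc returns a function that runs its body exactly once, the first time any entry point calls it, so the cost disappears from program start-up for binaries that never touch the algorithm.

	package main

	import (
		"fmt"
		"sync"
	)

	// selfTest stands in for the ML-KEM CAST: expensive, required once, and
	// only if the algorithm is actually exercised.
	var selfTest = sync.OnceFunc(func() {
		fmt.Println("self-test runs now")
	})

	func generateKey() {
		selfTest() // every public entry point triggers the lazy check
		fmt.Println("key generated")
	}

	func main() {
		generateKey()
		generateKey() // the self-test does not run a second time
	}
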
Change-Id: I6a6a696463de228bc3e7b9ca10c12ddbaabdf384 Reviewed-on: https://go-review.googlesource.com/c/go/+/707695 Auto-Submit: Filippo Valsorda Reviewed-by: Daniel McCarney Reviewed-by: Roland Shoemaker LUCI-TryBot-Result: Go LUCI Reviewed-by: Carlos Amedee --- src/crypto/internal/fips140/mlkem/cast.go | 13 ++++++------ .../internal/fips140/mlkem/mlkem1024.go | 5 +++++ src/crypto/internal/fips140/mlkem/mlkem768.go | 5 +++++ src/crypto/internal/fips140test/cast_test.go | 21 ++++++++++++++++--- 4 files changed, 34 insertions(+), 10 deletions(-) diff --git a/src/crypto/internal/fips140/mlkem/cast.go b/src/crypto/internal/fips140/mlkem/cast.go index a432d1fdab0e2b..ea089c1b76c0c0 100644 --- a/src/crypto/internal/fips140/mlkem/cast.go +++ b/src/crypto/internal/fips140/mlkem/cast.go @@ -9,9 +9,10 @@ import ( "crypto/internal/fips140" _ "crypto/internal/fips140/check" "errors" + "sync" ) -func init() { +var fipsSelfTest = sync.OnceFunc(func() { fips140.CAST("ML-KEM-768", func() error { var d = &[32]byte{ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, @@ -40,14 +41,12 @@ func init() { dk := &DecapsulationKey768{} kemKeyGen(dk, d, z) ek := dk.EncapsulationKey() - Ke, c := ek.EncapsulateInternal(m) - Kd, err := dk.Decapsulate(c) - if err != nil { - return err - } + var cc [CiphertextSize768]byte + Ke, _ := kemEncaps(&cc, ek, m) + Kd := kemDecaps(dk, &cc) if !bytes.Equal(Ke, K) || !bytes.Equal(Kd, K) { return errors.New("unexpected result") } return nil }) -} +}) diff --git a/src/crypto/internal/fips140/mlkem/mlkem1024.go b/src/crypto/internal/fips140/mlkem/mlkem1024.go index 1419cf20fa9c67..edde161422cb6f 100644 --- a/src/crypto/internal/fips140/mlkem/mlkem1024.go +++ b/src/crypto/internal/fips140/mlkem/mlkem1024.go @@ -113,6 +113,7 @@ func GenerateKey1024() (*DecapsulationKey1024, error) { } func generateKey1024(dk *DecapsulationKey1024) (*DecapsulationKey1024, error) { + fipsSelfTest() var d [32]byte drbg.Read(d[:]) var z [32]byte @@ -126,6 +127,7 @@ func generateKey1024(dk *DecapsulationKey1024) (*DecapsulationKey1024, error) { // GenerateKeyInternal1024 is a derandomized version of GenerateKey1024, // exclusively for use in tests. func GenerateKeyInternal1024(d, z *[32]byte) *DecapsulationKey1024 { + fipsSelfTest() dk := &DecapsulationKey1024{} kemKeyGen1024(dk, d, z) return dk @@ -278,6 +280,7 @@ func (ek *EncapsulationKey1024) Encapsulate() (sharedKey, ciphertext []byte) { } func (ek *EncapsulationKey1024) encapsulate(cc *[CiphertextSize1024]byte) (sharedKey, ciphertext []byte) { + fipsSelfTest() var m [messageSize]byte drbg.Read(m[:]) // Note that the modulus check (step 2 of the encapsulation key check from @@ -289,6 +292,7 @@ func (ek *EncapsulationKey1024) encapsulate(cc *[CiphertextSize1024]byte) (share // EncapsulateInternal is a derandomized version of Encapsulate, exclusively for // use in tests. func (ek *EncapsulationKey1024) EncapsulateInternal(m *[32]byte) (sharedKey, ciphertext []byte) { + fipsSelfTest() cc := &[CiphertextSize1024]byte{} return kemEncaps1024(cc, ek, m) } @@ -394,6 +398,7 @@ func pkeEncrypt1024(cc *[CiphertextSize1024]byte, ex *encryptionKey1024, m *[mes // // The shared key must be kept secret. 
func (dk *DecapsulationKey1024) Decapsulate(ciphertext []byte) (sharedKey []byte, err error) { + fipsSelfTest() if len(ciphertext) != CiphertextSize1024 { return nil, errors.New("mlkem: invalid ciphertext length") } diff --git a/src/crypto/internal/fips140/mlkem/mlkem768.go b/src/crypto/internal/fips140/mlkem/mlkem768.go index 298660e4e977dd..088c2954de6a5c 100644 --- a/src/crypto/internal/fips140/mlkem/mlkem768.go +++ b/src/crypto/internal/fips140/mlkem/mlkem768.go @@ -172,6 +172,7 @@ func GenerateKey768() (*DecapsulationKey768, error) { } func generateKey(dk *DecapsulationKey768) (*DecapsulationKey768, error) { + fipsSelfTest() var d [32]byte drbg.Read(d[:]) var z [32]byte @@ -185,6 +186,7 @@ func generateKey(dk *DecapsulationKey768) (*DecapsulationKey768, error) { // GenerateKeyInternal768 is a derandomized version of GenerateKey768, // exclusively for use in tests. func GenerateKeyInternal768(d, z *[32]byte) *DecapsulationKey768 { + fipsSelfTest() dk := &DecapsulationKey768{} kemKeyGen(dk, d, z) return dk @@ -337,6 +339,7 @@ func (ek *EncapsulationKey768) Encapsulate() (sharedKey, ciphertext []byte) { } func (ek *EncapsulationKey768) encapsulate(cc *[CiphertextSize768]byte) (sharedKey, ciphertext []byte) { + fipsSelfTest() var m [messageSize]byte drbg.Read(m[:]) // Note that the modulus check (step 2 of the encapsulation key check from @@ -348,6 +351,7 @@ func (ek *EncapsulationKey768) encapsulate(cc *[CiphertextSize768]byte) (sharedK // EncapsulateInternal is a derandomized version of Encapsulate, exclusively for // use in tests. func (ek *EncapsulationKey768) EncapsulateInternal(m *[32]byte) (sharedKey, ciphertext []byte) { + fipsSelfTest() cc := &[CiphertextSize768]byte{} return kemEncaps(cc, ek, m) } @@ -453,6 +457,7 @@ func pkeEncrypt(cc *[CiphertextSize768]byte, ex *encryptionKey, m *[messageSize] // // The shared key must be kept secret. func (dk *DecapsulationKey768) Decapsulate(ciphertext []byte) (sharedKey []byte, err error) { + fipsSelfTest() if len(ciphertext) != CiphertextSize768 { return nil, errors.New("mlkem: invalid ciphertext length") } diff --git a/src/crypto/internal/fips140test/cast_test.go b/src/crypto/internal/fips140test/cast_test.go index b043a71f04effa..5bbc964b617b2b 100644 --- a/src/crypto/internal/fips140test/cast_test.go +++ b/src/crypto/internal/fips140test/cast_test.go @@ -48,8 +48,8 @@ var allCASTs = []string{ "HKDF-SHA2-256", "HMAC-SHA2-256", "KAS-ECC-SSC P-256", - "ML-KEM PCT", - "ML-KEM PCT", + "ML-KEM PCT", // -768 + "ML-KEM PCT", // -1024 "ML-KEM-768", "PBKDF2", "RSA sign and verify PCT", @@ -104,29 +104,44 @@ func TestAllCASTs(t *testing.T) { // TestConditionals causes the conditional CASTs and PCTs to be invoked. 
func TestConditionals(t *testing.T) { - mlkem.GenerateKey768() + // ML-KEM PCT + kMLKEM, err := mlkem.GenerateKey768() + if err != nil { + t.Error(err) + } else { + // ML-KEM-768 + kMLKEM.EncapsulationKey().Encapsulate() + } + // ECDH PCT kDH, err := ecdh.GenerateKey(ecdh.P256(), rand.Reader) if err != nil { t.Error(err) } else { + // KAS-ECC-SSC P-256 ecdh.ECDH(ecdh.P256(), kDH, kDH.PublicKey()) } + // ECDSA PCT kDSA, err := ecdsa.GenerateKey(ecdsa.P256(), rand.Reader) if err != nil { t.Error(err) } else { + // ECDSA P-256 SHA2-512 sign and verify ecdsa.SignDeterministic(ecdsa.P256(), sha256.New, kDSA, make([]byte, 32)) } + // Ed25519 sign and verify PCT k25519, err := ed25519.GenerateKey() if err != nil { t.Error(err) } else { + // Ed25519 sign and verify ed25519.Sign(k25519, make([]byte, 32)) } + // RSA sign and verify PCT kRSA, err := rsa.GenerateKey(rand.Reader, 2048) if err != nil { t.Error(err) } else { + // RSASSA-PKCS-v1.5 2048-bit sign and verify rsa.SignPKCS1v15(kRSA, crypto.SHA256.String(), make([]byte, 32)) } t.Log("completed successfully") From fc88e18b4a781a8751799a123cdac8b29a92409d Mon Sep 17 00:00:00 2001 From: Filippo Valsorda Date: Thu, 11 Sep 2025 21:04:05 +0200 Subject: [PATCH 013/152] crypto/internal/fips140/entropy: add CPU jitter-based entropy source Heavily inspired by the BoringSSL implementation. Change-Id: I6a6a6964b22826d54700c8b3d555054163cef5fe Co-authored-by: Daniel Morsing Cq-Include-Trybots: luci.golang.try:gotip-linux-s390x,gotip-linux-ppc64_power10,gotip-linux-ppc64le_power10,gotip-linux-ppc64le_power8,gotip-linux-arm,gotip-darwin-arm64_15,gotip-windows-arm64,gotip-freebsd-amd64 Reviewed-on: https://go-review.googlesource.com/c/go/+/703015 LUCI-TryBot-Result: Go LUCI Reviewed-by: Roland Shoemaker Reviewed-by: Daniel McCarney Auto-Submit: Filippo Valsorda Reviewed-by: Cherry Mui --- src/crypto/internal/entropy/entropy.go | 6 +- src/crypto/internal/fips140/drbg/rand.go | 61 +++- .../internal/fips140/entropy/entropy.go | 202 ++++++++++++++ src/crypto/internal/fips140/entropy/sha384.go | 191 +++++++++++++ src/crypto/internal/fips140/fips140.go | 2 + .../internal/fips140deps/fipsdeps_test.go | 4 +- src/crypto/internal/fips140deps/time/time.go | 21 ++ .../internal/fips140deps/time/time_windows.go | 17 ++ .../internal/fips140test/entropy_test.go | 264 ++++++++++++++++++ src/go/build/deps_test.go | 5 + 10 files changed, 758 insertions(+), 15 deletions(-) create mode 100644 src/crypto/internal/fips140/entropy/entropy.go create mode 100644 src/crypto/internal/fips140/entropy/sha384.go create mode 100644 src/crypto/internal/fips140deps/time/time.go create mode 100644 src/crypto/internal/fips140deps/time/time_windows.go create mode 100644 src/crypto/internal/fips140test/entropy_test.go diff --git a/src/crypto/internal/entropy/entropy.go b/src/crypto/internal/entropy/entropy.go index 5319e9e47a7455..73fd5298007a11 100644 --- a/src/crypto/internal/entropy/entropy.go +++ b/src/crypto/internal/entropy/entropy.go @@ -3,9 +3,11 @@ // license that can be found in the LICENSE file. // Package entropy provides the passive entropy source for the FIPS 140-3 -// module. It is only used in FIPS mode by [crypto/internal/fips140/drbg.Read]. +// module. It is only used in FIPS mode by [crypto/internal/fips140/drbg.Read] +// from the FIPS 140-3 Go Cryptographic Module v1.0.0. Later versions of the +// module have an internal CPU jitter-based entropy source. 
// -// This complies with IG 9.3.A, Additional Comment 12, which until January 1, +// This complied with IG 9.3.A, Additional Comment 12, which until January 1, // 2026 allows new modules to meet an [earlier version] of Resolution 2(b): // "A software module that contains an approved DRBG that receives a LOAD // command (or its logical equivalent) with entropy obtained from [...] inside diff --git a/src/crypto/internal/fips140/drbg/rand.go b/src/crypto/internal/fips140/drbg/rand.go index c1a3ea0ae658ff..3ccb018e326047 100644 --- a/src/crypto/internal/fips140/drbg/rand.go +++ b/src/crypto/internal/fips140/drbg/rand.go @@ -9,21 +9,53 @@ package drbg import ( - "crypto/internal/entropy" "crypto/internal/fips140" + "crypto/internal/fips140/entropy" "crypto/internal/randutil" "crypto/internal/sysrand" "io" "sync" + "sync/atomic" ) -var drbgs = sync.Pool{ +// memory is a scratch buffer that is accessed between samples by the entropy +// source to expose it to memory access timings. +// +// We reuse it and share it between Seed calls to avoid the significant (~500µs) +// cost of zeroing a new allocation every time. The entropy source accesses it +// using atomics (and doesn't care about its contents). +// +// It should end up in the .noptrbss section, and become backed by physical pages +// at first use. This ensures that programs that do not use the FIPS 140-3 module +// do not incur any memory use or initialization penalties. +var memory entropy.ScratchBuffer + +func getEntropy() *[SeedSize]byte { + var retries int + seed, err := entropy.Seed(&memory) + for err != nil { + // The CPU jitter-based SP 800-90B entropy source has a non-negligible + // chance of failing the startup health tests. + // + // Each time it does, it enters a permanent failure state, and we + // restart it anew. This is not expected to happen more than a few times + // in a row. + if retries++; retries > 100 { + panic("fips140/drbg: failed to obtain initial entropy") + } + seed, err = entropy.Seed(&memory) + } + return &seed +} + +// getEntropy is very slow (~500µs), so we don't want it on the hot path. +// We keep both a persistent DRBG instance and a pool of additional instances. +// Occasional uses will use drbgInstance, even if the pool was emptied since the +// last use. Frequent concurrent uses will fill the pool and use it. +var drbgInstance atomic.Pointer[Counter] +var drbgPool = sync.Pool{ New: func() any { - var c *Counter - entropy.Depleted(func(seed *[48]byte) { - c = NewCounter(seed) - }) - return c + return NewCounter(getEntropy()) }, } @@ -44,8 +76,15 @@ func Read(b []byte) { additionalInput := new([SeedSize]byte) sysrand.Read(additionalInput[:16]) - drbg := drbgs.Get().(*Counter) - defer drbgs.Put(drbg) + drbg := drbgInstance.Swap(nil) + if drbg == nil { + drbg = drbgPool.Get().(*Counter) + } + defer func() { + if !drbgInstance.CompareAndSwap(nil, drbg) { + drbgPool.Put(drbg) + } + }() for len(b) > 0 { size := min(len(b), maxRequestSize) @@ -54,9 +93,7 @@ func Read(b []byte) { // Section 9.3.2: if Generate reports a reseed is required, the // additional input is passed to Reseed along with the entropy and // then nulled before the next Generate call. 
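// The drbgInstance/drbgPool split above follows a general pattern: keep one
// instance in a persistent atomic slot so occasional callers never pay the
// construction cost (sync.Pool contents can be dropped at any GC), and let
// concurrent callers overflow into the pool. A self-contained sketch of that
// pattern, with illustrative names rather than this package's:
package hotpath

import (
	"sync"
	"sync/atomic"
)

// resource stands in for something expensive to construct, such as a freshly
// seeded DRBG instance.
type resource struct{ state [48]byte }

var (
	// instance is the persistent fast path; unlike pool contents it survives
	// garbage collection, so infrequent callers still avoid reconstruction.
	instance atomic.Pointer[resource]
	pool     = sync.Pool{New: func() any { return new(resource) }}
)

func withResource(f func(*resource)) {
	r := instance.Swap(nil) // claim the persistent instance if it is free
	if r == nil {
		r = pool.Get().(*resource) // otherwise fall back to the pool
	}
	defer func() {
		// Prefer refilling the persistent slot; overflow returns to the pool.
		if !instance.CompareAndSwap(nil, r) {
			pool.Put(r)
		}
	}()
	f(r)
}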
- entropy.Depleted(func(seed *[48]byte) { - drbg.Reseed(seed, additionalInput) - }) + drbg.Reseed(getEntropy(), additionalInput) additionalInput = nil continue } diff --git a/src/crypto/internal/fips140/entropy/entropy.go b/src/crypto/internal/fips140/entropy/entropy.go new file mode 100644 index 00000000000000..273f05c817aff8 --- /dev/null +++ b/src/crypto/internal/fips140/entropy/entropy.go @@ -0,0 +1,202 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package entropy implements a CPU jitter-based SP 800-90B entropy source. +package entropy + +import ( + "crypto/internal/fips140deps/time" + "errors" + "sync/atomic" + "unsafe" +) + +// Version returns the version of the entropy source. +// +// This is independent of the FIPS 140-3 module version, in order to reuse the +// ESV certificate across module versions. +func Version() string { + return "v1.0.0" +} + +// ScratchBuffer is a large buffer that will be written to using atomics, to +// generate noise from memory access timings. Its contents do not matter. +type ScratchBuffer [1 << 25]byte + +// Seed returns a 384-bit seed with full entropy. +// +// memory is passed in to allow changing the allocation strategy without +// modifying the frozen and certified entropy source in this package. +// +// Seed returns an error if the entropy source startup health tests fail, which +// has a non-negligible chance of happening. +func Seed(memory *ScratchBuffer) ([48]byte, error) { + // Collect w = 1024 samples, each certified to provide no less than h = 0.5 + // bits of entropy, for a total of hᵢₙ = w × h = 512 bits of entropy, over + // nᵢₙ = w × n = 8192 bits of input data. + var samples [1024]byte + if err := Samples(samples[:], memory); err != nil { + return [48]byte{}, err + } + + // Use a vetted unkeyed conditioning component, SHA-384, with nw = 384 and + // nₒᵤₜ = 384. Per the formula in SP 800-90B Section 3.1.5.1.2, the output + // entropy hₒᵤₜ is: + // + // sage: n_in = 8192 + // sage: n_out = 384 + // sage: nw = 384 + // sage: h_in = 512 + // sage: P_high = 2^(-h_in) + // sage: P_low = (1 - P_high) / (2^n_in - 1) + // sage: n = min(n_out, nw) + // sage: ψ = 2^(n_in - n) * P_low + P_high + // sage: U = 2^(n_in - n) + sqrt(2 * n * 2^(n_in - n) * ln(2)) + // sage: ω = U * P_low + // sage: h_out = -log(max(ψ, ω), 2) + // sage: h_out.n() + // 384.000000000000 + // + // According to Implementation Guidance D.K, Resolution 19, since + // + // - the conditioning component is vetted, + // - hᵢₙ = 512 ≥ nₒᵤₜ + 64 = 448, and + // - nₒᵤₜ ≤ security strength of SHA-384 = 384 (per SP 800-107 Rev. 1, Table 1), + // + // we can claim the output has full entropy. + return SHA384(&samples), nil +} + +// Samples starts a new entropy source, collects the requested number of +// samples, conducts startup health tests, and returns the samples or an error +// if the health tests fail. +// +// The health tests have a non-negligible chance of failing. +func Samples(samples []uint8, memory *ScratchBuffer) error { + if len(samples) < 1024 { + return errors.New("entropy: at least 1024 samples are required for startup health tests") + } + s := newSource(memory) + for range 4 { + // Warm up the source to avoid any initial bias. 
+ _ = s.Sample() + } + for i := range samples { + samples[i] = s.Sample() + } + if err := RepetitionCountTest(samples); err != nil { + return err + } + if err := AdaptiveProportionTest(samples); err != nil { + return err + } + return nil +} + +type source struct { + memory *ScratchBuffer + lcgState uint32 + previous int64 +} + +func newSource(memory *ScratchBuffer) *source { + return &source{ + memory: memory, + lcgState: uint32(time.HighPrecisionNow()), + previous: time.HighPrecisionNow(), + } +} + +// touchMemory performs a write to memory at the given index. +// +// The memory slice is passed in and may be shared across sources e.g. to avoid +// the significant (~500µs) cost of zeroing a new allocation on every [Seed] call. +func touchMemory(memory *ScratchBuffer, idx uint32) { + idx = idx / 4 * 4 // align to 32 bits + u32 := (*uint32)(unsafe.Pointer(&memory[idx])) + last := atomic.LoadUint32(u32) + atomic.SwapUint32(u32, last+13) +} + +func (s *source) Sample() uint8 { + // Perform a few memory accesses in an unpredictable pattern to expose the + // next measurement to as much system noise as possible. + memory, lcgState := s.memory, s.lcgState + _ = memory[0] // hoist the nil check out of touchMemory + for range 64 { + lcgState = 1664525*lcgState + 1013904223 + // Discard the lower bits, which tend to fall into short cycles. + idx := (lcgState >> 6) & (1<<25 - 1) + touchMemory(memory, idx) + } + s.lcgState = lcgState + + t := time.HighPrecisionNow() + sample := t - s.previous + s.previous = t + + // Reduce the symbol space to 256 values, assuming most of the entropy is in + // the least-significant bits, which represent the highest-resolution timing + // differences. + return uint8(sample) +} + +// RepetitionCountTest implements the repetition count test from SP 800-90B +// Section 4.4.1. It returns an error if any symbol is repeated C = 41 or more +// times in a row. +// +// This C value is calculated from a target failure probability α = 2⁻²⁰ and a +// claimed min-entropy per symbol h = 0.5 bits, using the formula in SP 800-90B +// Section 4.4.1. +// +// sage: α = 2^-20 +// sage: H = 0.5 +// sage: 1 + ceil(-log(α, 2) / H) +// 41 +func RepetitionCountTest(samples []uint8) error { + x := samples[0] + count := 1 + for _, y := range samples[1:] { + if y == x { + count++ + if count >= 41 { + return errors.New("entropy: repetition count health test failed") + } + } else { + x = y + count = 1 + } + } + return nil +} + +// AdaptiveProportionTest implements the adaptive proportion test from SP 800-90B +// Section 4.4.2. It returns an error if any symbol appears C = 410 or more +// times in the last W = 512 samples. +// +// This C value is calculated from a target failure probability α = 2⁻²⁰, a +// window size W = 512, and a claimed min-entropy per symbol h = 0.5 bits, using +// the formula in SP 800-90B Section 4.4.2, equivalent to the Microsoft Excel +// formula 1+CRITBINOM(W, power(2,(−H)),1−α). 
+// +// sage: from scipy.stats import binom +// sage: α = 2^-20 +// sage: H = 0.5 +// sage: W = 512 +// sage: C = 1 + binom.ppf(1 - α, W, 2**(-H)) +// sage: ceil(C) +// 410 +func AdaptiveProportionTest(samples []uint8) error { + var counts [256]int + for i, x := range samples { + counts[x]++ + if i >= 512 { + counts[samples[i-512]]-- + } + if counts[x] >= 410 { + return errors.New("entropy: adaptive proportion health test failed") + } + } + return nil +} diff --git a/src/crypto/internal/fips140/entropy/sha384.go b/src/crypto/internal/fips140/entropy/sha384.go new file mode 100644 index 00000000000000..ec23cfc9ad3661 --- /dev/null +++ b/src/crypto/internal/fips140/entropy/sha384.go @@ -0,0 +1,191 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package entropy + +import "math/bits" + +// This file includes a SHA-384 implementation to insulate the entropy source +// from any changes in the FIPS 140-3 module's crypto/internal/fips140/sha512 +// package. We only support 1024-byte inputs. + +func SHA384(p *[1024]byte) [48]byte { + h := [8]uint64{ + 0xcbbb9d5dc1059ed8, + 0x629a292a367cd507, + 0x9159015a3070dd17, + 0x152fecd8f70e5939, + 0x67332667ffc00b31, + 0x8eb44a8768581511, + 0xdb0c2e0d64f98fa7, + 0x47b5481dbefa4fa4, + } + + sha384Block(&h, (*[128]byte)(p[0:128])) + sha384Block(&h, (*[128]byte)(p[128:256])) + sha384Block(&h, (*[128]byte)(p[256:384])) + sha384Block(&h, (*[128]byte)(p[384:512])) + sha384Block(&h, (*[128]byte)(p[512:640])) + sha384Block(&h, (*[128]byte)(p[640:768])) + sha384Block(&h, (*[128]byte)(p[768:896])) + sha384Block(&h, (*[128]byte)(p[896:1024])) + + var padlen [128]byte + padlen[0] = 0x80 + bePutUint64(padlen[112+8:], 1024*8) + sha384Block(&h, &padlen) + + var digest [48]byte + bePutUint64(digest[0:], h[0]) + bePutUint64(digest[8:], h[1]) + bePutUint64(digest[16:], h[2]) + bePutUint64(digest[24:], h[3]) + bePutUint64(digest[32:], h[4]) + bePutUint64(digest[40:], h[5]) + return digest +} + +var _K = [...]uint64{ + 0x428a2f98d728ae22, + 0x7137449123ef65cd, + 0xb5c0fbcfec4d3b2f, + 0xe9b5dba58189dbbc, + 0x3956c25bf348b538, + 0x59f111f1b605d019, + 0x923f82a4af194f9b, + 0xab1c5ed5da6d8118, + 0xd807aa98a3030242, + 0x12835b0145706fbe, + 0x243185be4ee4b28c, + 0x550c7dc3d5ffb4e2, + 0x72be5d74f27b896f, + 0x80deb1fe3b1696b1, + 0x9bdc06a725c71235, + 0xc19bf174cf692694, + 0xe49b69c19ef14ad2, + 0xefbe4786384f25e3, + 0x0fc19dc68b8cd5b5, + 0x240ca1cc77ac9c65, + 0x2de92c6f592b0275, + 0x4a7484aa6ea6e483, + 0x5cb0a9dcbd41fbd4, + 0x76f988da831153b5, + 0x983e5152ee66dfab, + 0xa831c66d2db43210, + 0xb00327c898fb213f, + 0xbf597fc7beef0ee4, + 0xc6e00bf33da88fc2, + 0xd5a79147930aa725, + 0x06ca6351e003826f, + 0x142929670a0e6e70, + 0x27b70a8546d22ffc, + 0x2e1b21385c26c926, + 0x4d2c6dfc5ac42aed, + 0x53380d139d95b3df, + 0x650a73548baf63de, + 0x766a0abb3c77b2a8, + 0x81c2c92e47edaee6, + 0x92722c851482353b, + 0xa2bfe8a14cf10364, + 0xa81a664bbc423001, + 0xc24b8b70d0f89791, + 0xc76c51a30654be30, + 0xd192e819d6ef5218, + 0xd69906245565a910, + 0xf40e35855771202a, + 0x106aa07032bbd1b8, + 0x19a4c116b8d2d0c8, + 0x1e376c085141ab53, + 0x2748774cdf8eeb99, + 0x34b0bcb5e19b48a8, + 0x391c0cb3c5c95a63, + 0x4ed8aa4ae3418acb, + 0x5b9cca4f7763e373, + 0x682e6ff3d6b2b8a3, + 0x748f82ee5defb2fc, + 0x78a5636f43172f60, + 0x84c87814a1f0ab72, + 0x8cc702081a6439ec, + 0x90befffa23631e28, + 0xa4506cebde82bde9, + 0xbef9a3f7b2c67915, + 0xc67178f2e372532b, + 0xca273eceea26619c, + 0xd186b8c721c0c207, + 
0xeada7dd6cde0eb1e, + 0xf57d4f7fee6ed178, + 0x06f067aa72176fba, + 0x0a637dc5a2c898a6, + 0x113f9804bef90dae, + 0x1b710b35131c471b, + 0x28db77f523047d84, + 0x32caab7b40c72493, + 0x3c9ebe0a15c9bebc, + 0x431d67c49c100d4c, + 0x4cc5d4becb3e42b6, + 0x597f299cfc657e2a, + 0x5fcb6fab3ad6faec, + 0x6c44198c4a475817, +} + +func sha384Block(dh *[8]uint64, p *[128]byte) { + var w [80]uint64 + for i := range 80 { + if i < 16 { + w[i] = beUint64(p[i*8:]) + } else { + v1 := w[i-2] + t1 := bits.RotateLeft64(v1, -19) ^ bits.RotateLeft64(v1, -61) ^ (v1 >> 6) + v2 := w[i-15] + t2 := bits.RotateLeft64(v2, -1) ^ bits.RotateLeft64(v2, -8) ^ (v2 >> 7) + + w[i] = t1 + w[i-7] + t2 + w[i-16] + } + } + + a, b, c, d, e, f, g, h := dh[0], dh[1], dh[2], dh[3], dh[4], dh[5], dh[6], dh[7] + + for i := range 80 { + t1 := h + (bits.RotateLeft64(e, -14) ^ bits.RotateLeft64(e, -18) ^ + bits.RotateLeft64(e, -41)) + ((e & f) ^ (^e & g)) + _K[i] + w[i] + t2 := (bits.RotateLeft64(a, -28) ^ bits.RotateLeft64(a, -34) ^ + bits.RotateLeft64(a, -39)) + ((a & b) ^ (a & c) ^ (b & c)) + + h = g + g = f + f = e + e = d + t1 + d = c + c = b + b = a + a = t1 + t2 + } + + dh[0] += a + dh[1] += b + dh[2] += c + dh[3] += d + dh[4] += e + dh[5] += f + dh[6] += g + dh[7] += h +} + +func beUint64(b []byte) uint64 { + _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 | + uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56 +} + +func bePutUint64(b []byte, v uint64) { + _ = b[7] // early bounds check to guarantee safety of writes below + b[0] = byte(v >> 56) + b[1] = byte(v >> 48) + b[2] = byte(v >> 40) + b[3] = byte(v >> 32) + b[4] = byte(v >> 24) + b[5] = byte(v >> 16) + b[6] = byte(v >> 8) + b[7] = byte(v) +} diff --git a/src/crypto/internal/fips140/fips140.go b/src/crypto/internal/fips140/fips140.go index d03219b540e27f..76054b00684e2b 100644 --- a/src/crypto/internal/fips140/fips140.go +++ b/src/crypto/internal/fips140/fips140.go @@ -48,6 +48,8 @@ func Supported() error { } // See EnableFIPS in cmd/internal/obj/fips.go for commentary. + // Also, js/wasm and windows/386 don't have good enough timers + // for the CPU jitter entropy source. switch { case runtime.GOARCH == "wasm", runtime.GOOS == "windows" && runtime.GOARCH == "386", diff --git a/src/crypto/internal/fips140deps/fipsdeps_test.go b/src/crypto/internal/fips140deps/fipsdeps_test.go index 2c3bc8184e71bc..97552dc1ce10f1 100644 --- a/src/crypto/internal/fips140deps/fipsdeps_test.go +++ b/src/crypto/internal/fips140deps/fipsdeps_test.go @@ -88,7 +88,8 @@ func TestImports(t *testing.T) { } } - // Ensure that all packages except check and check's dependencies import check. + // Ensure that all packages except check, check's dependencies, and the + // entropy source (which is used only from .../fips140/drbg) import check. 
for pkg := range allPackages { switch pkg { case "crypto/internal/fips140/check": @@ -99,6 +100,7 @@ func TestImports(t *testing.T) { case "crypto/internal/fips140/sha3": case "crypto/internal/fips140/sha256": case "crypto/internal/fips140/sha512": + case "crypto/internal/fips140/entropy": default: if !importCheck[pkg] { t.Errorf("package %s does not import crypto/internal/fips140/check", pkg) diff --git a/src/crypto/internal/fips140deps/time/time.go b/src/crypto/internal/fips140deps/time/time.go new file mode 100644 index 00000000000000..eea37b772e4351 --- /dev/null +++ b/src/crypto/internal/fips140deps/time/time.go @@ -0,0 +1,21 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !windows + +package time + +import "time" + +var start = time.Now() + +// HighPrecisionNow returns a high-resolution timestamp suitable for measuring +// small time differences. It uses the time package's monotonic clock. +// +// Its unit, epoch, and resolution are unspecified, and may change, but can be +// assumed to be sufficiently precise to measure time differences on the order +// of tens to hundreds of nanoseconds. +func HighPrecisionNow() int64 { + return int64(time.Since(start)) +} diff --git a/src/crypto/internal/fips140deps/time/time_windows.go b/src/crypto/internal/fips140deps/time/time_windows.go new file mode 100644 index 00000000000000..410ede4ee91705 --- /dev/null +++ b/src/crypto/internal/fips140deps/time/time_windows.go @@ -0,0 +1,17 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package time + +import "internal/syscall/windows" + +// HighPrecisionNow returns a high-resolution timestamp suitable for measuring +// small time differences. It uses Windows' QueryPerformanceCounter. +// +// Its unit, epoch, and resolution are unspecified, and may change, but can be +// assumed to be sufficiently precise to measure time differences on the order +// of tens to hundreds of nanoseconds. +func HighPrecisionNow() int64 { + return windows.QueryPerformanceCounter() +} diff --git a/src/crypto/internal/fips140test/entropy_test.go b/src/crypto/internal/fips140test/entropy_test.go new file mode 100644 index 00000000000000..76c24289520e17 --- /dev/null +++ b/src/crypto/internal/fips140test/entropy_test.go @@ -0,0 +1,264 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !fips140v1.0 + +package fipstest + +import ( + "bytes" + "crypto/internal/cryptotest" + "crypto/internal/fips140/drbg" + "crypto/internal/fips140/entropy" + "crypto/sha256" + "crypto/sha512" + "encoding/hex" + "flag" + "fmt" + "internal/testenv" + "io/fs" + "os" + "path/filepath" + "runtime" + "strings" + "testing" + "time" +) + +var flagEntropySamples = flag.String("entropy-samples", "", "store entropy samples with the provided `suffix`") +var flagNISTSP80090B = flag.Bool("nist-sp800-90b", false, "run NIST SP 800-90B tests (requires docker)") + +func TestEntropySamples(t *testing.T) { + cryptotest.MustSupportFIPS140(t) + + var seqSamples [1_000_000]uint8 + samplesOrTryAgain(t, seqSamples[:]) + seqSamplesName := fmt.Sprintf("entropy_samples_sequential_%s_%s_%s_%s_%s.bin", entropy.Version(), + runtime.GOOS, runtime.GOARCH, *flagEntropySamples, time.Now().Format("20060102T150405Z")) + if *flagEntropySamples != "" { + if err := os.WriteFile(seqSamplesName, seqSamples[:], 0644); err != nil { + t.Fatalf("failed to write samples to %q: %v", seqSamplesName, err) + } + t.Logf("wrote %s", seqSamplesName) + } + + var restartSamples [1000][1000]uint8 + for i := range restartSamples { + var samples [1024]uint8 + samplesOrTryAgain(t, samples[:]) + copy(restartSamples[i][:], samples[:]) + } + restartSamplesName := fmt.Sprintf("entropy_samples_restart_%s_%s_%s_%s_%s.bin", entropy.Version(), + runtime.GOOS, runtime.GOARCH, *flagEntropySamples, time.Now().Format("20060102T150405Z")) + if *flagEntropySamples != "" { + f, err := os.Create(restartSamplesName) + if err != nil { + t.Fatalf("failed to create %q: %v", restartSamplesName, err) + } + for i := range restartSamples { + if _, err := f.Write(restartSamples[i][:]); err != nil { + t.Fatalf("failed to write samples to %q: %v", restartSamplesName, err) + } + } + if err := f.Close(); err != nil { + t.Fatalf("failed to close %q: %v", restartSamplesName, err) + } + t.Logf("wrote %s", restartSamplesName) + } + + if *flagNISTSP80090B { + if *flagEntropySamples == "" { + t.Fatalf("-nist-sp800-90b requires -entropy-samples to be set too") + } + + // Check if the nist-sp800-90b docker image is already present, + // and build it otherwise. 
+ if err := testenv.Command(t, + "docker", "image", "inspect", "nist-sp800-90b", + ).Run(); err != nil { + t.Logf("building nist-sp800-90b docker image") + dockerfile := filepath.Join(t.TempDir(), "Dockerfile.SP800-90B_EntropyAssessment") + if err := os.WriteFile(dockerfile, []byte(NISTSP80090BDockerfile), 0644); err != nil { + t.Fatalf("failed to write Dockerfile: %v", err) + } + out, err := testenv.Command(t, + "docker", "build", "-t", "nist-sp800-90b", "-f", dockerfile, "/var/empty", + ).CombinedOutput() + if err != nil { + t.Fatalf("failed to build nist-sp800-90b docker image: %v\n%s", err, out) + } + } + + pwd, err := os.Getwd() + if err != nil { + t.Fatalf("failed to get current working directory: %v", err) + } + t.Logf("running ea_non_iid analysis") + out, err := testenv.Command(t, + "docker", "run", "--rm", "-v", fmt.Sprintf("%s:%s", pwd, pwd), "-w", pwd, + "nist-sp800-90b", "ea_non_iid", seqSamplesName, "8", + ).CombinedOutput() + if err != nil { + t.Fatalf("ea_non_iid failed: %v\n%s", err, out) + } + t.Logf("\n%s", out) + + H_I := string(out) + H_I = strings.TrimSpace(H_I[strings.LastIndexByte(H_I, ' ')+1:]) + t.Logf("running ea_restart analysis with H_I = %s", H_I) + out, err = testenv.Command(t, + "docker", "run", "--rm", "-v", fmt.Sprintf("%s:%s", pwd, pwd), "-w", pwd, + "nist-sp800-90b", "ea_restart", restartSamplesName, "8", H_I, + ).CombinedOutput() + if err != nil { + t.Fatalf("ea_restart failed: %v\n%s", err, out) + } + t.Logf("\n%s", out) + } +} + +var NISTSP80090BDockerfile = ` +FROM ubuntu:24.04 +RUN apt-get update && apt-get install -y build-essential git \ + libbz2-dev libdivsufsort-dev libjsoncpp-dev libgmp-dev libmpfr-dev libssl-dev \ + && rm -rf /var/lib/apt/lists/* +RUN git clone --depth 1 https://github.com/usnistgov/SP800-90B_EntropyAssessment.git +RUN cd SP800-90B_EntropyAssessment && git checkout 8924f158c97e7b805e0f95247403ad4c44b9cd6f +WORKDIR ./SP800-90B_EntropyAssessment/cpp/ +RUN make all +RUN cd selftest && ./selftest +RUN cp ea_non_iid ea_restart /usr/local/bin/ +` + +var memory entropy.ScratchBuffer + +// samplesOrTryAgain calls entropy.Samples up to 10 times until it succeeds. +// Samples has a non-negligible chance of failing the health tests, as required +// by SP 800-90B. +func samplesOrTryAgain(t *testing.T, samples []uint8) { + t.Helper() + for range 10 { + if err := entropy.Samples(samples, &memory); err != nil { + t.Logf("entropy.Samples() failed: %v", err) + continue + } + return + } + t.Fatal("entropy.Samples() failed 10 times in a row") +} + +func TestEntropySHA384(t *testing.T) { + var input [1024]uint8 + for i := range input { + input[i] = uint8(i) + } + want := sha512.Sum384(input[:]) + got := entropy.SHA384(&input) + if got != want { + t.Errorf("SHA384() = %x, want %x", got, want) + } +} + +func TestEntropyRepetitionCountTest(t *testing.T) { + good := bytes.Repeat(append(bytes.Repeat([]uint8{42}, 40), 1), 100) + if err := entropy.RepetitionCountTest(good); err != nil { + t.Errorf("RepetitionCountTest(good) = %v, want nil", err) + } + + bad := bytes.Repeat([]uint8{0}, 40) + bad = append(bad, bytes.Repeat([]uint8{1}, 40)...) + bad = append(bad, bytes.Repeat([]uint8{42}, 41)...) + bad = append(bad, bytes.Repeat([]uint8{2}, 40)...) 
+ if err := entropy.RepetitionCountTest(bad); err == nil { + t.Error("RepetitionCountTest(bad) = nil, want error") + } + + bad = bytes.Repeat([]uint8{42}, 41) + if err := entropy.RepetitionCountTest(bad); err == nil { + t.Error("RepetitionCountTest(bad) = nil, want error") + } +} + +func TestEntropyAdaptiveProportionTest(t *testing.T) { + good := bytes.Repeat([]uint8{0}, 409) + good = append(good, bytes.Repeat([]uint8{1}, 512-409)...) + good = append(good, bytes.Repeat([]uint8{0}, 409)...) + if err := entropy.AdaptiveProportionTest(good); err != nil { + t.Errorf("AdaptiveProportionTest(good) = %v, want nil", err) + } + + // These fall out of the window. + bad := bytes.Repeat([]uint8{1}, 100) + bad = append(bad, bytes.Repeat([]uint8{1, 2, 3, 4, 5, 6}, 100)...) + // These are in the window. + bad = append(bad, bytes.Repeat([]uint8{42}, 410)...) + if err := entropy.AdaptiveProportionTest(bad[:len(bad)-1]); err != nil { + t.Errorf("AdaptiveProportionTest(bad[:len(bad)-1]) = %v, want nil", err) + } + if err := entropy.AdaptiveProportionTest(bad); err == nil { + t.Error("AdaptiveProportionTest(bad) = nil, want error") + } +} + +func TestEntropyUnchanged(t *testing.T) { + testenv.MustHaveSource(t) + + h := sha256.New() + root := os.DirFS("../fips140/entropy") + if err := fs.WalkDir(root, ".", func(path string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + if d.IsDir() { + return nil + } + data, err := fs.ReadFile(root, path) + if err != nil { + return err + } + t.Logf("Hashing %s (%d bytes)", path, len(data)) + fmt.Fprintf(h, "%s %d\n", path, len(data)) + h.Write(data) + return nil + }); err != nil { + t.Fatalf("WalkDir: %v", err) + } + + // The crypto/internal/fips140/entropy package is certified as a FIPS 140-3 + // entropy source through the Entropy Source Validation program, + // independently of the FIPS 140-3 module. It must not change even across + // FIPS 140-3 module versions, in order to reuse the ESV certificate. + exp := "35976eb8a11678c79777da07aaab5511d4325701f837777df205f6e7b20c6821" + if got := hex.EncodeToString(h.Sum(nil)); got != exp { + t.Errorf("hash of crypto/internal/fips140/entropy = %s, want %s", got, exp) + } +} + +func TestEntropyRace(t *testing.T) { + // Check that concurrent calls to Seed don't trigger the race detector. + for range 2 { + go func() { + _, _ = entropy.Seed(&memory) + }() + } + // Same, with the higher-level DRBG. More concurrent calls to hit the Pool. 
+ for range 16 { + go func() { + var b [64]byte + drbg.Read(b[:]) + }() + } +} + +var sink byte + +func BenchmarkEntropySeed(b *testing.B) { + for b.Loop() { + seed, err := entropy.Seed(&memory) + if err != nil { + b.Fatalf("entropy.Seed() failed: %v", err) + } + sink ^= seed[0] + } +} diff --git a/src/go/build/deps_test.go b/src/go/build/deps_test.go index d50a98d34c9092..c76b254b23ffc8 100644 --- a/src/go/build/deps_test.go +++ b/src/go/build/deps_test.go @@ -485,11 +485,16 @@ var depsRules = ` internal/byteorder < crypto/internal/fips140deps/byteorder; internal/cpu, internal/goarch < crypto/internal/fips140deps/cpu; internal/godebug < crypto/internal/fips140deps/godebug; + time, internal/syscall/windows < crypto/internal/fips140deps/time; + + crypto/internal/fips140deps/time, errors, math/bits, sync/atomic, unsafe + < crypto/internal/fips140/entropy; STR, hash, crypto/internal/impl, crypto/internal/entropy, crypto/internal/randutil, + crypto/internal/fips140/entropy, crypto/internal/fips140deps/byteorder, crypto/internal/fips140deps/cpu, crypto/internal/fips140deps/godebug From 75c87df58ebfb24592d7002ef71912f8708dc424 Mon Sep 17 00:00:00 2001 From: qmuntal Date: Tue, 16 Sep 2025 10:52:49 +0200 Subject: [PATCH 014/152] internal/poll: pass the I/O mode instead of an overlapped object in execIO execIO callers should be agnostic to the fact that it uses an overlapped object. This will unlock future optimizations and simplifications. Change-Id: I0a58d992101fa74ac75e3538af04cbc44156f0d6 Reviewed-on: https://go-review.googlesource.com/c/go/+/704175 Reviewed-by: Damien Neil LUCI-TryBot-Result: Go LUCI Reviewed-by: Junyang Shao --- src/internal/poll/fd_windows.go | 64 ++++++++++++++------------- src/internal/poll/sendfile_windows.go | 8 +--- 2 files changed, 35 insertions(+), 37 deletions(-) diff --git a/src/internal/poll/fd_windows.go b/src/internal/poll/fd_windows.go index 9323e49eb745d7..a6ecdafc3404c5 100644 --- a/src/internal/poll/fd_windows.go +++ b/src/internal/poll/fd_windows.go @@ -250,12 +250,16 @@ func (fd *FD) cancelIO(o *operation) { // It supports both synchronous and asynchronous IO. // o.qty and o.flags are set to zero before calling submit // to avoid reusing the values from a previous call. -func (fd *FD) execIO(o *operation, submit func(o *operation) (uint32, error)) (int, error) { +func (fd *FD) execIO(mode int, submit func(o *operation) (uint32, error)) (int, error) { // Notify runtime netpoll about starting IO. - err := fd.pd.prepare(int(o.mode), fd.isFile) + err := fd.pd.prepare(mode, fd.isFile) if err != nil { return 0, err } + o := &fd.rop + if mode == 'w' { + o = &fd.wop + } // Start IO. 
if !fd.isBlocking && o.o.HEvent == 0 && !fd.pollable() { // If the handle is opened for overlapped IO but we can't @@ -552,7 +556,7 @@ func (fd *FD) Read(buf []byte) (int, error) { case kindConsole: n, err = fd.readConsole(buf) case kindFile, kindPipe: - n, err = fd.execIO(&fd.rop, func(o *operation) (qty uint32, err error) { + n, err = fd.execIO('r', func(o *operation) (qty uint32, err error) { err = syscall.ReadFile(fd.Sysfd, buf, &qty, fd.overlapped(o)) return qty, err }) @@ -567,7 +571,7 @@ func (fd *FD) Read(buf []byte) (int, error) { } } case kindNet: - n, err = fd.execIO(&fd.rop, func(o *operation) (qty uint32, err error) { + n, err = fd.execIO('r', func(o *operation) (qty uint32, err error) { var flags uint32 err = syscall.WSARecv(fd.Sysfd, newWsaBuf(buf), 1, &qty, &flags, &o.o, nil) return qty, err @@ -694,7 +698,7 @@ func (fd *FD) Pread(buf []byte, off int64) (int, error) { defer fd.setOffset(curoffset) } fd.setOffset(off) - n, err := fd.execIO(&fd.rop, func(o *operation) (qty uint32, err error) { + n, err := fd.execIO('r', func(o *operation) (qty uint32, err error) { err = syscall.ReadFile(fd.Sysfd, buf, &qty, &o.o) return qty, err }) @@ -727,7 +731,7 @@ func (fd *FD) ReadFrom(buf []byte) (int, syscall.Sockaddr, error) { rsa := wsaRsaPool.Get().(*syscall.RawSockaddrAny) defer wsaRsaPool.Put(rsa) - n, err := fd.execIO(&fd.rop, func(o *operation) (qty uint32, err error) { + n, err := fd.execIO('r', func(o *operation) (qty uint32, err error) { rsan := int32(unsafe.Sizeof(*rsa)) var flags uint32 err = syscall.WSARecvFrom(fd.Sysfd, newWsaBuf(buf), 1, &qty, &flags, rsa, &rsan, &o.o, nil) @@ -761,7 +765,7 @@ func (fd *FD) ReadFromInet4(buf []byte, sa4 *syscall.SockaddrInet4) (int, error) rsa := wsaRsaPool.Get().(*syscall.RawSockaddrAny) defer wsaRsaPool.Put(rsa) - n, err := fd.execIO(&fd.rop, func(o *operation) (qty uint32, err error) { + n, err := fd.execIO('r', func(o *operation) (qty uint32, err error) { rsan := int32(unsafe.Sizeof(*rsa)) var flags uint32 err = syscall.WSARecvFrom(fd.Sysfd, newWsaBuf(buf), 1, &qty, &flags, rsa, &rsan, &o.o, nil) @@ -795,7 +799,7 @@ func (fd *FD) ReadFromInet6(buf []byte, sa6 *syscall.SockaddrInet6) (int, error) rsa := wsaRsaPool.Get().(*syscall.RawSockaddrAny) defer wsaRsaPool.Put(rsa) - n, err := fd.execIO(&fd.rop, func(o *operation) (qty uint32, err error) { + n, err := fd.execIO('r', func(o *operation) (qty uint32, err error) { rsan := int32(unsafe.Sizeof(*rsa)) var flags uint32 err = syscall.WSARecvFrom(fd.Sysfd, newWsaBuf(buf), 1, &qty, &flags, rsa, &rsan, &o.o, nil) @@ -841,7 +845,7 @@ func (fd *FD) Write(buf []byte) (int, error) { case kindConsole: n, err = fd.writeConsole(b) case kindPipe, kindFile: - n, err = fd.execIO(&fd.wop, func(o *operation) (qty uint32, err error) { + n, err = fd.execIO('w', func(o *operation) (qty uint32, err error) { err = syscall.WriteFile(fd.Sysfd, b, &qty, fd.overlapped(o)) return qty, err }) @@ -850,7 +854,7 @@ func (fd *FD) Write(buf []byte) (int, error) { if race.Enabled { race.ReleaseMerge(unsafe.Pointer(&ioSync)) } - n, err = fd.execIO(&fd.wop, func(o *operation) (qty uint32, err error) { + n, err = fd.execIO('w', func(o *operation) (qty uint32, err error) { err = syscall.WSASend(fd.Sysfd, newWsaBuf(b), 1, &qty, 0, &o.o, nil) return qty, err }) @@ -949,7 +953,7 @@ func (fd *FD) Pwrite(buf []byte, off int64) (int, error) { max = ntotal + maxRW } fd.setOffset(off + int64(ntotal)) - n, err := fd.execIO(&fd.wop, func(o *operation) (qty uint32, err error) { + n, err := fd.execIO('w', func(o *operation) (qty 
uint32, err error) { err = syscall.WriteFile(fd.Sysfd, buf[ntotal:max], &qty, &o.o) return qty, err }) @@ -979,7 +983,7 @@ func (fd *FD) Writev(buf *[][]byte) (int64, error) { } bufs := newWSABufs(buf) defer freeWSABufs(bufs) - n, err := fd.execIO(&fd.wop, func(o *operation) (qty uint32, err error) { + n, err := fd.execIO('w', func(o *operation) (qty uint32, err error) { err = syscall.WSASend(fd.Sysfd, &(*bufs)[0], uint32(len(*bufs)), &qty, 0, &o.o, nil) return qty, err }) @@ -997,7 +1001,7 @@ func (fd *FD) WriteTo(buf []byte, sa syscall.Sockaddr) (int, error) { if len(buf) == 0 { // handle zero-byte payload - n, err := fd.execIO(&fd.wop, func(o *operation) (qty uint32, err error) { + n, err := fd.execIO('w', func(o *operation) (qty uint32, err error) { err = syscall.WSASendto(fd.Sysfd, &syscall.WSABuf{}, 1, &qty, 0, sa, &o.o, nil) return qty, err }) @@ -1015,7 +1019,7 @@ func (fd *FD) WriteTo(buf []byte, sa syscall.Sockaddr) (int, error) { if len(b) > maxRW { b = b[:maxRW] } - n, err := fd.execIO(&fd.wop, func(o *operation) (qty uint32, err error) { + n, err := fd.execIO('w', func(o *operation) (qty uint32, err error) { err = syscall.WSASendto(fd.Sysfd, newWsaBuf(b), 1, &qty, 0, sa, &o.o, nil) return qty, err }) @@ -1037,7 +1041,7 @@ func (fd *FD) WriteToInet4(buf []byte, sa4 *syscall.SockaddrInet4) (int, error) if len(buf) == 0 { // handle zero-byte payload - n, err := fd.execIO(&fd.wop, func(o *operation) (qty uint32, err error) { + n, err := fd.execIO('w', func(o *operation) (qty uint32, err error) { err = windows.WSASendtoInet4(fd.Sysfd, &syscall.WSABuf{}, 1, &qty, 0, sa4, &o.o, nil) return qty, err }) @@ -1055,7 +1059,7 @@ func (fd *FD) WriteToInet4(buf []byte, sa4 *syscall.SockaddrInet4) (int, error) if len(b) > maxRW { b = b[:maxRW] } - n, err := fd.execIO(&fd.wop, func(o *operation) (qty uint32, err error) { + n, err := fd.execIO('w', func(o *operation) (qty uint32, err error) { err = windows.WSASendtoInet4(fd.Sysfd, newWsaBuf(b), 1, &qty, 0, sa4, &o.o, nil) return qty, err }) @@ -1077,7 +1081,7 @@ func (fd *FD) WriteToInet6(buf []byte, sa6 *syscall.SockaddrInet6) (int, error) if len(buf) == 0 { // handle zero-byte payload - n, err := fd.execIO(&fd.wop, func(o *operation) (qty uint32, err error) { + n, err := fd.execIO('w', func(o *operation) (qty uint32, err error) { err = windows.WSASendtoInet6(fd.Sysfd, &syscall.WSABuf{}, 1, &qty, 0, sa6, &o.o, nil) return qty, err }) @@ -1095,7 +1099,7 @@ func (fd *FD) WriteToInet6(buf []byte, sa6 *syscall.SockaddrInet6) (int, error) if len(b) > maxRW { b = b[:maxRW] } - n, err := fd.execIO(&fd.wop, func(o *operation) (qty uint32, err error) { + n, err := fd.execIO('w', func(o *operation) (qty uint32, err error) { err = windows.WSASendtoInet6(fd.Sysfd, newWsaBuf(b), 1, &qty, 0, sa6, &o.o, nil) return qty, err }) @@ -1112,17 +1116,16 @@ func (fd *FD) WriteToInet6(buf []byte, sa6 *syscall.SockaddrInet6) (int, error) // called when the descriptor is first created. This is here rather // than in the net package so that it can use fd.wop. func (fd *FD) ConnectEx(ra syscall.Sockaddr) error { - o := &fd.wop - _, err := fd.execIO(o, func(o *operation) (uint32, error) { + _, err := fd.execIO('w', func(o *operation) (uint32, error) { return 0, ConnectExFunc(fd.Sysfd, ra, nil, 0, nil, &o.o) }) return err } -func (fd *FD) acceptOne(s syscall.Handle, rawsa []syscall.RawSockaddrAny, o *operation) (string, error) { +func (fd *FD) acceptOne(s syscall.Handle, rawsa []syscall.RawSockaddrAny) (string, error) { // Submit accept request. 
rsan := uint32(unsafe.Sizeof(rawsa[0])) - _, err := fd.execIO(o, func(o *operation) (qty uint32, err error) { + _, err := fd.execIO('r', func(o *operation) (qty uint32, err error) { err = AcceptFunc(fd.Sysfd, s, (*byte)(unsafe.Pointer(&rawsa[0])), 0, rsan, rsan, &qty, &o.o) return qty, err @@ -1150,7 +1153,6 @@ func (fd *FD) Accept(sysSocket func() (syscall.Handle, error)) (syscall.Handle, } defer fd.readUnlock() - o := &fd.rop var rawsa [2]syscall.RawSockaddrAny for { s, err := sysSocket() @@ -1158,7 +1160,7 @@ func (fd *FD) Accept(sysSocket func() (syscall.Handle, error)) (syscall.Handle, return syscall.InvalidHandle, nil, 0, "", err } - errcall, err := fd.acceptOne(s, rawsa[:], o) + errcall, err := fd.acceptOne(s, rawsa[:]) if err == nil { return s, rawsa[:], uint32(unsafe.Sizeof(rawsa[0])), "", nil } @@ -1286,7 +1288,7 @@ func (fd *FD) RawRead(f func(uintptr) bool) error { // Use a zero-byte read as a way to get notified when this // socket is readable. h/t https://stackoverflow.com/a/42019668/332798 - _, err := fd.execIO(&fd.rop, func(o *operation) (qty uint32, err error) { + _, err := fd.execIO('r', func(o *operation) (qty uint32, err error) { var flags uint32 if !fd.IsStream { flags |= windows.MSG_PEEK @@ -1381,7 +1383,7 @@ func (fd *FD) ReadMsg(p []byte, oob []byte, flags int) (int, int, int, syscall.S msg := newWSAMsg(p, oob, flags, true) defer freeWSAMsg(msg) - n, err := fd.execIO(&fd.rop, func(o *operation) (qty uint32, err error) { + n, err := fd.execIO('r', func(o *operation) (qty uint32, err error) { err = windows.WSARecvMsg(fd.Sysfd, msg, &qty, &o.o, nil) return qty, err }) @@ -1406,7 +1408,7 @@ func (fd *FD) ReadMsgInet4(p []byte, oob []byte, flags int, sa4 *syscall.Sockadd msg := newWSAMsg(p, oob, flags, true) defer freeWSAMsg(msg) - n, err := fd.execIO(&fd.rop, func(o *operation) (qty uint32, err error) { + n, err := fd.execIO('r', func(o *operation) (qty uint32, err error) { err = windows.WSARecvMsg(fd.Sysfd, msg, &qty, &o.o, nil) return qty, err }) @@ -1430,7 +1432,7 @@ func (fd *FD) ReadMsgInet6(p []byte, oob []byte, flags int, sa6 *syscall.Sockadd msg := newWSAMsg(p, oob, flags, true) defer freeWSAMsg(msg) - n, err := fd.execIO(&fd.rop, func(o *operation) (qty uint32, err error) { + n, err := fd.execIO('r', func(o *operation) (qty uint32, err error) { err = windows.WSARecvMsg(fd.Sysfd, msg, &qty, &o.o, nil) return qty, err }) @@ -1461,7 +1463,7 @@ func (fd *FD) WriteMsg(p []byte, oob []byte, sa syscall.Sockaddr) (int, int, err return 0, 0, err } } - n, err := fd.execIO(&fd.wop, func(o *operation) (qty uint32, err error) { + n, err := fd.execIO('w', func(o *operation) (qty uint32, err error) { err = windows.WSASendMsg(fd.Sysfd, msg, 0, nil, &o.o, nil) return qty, err }) @@ -1484,7 +1486,7 @@ func (fd *FD) WriteMsgInet4(p []byte, oob []byte, sa *syscall.SockaddrInet4) (in if sa != nil { msg.Namelen = sockaddrInet4ToRaw(msg.Name, sa) } - n, err := fd.execIO(&fd.wop, func(o *operation) (qty uint32, err error) { + n, err := fd.execIO('w', func(o *operation) (qty uint32, err error) { err = windows.WSASendMsg(fd.Sysfd, msg, 0, nil, &o.o, nil) return qty, err }) @@ -1507,7 +1509,7 @@ func (fd *FD) WriteMsgInet6(p []byte, oob []byte, sa *syscall.SockaddrInet6) (in if sa != nil { msg.Namelen = sockaddrInet6ToRaw(msg.Name, sa) } - n, err := fd.execIO(&fd.wop, func(o *operation) (qty uint32, err error) { + n, err := fd.execIO('w', func(o *operation) (qty uint32, err error) { err = windows.WSASendMsg(fd.Sysfd, msg, 0, nil, &o.o, nil) return qty, err }) diff --git 
a/src/internal/poll/sendfile_windows.go b/src/internal/poll/sendfile_windows.go index a052f4a1f82400..2bdfecf0134b62 100644 --- a/src/internal/poll/sendfile_windows.go +++ b/src/internal/poll/sendfile_windows.go @@ -62,18 +62,14 @@ func SendFile(fd *FD, src uintptr, size int64) (written int64, err error, handle // See https://docs.microsoft.com/en-us/windows/win32/api/mswsock/nf-mswsock-transmitfile const maxChunkSizePerCall = int64(0x7fffffff - 1) - o := &fd.wop for size > 0 { chunkSize := maxChunkSizePerCall if chunkSize > size { chunkSize = size } - off := startpos + written - o.o.Offset = uint32(off) - o.o.OffsetHigh = uint32(off >> 32) - - n, err := fd.execIO(o, func(o *operation) (uint32, error) { + fd.setOffset(startpos + written) + n, err := fd.execIO('w', func(o *operation) (uint32, error) { err := syscall.TransmitFile(fd.Sysfd, hsrc, uint32(chunkSize), 0, &o.o, nil, syscall.TF_WRITE_BEHIND) if err != nil { return 0, err From db10db6be361f4eaf5f81890e487a9cbf3ca5a53 Mon Sep 17 00:00:00 2001 From: qmuntal Date: Tue, 16 Sep 2025 14:12:25 +0200 Subject: [PATCH 015/152] internal/poll: remove operation fields from FD Use a sync.Pool to reuse the overlapped object passed to the different Windows syscalls instead of keeping two of them in the FD struct. This reduces the size of the FD struct from 248 to 152 bytes. While here, pin the overlapped object for the duration of the overlapped IO operation to comply with the memory safety rules. Cq-Include-Trybots: luci.golang.try:gotip-windows-amd64-longtest,gotip-windows-amd64-race Change-Id: I0161d163f681fe94b822c0c885aaa42c449e5342 Reviewed-on: https://go-review.googlesource.com/c/go/+/704235 Reviewed-by: Damien Neil LUCI-TryBot-Result: Go LUCI Reviewed-by: Junyang Shao --- src/internal/poll/fd_windows.go | 132 ++++++++++++++------------------ 1 file changed, 59 insertions(+), 73 deletions(-) diff --git a/src/internal/poll/fd_windows.go b/src/internal/poll/fd_windows.go index a6ecdafc3404c5..6443f6eb30b8a5 100644 --- a/src/internal/poll/fd_windows.go +++ b/src/internal/poll/fd_windows.go @@ -78,22 +78,6 @@ type operation struct { mode int32 } -func (o *operation) setEvent() { - h, err := windows.CreateEvent(nil, 0, 0, nil) - if err != nil { - // This shouldn't happen when all CreateEvent arguments are zero. - panic(err) - } - // Set the low bit so that the external IOCP doesn't receive the completion packet. - o.o.HEvent = h | 1 -} - -func (o *operation) close() { - if o.o.HEvent != 0 { - syscall.CloseHandle(o.o.HEvent) - } -} - func (fd *FD) overlapped(o *operation) *syscall.Overlapped { if fd.isBlocking { // Don't return the overlapped object if the file handle @@ -208,6 +192,12 @@ var wsaRsaPool = sync.Pool{ }, } +var operationPool = sync.Pool{ + New: func() any { + return new(operation) + }, +} + // waitIO waits for the IO operation o to complete. func (fd *FD) waitIO(o *operation) error { if fd.isBlocking { @@ -246,27 +236,57 @@ func (fd *FD) cancelIO(o *operation) { fd.pd.waitCanceled(int(o.mode)) } +// pin pins ptr for the duration of the IO operation. +// If fd is in blocking mode, pin does nothing. +func (fd *FD) pin(mode int, ptr any) { + if fd.isBlocking { + return + } + if mode == 'r' { + fd.readPinner.Pin(ptr) + } else { + fd.writePinner.Pin(ptr) + } +} + // execIO executes a single IO operation o. // It supports both synchronous and asynchronous IO. -// o.qty and o.flags are set to zero before calling submit -// to avoid reusing the values from a previous call. 
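// The change described above replaces the FD-resident operation fields with a
// pooled, per-call operation, and pins the caller's buffer while the kernel
// may still be using it. A rough sketch of that shape, with illustrative
// names rather than internal/poll's actual API:
package overlappedio

import (
	"runtime"
	"sync"
)

// op stands in for the per-operation scratch state (an OVERLAPPED plus
// bookkeeping in the real code).
type op struct{ offset int64 }

var opPool = sync.Pool{New: func() any { return new(op) }}

// doIO takes scratch state from the pool, resets it rather than trusting
// leftover values, and pins buf so the garbage collector cannot move it while
// the asynchronous operation is in flight. submit and wait are placeholders
// for issuing the overlapped syscall and waiting for its completion.
func doIO(buf []byte, submit func(o *op, p *byte, n int) error, wait func(o *op) error) error {
	o := opPool.Get().(*op)
	defer opPool.Put(o)
	*o = op{}

	var pinner runtime.Pinner
	defer pinner.Unpin()
	var p *byte
	if len(buf) > 0 {
		p = &buf[0]
		pinner.Pin(p)
	}
	if err := submit(o, p, len(buf)); err != nil {
		return err
	}
	return wait(o)
}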
func (fd *FD) execIO(mode int, submit func(o *operation) (uint32, error)) (int, error) { + if mode == 'r' { + defer fd.readPinner.Unpin() + } else { + defer fd.writePinner.Unpin() + } // Notify runtime netpoll about starting IO. err := fd.pd.prepare(mode, fd.isFile) if err != nil { return 0, err } - o := &fd.rop - if mode == 'w' { - o = &fd.wop + o := operationPool.Get().(*operation) + defer operationPool.Put(o) + *o = operation{ + o: syscall.Overlapped{ + OffsetHigh: uint32(fd.offset >> 32), + Offset: uint32(fd.offset), + }, + runtimeCtx: fd.pd.runtimeCtx, + mode: int32(mode), } // Start IO. - if !fd.isBlocking && o.o.HEvent == 0 && !fd.pollable() { + if !fd.isBlocking && !fd.pollable() { // If the handle is opened for overlapped IO but we can't // use the runtime poller, then we need to use an // event to wait for the IO to complete. - o.setEvent() + h, err := windows.CreateEvent(nil, 0, 0, nil) + if err != nil { + // This shouldn't happen when all CreateEvent arguments are zero. + panic(err) + } + // Set the low bit so that the external IOCP doesn't receive the completion packet. + o.o.HEvent = h | 1 + defer syscall.CloseHandle(h) } + fd.pin(mode, o) qty, err := submit(o) var waitErr error // Blocking operations shouldn't return ERROR_IO_PENDING. @@ -321,11 +341,6 @@ type FD struct { // System file descriptor. Immutable until Close. Sysfd syscall.Handle - // Read operation. - rop operation - // Write operation. - wop operation - // I/O poller. pd pollDesc @@ -364,6 +379,8 @@ type FD struct { disassociated atomic.Bool + // readPinner and writePinner are automatically unpinned + // before execIO returns. readPinner runtime.Pinner writePinner runtime.Pinner } @@ -383,8 +400,6 @@ type FD struct { // using an external mechanism. func (fd *FD) setOffset(off int64) { fd.offset = off - fd.rop.o.OffsetHigh, fd.rop.o.Offset = uint32(off>>32), uint32(off) - fd.wop.o.OffsetHigh, fd.wop.o.Offset = uint32(off>>32), uint32(off) } // addOffset adds the given offset to the current offset. @@ -435,8 +450,6 @@ func (fd *FD) Init(net string, pollable bool) error { } fd.isFile = fd.kind != kindNet fd.isBlocking = !pollable - fd.rop.mode = 'r' - fd.wop.mode = 'w' // It is safe to add overlapped handles that also perform I/O // outside of the runtime poller. The runtime poller will ignore @@ -445,8 +458,6 @@ func (fd *FD) Init(net string, pollable bool) error { if err != nil { return err } - fd.rop.runtimeCtx = fd.pd.runtimeCtx - fd.wop.runtimeCtx = fd.pd.runtimeCtx if fd.kind != kindNet || socketCanUseSetFileCompletionNotificationModes { // Non-socket handles can use SetFileCompletionNotificationModes without problems. err := syscall.SetFileCompletionNotificationModes(fd.Sysfd, @@ -485,8 +496,6 @@ func (fd *FD) destroy() error { if fd.Sysfd == syscall.InvalidHandle { return syscall.EINVAL } - fd.rop.close() - fd.wop.close() // Poller may want to unregister fd in readiness notification mechanism, // so this must be executed before fd.CloseFunc. 
fd.pd.close() @@ -541,9 +550,8 @@ func (fd *FD) Read(buf []byte) (int, error) { defer fd.readUnlock() } - if len(buf) > 0 && !fd.isBlocking { - fd.readPinner.Pin(&buf[0]) - defer fd.readPinner.Unpin() + if len(buf) > 0 { + fd.pin('r', &buf[0]) } if len(buf) > maxRW { @@ -672,9 +680,8 @@ func (fd *FD) Pread(buf []byte, off int64) (int, error) { } defer fd.readWriteUnlock() - if len(buf) > 0 && !fd.isBlocking { - fd.readPinner.Pin(&buf[0]) - defer fd.readPinner.Unpin() + if len(buf) > 0 { + fd.pin('r', &buf[0]) } if len(buf) > maxRW { @@ -724,10 +731,7 @@ func (fd *FD) ReadFrom(buf []byte) (int, syscall.Sockaddr, error) { } defer fd.readUnlock() - if !fd.isBlocking { - fd.readPinner.Pin(&buf[0]) - defer fd.readPinner.Unpin() - } + fd.pin('r', &buf[0]) rsa := wsaRsaPool.Get().(*syscall.RawSockaddrAny) defer wsaRsaPool.Put(rsa) @@ -758,10 +762,7 @@ func (fd *FD) ReadFromInet4(buf []byte, sa4 *syscall.SockaddrInet4) (int, error) } defer fd.readUnlock() - if !fd.isBlocking { - fd.readPinner.Pin(&buf[0]) - defer fd.readPinner.Unpin() - } + fd.pin('r', &buf[0]) rsa := wsaRsaPool.Get().(*syscall.RawSockaddrAny) defer wsaRsaPool.Put(rsa) @@ -792,10 +793,7 @@ func (fd *FD) ReadFromInet6(buf []byte, sa6 *syscall.SockaddrInet6) (int, error) } defer fd.readUnlock() - if !fd.isBlocking { - fd.readPinner.Pin(&buf[0]) - defer fd.readPinner.Unpin() - } + fd.pin('r', &buf[0]) rsa := wsaRsaPool.Get().(*syscall.RawSockaddrAny) defer wsaRsaPool.Put(rsa) @@ -827,11 +825,9 @@ func (fd *FD) Write(buf []byte) (int, error) { defer fd.writeUnlock() } - if len(buf) > 0 && !fd.isBlocking { - fd.writePinner.Pin(&buf[0]) - defer fd.writePinner.Unpin() + if len(buf) > 0 { + fd.pin('w', &buf[0]) } - var ntotal int for { max := len(buf) @@ -924,9 +920,8 @@ func (fd *FD) Pwrite(buf []byte, off int64) (int, error) { } defer fd.readWriteUnlock() - if len(buf) > 0 && !fd.isBlocking { - fd.writePinner.Pin(&buf[0]) - defer fd.writePinner.Unpin() + if len(buf) > 0 { + fd.pin('w', &buf[0]) } if fd.isBlocking { @@ -1008,10 +1003,7 @@ func (fd *FD) WriteTo(buf []byte, sa syscall.Sockaddr) (int, error) { return n, err } - if !fd.isBlocking { - fd.writePinner.Pin(&buf[0]) - defer fd.writePinner.Unpin() - } + fd.pin('w', &buf[0]) ntotal := 0 for len(buf) > 0 { @@ -1048,10 +1040,7 @@ func (fd *FD) WriteToInet4(buf []byte, sa4 *syscall.SockaddrInet4) (int, error) return n, err } - if !fd.isBlocking { - fd.writePinner.Pin(&buf[0]) - defer fd.writePinner.Unpin() - } + fd.pin('w', &buf[0]) ntotal := 0 for len(buf) > 0 { @@ -1088,10 +1077,7 @@ func (fd *FD) WriteToInet6(buf []byte, sa6 *syscall.SockaddrInet6) (int, error) return n, err } - if !fd.isBlocking { - fd.writePinner.Pin(&buf[0]) - defer fd.writePinner.Unpin() - } + fd.pin('w', &buf[0]) ntotal := 0 for len(buf) > 0 { From 742f92063e5acc3671a0bd9982e7678d864008f0 Mon Sep 17 00:00:00 2001 From: Cherry Mui Date: Mon, 29 Sep 2025 16:14:24 -0400 Subject: [PATCH 016/152] cmd/compile, runtime: always enable Wasm signext and satconv features These features have been standardized since at least Wasm 2.0. Always enable them. The corresponding GOWASM settings are now no-op. 
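As a concrete illustration (our example, not part of the change), the affected code is ordinary widening of signed integers; with the rewrite rules below the compiler always emits the dedicated Wasm sign-extension instructions (i64.extend8_s, i64.extend16_s, i64.extend32_s) for such conversions instead of a shift-left/arithmetic-shift-right pair:

package signext

// widen32 goes through the SignExt32to64 rule on wasm and now lowers to a
// single i64.extend32_s.
func widen32(x int32) int64 { return int64(x) }

// widen8 likewise lowers to i64.extend8_s rather than shifting by 56 twice.
func widen8(x int8) int64 { return int64(x) }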
Change-Id: I0e59f21696a69a4e289127988aad629a720b002b Reviewed-on: https://go-review.googlesource.com/c/go/+/707855 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/cmd/compile/internal/ssa/_gen/Wasm.rules | 9 +- src/cmd/compile/internal/ssa/rewriteWasm.go | 109 ------------------- src/cmd/compile/internal/wasm/ssa.go | 21 +--- src/internal/buildcfg/cfg.go | 25 ++--- src/runtime/conv_wasm_test.go | 45 ++++---- src/runtime/sys_wasm.s | 58 ---------- 6 files changed, 38 insertions(+), 229 deletions(-) diff --git a/src/cmd/compile/internal/ssa/_gen/Wasm.rules b/src/cmd/compile/internal/ssa/_gen/Wasm.rules index f3bd8d8b4f18f1..f632a01109f764 100644 --- a/src/cmd/compile/internal/ssa/_gen/Wasm.rules +++ b/src/cmd/compile/internal/ssa/_gen/Wasm.rules @@ -55,12 +55,9 @@ (ZeroExt32to64 x:(I64Load32U _ _)) => x (ZeroExt16to(64|32) x:(I64Load16U _ _)) => x (ZeroExt8to(64|32|16) x:(I64Load8U _ _)) => x -(SignExt32to64 x) && buildcfg.GOWASM.SignExt => (I64Extend32S x) -(SignExt8to(64|32|16) x) && buildcfg.GOWASM.SignExt => (I64Extend8S x) -(SignExt16to(64|32) x) && buildcfg.GOWASM.SignExt => (I64Extend16S x) -(SignExt32to64 x) => (I64ShrS (I64Shl x (I64Const [32])) (I64Const [32])) -(SignExt16to(64|32) x) => (I64ShrS (I64Shl x (I64Const [48])) (I64Const [48])) -(SignExt8to(64|32|16) x) => (I64ShrS (I64Shl x (I64Const [56])) (I64Const [56])) +(SignExt32to64 x) => (I64Extend32S x) +(SignExt8to(64|32|16) x) => (I64Extend8S x) +(SignExt16to(64|32) x) => (I64Extend16S x) (ZeroExt32to64 x) => (I64And x (I64Const [0xffffffff])) (ZeroExt16to(64|32) x) => (I64And x (I64Const [0xffff])) (ZeroExt8to(64|32|16) x) => (I64And x (I64Const [0xff])) diff --git a/src/cmd/compile/internal/ssa/rewriteWasm.go b/src/cmd/compile/internal/ssa/rewriteWasm.go index c3c5528aaa6ab4..a164a6eee555b9 100644 --- a/src/cmd/compile/internal/ssa/rewriteWasm.go +++ b/src/cmd/compile/internal/ssa/rewriteWasm.go @@ -2,7 +2,6 @@ package ssa -import "internal/buildcfg" import "math" import "cmd/compile/internal/types" @@ -3202,8 +3201,6 @@ func rewriteValueWasm_OpRsh8x8(v *Value) bool { } func rewriteValueWasm_OpSignExt16to32(v *Value) bool { v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types // match: (SignExt16to32 x:(I64Load16S _ _)) // result: x for { @@ -3215,34 +3212,16 @@ func rewriteValueWasm_OpSignExt16to32(v *Value) bool { return true } // match: (SignExt16to32 x) - // cond: buildcfg.GOWASM.SignExt // result: (I64Extend16S x) for { x := v_0 - if !(buildcfg.GOWASM.SignExt) { - break - } v.reset(OpWasmI64Extend16S) v.AddArg(x) return true } - // match: (SignExt16to32 x) - // result: (I64ShrS (I64Shl x (I64Const [48])) (I64Const [48])) - for { - x := v_0 - v.reset(OpWasmI64ShrS) - v0 := b.NewValue0(v.Pos, OpWasmI64Shl, typ.Int64) - v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) - v1.AuxInt = int64ToAuxInt(48) - v0.AddArg2(x, v1) - v.AddArg2(v0, v1) - return true - } } func rewriteValueWasm_OpSignExt16to64(v *Value) bool { v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types // match: (SignExt16to64 x:(I64Load16S _ _)) // result: x for { @@ -3254,34 +3233,16 @@ func rewriteValueWasm_OpSignExt16to64(v *Value) bool { return true } // match: (SignExt16to64 x) - // cond: buildcfg.GOWASM.SignExt // result: (I64Extend16S x) for { x := v_0 - if !(buildcfg.GOWASM.SignExt) { - break - } v.reset(OpWasmI64Extend16S) v.AddArg(x) return true } - // match: (SignExt16to64 x) - // result: (I64ShrS (I64Shl x (I64Const [48])) (I64Const [48])) - for { - x := v_0 - v.reset(OpWasmI64ShrS) - v0 := b.NewValue0(v.Pos, 
OpWasmI64Shl, typ.Int64) - v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) - v1.AuxInt = int64ToAuxInt(48) - v0.AddArg2(x, v1) - v.AddArg2(v0, v1) - return true - } } func rewriteValueWasm_OpSignExt32to64(v *Value) bool { v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types // match: (SignExt32to64 x:(I64Load32S _ _)) // result: x for { @@ -3293,34 +3254,16 @@ func rewriteValueWasm_OpSignExt32to64(v *Value) bool { return true } // match: (SignExt32to64 x) - // cond: buildcfg.GOWASM.SignExt // result: (I64Extend32S x) for { x := v_0 - if !(buildcfg.GOWASM.SignExt) { - break - } v.reset(OpWasmI64Extend32S) v.AddArg(x) return true } - // match: (SignExt32to64 x) - // result: (I64ShrS (I64Shl x (I64Const [32])) (I64Const [32])) - for { - x := v_0 - v.reset(OpWasmI64ShrS) - v0 := b.NewValue0(v.Pos, OpWasmI64Shl, typ.Int64) - v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) - v1.AuxInt = int64ToAuxInt(32) - v0.AddArg2(x, v1) - v.AddArg2(v0, v1) - return true - } } func rewriteValueWasm_OpSignExt8to16(v *Value) bool { v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types // match: (SignExt8to16 x:(I64Load8S _ _)) // result: x for { @@ -3332,34 +3275,16 @@ func rewriteValueWasm_OpSignExt8to16(v *Value) bool { return true } // match: (SignExt8to16 x) - // cond: buildcfg.GOWASM.SignExt // result: (I64Extend8S x) for { x := v_0 - if !(buildcfg.GOWASM.SignExt) { - break - } v.reset(OpWasmI64Extend8S) v.AddArg(x) return true } - // match: (SignExt8to16 x) - // result: (I64ShrS (I64Shl x (I64Const [56])) (I64Const [56])) - for { - x := v_0 - v.reset(OpWasmI64ShrS) - v0 := b.NewValue0(v.Pos, OpWasmI64Shl, typ.Int64) - v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) - v1.AuxInt = int64ToAuxInt(56) - v0.AddArg2(x, v1) - v.AddArg2(v0, v1) - return true - } } func rewriteValueWasm_OpSignExt8to32(v *Value) bool { v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types // match: (SignExt8to32 x:(I64Load8S _ _)) // result: x for { @@ -3371,34 +3296,16 @@ func rewriteValueWasm_OpSignExt8to32(v *Value) bool { return true } // match: (SignExt8to32 x) - // cond: buildcfg.GOWASM.SignExt // result: (I64Extend8S x) for { x := v_0 - if !(buildcfg.GOWASM.SignExt) { - break - } v.reset(OpWasmI64Extend8S) v.AddArg(x) return true } - // match: (SignExt8to32 x) - // result: (I64ShrS (I64Shl x (I64Const [56])) (I64Const [56])) - for { - x := v_0 - v.reset(OpWasmI64ShrS) - v0 := b.NewValue0(v.Pos, OpWasmI64Shl, typ.Int64) - v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) - v1.AuxInt = int64ToAuxInt(56) - v0.AddArg2(x, v1) - v.AddArg2(v0, v1) - return true - } } func rewriteValueWasm_OpSignExt8to64(v *Value) bool { v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types // match: (SignExt8to64 x:(I64Load8S _ _)) // result: x for { @@ -3410,29 +3317,13 @@ func rewriteValueWasm_OpSignExt8to64(v *Value) bool { return true } // match: (SignExt8to64 x) - // cond: buildcfg.GOWASM.SignExt // result: (I64Extend8S x) for { x := v_0 - if !(buildcfg.GOWASM.SignExt) { - break - } v.reset(OpWasmI64Extend8S) v.AddArg(x) return true } - // match: (SignExt8to64 x) - // result: (I64ShrS (I64Shl x (I64Const [56])) (I64Const [56])) - for { - x := v_0 - v.reset(OpWasmI64ShrS) - v0 := b.NewValue0(v.Pos, OpWasmI64Shl, typ.Int64) - v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) - v1.AuxInt = int64ToAuxInt(56) - v0.AddArg2(x, v1) - v.AddArg2(v0, v1) - return true - } } func rewriteValueWasm_OpSlicemask(v *Value) bool { v_0 := v.Args[0] diff --git a/src/cmd/compile/internal/wasm/ssa.go 
b/src/cmd/compile/internal/wasm/ssa.go index daee82f1fd7366..1e3b318e8c9fe0 100644 --- a/src/cmd/compile/internal/wasm/ssa.go +++ b/src/cmd/compile/internal/wasm/ssa.go @@ -14,7 +14,6 @@ import ( "cmd/compile/internal/types" "cmd/internal/obj" "cmd/internal/obj/wasm" - "internal/buildcfg" ) /* @@ -425,27 +424,11 @@ func ssaGenValueOnStack(s *ssagen.State, v *ssa.Value, extend bool) { case ssa.OpWasmI64TruncSatF32S, ssa.OpWasmI64TruncSatF64S: getValue64(s, v.Args[0]) - if buildcfg.GOWASM.SatConv { - s.Prog(v.Op.Asm()) - } else { - if v.Op == ssa.OpWasmI64TruncSatF32S { - s.Prog(wasm.AF64PromoteF32) - } - p := s.Prog(wasm.ACall) - p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: ir.Syms.WasmTruncS} - } + s.Prog(v.Op.Asm()) case ssa.OpWasmI64TruncSatF32U, ssa.OpWasmI64TruncSatF64U: getValue64(s, v.Args[0]) - if buildcfg.GOWASM.SatConv { - s.Prog(v.Op.Asm()) - } else { - if v.Op == ssa.OpWasmI64TruncSatF32U { - s.Prog(wasm.AF64PromoteF32) - } - p := s.Prog(wasm.ACall) - p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: ir.Syms.WasmTruncU} - } + s.Prog(v.Op.Asm()) case ssa.OpWasmF32DemoteF64: getValue64(s, v.Args[0]) diff --git a/src/internal/buildcfg/cfg.go b/src/internal/buildcfg/cfg.go index 9ab29568d22704..a75960b8e6c034 100644 --- a/src/internal/buildcfg/cfg.go +++ b/src/internal/buildcfg/cfg.go @@ -321,18 +321,13 @@ func goriscv64() int { } type gowasmFeatures struct { - SatConv bool - SignExt bool + // Legacy features, now always enabled + //SatConv bool + //SignExt bool } func (f gowasmFeatures) String() string { var flags []string - if f.SatConv { - flags = append(flags, "satconv") - } - if f.SignExt { - flags = append(flags, "signext") - } return strings.Join(flags, ",") } @@ -340,9 +335,9 @@ func gowasm() (f gowasmFeatures) { for opt := range strings.SplitSeq(envOr("GOWASM", ""), ",") { switch opt { case "satconv": - f.SatConv = true + // ignore, always enabled case "signext": - f.SignExt = true + // ignore, always enabled case "": // ignore default: @@ -452,12 +447,10 @@ func gogoarchTags() []string { return list case "wasm": var list []string - if GOWASM.SatConv { - list = append(list, GOARCH+".satconv") - } - if GOWASM.SignExt { - list = append(list, GOARCH+".signext") - } + // SatConv is always enabled + list = append(list, GOARCH+".satconv") + // SignExt is always enabled + list = append(list, GOARCH+".signext") return list } return nil diff --git a/src/runtime/conv_wasm_test.go b/src/runtime/conv_wasm_test.go index 5054fca04dc40a..3979a7b618028b 100644 --- a/src/runtime/conv_wasm_test.go +++ b/src/runtime/conv_wasm_test.go @@ -11,6 +11,8 @@ import ( var res int64 var ures uint64 +// TODO: This test probably should be in a different place. + func TestFloatTruncation(t *testing.T) { testdata := []struct { input float64 @@ -21,36 +23,37 @@ func TestFloatTruncation(t *testing.T) { // max +- 1 { input: 0x7fffffffffffffff, - convInt64: -0x8000000000000000, + convInt64: 0x7fffffffffffffff, convUInt64: 0x8000000000000000, }, // For out-of-bounds conversion, the result is implementation-dependent. - // This test verifies the implementation of wasm architecture. + // This test verifies the implementation of wasm architecture, which is, + // saturating to the min/max value. 
{ input: 0x8000000000000000, - convInt64: -0x8000000000000000, + convInt64: 0x7fffffffffffffff, convUInt64: 0x8000000000000000, }, { input: 0x7ffffffffffffffe, - convInt64: -0x8000000000000000, + convInt64: 0x7fffffffffffffff, convUInt64: 0x8000000000000000, }, // neg max +- 1 { input: -0x8000000000000000, convInt64: -0x8000000000000000, - convUInt64: 0x8000000000000000, + convUInt64: 0, }, { input: -0x8000000000000001, convInt64: -0x8000000000000000, - convUInt64: 0x8000000000000000, + convUInt64: 0, }, { input: -0x7fffffffffffffff, convInt64: -0x8000000000000000, - convUInt64: 0x8000000000000000, + convUInt64: 0, }, // trunc point +- 1 { @@ -60,7 +63,7 @@ func TestFloatTruncation(t *testing.T) { }, { input: 0x7ffffffffffffe00, - convInt64: -0x8000000000000000, + convInt64: 0x7fffffffffffffff, convUInt64: 0x8000000000000000, }, { @@ -72,48 +75,48 @@ func TestFloatTruncation(t *testing.T) { { input: -0x7ffffffffffffdff, convInt64: -0x7ffffffffffffc00, - convUInt64: 0x8000000000000000, + convUInt64: 0, }, { input: -0x7ffffffffffffe00, convInt64: -0x8000000000000000, - convUInt64: 0x8000000000000000, + convUInt64: 0, }, { input: -0x7ffffffffffffdfe, convInt64: -0x7ffffffffffffc00, - convUInt64: 0x8000000000000000, + convUInt64: 0, }, // umax +- 1 { input: 0xffffffffffffffff, - convInt64: -0x8000000000000000, - convUInt64: 0x8000000000000000, + convInt64: 0x7fffffffffffffff, + convUInt64: 0xffffffffffffffff, }, { input: 0x10000000000000000, - convInt64: -0x8000000000000000, - convUInt64: 0x8000000000000000, + convInt64: 0x7fffffffffffffff, + convUInt64: 0xffffffffffffffff, }, { input: 0xfffffffffffffffe, - convInt64: -0x8000000000000000, - convUInt64: 0x8000000000000000, + convInt64: 0x7fffffffffffffff, + convUInt64: 0xffffffffffffffff, }, // umax trunc +- 1 { input: 0xfffffffffffffbff, - convInt64: -0x8000000000000000, + convInt64: 0x7fffffffffffffff, convUInt64: 0xfffffffffffff800, }, { input: 0xfffffffffffffc00, - convInt64: -0x8000000000000000, - convUInt64: 0x8000000000000000, + convInt64: 0x7fffffffffffffff, + convUInt64: 0xffffffffffffffff, }, { input: 0xfffffffffffffbfe, - convInt64: -0x8000000000000000, + convInt64: 0x7fffffffffffffff, convUInt64: 0xfffffffffffff800, }, } diff --git a/src/runtime/sys_wasm.s b/src/runtime/sys_wasm.s index b7965ec3fa4138..95c162eb857ac4 100644 --- a/src/runtime/sys_wasm.s +++ b/src/runtime/sys_wasm.s @@ -22,64 +22,6 @@ TEXT runtime·wasmDiv(SB), NOSPLIT, $0-0 I64DivS Return -TEXT runtime·wasmTruncS(SB), NOSPLIT, $0-0 - Get R0 - Get R0 - F64Ne // NaN - If - I64Const $0x8000000000000000 - Return - End - - Get R0 - F64Const $0x7ffffffffffffc00p0 // Maximum truncated representation of 0x7fffffffffffffff - F64Gt - If - I64Const $0x8000000000000000 - Return - End - - Get R0 - F64Const $-0x7ffffffffffffc00p0 // Minimum truncated representation of -0x8000000000000000 - F64Lt - If - I64Const $0x8000000000000000 - Return - End - - Get R0 - I64TruncF64S - Return - -TEXT runtime·wasmTruncU(SB), NOSPLIT, $0-0 - Get R0 - Get R0 - F64Ne // NaN - If - I64Const $0x8000000000000000 - Return - End - - Get R0 - F64Const $0xfffffffffffff800p0 // Maximum truncated representation of 0xffffffffffffffff - F64Gt - If - I64Const $0x8000000000000000 - Return - End - - Get R0 - F64Const $0. 
- F64Lt - If - I64Const $0x8000000000000000 - Return - End - - Get R0 - I64TruncF64U - Return - TEXT runtime·exitThread(SB), NOSPLIT, $0-0 UNDEF From 6e95748335bdd074c5b48fe9c3af4ced18c388dd Mon Sep 17 00:00:00 2001 From: qmuntal Date: Fri, 12 Sep 2025 16:23:40 +0200 Subject: [PATCH 017/152] cmd/link/internal/arm64: support Mach-O ARM64_RELOC_POINTER_TO_GOT in internal linking ARM64_RELOC_POINTER_TO_GOT is the arm64 version of X86_64_RELOC_GOT, which has been support for many years now. The standard library still doesn't need it, but I've found it necessary when statically linking against a library I own. Change-Id: I8eb7bf3c74aed663a1fc00b5dd986057130f7f7a Reviewed-on: https://go-review.googlesource.com/c/go/+/703315 LUCI-TryBot-Result: Go LUCI Reviewed-by: Carlos Amedee Reviewed-by: Cherry Mui --- src/cmd/link/internal/arm64/asm.go | 11 +++++++++++ src/cmd/link/internal/ld/macho.go | 1 + 2 files changed, 12 insertions(+) diff --git a/src/cmd/link/internal/arm64/asm.go b/src/cmd/link/internal/arm64/asm.go index 68474b4484f1af..b2572fd1040b0b 100644 --- a/src/cmd/link/internal/arm64/asm.go +++ b/src/cmd/link/internal/arm64/asm.go @@ -277,6 +277,17 @@ func adddynrel(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, s loade su.SetRelocSym(rIdx, syms.GOT) su.SetRelocAdd(rIdx, int64(ldr.SymGot(targ))) return true + + case objabi.MachoRelocOffset + ld.MACHO_ARM64_RELOC_POINTER_TO_GOT*2 + pcrel: + if targType != sym.SDYNIMPORT { + ldr.Errorf(s, "unexpected GOT reloc for non-dynamic symbol %s", ldr.SymName(targ)) + } + ld.AddGotSym(target, ldr, syms, targ, 0) + su := ldr.MakeSymbolUpdater(s) + su.SetRelocType(rIdx, objabi.R_PCREL) + su.SetRelocSym(rIdx, syms.GOT) + su.SetRelocAdd(rIdx, r.Add()+int64(r.Siz())+int64(ldr.SymGot(targ))) + return true } // Reread the reloc to incorporate any changes in type above. diff --git a/src/cmd/link/internal/ld/macho.go b/src/cmd/link/internal/ld/macho.go index 431dad9d6bcbaa..6920f42015b344 100644 --- a/src/cmd/link/internal/ld/macho.go +++ b/src/cmd/link/internal/ld/macho.go @@ -111,6 +111,7 @@ const ( MACHO_ARM64_RELOC_PAGEOFF12 = 4 MACHO_ARM64_RELOC_GOT_LOAD_PAGE21 = 5 MACHO_ARM64_RELOC_GOT_LOAD_PAGEOFF12 = 6 + MACHO_ARM64_RELOC_POINTER_TO_GOT = 7 MACHO_ARM64_RELOC_ADDEND = 10 MACHO_GENERIC_RELOC_VANILLA = 0 MACHO_FAKE_GOTPCREL = 100 From 7c8166d02d36a5dfcdbe3dd1b148412cceacf9f2 Mon Sep 17 00:00:00 2001 From: qmuntal Date: Fri, 12 Sep 2025 16:31:01 +0200 Subject: [PATCH 018/152] cmd/link/internal/arm64: support Mach-O ARM64_RELOC_SUBTRACTOR in internal linking ARM64_RELOC_SUBTRACTOR is the arm64 version of X86_64_RELOC_SUBTRACTOR, which has been recently implemented in CL 660715. The standard library still doesn't need it, but I've found it necessary when statically linking against a library I own. 
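As a rough, hypothetical illustration (not code from this CL): the SUBTRACTOR/UNSIGNED pair stores the difference between two symbol addresses, plus the addend carried on the UNSIGNED relocation, so the resolution arithmetic amounts to:

	package main

	import "fmt"

	// resolveSubtractorPair shows the arithmetic only; the real handler in
	// adddynrel instead folds the pair into a single R_PCREL relocation.
	// The addresses below are made up for the example.
	func resolveSubtractorPair(unsignedTarget, subtractorTarget uint64, addend int64) int64 {
		return int64(unsignedTarget) - int64(subtractorTarget) + addend
	}

	func main() {
		fmt.Println(resolveSubtractorPair(0x100001000, 0x100000f00, 8)) // 264
	}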
Change-Id: I138281b12f2304e3673f7dc92f7137e48bf68fdd Reviewed-on: https://go-review.googlesource.com/c/go/+/703316 Reviewed-by: Carlos Amedee LUCI-TryBot-Result: Go LUCI Reviewed-by: Cherry Mui --- src/cmd/link/internal/arm64/asm.go | 22 ++++++++++++++++++++++ src/cmd/link/internal/ld/macho.go | 1 + 2 files changed, 23 insertions(+) diff --git a/src/cmd/link/internal/arm64/asm.go b/src/cmd/link/internal/arm64/asm.go index b2572fd1040b0b..8d8ea8ac542c50 100644 --- a/src/cmd/link/internal/arm64/asm.go +++ b/src/cmd/link/internal/arm64/asm.go @@ -224,6 +224,28 @@ func adddynrel(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, s loade } return true + case objabi.MachoRelocOffset + ld.MACHO_ARM64_RELOC_SUBTRACTOR*2: + // ARM64_RELOC_SUBTRACTOR must be followed by ARM64_RELOC_UNSIGNED. + // The pair of relocations resolves to the difference between two + // symbol addresses (each relocation specifies a symbol). + outer, off := ld.FoldSubSymbolOffset(ldr, targ) + if outer != s { + // TODO: support subtracted symbol in different section. + ldr.Errorf(s, "unsupported ARM64_RELOC_SUBTRACTOR reloc: target %s, outer %s", ldr.SymName(targ), ldr.SymName(outer)) + break + } + su := ldr.MakeSymbolUpdater(s) + relocs := su.Relocs() + if rIdx+1 >= relocs.Count() || relocs.At(rIdx+1).Type() != objabi.MachoRelocOffset+ld.MACHO_ARM64_RELOC_UNSIGNED*2 || relocs.At(rIdx+1).Off() != r.Off() { + ldr.Errorf(s, "unexpected ARM64_RELOC_SUBTRACTOR reloc, must be followed by ARM64_RELOC_UNSIGNED at same offset") + break + } + su.SetRelocType(rIdx+1, objabi.R_PCREL) + su.SetRelocAdd(rIdx+1, r.Add()+int64(r.Off())+int64(r.Siz())-off) + // Remove the other relocation + su.SetRelocSiz(rIdx, 0) + return true + case objabi.MachoRelocOffset + ld.MACHO_ARM64_RELOC_BRANCH26*2 + pcrel: su := ldr.MakeSymbolUpdater(s) su.SetRelocType(rIdx, objabi.R_CALLARM64) diff --git a/src/cmd/link/internal/ld/macho.go b/src/cmd/link/internal/ld/macho.go index 6920f42015b344..c26263466616f5 100644 --- a/src/cmd/link/internal/ld/macho.go +++ b/src/cmd/link/internal/ld/macho.go @@ -106,6 +106,7 @@ const ( MACHO_ARM_RELOC_SECTDIFF = 2 MACHO_ARM_RELOC_BR24 = 5 MACHO_ARM64_RELOC_UNSIGNED = 0 + MACHO_ARM64_RELOC_SUBTRACTOR = 1 MACHO_ARM64_RELOC_BRANCH26 = 2 MACHO_ARM64_RELOC_PAGE21 = 3 MACHO_ARM64_RELOC_PAGEOFF12 = 4 From a846bb0aa523c8781248161b63bc2ab6a245cec1 Mon Sep 17 00:00:00 2001 From: Julien Cretel Date: Mon, 29 Sep 2025 16:57:53 +0000 Subject: [PATCH 019/152] errors: add AsType Fixes #51945 Change-Id: Icda169782e796578eba728938134a85b5827d3b6 GitHub-Last-Rev: c6ff335ee1ffb6b7975141795a4632a55247299d GitHub-Pull-Request: golang/go#75621 Reviewed-on: https://go-review.googlesource.com/c/go/+/707235 Reviewed-by: Carlos Amedee Reviewed-by: Damien Neil LUCI-TryBot-Result: Go LUCI Reviewed-by: Sean Liao --- api/next/51945.txt | 1 + doc/next/6-stdlib/99-minor/errors/51945.md | 2 + src/errors/errors.go | 10 +- src/errors/example_test.go | 12 ++ src/errors/wrap.go | 61 ++++++++++ src/errors/wrap_test.go | 126 +++++++++++++++++++++ 6 files changed, 207 insertions(+), 5 deletions(-) create mode 100644 api/next/51945.txt create mode 100644 doc/next/6-stdlib/99-minor/errors/51945.md diff --git a/api/next/51945.txt b/api/next/51945.txt new file mode 100644 index 00000000000000..7db1f093e5a2a5 --- /dev/null +++ b/api/next/51945.txt @@ -0,0 +1 @@ +pkg errors, func AsType[$0 error](error) ($0, bool) #51945 diff --git a/doc/next/6-stdlib/99-minor/errors/51945.md b/doc/next/6-stdlib/99-minor/errors/51945.md new file mode 100644 index 
00000000000000..44ac7222e6d990 --- /dev/null +++ b/doc/next/6-stdlib/99-minor/errors/51945.md @@ -0,0 +1,2 @@ +The new [AsType] function is a generic version of [As]. It is type-safe, faster, +and, in most cases, easier to use. diff --git a/src/errors/errors.go b/src/errors/errors.go index 5059be12ed441f..8b926cfe148c44 100644 --- a/src/errors/errors.go +++ b/src/errors/errors.go @@ -41,12 +41,12 @@ // // because the former will succeed if err wraps [io/fs.ErrExist]. // -// [As] examines the tree of its first argument looking for an error that can be -// assigned to its second argument, which must be a pointer. If it succeeds, it -// performs the assignment and returns true. Otherwise, it returns false. The form +// [AsType] examines the tree of its argument looking for an error whose +// type matches its type argument. If it succeeds, it returns the +// corresponding value of that type and true. Otherwise, it returns the +// zero value of that type and false. The form // -// var perr *fs.PathError -// if errors.As(err, &perr) { +// if perr, ok := errors.AsType[*fs.PathError](err); ok { // fmt.Println(perr.Path) // } // diff --git a/src/errors/example_test.go b/src/errors/example_test.go index 278df8c7da6e5a..92ef36b1010edb 100644 --- a/src/errors/example_test.go +++ b/src/errors/example_test.go @@ -102,6 +102,18 @@ func ExampleAs() { // Failed at path: non-existing } +func ExampleAsType() { + if _, err := os.Open("non-existing"); err != nil { + if pathError, ok := errors.AsType[*fs.PathError](err); ok { + fmt.Println("Failed at path:", pathError.Path) + } else { + fmt.Println(err) + } + } + // Output: + // Failed at path: non-existing +} + func ExampleUnwrap() { err1 := errors.New("error1") err2 := fmt.Errorf("error2: [%w]", err1) diff --git a/src/errors/wrap.go b/src/errors/wrap.go index eec9591dae7b93..2ebb951f1de93d 100644 --- a/src/errors/wrap.go +++ b/src/errors/wrap.go @@ -80,6 +80,10 @@ func is(err, target error, targetComparable bool) bool { // As finds the first error in err's tree that matches target, and if one is found, sets // target to that error value and returns true. Otherwise, it returns false. // +// For most uses, prefer [AsType]. As is equivalent to [AsType] but sets its target +// argument rather than returning the matching error and doesn't require its target +// argument to implement error. +// // The tree consists of err itself, followed by the errors obtained by repeatedly // calling its Unwrap() error or Unwrap() []error method. When err wraps multiple // errors, As examines err followed by a depth-first traversal of its children. @@ -145,3 +149,60 @@ func as(err error, target any, targetVal reflectlite.Value, targetType reflectli } var errorType = reflectlite.TypeOf((*error)(nil)).Elem() + +// AsType finds the first error in err's tree that matches the type E, and +// if one is found, returns that error value and true. Otherwise, it +// returns the zero value of E and false. +// +// The tree consists of err itself, followed by the errors obtained by +// repeatedly calling its Unwrap() error or Unwrap() []error method. When +// err wraps multiple errors, AsType examines err followed by a +// depth-first traversal of its children. +// +// An error err matches the type E if the type assertion err.(E) holds, +// or if the error has a method As(any) bool such that err.As(target) +// returns true when target is a non-nil *E. In the latter case, the As +// method is responsible for setting target. 
+func AsType[E error](err error) (E, bool) { + if err == nil { + var zero E + return zero, false + } + var pe *E // lazily initialized + return asType(err, &pe) +} + +func asType[E error](err error, ppe **E) (_ E, _ bool) { + for { + if e, ok := err.(E); ok { + return e, true + } + if x, ok := err.(interface{ As(any) bool }); ok { + if *ppe == nil { + *ppe = new(E) + } + if x.As(*ppe) { + return **ppe, true + } + } + switch x := err.(type) { + case interface{ Unwrap() error }: + err = x.Unwrap() + if err == nil { + return + } + case interface{ Unwrap() []error }: + for _, err := range x.Unwrap() { + if err == nil { + continue + } + if x, ok := asType(err, ppe); ok { + return x, true + } + } + return + default: + return + } + } +} diff --git a/src/errors/wrap_test.go b/src/errors/wrap_test.go index 58ed95fd9a0fc7..81c795a6bb8b18 100644 --- a/src/errors/wrap_test.go +++ b/src/errors/wrap_test.go @@ -239,6 +239,123 @@ func TestAsValidation(t *testing.T) { } } +func TestAsType(t *testing.T) { + var errT errorT + var errP *fs.PathError + type timeout interface { + Timeout() bool + error + } + _, errF := os.Open("non-existing") + poserErr := &poser{"oh no", nil} + + testAsType(t, + nil, + errP, + false, + ) + testAsType(t, + wrapped{"pitied the fool", errorT{"T"}}, + errorT{"T"}, + true, + ) + testAsType(t, + errF, + errF, + true, + ) + testAsType(t, + errT, + errP, + false, + ) + testAsType(t, + wrapped{"wrapped", nil}, + errT, + false, + ) + testAsType(t, + &poser{"error", nil}, + errorT{"poser"}, + true, + ) + testAsType(t, + &poser{"path", nil}, + poserPathErr, + true, + ) + testAsType(t, + poserErr, + poserErr, + true, + ) + testAsType(t, + errors.New("err"), + timeout(nil), + false, + ) + testAsType(t, + errF, + errF.(timeout), + true) + testAsType(t, + wrapped{"path error", errF}, + errF.(timeout), + true, + ) + testAsType(t, + multiErr{}, + errT, + false, + ) + testAsType(t, + multiErr{errors.New("a"), errorT{"T"}}, + errorT{"T"}, + true, + ) + testAsType(t, + multiErr{errorT{"T"}, errors.New("a")}, + errorT{"T"}, + true, + ) + testAsType(t, + multiErr{errorT{"a"}, errorT{"b"}}, + errorT{"a"}, + true, + ) + testAsType(t, + multiErr{multiErr{errors.New("a"), errorT{"a"}}, errorT{"b"}}, + errorT{"a"}, + true, + ) + testAsType(t, + multiErr{wrapped{"path error", errF}}, + errF.(timeout), + true, + ) + testAsType(t, + multiErr{nil}, + errT, + false, + ) +} + +type compError interface { + comparable + error +} + +func testAsType[E compError](t *testing.T, err error, want E, wantOK bool) { + t.Helper() + name := fmt.Sprintf("AsType[%T](Errorf(..., %v))", want, err) + t.Run(name, func(t *testing.T) { + got, gotOK := errors.AsType[E](err) + if gotOK != wantOK || got != want { + t.Fatalf("got %v, %t; want %v, %t", got, gotOK, want, wantOK) + } + }) +} + func BenchmarkIs(b *testing.B) { err1 := errors.New("1") err2 := multiErr{multiErr{multiErr{err1, errorT{"a"}}, errorT{"b"}}} @@ -260,6 +377,15 @@ func BenchmarkAs(b *testing.B) { } } +func BenchmarkAsType(b *testing.B) { + err := multiErr{multiErr{multiErr{errors.New("a"), errorT{"a"}}, errorT{"b"}}} + for range b.N { + if _, ok := errors.AsType[errorT](err); !ok { + b.Fatal("AsType failed") + } + } +} + func TestUnwrap(t *testing.T) { err1 := errors.New("1") erra := wrapped{"wrap 2", err1} From 300d9d2714164e455abc7990d52de9de6b084df1 Mon Sep 17 00:00:00 2001 From: Steve Muir Date: Thu, 18 Sep 2025 07:54:57 -0700 Subject: [PATCH 020/152] runtime: initialise debug settings much earlier in startup process This is necessary specifically to set the 
value of `debug.decoratemappings` sufficiently early in the startup sequence that all memory ranges allocated can be named appropriately using the new Linux-specific naming API introduced in #71546. Example output (on ARM64): https://gist.github.com/9muir/3667654b9c3f52e8be92756219371672 Fixes: #75324 Change-Id: Ic0b16233f54a45adef1660c4d0df59af2f5af86a Reviewed-on: https://go-review.googlesource.com/c/go/+/703476 Auto-Submit: Michael Knyszek LUCI-TryBot-Result: Go LUCI Reviewed-by: Michael Knyszek --- src/runtime/decoratemappings_test.go | 72 ++++++++++++++++++++++++++++ src/runtime/export_test.go | 4 ++ src/runtime/proc.go | 24 ++++++++-- src/runtime/runtime1.go | 15 +++--- src/runtime/set_vma_name_linux.go | 6 ++- src/runtime/set_vma_name_stub.go | 2 + 6 files changed, 109 insertions(+), 14 deletions(-) create mode 100644 src/runtime/decoratemappings_test.go diff --git a/src/runtime/decoratemappings_test.go b/src/runtime/decoratemappings_test.go new file mode 100644 index 00000000000000..7d1121c125df08 --- /dev/null +++ b/src/runtime/decoratemappings_test.go @@ -0,0 +1,72 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime_test + +import ( + "os" + "regexp" + "runtime" + "testing" +) + +func validateMapLabels(t *testing.T, labels []string) { + // These are the specific region labels that need get added during the + // runtime phase. Hence they are the ones that need to be confirmed as + // present at the time the test reads its own region labels, which + // is sufficient to validate that the default `decoratemappings` value + // (enabled) was set early enough in the init process. + regions := map[string]bool{ + "allspans array": false, + "gc bits": false, + "heap": false, + "heap index": false, + "heap reservation": false, + "immortal metadata": false, + "page alloc": false, + "page alloc index": false, + "page summary": false, + "scavenge index": false, + } + for _, label := range labels { + if _, ok := regions[label]; !ok { + t.Logf("unexpected region label found: \"%s\"", label) + } + regions[label] = true + } + for label, found := range regions { + if !found { + t.Logf("region label missing: \"%s\"", label) + } + } +} + +func TestDecorateMappings(t *testing.T) { + if runtime.GOOS != "linux" { + t.Skip("decoratemappings is only supported on Linux") + // /proc/self/maps is also Linux-specific + } + + var labels []string + if rawMaps, err := os.ReadFile("/proc/self/maps"); err != nil { + t.Fatalf("failed to read /proc/self/maps: %v", err) + } else { + t.Logf("maps:%s\n", string(rawMaps)) + matches := regexp.MustCompile("[^[]+ \\[anon: Go: (.+)\\]\n").FindAllSubmatch(rawMaps, -1) + for _, match_pair := range matches { + // match_pair consists of the matching substring and the parenthesized group + labels = append(labels, string(match_pair[1])) + } + } + t.Logf("DebugDecorateMappings: %v", *runtime.DebugDecorateMappings) + if *runtime.DebugDecorateMappings != 0 && runtime.SetVMANameSupported() { + validateMapLabels(t, labels) + } else { + if len(labels) > 0 { + t.Errorf("unexpected mapping labels present: %v", labels) + } else { + t.Skip("mapping labels absent as expected") + } + } +} diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go index 2a701115685411..9f2fcacc30ee5c 100644 --- a/src/runtime/export_test.go +++ b/src/runtime/export_test.go @@ -1936,3 +1936,7 @@ func (t *TraceStackTable) Reset() { func TraceStack(gp *G, tab 
*TraceStackTable) { traceStack(0, gp, (*traceStackTable)(tab)) } + +var DebugDecorateMappings = &debug.decoratemappings + +func SetVMANameSupported() bool { return setVMANameSupported() } diff --git a/src/runtime/proc.go b/src/runtime/proc.go index 2c42cad6c10e8a..4b1ab1af6a5075 100644 --- a/src/runtime/proc.go +++ b/src/runtime/proc.go @@ -789,7 +789,9 @@ func cpuinit(env string) { // getGodebugEarly extracts the environment variable GODEBUG from the environment on // Unix-like operating systems and returns it. This function exists to extract GODEBUG // early before much of the runtime is initialized. -func getGodebugEarly() string { +// +// Returns nil, false if OS doesn't provide env vars early in the init sequence. +func getGodebugEarly() (string, bool) { const prefix = "GODEBUG=" var env string switch GOOS { @@ -807,12 +809,16 @@ func getGodebugEarly() string { s := unsafe.String(p, findnull(p)) if stringslite.HasPrefix(s, prefix) { - env = gostring(p)[len(prefix):] + env = gostringnocopy(p)[len(prefix):] break } } + break + + default: + return "", false } - return env + return env, true } // The bootstrap sequence is: @@ -859,12 +865,15 @@ func schedinit() { // The world starts stopped. worldStopped() + godebug, parsedGodebug := getGodebugEarly() + if parsedGodebug { + parseRuntimeDebugVars(godebug) + } ticks.init() // run as early as possible moduledataverify() stackinit() randinit() // must run before mallocinit, alginit, mcommoninit mallocinit() - godebug := getGodebugEarly() cpuinit(godebug) // must run before alginit alginit() // maps, hash, rand must not be used before this call mcommoninit(gp.m, -1) @@ -880,7 +889,12 @@ func schedinit() { goenvs() secure() checkfds() - parsedebugvars() + if !parsedGodebug { + // Some platforms, e.g., Windows, didn't make env vars available "early", + // so try again now. 
+ parseRuntimeDebugVars(gogetenv("GODEBUG")) + } + finishDebugVarsSetup() gcinit() // Allocate stack space that can be used when crashing due to bad stack diff --git a/src/runtime/runtime1.go b/src/runtime/runtime1.go index 424745d2357dc9..15b546783b5e53 100644 --- a/src/runtime/runtime1.go +++ b/src/runtime/runtime1.go @@ -402,7 +402,7 @@ var dbgvars = []*dbgVar{ {name: "updatemaxprocs", value: &debug.updatemaxprocs, def: 1}, } -func parsedebugvars() { +func parseRuntimeDebugVars(godebug string) { // defaults debug.cgocheck = 1 debug.invalidptr = 1 @@ -420,12 +420,6 @@ func parsedebugvars() { } debug.traceadvanceperiod = defaultTraceAdvancePeriod - godebug := gogetenv("GODEBUG") - - p := new(string) - *p = godebug - godebugEnv.Store(p) - // apply runtime defaults, if any for _, v := range dbgvars { if v.def != 0 { @@ -437,7 +431,6 @@ func parsedebugvars() { } } } - // apply compile-time GODEBUG settings parsegodebug(godebugDefault, nil) @@ -463,6 +456,12 @@ func parsedebugvars() { if debug.gccheckmark > 0 { debug.asyncpreemptoff = 1 } +} + +func finishDebugVarsSetup() { + p := new(string) + *p = gogetenv("GODEBUG") + godebugEnv.Store(p) setTraceback(gogetenv("GOTRACEBACK")) traceback_env = traceback_cache diff --git a/src/runtime/set_vma_name_linux.go b/src/runtime/set_vma_name_linux.go index 9b6654f33299a0..03c7739c3465c9 100644 --- a/src/runtime/set_vma_name_linux.go +++ b/src/runtime/set_vma_name_linux.go @@ -14,9 +14,13 @@ import ( var prSetVMAUnsupported atomic.Bool +func setVMANameSupported() bool { + return !prSetVMAUnsupported.Load() +} + // setVMAName calls prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, start, len, name) func setVMAName(start unsafe.Pointer, length uintptr, name string) { - if debug.decoratemappings == 0 || prSetVMAUnsupported.Load() { + if debug.decoratemappings == 0 || !setVMANameSupported() { return } diff --git a/src/runtime/set_vma_name_stub.go b/src/runtime/set_vma_name_stub.go index 38f65fd592ba34..6cb01ebf50dcef 100644 --- a/src/runtime/set_vma_name_stub.go +++ b/src/runtime/set_vma_name_stub.go @@ -10,3 +10,5 @@ import "unsafe" // setVMAName isn’t implemented func setVMAName(start unsafe.Pointer, len uintptr, name string) {} + +func setVMANameSupported() bool { return false } From 97da068774d5aa9147e63eb146350145c73bfc3d Mon Sep 17 00:00:00 2001 From: Jake Bailey Date: Sun, 7 Sep 2025 22:21:15 -0700 Subject: [PATCH 021/152] cmd/compile: eliminate nil checks on .dict arg The first arg of a generic function is the dictionary. This dictionary is never nil, but it gets a nil check becuase the dict arg is treated as a slice during construction. cmp.Compare[go.shape.int] was: 00006 (+41) TESTB AX, (AX) 00007 (+52) CMPQ CX, BX 00008 (52) JGT 14 00009 (+55) JGE 12 00010 (+56) MOVL $1, AX 00011 (56) RET 00012 (+58) XORL AX, AX 00013 (58) RET 00014 (+53) MOVQ $-1, AX 00015 (53) RET Note how the function begins with a TESTB that loads the dict to perform the nil check. This CL eliminates that nil check. For most generic functions, this doesn't matter too much, but not infrequently are generic functions written which never actually use the dictionary (like cmp.Compare), so I suspect this might help in hot code to avoid repeatedly touching the dictionary in memory, and in cases where the generic function is not inlined (and thus the dict dropped). 
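To make that concrete, a small sketch (mine, not part of the CL) of a generic function whose shape instantiation never reads its dictionary, so the leading TESTB nil check was pure overhead:

	package main

	import (
		"cmp"
		"fmt"
	)

	// less compiles to a shape instantiation (e.g. less[go.shape.int]) whose
	// implicit .dict parameter is never used: comparing two ordered shape
	// values needs no runtime type information.
	func less[T cmp.Ordered](x, y T) bool { return x < y }

	func main() {
		fmt.Println(less(1, 2)) // true
	}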
compilecmp shows these changes (deduped): cmp.Compare[go.shape.float64] 73 -> 72 (-1.37%) cmp.Compare[go.shape.int] 26 -> 24 (-7.69%) cmp.Compare[go.shape.int32] 25 -> 23 (-8.00%) cmp.Compare[go.shape.int64] 26 -> 24 (-7.69%) cmp.Compare[go.shape.string] 142 -> 141 (-0.70%) cmp.Compare[go.shape.uint16] 26 -> 24 (-7.69%) cmp.Compare[go.shape.uint] 26 -> 24 (-7.69%) cmp.Compare[go.shape.uint32] 25 -> 23 (-8.00%) cmp.Compare[go.shape.uint64] 26 -> 24 (-7.69%) cmp.Compare[go.shape.uint8] 25 -> 23 (-8.00%) cmp.Compare[go.shape.uintptr] 26 -> 24 (-7.69%) cmp.Less[go.shape.float64] 35 -> 34 (-2.86%) cmp.Less[go.shape.int32] 8 -> 6 (-25.00%) cmp.Less[go.shape.int64] 9 -> 7 (-22.22%) cmp.Less[go.shape.int] 9 -> 7 (-22.22%) cmp.Less[go.shape.string] 112 -> 110 (-1.79%) cmp.Less[go.shape.uint16] 9 -> 7 (-22.22%) cmp.Less[go.shape.uint32] 8 -> 6 (-25.00%) cmp.Less[go.shape.uint64] 9 -> 7 (-22.22%) internal/synctest.Associate[go.shape.struct 114 -> 113 (-0.88%) internal/trace.(*dataTable[go.shape.uint64,go.shape.string]).insert 805 -> 791 (-1.74%) internal/trace.(*dataTable[go.shape.uint64,go.shape.struct 858 -> 852 (-0.70%) main.(*gState[go.shape.int64]).stop 2111 -> 2085 (-1.23%) main.(*gState[go.shape.int64]).unblock 941 -> 923 (-1.91%) runtime.fmax[go.shape.float32] 85 -> 83 (-2.35%) runtime.fmax[go.shape.float64] 89 -> 87 (-2.25%) runtime.fmin[go.shape.float32] 85 -> 83 (-2.35%) runtime.fmin[go.shape.float64] 89 -> 87 (-2.25%) slices.BinarySearch[go.shape.[]string,go.shape.string] 346 -> 337 (-2.60%) slices.Concat[go.shape.[]uint8,go.shape.uint8] 462 -> 453 (-1.95%) slices.ContainsFunc[go.shape.[]*cmd/vendor/github.com/google/pprof/profile.Sample,go.shape.*uint8] 170 -> 169 (-0.59%) slices.ContainsFunc[go.shape.[]*debug/dwarf.StructField,go.shape.*uint8] 170 -> 169 (-0.59%) slices.ContainsFunc[go.shape.[]*go/ast.Field,go.shape.*uint8] 170 -> 169 (-0.59%) slices.ContainsFunc[go.shape.[]string,go.shape.string] 186 -> 181 (-2.69%) slices.Contains[go.shape.[]*cmd/compile/internal/syntax.BranchStmt,go.shape.*cmd/compile/internal/syntax.BranchStmt] 44 -> 42 (-4.55%) slices.Contains[go.shape.[]cmd/compile/internal/syntax.Type,go.shape.interface 223 -> 219 (-1.79%) slices.Contains[go.shape.[]crypto/tls.CurveID,go.shape.uint16] 44 -> 42 (-4.55%) slices.Contains[go.shape.[]crypto/tls.SignatureScheme,go.shape.uint16] 44 -> 42 (-4.55%) slices.Contains[go.shape.[]*go/ast.BranchStmt,go.shape.*go/ast.BranchStmt] 44 -> 42 (-4.55%) slices.Contains[go.shape.[]go/types.Type,go.shape.interface 223 -> 219 (-1.79%) slices.Contains[go.shape.[]int,go.shape.int] 44 -> 42 (-4.55%) slices.Contains[go.shape.[]string,go.shape.string] 223 -> 219 (-1.79%) slices.Contains[go.shape.[]uint16,go.shape.uint16] 44 -> 42 (-4.55%) slices.Contains[go.shape.[]uint8,go.shape.uint8] 44 -> 42 (-4.55%) slices.Insert[go.shape.[]string,go.shape.string] 1189 -> 1170 (-1.60%) slices.medianCmpFunc[go.shape.struct 1118 -> 1113 (-0.45%) slices.medianCmpFunc[go.shape.struct 1214 -> 1209 (-0.41%) slices.medianCmpFunc[go.shape.struct 889 -> 887 (-0.22%) slices.medianCmpFunc[go.shape.struct 901 -> 874 (-3.00%) slices.order2Ordered[go.shape.float64] 89 -> 87 (-2.25%) slices.order2Ordered[go.shape.uint16] 75 -> 70 (-6.67%) slices.partialInsertionSortOrdered[go.shape.string] 1115 -> 1110 (-0.45%) slices.partialInsertionSortOrdered[go.shape.uint16] 358 -> 352 (-1.68%) slices.partitionEqualOrdered[go.shape.int] 208 -> 203 (-2.40%) slices.partitionEqualOrdered[go.shape.int32] 208 -> 198 (-4.81%) slices.partitionEqualOrdered[go.shape.int64] 208 -> 203 
(-2.40%) slices.partitionEqualOrdered[go.shape.uint32] 208 -> 198 (-4.81%) slices.partitionEqualOrdered[go.shape.uint64] 208 -> 203 (-2.40%) slices.partitionOrdered[go.shape.float64] 538 -> 533 (-0.93%) slices.partitionOrdered[go.shape.int] 437 -> 427 (-2.29%) slices.partitionOrdered[go.shape.int64] 437 -> 427 (-2.29%) slices.partitionOrdered[go.shape.uint16] 447 -> 442 (-1.12%) slices.partitionOrdered[go.shape.uint64] 437 -> 427 (-2.29%) slices.rotateCmpFunc[go.shape.struct 1045 -> 1029 (-1.53%) slices.rotateCmpFunc[go.shape.struct 1205 -> 1163 (-3.49%) slices.rotateCmpFunc[go.shape.struct 1226 -> 1176 (-4.08%) slices.rotateCmpFunc[go.shape.struct 1322 -> 1272 (-3.78%) slices.rotateCmpFunc[go.shape.struct 1419 -> 1400 (-1.34%) slices.rotateCmpFunc[go.shape.*uint8] 549 -> 538 (-2.00%) slices.rotateLeft[go.shape.string] 603 -> 588 (-2.49%) slices.rotateLeft[go.shape.uint8] 255 -> 250 (-1.96%) slices.siftDownOrdered[go.shape.int] 181 -> 171 (-5.52%) slices.siftDownOrdered[go.shape.int32] 181 -> 171 (-5.52%) slices.siftDownOrdered[go.shape.int64] 181 -> 171 (-5.52%) slices.siftDownOrdered[go.shape.string] 614 -> 592 (-3.58%) slices.siftDownOrdered[go.shape.uint32] 181 -> 171 (-5.52%) slices.siftDownOrdered[go.shape.uint64] 181 -> 171 (-5.52%) time.parseRFC3339[go.shape.string] 1774 -> 1758 (-0.90%) unique.(*canonMap[go.shape.struct 280 -> 276 (-1.43%) unique.clone[go.shape.struct 311 -> 293 (-5.79%) weak.Make[go.shape.6880e4598856efac32416085c0172278cf0fb9e5050ce6518bd9b7f7d1662440] 136 -> 134 (-1.47%) weak.Make[go.shape.struct 136 -> 134 (-1.47%) weak.Make[go.shape.uint8] 136 -> 134 (-1.47%) Change-Id: I43dcea5f2aa37372f773e5edc6a2ef1dee0a8db7 Reviewed-on: https://go-review.googlesource.com/c/go/+/706655 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase Reviewed-by: Keith Randall Reviewed-by: Keith Randall Auto-Submit: Keith Randall --- .../compile/internal/ssa/_gen/generic.rules | 3 ++ src/cmd/compile/internal/ssa/rewrite.go | 6 +++ .../compile/internal/ssa/rewritegeneric.go | 15 +++++++ test/codegen/generics.go | 40 +++++++++++++++++++ 4 files changed, 64 insertions(+) create mode 100644 test/codegen/generics.go diff --git a/src/cmd/compile/internal/ssa/_gen/generic.rules b/src/cmd/compile/internal/ssa/_gen/generic.rules index 58872ca85a3961..b16aa473cd5961 100644 --- a/src/cmd/compile/internal/ssa/_gen/generic.rules +++ b/src/cmd/compile/internal/ssa/_gen/generic.rules @@ -2079,6 +2079,9 @@ && warnRule(fe.Debug_checknil(), v, "removed nil check") => ptr +// .dict args are always non-nil. +(NilCheck ptr:(Arg {sym}) _) && isDictArgSym(sym) => ptr + // Nil checks of nil checks are redundant. // See comment at the end of https://go-review.googlesource.com/c/go/+/537775. 
(NilCheck ptr:(NilCheck _ _) _ ) => ptr diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go index 6d83ba565317a3..880c2223ef2229 100644 --- a/src/cmd/compile/internal/ssa/rewrite.go +++ b/src/cmd/compile/internal/ssa/rewrite.go @@ -6,9 +6,11 @@ package ssa import ( "cmd/compile/internal/base" + "cmd/compile/internal/ir" "cmd/compile/internal/logopt" "cmd/compile/internal/reflectdata" "cmd/compile/internal/rttype" + "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/internal/obj" "cmd/internal/obj/s390x" @@ -2744,3 +2746,7 @@ func panicBoundsCToAux(p PanicBoundsC) Aux { func panicBoundsCCToAux(p PanicBoundsCC) Aux { return p } + +func isDictArgSym(sym Sym) bool { + return sym.(*ir.Name).Sym().Name == typecheck.LocalDictName +} diff --git a/src/cmd/compile/internal/ssa/rewritegeneric.go b/src/cmd/compile/internal/ssa/rewritegeneric.go index 7e23194e6aba12..5e0135be3a5be5 100644 --- a/src/cmd/compile/internal/ssa/rewritegeneric.go +++ b/src/cmd/compile/internal/ssa/rewritegeneric.go @@ -21393,6 +21393,21 @@ func rewriteValuegeneric_OpNilCheck(v *Value) bool { v.copyOf(ptr) return true } + // match: (NilCheck ptr:(Arg {sym}) _) + // cond: isDictArgSym(sym) + // result: ptr + for { + ptr := v_0 + if ptr.Op != OpArg { + break + } + sym := auxToSym(ptr.Aux) + if !(isDictArgSym(sym)) { + break + } + v.copyOf(ptr) + return true + } // match: (NilCheck ptr:(NilCheck _ _) _ ) // result: ptr for { diff --git a/test/codegen/generics.go b/test/codegen/generics.go new file mode 100644 index 00000000000000..45c4ca8d6aee61 --- /dev/null +++ b/test/codegen/generics.go @@ -0,0 +1,40 @@ +// asmcheck + +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package codegen + +import "cmp" + +func isNaN[T cmp.Ordered](x T) bool { + return x != x +} + +func compare[T cmp.Ordered](x, y T) int { + // amd64:-"TESTB" + // arm64:-"MOVB" + xNaN := isNaN(x) + yNaN := isNaN(y) + if xNaN { + if yNaN { + return 0 + } + return -1 + } + if yNaN { + return +1 + } + if x < y { + return -1 + } + if x > y { + return +1 + } + return 0 +} + +func usesCompare(a, b int) int { + return compare(a, b) +} From 08afc50bea9a94e86adfc8cd852c6ae5b698cdaa Mon Sep 17 00:00:00 2001 From: Aidan Welch Date: Thu, 25 Sep 2025 21:00:45 +0000 Subject: [PATCH 022/152] mime: extend "builtinTypes" to include a more complete list of common types Implement all agreed upon types, using IANA's listed media types to decide when there is a disagreement in type. Except in the case of `.wav` where `audio/wav` is used. 
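For example, a quick sketch (mine, not part of the CL); the output shown assumes no overriding entries in the local system MIME tables, which are still consulted and may add or replace mappings:

	package main

	import (
		"fmt"
		"mime"
	)

	func main() {
		fmt.Println(mime.TypeByExtension(".flac")) // audio/flac
		fmt.Println(mime.TypeByExtension(".zip"))  // application/zip
		exts, _ := mime.ExtensionsByType("audio/ogg")
		fmt.Println(exts) // [.oga .ogg .opus]
	}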
Fixes #69530 Change-Id: Iec99a6ceb534073be83c8390f48799bec3e4cfc7 GitHub-Last-Rev: e314c5ec6d9aba753dca5f6dbb9d1741bac43227 GitHub-Pull-Request: golang/go#69533 Reviewed-on: https://go-review.googlesource.com/c/go/+/614376 Reviewed-by: Damien Neil Reviewed-by: Emmanuel Odeke Auto-Submit: Sean Liao Reviewed-by: Sean Liao LUCI-TryBot-Result: Go LUCI Reviewed-by: Carlos Amedee --- src/mime/type.go | 93 +++++++++++++++++++++++++++++++++++-------- src/mime/type_test.go | 46 ++++++++++++++++++++- 2 files changed, 121 insertions(+), 18 deletions(-) diff --git a/src/mime/type.go b/src/mime/type.go index c86ebd3442c1dc..ac7b0447da3cf9 100644 --- a/src/mime/type.go +++ b/src/mime/type.go @@ -17,7 +17,7 @@ var ( mimeTypesLower sync.Map // map[string]string; ".z" => "application/x-compress" // extensions maps from MIME type to list of lowercase file - // extensions: "image/jpeg" => [".jpg", ".jpeg"] + // extensions: "image/jpeg" => [".jfif", ".jpg", ".jpeg", ".pjp", ".pjpeg"] extensionsMu sync.Mutex // Guards stores (but not loads) on extensions. extensions sync.Map // map[string][]string; slice values are append-only. ) @@ -50,23 +50,82 @@ func setMimeTypes(lowerExt, mixExt map[string]string) { } } +// A type is listed here if both Firefox and Chrome included them in their own +// lists. In the case where they contradict they are deconflicted using IANA's +// listed media types https://www.iana.org/assignments/media-types/media-types.xhtml +// +// Chrome's MIME mappings to file extensions are defined at +// https://chromium.googlesource.com/chromium/src.git/+/refs/heads/main/net/base/mime_util.cc +// +// Firefox's MIME types can be found at +// https://github.com/mozilla-firefox/firefox/blob/main/netwerk/mime/nsMimeTypes.h +// and the mappings to file extensions at +// https://github.com/mozilla-firefox/firefox/blob/main/uriloader/exthandler/nsExternalHelperAppService.cpp var builtinTypesLower = map[string]string{ - ".avif": "image/avif", - ".css": "text/css; charset=utf-8", - ".gif": "image/gif", - ".htm": "text/html; charset=utf-8", - ".html": "text/html; charset=utf-8", - ".jpeg": "image/jpeg", - ".jpg": "image/jpeg", - ".js": "text/javascript; charset=utf-8", - ".json": "application/json", - ".mjs": "text/javascript; charset=utf-8", - ".pdf": "application/pdf", - ".png": "image/png", - ".svg": "image/svg+xml", - ".wasm": "application/wasm", - ".webp": "image/webp", - ".xml": "text/xml; charset=utf-8", + ".ai": "application/postscript", + ".apk": "application/vnd.android.package-archive", + ".apng": "image/apng", + ".avif": "image/avif", + ".bin": "application/octet-stream", + ".bmp": "image/bmp", + ".com": "application/octet-stream", + ".css": "text/css; charset=utf-8", + ".csv": "text/csv; charset=utf-8", + ".doc": "application/msword", + ".docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document", + ".ehtml": "text/html; charset=utf-8", + ".eml": "message/rfc822", + ".eps": "application/postscript", + ".exe": "application/octet-stream", + ".flac": "audio/flac", + ".gif": "image/gif", + ".gz": "application/gzip", + ".htm": "text/html; charset=utf-8", + ".html": "text/html; charset=utf-8", + ".ico": "image/vnd.microsoft.icon", + ".ics": "text/calendar; charset=utf-8", + ".jfif": "image/jpeg", + ".jpeg": "image/jpeg", + ".jpg": "image/jpeg", + ".js": "text/javascript; charset=utf-8", + ".json": "application/json", + ".m4a": "audio/mp4", + ".mjs": "text/javascript; charset=utf-8", + ".mp3": "audio/mpeg", + ".mp4": "video/mp4", + ".oga": "audio/ogg", + ".ogg": "audio/ogg", + 
".ogv": "video/ogg", + ".opus": "audio/ogg", + ".pdf": "application/pdf", + ".pjp": "image/jpeg", + ".pjpeg": "image/jpeg", + ".png": "image/png", + ".ppt": "application/vnd.ms-powerpoint", + ".pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation", + ".ps": "application/postscript", + ".rdf": "application/rdf+xml", + ".rtf": "application/rtf", + ".shtml": "text/html; charset=utf-8", + ".svg": "image/svg+xml", + ".text": "text/plain; charset=utf-8", + ".tif": "image/tiff", + ".tiff": "image/tiff", + ".txt": "text/plain; charset=utf-8", + ".vtt": "text/vtt; charset=utf-8", + ".wasm": "application/wasm", + ".wav": "audio/wav", + ".webm": "audio/webm", + ".webp": "image/webp", + ".xbl": "text/xml; charset=utf-8", + ".xbm": "image/x-xbitmap", + ".xht": "application/xhtml+xml", + ".xhtml": "application/xhtml+xml", + ".xls": "application/vnd.ms-excel", + ".xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", + ".xml": "text/xml; charset=utf-8", + ".xsl": "text/xml; charset=utf-8", + ".zip": "application/zip", } var once sync.Once // guards initMime diff --git a/src/mime/type_test.go b/src/mime/type_test.go index 6bdf37b6359d20..f4ec8c8754e69f 100644 --- a/src/mime/type_test.go +++ b/src/mime/type_test.go @@ -208,7 +208,51 @@ func TestExtensionsByType2(t *testing.T) { typ string want []string }{ - {typ: "image/jpeg", want: []string{".jpeg", ".jpg"}}, + {typ: "application/postscript", want: []string{".ai", ".eps", ".ps"}}, + {typ: "application/vnd.android.package-archive", want: []string{".apk"}}, + {typ: "image/apng", want: []string{".apng"}}, + {typ: "image/avif", want: []string{".avif"}}, + {typ: "application/octet-stream", want: []string{".bin", ".com", ".exe"}}, + {typ: "image/bmp", want: []string{".bmp"}}, + {typ: "text/css; charset=utf-8", want: []string{".css"}}, + {typ: "text/csv; charset=utf-8", want: []string{".csv"}}, + {typ: "application/msword", want: []string{".doc"}}, + {typ: "application/vnd.openxmlformats-officedocument.wordprocessingml.document", want: []string{".docx"}}, + {typ: "text/html; charset=utf-8", want: []string{".ehtml", ".htm", ".html", ".shtml"}}, + {typ: "message/rfc822", want: []string{".eml"}}, + {typ: "audio/flac", want: []string{".flac"}}, + {typ: "image/gif", want: []string{".gif"}}, + {typ: "application/gzip", want: []string{".gz"}}, + {typ: "image/vnd.microsoft.icon", want: []string{".ico"}}, + {typ: "text/calendar; charset=utf-8", want: []string{".ics"}}, + {typ: "image/jpeg", want: []string{".jfif", ".jpeg", ".jpg", ".pjp", ".pjpeg"}}, + {typ: "text/javascript; charset=utf-8", want: []string{".js", ".mjs"}}, + {typ: "application/json", want: []string{".json"}}, + {typ: "audio/mp4", want: []string{".m4a"}}, + {typ: "audio/mpeg", want: []string{".mp3"}}, + {typ: "video/mp4", want: []string{".mp4"}}, + {typ: "audio/ogg", want: []string{".oga", ".ogg", ".opus"}}, + {typ: "video/ogg", want: []string{".ogv"}}, + {typ: "application/pdf", want: []string{".pdf"}}, + {typ: "image/png", want: []string{".png"}}, + {typ: "application/vnd.ms-powerpoint", want: []string{".ppt"}}, + {typ: "application/vnd.openxmlformats-officedocument.presentationml.presentation", want: []string{".pptx"}}, + {typ: "application/rdf+xml", want: []string{".rdf"}}, + {typ: "application/rtf", want: []string{".rtf"}}, + {typ: "image/svg+xml", want: []string{".svg"}}, + {typ: "text/plain; charset=utf-8", want: []string{".text", ".txt"}}, + {typ: "image/tiff", want: []string{".tif", ".tiff"}}, + {typ: "text/vtt; charset=utf-8", want: 
[]string{".vtt"}}, + {typ: "application/wasm", want: []string{".wasm"}}, + {typ: "audio/wav", want: []string{".wav"}}, + {typ: "audio/webm", want: []string{".webm"}}, + {typ: "image/webp", want: []string{".webp"}}, + {typ: "text/xml; charset=utf-8", want: []string{".xbl", ".xml", ".xsl"}}, + {typ: "image/x-xbitmap", want: []string{".xbm"}}, + {typ: "application/xhtml+xml", want: []string{".xht", ".xhtml"}}, + {typ: "application/vnd.ms-excel", want: []string{".xls"}}, + {typ: "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", want: []string{".xlsx"}}, + {typ: "application/zip", want: []string{".zip"}}, } for _, tt := range tests { From 19cc1022ba4e9ddf172c04107fa613e6d50a7eba Mon Sep 17 00:00:00 2001 From: Julien Cretel Date: Mon, 22 Sep 2025 18:05:12 +0000 Subject: [PATCH 023/152] mime: reduce allocs incurred by ParseMediaType MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This change is mostly gardening. It simplifies ParseMediaType and its helper functions and reduces the amount of allocations they incur. Here are some benchmark results: goos: darwin goarch: amd64 pkg: mime cpu: Intel(R) Core(TM) i7-6700HQ CPU @ 2.60GHz │ old │ new │ │ sec/op │ sec/op vs base │ ParseMediaType-8 55.26µ ± 1% 54.54µ ± 1% -1.30% (p=0.000 n=20) ParseMediaTypeBogus-8 3.551µ ± 0% 3.427µ ± 0% -3.48% (p=0.000 n=20) geomean 14.01µ 13.67µ -2.39% │ old │ new │ │ B/op │ B/op vs base │ ParseMediaType-8 38.48Ki ± 0% 37.38Ki ± 0% -2.85% (p=0.000 n=20) ParseMediaTypeBogus-8 2.531Ki ± 0% 2.469Ki ± 0% -2.47% (p=0.000 n=20) geomean 9.869Ki 9.606Ki -2.66% │ old │ new │ │ allocs/op │ allocs/op vs base │ ParseMediaType-8 457.0 ± 0% 425.0 ± 0% -7.00% (p=0.000 n=20) ParseMediaTypeBogus-8 25.00 ± 0% 21.00 ± 0% -16.00% (p=0.000 n=20) geomean 106.9 94.47 -11.62% Change-Id: I51198b40396afa51531794a57c50aa88975eae1d GitHub-Last-Rev: c44e2a2577386d1d776498d29e31821326e20b92 GitHub-Pull-Request: golang/go#75565 Reviewed-on: https://go-review.googlesource.com/c/go/+/705715 Reviewed-by: Emmanuel Odeke Reviewed-by: Carlos Amedee LUCI-TryBot-Result: Go LUCI Reviewed-by: Sean Liao Reviewed-by: Damien Neil Auto-Submit: Emmanuel Odeke --- src/mime/mediatype.go | 67 ++++++++++++++++++++++--------------------- 1 file changed, 35 insertions(+), 32 deletions(-) diff --git a/src/mime/mediatype.go b/src/mime/mediatype.go index 66684a68b23961..c6006b614f319e 100644 --- a/src/mime/mediatype.go +++ b/src/mime/mediatype.go @@ -98,24 +98,32 @@ func FormatMediaType(t string, param map[string]string) string { func checkMediaTypeDisposition(s string) error { typ, rest := consumeToken(s) if typ == "" { - return errors.New("mime: no media type") + return errNoMediaType } if rest == "" { return nil } - if !strings.HasPrefix(rest, "/") { - return errors.New("mime: expected slash after first token") + var ok bool + if rest, ok = strings.CutPrefix(rest, "/"); !ok { + return errNoSlashAfterFirstToken } - subtype, rest := consumeToken(rest[1:]) + subtype, rest := consumeToken(rest) if subtype == "" { - return errors.New("mime: expected token after slash") + return errNoTokenAfterSlash } if rest != "" { - return errors.New("mime: unexpected content after media subtype") + return errUnexpectedContentAfterMediaSubtype } return nil } +var ( + errNoMediaType = errors.New("mime: no media type") + errNoSlashAfterFirstToken = errors.New("mime: expected slash after first token") + errNoTokenAfterSlash = errors.New("mime: expected token after slash") + errUnexpectedContentAfterMediaSubtype = errors.New("mime: 
unexpected content after media subtype") +) + // ErrInvalidMediaParameter is returned by [ParseMediaType] if // the media type value was found but there was an error parsing // the optional parameters @@ -169,7 +177,6 @@ func ParseMediaType(v string) (mediatype string, params map[string]string, err e if continuation == nil { continuation = make(map[string]map[string]string) } - var ok bool if pmap, ok = continuation[baseName]; !ok { continuation[baseName] = make(map[string]string) pmap = continuation[baseName] @@ -177,7 +184,7 @@ func ParseMediaType(v string) (mediatype string, params map[string]string, err e } if v, exists := pmap[key]; exists && v != value { // Duplicate parameter names are incorrect, but we allow them if they are equal. - return "", nil, errors.New("mime: duplicate parameter name") + return "", nil, errDuplicateParamName } pmap[key] = value v = rest @@ -227,27 +234,28 @@ func ParseMediaType(v string) (mediatype string, params map[string]string, err e return } +var errDuplicateParamName = errors.New("mime: duplicate parameter name") + func decode2231Enc(v string) (string, bool) { - sv := strings.SplitN(v, "'", 3) - if len(sv) != 3 { + charset, v, ok := strings.Cut(v, "'") + if !ok { return "", false } - // TODO: ignoring lang in sv[1] for now. If anybody needs it we'll + // TODO: ignoring the language part for now. If anybody needs it, we'll // need to decide how to expose it in the API. But I'm not sure // anybody uses it in practice. - charset := strings.ToLower(sv[0]) - if len(charset) == 0 { + _, extOtherVals, ok := strings.Cut(v, "'") + if !ok { return "", false } - if charset != "us-ascii" && charset != "utf-8" { - // TODO: unsupported encoding + charset = strings.ToLower(charset) + switch charset { + case "us-ascii", "utf-8": + default: + // Empty or unsupported encoding. return "", false } - encv, err := percentHexUnescape(sv[2]) - if err != nil { - return "", false - } - return encv, true + return percentHexUnescape(extOtherVals) } // consumeToken consumes a token from the beginning of provided @@ -309,11 +317,11 @@ func consumeValue(v string) (value, rest string) { func consumeMediaParam(v string) (param, value, rest string) { rest = strings.TrimLeftFunc(v, unicode.IsSpace) - if !strings.HasPrefix(rest, ";") { + var ok bool + if rest, ok = strings.CutPrefix(rest, ";"); !ok { return "", "", v } - rest = rest[1:] // consume semicolon rest = strings.TrimLeftFunc(rest, unicode.IsSpace) param, rest = consumeToken(rest) param = strings.ToLower(param) @@ -322,10 +330,9 @@ func consumeMediaParam(v string) (param, value, rest string) { } rest = strings.TrimLeftFunc(rest, unicode.IsSpace) - if !strings.HasPrefix(rest, "=") { + if rest, ok = strings.CutPrefix(rest, "="); !ok { return "", "", v } - rest = rest[1:] // consume equals sign rest = strings.TrimLeftFunc(rest, unicode.IsSpace) value, rest2 := consumeValue(rest) if value == "" && rest2 == rest { @@ -335,7 +342,7 @@ func consumeMediaParam(v string) (param, value, rest string) { return param, value, rest } -func percentHexUnescape(s string) (string, error) { +func percentHexUnescape(s string) (string, bool) { // Count %, check that they're well-formed. 
percents := 0 for i := 0; i < len(s); { @@ -345,16 +352,12 @@ func percentHexUnescape(s string) (string, error) { } percents++ if i+2 >= len(s) || !ishex(s[i+1]) || !ishex(s[i+2]) { - s = s[i:] - if len(s) > 3 { - s = s[0:3] - } - return "", fmt.Errorf("mime: bogus characters after %%: %q", s) + return "", false } i += 3 } if percents == 0 { - return s, nil + return s, true } t := make([]byte, len(s)-2*percents) @@ -371,7 +374,7 @@ func percentHexUnescape(s string) (string, error) { i++ } } - return string(t), nil + return string(t), true } func ishex(c byte) bool { From fcb893fc4b615774f8cdd050e17ad227998e512a Mon Sep 17 00:00:00 2001 From: Youlin Feng Date: Fri, 5 Sep 2025 22:48:48 +0800 Subject: [PATCH 024/152] cmd/compile/internal/ssa: remove redundant "type:" prefix check Remove redundant "type:" prefix check on symbol names in isFixedLoad, also refactor some duplicate code into methods. Change-Id: I8358422596eea8c39d1a30a554bd0aae8b570038 Reviewed-on: https://go-review.googlesource.com/c/go/+/701275 Reviewed-by: Keith Randall Reviewed-by: Keith Randall Auto-Submit: Keith Randall LUCI-TryBot-Result: Go LUCI Reviewed-by: Carlos Amedee --- src/cmd/compile/internal/ssa/rewrite.go | 18 ++++++------------ src/cmd/internal/obj/link.go | 20 +++++++++++++++++++- 2 files changed, 25 insertions(+), 13 deletions(-) diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go index 880c2223ef2229..47f225c7aeb7c0 100644 --- a/src/cmd/compile/internal/ssa/rewrite.go +++ b/src/cmd/compile/internal/ssa/rewrite.go @@ -2059,12 +2059,12 @@ func isFixedLoad(v *Value, sym Sym, off int64) bool { return false } - if strings.HasPrefix(lsym.Name, "type:") { + if ti := lsym.TypeInfo(); ti != nil { // Type symbols do not contain information about their fields, unlike the cases above. // Hand-implement field accesses. // TODO: can this be replaced with reflectdata.writeType and just use the code above? - t := (*lsym.Extra).(*obj.TypeInfo).Type.(*types.Type) + t := ti.Type.(*types.Type) for _, f := range rttype.Type.Fields() { if f.Offset == off && copyCompatibleType(v.Type, f.Type) { @@ -2118,12 +2118,12 @@ func rewriteFixedLoad(v *Value, sym Sym, sb *Value, off int64) *Value { base.Fatalf("fixedLoad data not known for %s:%d", sym, off) } - if strings.HasPrefix(lsym.Name, "type:") { + if ti := lsym.TypeInfo(); ti != nil { // Type symbols do not contain information about their fields, unlike the cases above. // Hand-implement field accesses. // TODO: can this be replaced with reflectdata.writeType and just use the code above? 
- t := (*lsym.Extra).(*obj.TypeInfo).Type.(*types.Type) + t := ti.Type.(*types.Type) ptrSizedOpConst := OpConst64 if f.Config.PtrSize == 4 { @@ -2613,10 +2613,7 @@ func isDirectType1(v *Value) bool { return isDirectType2(v.Args[0]) case OpAddr: lsym := v.Aux.(*obj.LSym) - if lsym.Extra == nil { - return false - } - if ti, ok := (*lsym.Extra).(*obj.TypeInfo); ok { + if ti := lsym.TypeInfo(); ti != nil { return types.IsDirectIface(ti.Type.(*types.Type)) } } @@ -2649,10 +2646,7 @@ func isDirectIface1(v *Value, depth int) bool { return isDirectIface2(v.Args[0], depth-1) case OpAddr: lsym := v.Aux.(*obj.LSym) - if lsym.Extra == nil { - return false - } - if ii, ok := (*lsym.Extra).(*obj.ItabInfo); ok { + if ii := lsym.ItabInfo(); ii != nil { return types.IsDirectIface(ii.Type.(*types.Type)) } case OpConstNil: diff --git a/src/cmd/internal/obj/link.go b/src/cmd/internal/obj/link.go index 6513e116872a0a..816fed026f35ad 100644 --- a/src/cmd/internal/obj/link.go +++ b/src/cmd/internal/obj/link.go @@ -464,7 +464,7 @@ type LSym struct { P []byte R []Reloc - Extra *interface{} // *FuncInfo, *VarInfo, *FileInfo, or *TypeInfo, if present + Extra *interface{} // *FuncInfo, *VarInfo, *FileInfo, *TypeInfo, or *ItabInfo, if present Pkg string PkgIdx int32 @@ -604,6 +604,15 @@ func (s *LSym) NewTypeInfo() *TypeInfo { return t } +// TypeInfo returns the *TypeInfo associated with s, or else nil. +func (s *LSym) TypeInfo() *TypeInfo { + if s.Extra == nil { + return nil + } + t, _ := (*s.Extra).(*TypeInfo) + return t +} + // An ItabInfo contains information for a symbol // that contains a runtime.itab. type ItabInfo struct { @@ -620,6 +629,15 @@ func (s *LSym) NewItabInfo() *ItabInfo { return t } +// ItabInfo returns the *ItabInfo associated with s, or else nil. +func (s *LSym) ItabInfo() *ItabInfo { + if s.Extra == nil { + return nil + } + i, _ := (*s.Extra).(*ItabInfo) + return i +} + // WasmImport represents a WebAssembly (WASM) imported function with // parameters and results translated into WASM types based on the Go function // declaration. From 4ff8a457dbe68388a6e2451c78c7ea615f570cda Mon Sep 17 00:00:00 2001 From: Joel Sing Date: Mon, 15 Sep 2025 20:10:05 +1000 Subject: [PATCH 025/152] test/codegen: codify handling of floating point constants on arm64 While here, reorder Float32ConstantStore/Float64ConstantStore for consistency. 
Change-Id: Ic1b3e9f9474965d15bc94518d78d1a4a7bda93f3 Reviewed-on: https://go-review.googlesource.com/c/go/+/703756 Reviewed-by: Keith Randall Auto-Submit: Keith Randall LUCI-TryBot-Result: Go LUCI Reviewed-by: Carlos Amedee Auto-Submit: Joel Sing Reviewed-by: Keith Randall --- test/codegen/floats.go | 31 +++++++++++++++++++++++++++---- 1 file changed, 27 insertions(+), 4 deletions(-) diff --git a/test/codegen/floats.go b/test/codegen/floats.go index 29969c8dc08eaa..666c983b56ac04 100644 --- a/test/codegen/floats.go +++ b/test/codegen/floats.go @@ -217,14 +217,36 @@ func Float32Max(a, b float32) float32 { // Constant Optimizations // // ------------------------ // +func Float32ConstantZero() float32 { + // arm64:"FMOVS\tZR," + return 0.0 +} + +func Float32ConstantChipFloat() float32 { + // arm64:"FMOVS\t[$]\\(2\\.25\\)," + return 2.25 +} + func Float32Constant() float32 { + // arm64:"FMOVS\t[$]f32\\.42440000\\(SB\\)" // ppc64x/power8:"FMOVS\t[$]f32\\.42440000\\(SB\\)" // ppc64x/power9:"FMOVS\t[$]f32\\.42440000\\(SB\\)" // ppc64x/power10:"XXSPLTIDP\t[$]1111752704," return 49.0 } +func Float64ConstantZero() float64 { + // arm64:"FMOVD\tZR," + return 0.0 +} + +func Float64ConstantChipFloat() float64 { + // arm64:"FMOVD\t[$]\\(2\\.25\\)," + return 2.25 +} + func Float64Constant() float64 { + // arm64:"FMOVD\t[$]f64\\.4048800000000000\\(SB\\)" // ppc64x/power8:"FMOVD\t[$]f64\\.4048800000000000\\(SB\\)" // ppc64x/power9:"FMOVD\t[$]f64\\.4048800000000000\\(SB\\)" // ppc64x/power10:"XXSPLTIDP\t[$]1111752704," @@ -244,11 +266,12 @@ func Float64DenormalFloat32Constant() float64 { return 0x1p-127 } -func Float64ConstantStore(p *float64) { - // amd64: "MOVQ\t[$]4617801906721357038" +func Float32ConstantStore(p *float32) { + // amd64:"MOVL\t[$]1085133554" *p = 5.432 } -func Float32ConstantStore(p *float32) { - // amd64: "MOVL\t[$]1085133554" + +func Float64ConstantStore(p *float64) { + // amd64:"MOVQ\t[$]4617801906721357038" *p = 5.432 } From c9257151e5600af10cdd6c6db907ed83811a54a4 Mon Sep 17 00:00:00 2001 From: qmuntal Date: Wed, 24 Sep 2025 08:58:30 +0200 Subject: [PATCH 026/152] runtime: unify ppc64/ppc64le library entry point Cq-Include-Trybots: luci.golang.try:gotip-linux-ppc64le_power10 Change-Id: Ifd7861488b1b47a5d30163552b51838f2bef7248 Reviewed-on: https://go-review.googlesource.com/c/go/+/706395 Reviewed-by: Carlos Amedee Reviewed-by: Keith Randall Reviewed-by: Keith Randall Auto-Submit: Keith Randall LUCI-TryBot-Result: Go LUCI --- src/runtime/asm_ppc64x.s | 57 ++++++++++++ src/runtime/rt0_aix_ppc64.s | 151 +------------------------------- src/runtime/rt0_linux_ppc64le.s | 49 +---------- 3 files changed, 60 insertions(+), 197 deletions(-) diff --git a/src/runtime/asm_ppc64x.s b/src/runtime/asm_ppc64x.s index fc70fa82046056..3fbf11b5e9a2ad 100644 --- a/src/runtime/asm_ppc64x.s +++ b/src/runtime/asm_ppc64x.s @@ -9,6 +9,63 @@ #include "funcdata.h" #include "textflag.h" #include "asm_ppc64x.h" +#include "cgo/abi_ppc64x.h" + + +TEXT _rt0_ppc64x_lib(SB),NOSPLIT|NOFRAME,$0 + // This is called with ELFv2 calling conventions. Convert to Go. + // Allocate argument storage for call to newosproc0. + STACK_AND_SAVE_HOST_TO_GO_ABI(16) + + MOVD R3, _rt0_ppc64x_lib_argc<>(SB) + MOVD R4, _rt0_ppc64x_lib_argv<>(SB) + + // Synchronous initialization. + MOVD $runtime·libpreinit(SB), R12 + MOVD R12, CTR + BL (CTR) + + // Create a new thread to do the runtime initialization and return. 
+ MOVD _cgo_sys_thread_create(SB), R12 + CMP $0, R12 + BEQ nocgo + MOVD $_rt0_ppc64x_lib_go(SB), R3 + MOVD $0, R4 + MOVD R12, CTR + BL (CTR) + BR done + +nocgo: + MOVD $0x800000, R12 // stacksize = 8192KB + MOVD R12, 8+FIXED_FRAME(R1) + MOVD $_rt0_ppc64x_lib_go(SB), R12 + MOVD R12, 16+FIXED_FRAME(R1) + MOVD $runtime·newosproc0(SB),R12 + MOVD R12, CTR + BL (CTR) + +done: + // Restore and return to ELFv2 caller. + UNSTACK_AND_RESTORE_GO_TO_HOST_ABI(16) + RET + +#ifdef GO_PPC64X_HAS_FUNCDESC +DEFINE_PPC64X_FUNCDESC(_rt0_ppc64x_lib_go, __rt0_ppc64x_lib_go) +TEXT __rt0_ppc64x_lib_go(SB),NOSPLIT,$0 +#else +TEXT _rt0_ppc64x_lib_go(SB),NOSPLIT,$0 +#endif + MOVD _rt0_ppc64x_lib_argc<>(SB), R3 + MOVD _rt0_ppc64x_lib_argv<>(SB), R4 + MOVD $runtime·rt0_go(SB), R12 + MOVD R12, CTR + BR (CTR) + +DATA _rt0_ppc64x_lib_argc<>(SB)/8, $0 +GLOBL _rt0_ppc64x_lib_argc<>(SB),NOPTR, $8 +DATA _rt0_ppc64x_lib_argv<>(SB)/8, $0 +GLOBL _rt0_ppc64x_lib_argv<>(SB),NOPTR, $8 + #ifdef GOOS_aix #define cgoCalleeStackSize 48 diff --git a/src/runtime/rt0_aix_ppc64.s b/src/runtime/rt0_aix_ppc64.s index 74c57bb1dc9136..32f8c72156c9ee 100644 --- a/src/runtime/rt0_aix_ppc64.s +++ b/src/runtime/rt0_aix_ppc64.s @@ -41,152 +41,5 @@ TEXT _main(SB),NOSPLIT,$-8 MOVD R12, CTR BR (CTR) -// Paramater save space required to cross-call into _cgo_sys_thread_create -#define PARAM_SPACE 16 - -TEXT _rt0_ppc64_aix_lib(SB),NOSPLIT,$-8 - // Start with standard C stack frame layout and linkage. - MOVD LR, R0 - MOVD R0, 16(R1) // Save LR in caller's frame. - MOVW CR, R0 // Save CR in caller's frame - MOVD R0, 8(R1) - - MOVDU R1, -344-PARAM_SPACE(R1) // Allocate frame. - - // Preserve callee-save registers. - MOVD R14, 48+PARAM_SPACE(R1) - MOVD R15, 56+PARAM_SPACE(R1) - MOVD R16, 64+PARAM_SPACE(R1) - MOVD R17, 72+PARAM_SPACE(R1) - MOVD R18, 80+PARAM_SPACE(R1) - MOVD R19, 88+PARAM_SPACE(R1) - MOVD R20, 96+PARAM_SPACE(R1) - MOVD R21,104+PARAM_SPACE(R1) - MOVD R22, 112+PARAM_SPACE(R1) - MOVD R23, 120+PARAM_SPACE(R1) - MOVD R24, 128+PARAM_SPACE(R1) - MOVD R25, 136+PARAM_SPACE(R1) - MOVD R26, 144+PARAM_SPACE(R1) - MOVD R27, 152+PARAM_SPACE(R1) - MOVD R28, 160+PARAM_SPACE(R1) - MOVD R29, 168+PARAM_SPACE(R1) - MOVD g, 176+PARAM_SPACE(R1) // R30 - MOVD R31, 184+PARAM_SPACE(R1) - FMOVD F14, 192+PARAM_SPACE(R1) - FMOVD F15, 200+PARAM_SPACE(R1) - FMOVD F16, 208+PARAM_SPACE(R1) - FMOVD F17, 216+PARAM_SPACE(R1) - FMOVD F18, 224+PARAM_SPACE(R1) - FMOVD F19, 232+PARAM_SPACE(R1) - FMOVD F20, 240+PARAM_SPACE(R1) - FMOVD F21, 248+PARAM_SPACE(R1) - FMOVD F22, 256+PARAM_SPACE(R1) - FMOVD F23, 264+PARAM_SPACE(R1) - FMOVD F24, 272+PARAM_SPACE(R1) - FMOVD F25, 280+PARAM_SPACE(R1) - FMOVD F26, 288+PARAM_SPACE(R1) - FMOVD F27, 296+PARAM_SPACE(R1) - FMOVD F28, 304+PARAM_SPACE(R1) - FMOVD F29, 312+PARAM_SPACE(R1) - FMOVD F30, 320+PARAM_SPACE(R1) - FMOVD F31, 328+PARAM_SPACE(R1) - - // Synchronous initialization. - MOVD $runtime·reginit(SB), R12 - MOVD R12, CTR - BL (CTR) - - MOVBZ runtime·isarchive(SB), R3 // Check buildmode = c-archive - CMP $0, R3 - BEQ done - - MOVD R14, _rt0_ppc64_aix_lib_argc<>(SB) - MOVD R15, _rt0_ppc64_aix_lib_argv<>(SB) - - MOVD $runtime·libpreinit(SB), R12 - MOVD R12, CTR - BL (CTR) - - // Create a new thread to do the runtime initialization and return. 
- MOVD _cgo_sys_thread_create(SB), R12 - CMP $0, R12 - BEQ nocgo - MOVD $_rt0_ppc64_aix_lib_go(SB), R3 - MOVD $0, R4 - MOVD R2, 40(R1) - MOVD 8(R12), R2 - MOVD (R12), R12 - MOVD R12, CTR - BL (CTR) - MOVD 40(R1), R2 - BR done - -nocgo: - MOVD $0x800000, R12 // stacksize = 8192KB - MOVD R12, 8(R1) - MOVD $_rt0_ppc64_aix_lib_go(SB), R12 - MOVD R12, 16(R1) - MOVD $runtime·newosproc0(SB),R12 - MOVD R12, CTR - BL (CTR) - -done: - // Restore saved registers. - MOVD 48+PARAM_SPACE(R1), R14 - MOVD 56+PARAM_SPACE(R1), R15 - MOVD 64+PARAM_SPACE(R1), R16 - MOVD 72+PARAM_SPACE(R1), R17 - MOVD 80+PARAM_SPACE(R1), R18 - MOVD 88+PARAM_SPACE(R1), R19 - MOVD 96+PARAM_SPACE(R1), R20 - MOVD 104+PARAM_SPACE(R1), R21 - MOVD 112+PARAM_SPACE(R1), R22 - MOVD 120+PARAM_SPACE(R1), R23 - MOVD 128+PARAM_SPACE(R1), R24 - MOVD 136+PARAM_SPACE(R1), R25 - MOVD 144+PARAM_SPACE(R1), R26 - MOVD 152+PARAM_SPACE(R1), R27 - MOVD 160+PARAM_SPACE(R1), R28 - MOVD 168+PARAM_SPACE(R1), R29 - MOVD 176+PARAM_SPACE(R1), g // R30 - MOVD 184+PARAM_SPACE(R1), R31 - FMOVD 196+PARAM_SPACE(R1), F14 - FMOVD 200+PARAM_SPACE(R1), F15 - FMOVD 208+PARAM_SPACE(R1), F16 - FMOVD 216+PARAM_SPACE(R1), F17 - FMOVD 224+PARAM_SPACE(R1), F18 - FMOVD 232+PARAM_SPACE(R1), F19 - FMOVD 240+PARAM_SPACE(R1), F20 - FMOVD 248+PARAM_SPACE(R1), F21 - FMOVD 256+PARAM_SPACE(R1), F22 - FMOVD 264+PARAM_SPACE(R1), F23 - FMOVD 272+PARAM_SPACE(R1), F24 - FMOVD 280+PARAM_SPACE(R1), F25 - FMOVD 288+PARAM_SPACE(R1), F26 - FMOVD 296+PARAM_SPACE(R1), F27 - FMOVD 304+PARAM_SPACE(R1), F28 - FMOVD 312+PARAM_SPACE(R1), F29 - FMOVD 320+PARAM_SPACE(R1), F30 - FMOVD 328+PARAM_SPACE(R1), F31 - - ADD $344+PARAM_SPACE, R1 - - MOVD 8(R1), R0 - MOVFL R0, $0xff - MOVD 16(R1), R0 - MOVD R0, LR - RET - -DEFINE_PPC64X_FUNCDESC(_rt0_ppc64_aix_lib_go, __rt0_ppc64_aix_lib_go) - -TEXT __rt0_ppc64_aix_lib_go(SB),NOSPLIT,$0 - MOVD _rt0_ppc64_aix_lib_argc<>(SB), R3 - MOVD _rt0_ppc64_aix_lib_argv<>(SB), R4 - MOVD $runtime·rt0_go(SB), R12 - MOVD R12, CTR - BR (CTR) - -DATA _rt0_ppc64_aix_lib_argc<>(SB)/8, $0 -GLOBL _rt0_ppc64_aix_lib_argc<>(SB),NOPTR, $8 -DATA _rt0_ppc64_aix_lib_argv<>(SB)/8, $0 -GLOBL _rt0_ppc64_aix_lib_argv<>(SB),NOPTR, $8 +TEXT _rt0_ppc64_aix_lib(SB),NOSPLIT,$0 + JMP _rt0_ppc64x_lib(SB) diff --git a/src/runtime/rt0_linux_ppc64le.s b/src/runtime/rt0_linux_ppc64le.s index 4b7d8e1b940a1e..3a6e8863b2da1d 100644 --- a/src/runtime/rt0_linux_ppc64le.s +++ b/src/runtime/rt0_linux_ppc64le.s @@ -5,60 +5,13 @@ #include "go_asm.h" #include "textflag.h" #include "asm_ppc64x.h" -#include "cgo/abi_ppc64x.h" TEXT _rt0_ppc64le_linux(SB),NOSPLIT,$0 XOR R0, R0 // Make sure R0 is zero before _main BR _main<>(SB) TEXT _rt0_ppc64le_linux_lib(SB),NOSPLIT|NOFRAME,$0 - // This is called with ELFv2 calling conventions. Convert to Go. - // Allocate argument storage for call to newosproc0. - STACK_AND_SAVE_HOST_TO_GO_ABI(16) - - MOVD R3, _rt0_ppc64le_linux_lib_argc<>(SB) - MOVD R4, _rt0_ppc64le_linux_lib_argv<>(SB) - - // Synchronous initialization. - MOVD $runtime·libpreinit(SB), R12 - MOVD R12, CTR - BL (CTR) - - // Create a new thread to do the runtime initialization and return. - MOVD _cgo_sys_thread_create(SB), R12 - CMP $0, R12 - BEQ nocgo - MOVD $_rt0_ppc64le_linux_lib_go(SB), R3 - MOVD $0, R4 - MOVD R12, CTR - BL (CTR) - BR done - -nocgo: - MOVD $0x800000, R12 // stacksize = 8192KB - MOVD R12, 8+FIXED_FRAME(R1) - MOVD $_rt0_ppc64le_linux_lib_go(SB), R12 - MOVD R12, 16+FIXED_FRAME(R1) - MOVD $runtime·newosproc0(SB),R12 - MOVD R12, CTR - BL (CTR) - -done: - // Restore and return to ELFv2 caller. 
- UNSTACK_AND_RESTORE_GO_TO_HOST_ABI(16) - RET - -TEXT _rt0_ppc64le_linux_lib_go(SB),NOSPLIT,$0 - MOVD _rt0_ppc64le_linux_lib_argc<>(SB), R3 - MOVD _rt0_ppc64le_linux_lib_argv<>(SB), R4 - MOVD $runtime·rt0_go(SB), R12 - MOVD R12, CTR - BR (CTR) - -DATA _rt0_ppc64le_linux_lib_argc<>(SB)/8, $0 -GLOBL _rt0_ppc64le_linux_lib_argc<>(SB),NOPTR, $8 -DATA _rt0_ppc64le_linux_lib_argv<>(SB)/8, $0 -GLOBL _rt0_ppc64le_linux_lib_argv<>(SB),NOPTR, $8 + JMP _rt0_ppc64x_lib(SB) TEXT _main<>(SB),NOSPLIT,$-8 // In a statically linked binary, the stack contains argc, From eb1c7f6e69b0e62067ff22a0656cedff792c8438 Mon Sep 17 00:00:00 2001 From: qmuntal Date: Wed, 24 Sep 2025 10:34:25 +0200 Subject: [PATCH 027/152] runtime: move loong64 library entry point to os-agnostic file The library entry point for loong64 is agnostic to the OS, so move it to asm_loong64.s. This is similar to what we do for other architectures. Cq-Include-Trybots: luci.golang.try:gotip-linux-loong64 Change-Id: I6915eb76d3ea72a779e05e78d85f24793169c61f Reviewed-on: https://go-review.googlesource.com/c/go/+/706416 Reviewed-by: abner chenc Reviewed-by: Cherry Mui Reviewed-by: Keith Randall LUCI-TryBot-Result: Go LUCI --- src/runtime/asm_loong64.s | 51 +++++++++++++++++++++++++++++++++ src/runtime/rt0_linux_loong64.s | 50 ++------------------------------ 2 files changed, 53 insertions(+), 48 deletions(-) diff --git a/src/runtime/asm_loong64.s b/src/runtime/asm_loong64.s index ee7f825e1f6681..586bf89a5d3729 100644 --- a/src/runtime/asm_loong64.s +++ b/src/runtime/asm_loong64.s @@ -6,6 +6,57 @@ #include "go_tls.h" #include "funcdata.h" #include "textflag.h" +#include "cgo/abi_loong64.h" + +// When building with -buildmode=c-shared, this symbol is called when the shared +// library is loaded. +TEXT _rt0_loong64_lib(SB),NOSPLIT,$168 + // Preserve callee-save registers. + SAVE_R22_TO_R31(3*8) + SAVE_F24_TO_F31(13*8) + + // Initialize g as nil in case of using g later e.g. sigaction in cgo_sigaction.go + MOVV R0, g + + MOVV R4, _rt0_loong64_lib_argc<>(SB) + MOVV R5, _rt0_loong64_lib_argv<>(SB) + + // Synchronous initialization. + MOVV $runtime·libpreinit(SB), R19 + JAL (R19) + + // Create a new thread to do the runtime initialization and return. + MOVV _cgo_sys_thread_create(SB), R19 + BEQ R19, nocgo + MOVV $_rt0_loong64_lib_go(SB), R4 + MOVV $0, R5 + JAL (R19) + JMP restore + +nocgo: + MOVV $0x800000, R4 // stacksize = 8192KB + MOVV $_rt0_loong64_lib_go(SB), R5 + MOVV R4, 8(R3) + MOVV R5, 16(R3) + MOVV $runtime·newosproc0(SB), R19 + JAL (R19) + +restore: + // Restore callee-save registers. + RESTORE_R22_TO_R31(3*8) + RESTORE_F24_TO_F31(13*8) + RET + +TEXT _rt0_loong64_lib_go(SB),NOSPLIT,$0 + MOVV _rt0_loong64_lib_argc<>(SB), R4 + MOVV _rt0_loong64_lib_argv<>(SB), R5 + MOVV $runtime·rt0_go(SB),R19 + JMP (R19) + +DATA _rt0_loong64_lib_argc<>(SB)/8, $0 +GLOBL _rt0_loong64_lib_argc<>(SB),NOPTR, $8 +DATA _rt0_loong64_lib_argv<>(SB)/8, $0 +GLOBL _rt0_loong64_lib_argv<>(SB),NOPTR, $8 #define REGCTXT R29 diff --git a/src/runtime/rt0_linux_loong64.s b/src/runtime/rt0_linux_loong64.s index b52f7d530a6a98..d8da4461a2f56d 100644 --- a/src/runtime/rt0_linux_loong64.s +++ b/src/runtime/rt0_linux_loong64.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
#include "textflag.h" -#include "cgo/abi_loong64.h" TEXT _rt0_loong64_linux(SB),NOSPLIT|NOFRAME,$0 // In a statically linked binary, the stack contains argc, @@ -16,53 +15,8 @@ TEXT _rt0_loong64_linux(SB),NOSPLIT|NOFRAME,$0 // When building with -buildmode=c-shared, this symbol is called when the shared // library is loaded. -TEXT _rt0_loong64_linux_lib(SB),NOSPLIT,$168 - // Preserve callee-save registers. - SAVE_R22_TO_R31(3*8) - SAVE_F24_TO_F31(13*8) - - // Initialize g as nil in case of using g later e.g. sigaction in cgo_sigaction.go - MOVV R0, g - - MOVV R4, _rt0_loong64_linux_lib_argc<>(SB) - MOVV R5, _rt0_loong64_linux_lib_argv<>(SB) - - // Synchronous initialization. - MOVV $runtime·libpreinit(SB), R19 - JAL (R19) - - // Create a new thread to do the runtime initialization and return. - MOVV _cgo_sys_thread_create(SB), R19 - BEQ R19, nocgo - MOVV $_rt0_loong64_linux_lib_go(SB), R4 - MOVV $0, R5 - JAL (R19) - JMP restore - -nocgo: - MOVV $0x800000, R4 // stacksize = 8192KB - MOVV $_rt0_loong64_linux_lib_go(SB), R5 - MOVV R4, 8(R3) - MOVV R5, 16(R3) - MOVV $runtime·newosproc0(SB), R19 - JAL (R19) - -restore: - // Restore callee-save registers. - RESTORE_R22_TO_R31(3*8) - RESTORE_F24_TO_F31(13*8) - RET - -TEXT _rt0_loong64_linux_lib_go(SB),NOSPLIT,$0 - MOVV _rt0_loong64_linux_lib_argc<>(SB), R4 - MOVV _rt0_loong64_linux_lib_argv<>(SB), R5 - MOVV $runtime·rt0_go(SB),R19 - JMP (R19) - -DATA _rt0_loong64_linux_lib_argc<>(SB)/8, $0 -GLOBL _rt0_loong64_linux_lib_argc<>(SB),NOPTR, $8 -DATA _rt0_loong64_linux_lib_argv<>(SB)/8, $0 -GLOBL _rt0_loong64_linux_lib_argv<>(SB),NOPTR, $8 +TEXT _rt0_loong64_linux_lib(SB),NOSPLIT,$0 + JMP _rt0_loong64_lib(SB) TEXT main(SB),NOSPLIT|NOFRAME,$0 // in external linking, glibc jumps to main with argc in R4 From be0fed8a5fc4e34f2c6caf503830bcdf904ded54 Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Tue, 30 Sep 2025 15:11:58 -0400 Subject: [PATCH 028/152] cmd/go/testdata/script/test_fuzz_fuzztime.txt: disable This test features a 5s timeout, which is far too close to the natural variance in scheduling on an overloaded CI builder machine to make a reliable test. Skipping. 
Updates #72104 Change-Id: I52133a2d101808c923e316e0c7fdce9edbb31b10 Reviewed-on: https://go-review.googlesource.com/c/go/+/708075 Auto-Submit: Alan Donovan LUCI-TryBot-Result: Go LUCI Reviewed-by: Michael Matloob Reviewed-by: Michael Matloob --- src/cmd/go/testdata/script/test_fuzz_fuzztime.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/cmd/go/testdata/script/test_fuzz_fuzztime.txt b/src/cmd/go/testdata/script/test_fuzz_fuzztime.txt index 027c434a322ec1..3cc23985a3934a 100644 --- a/src/cmd/go/testdata/script/test_fuzz_fuzztime.txt +++ b/src/cmd/go/testdata/script/test_fuzz_fuzztime.txt @@ -1,3 +1,5 @@ +skip # a 5s timeout is never going to be reliable (go.dev/issue/72140) + [!fuzz] skip [short] skip env GOCACHE=$WORK/cache From 3f451f2c54c87db8b8f30e4d5224933f7895f453 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20G=2E=20MARAND?= Date: Wed, 1 Oct 2025 12:46:16 +0000 Subject: [PATCH 029/152] testing/synctest: fix inverted test failure message in TestContextAfterFunc Fixes #75685 Change-Id: I5592becfde6aaca3d7f0e2f09bc7a9785228523e GitHub-Last-Rev: 0ff7bd31ecfc23222dae70194621397330f3c2da GitHub-Pull-Request: golang/go#75687 Reviewed-on: https://go-review.googlesource.com/c/go/+/708275 Reviewed-by: Alan Donovan Auto-Submit: Alan Donovan LUCI-TryBot-Result: Go LUCI Auto-Submit: Damien Neil Auto-Submit: Sean Liao Reviewed-by: Damien Neil Reviewed-by: Sean Liao --- src/testing/synctest/example_test.go | 2 +- src/testing/synctest/synctest.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/testing/synctest/example_test.go b/src/testing/synctest/example_test.go index 843377ea88ef71..a86d87fcecdca1 100644 --- a/src/testing/synctest/example_test.go +++ b/src/testing/synctest/example_test.go @@ -66,7 +66,7 @@ func TestContextAfterFunc(t *testing.T) { cancel() synctest.Wait() if !afterFuncCalled { - t.Fatalf("before context is canceled: AfterFunc not called") + t.Fatalf("after context is canceled: AfterFunc not called") } }) } diff --git a/src/testing/synctest/synctest.go b/src/testing/synctest/synctest.go index 707383f9c75499..9f499515b8881a 100644 --- a/src/testing/synctest/synctest.go +++ b/src/testing/synctest/synctest.go @@ -147,7 +147,7 @@ // cancel() // synctest.Wait() // if !afterFuncCalled { -// t.Fatalf("before context is canceled: AfterFunc not called") +// t.Fatalf("after context is canceled: AfterFunc not called") // } // }) // } From 8ad27fb656ab162546137b512c61f6a26a90a6c5 Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Wed, 1 Oct 2025 15:07:57 -0400 Subject: [PATCH 030/152] doc/go_spec.html: update date (addressing comment from review of CL 704737) Change-Id: I483dea046f664035e79c51729203c9a9ff0cbc59 Reviewed-on: https://go-review.googlesource.com/c/go/+/708299 Auto-Submit: Alan Donovan Reviewed-by: Robert Griesemer LUCI-TryBot-Result: Go LUCI --- doc/go_spec.html | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/go_spec.html b/doc/go_spec.html index b67eaf9999ab1e..92afe1cee0baef 100644 --- a/doc/go_spec.html +++ b/doc/go_spec.html @@ -1,6 +1,6 @@ From 633dd1d475e7346b43d87abc987a8c7f256e827d Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Mon, 15 Sep 2025 12:56:11 -0700 Subject: [PATCH 031/152] encoding/json: fix Decoder.InputOffset regression in goexperiment.jsonv2 The Decoder.InputOffset method was always ambiguous about the exact offset returned since anything between the end of the previous token to the start of the next token could be a valid result. 
Empirically, it seems that the behavior was to report the end of the previous token unless Decoder.More is called, in which case it reports the start of the next token. This is an odd semantic since a relatively side-effect free method like More is not quite so side-effect free. However, our goal is to preserve historical v1 semantic when possible regardless of whether it made sense. Note that jsontext.Decoder.InputOffset consistently always reports the end of the previous token. Users can explicitly choose the exact position they want by inspecting the UnreadBuffer. Fixes #75468 Change-Id: I1e946e83c9d29dfc09f2913ff8d6b2b80632f292 Reviewed-on: https://go-review.googlesource.com/c/go/+/703856 Reviewed-by: Damien Neil LUCI-TryBot-Result: Go LUCI Reviewed-by: Junyang Shao --- src/encoding/json/stream_test.go | 64 +++++++++++++++++++++++++++++ src/encoding/json/v2_stream.go | 22 +++++++++- src/encoding/json/v2_stream_test.go | 64 +++++++++++++++++++++++++++++ 3 files changed, 149 insertions(+), 1 deletion(-) diff --git a/src/encoding/json/stream_test.go b/src/encoding/json/stream_test.go index 9e5d48d39d2fcf..0e937cfaa13c78 100644 --- a/src/encoding/json/stream_test.go +++ b/src/encoding/json/stream_test.go @@ -557,3 +557,67 @@ func TestTokenTruncation(t *testing.T) { } } } + +func TestDecoderInputOffset(t *testing.T) { + const input = ` [ + [ ] , [ "one" ] , [ "one" , "two" ] , + { } , { "alpha" : "bravo" } , { "alpha" : "bravo" , "fizz" : "buzz" } + ] ` + wantOffsets := []int64{ + 0, 1, 2, 5, 6, 7, 8, 9, 12, 13, 18, 19, 20, 21, 24, 25, 30, 31, + 38, 39, 40, 41, 46, 47, 48, 49, 52, 53, 60, 61, 70, 71, 72, 73, + 76, 77, 84, 85, 94, 95, 103, 104, 112, 113, 114, 116, 117, 117, + 117, 117, + } + wantMores := []bool{ + true, true, false, true, true, false, true, true, true, false, + true, false, true, true, true, false, true, true, true, true, + true, false, false, false, false, + } + + d := NewDecoder(strings.NewReader(input)) + checkOffset := func() { + t.Helper() + got := d.InputOffset() + if len(wantOffsets) == 0 { + t.Fatalf("InputOffset = %d, want nil", got) + } + want := wantOffsets[0] + if got != want { + t.Fatalf("InputOffset = %d, want %d", got, want) + } + wantOffsets = wantOffsets[1:] + } + checkMore := func() { + t.Helper() + got := d.More() + if len(wantMores) == 0 { + t.Fatalf("More = %v, want nil", got) + } + want := wantMores[0] + if got != want { + t.Fatalf("More = %v, want %v", got, want) + } + wantMores = wantMores[1:] + } + checkOffset() + checkMore() + checkOffset() + for { + if _, err := d.Token(); err == io.EOF { + break + } else if err != nil { + t.Fatalf("Token error: %v", err) + } + checkOffset() + checkMore() + checkOffset() + } + checkOffset() + checkMore() + checkOffset() + + if len(wantOffsets)+len(wantMores) > 0 { + t.Fatal("unconsumed testdata") + } +} diff --git a/src/encoding/json/v2_stream.go b/src/encoding/json/v2_stream.go index 28e72c0a529e15..dcee553ee13336 100644 --- a/src/encoding/json/v2_stream.go +++ b/src/encoding/json/v2_stream.go @@ -20,6 +20,10 @@ type Decoder struct { dec *jsontext.Decoder opts jsonv2.Options err error + + // hadPeeked reports whether [Decoder.More] was called. + // It is reset by [Decoder.Decode] and [Decoder.Token]. + hadPeeked bool } // NewDecoder returns a new decoder that reads from r. 
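As an illustrative aside (not part of this change), here is a minimal sketch of the behavior described in the commit message above; the offsets in the comments assume the literal input shown:

    package main

    import (
        "encoding/json"
        "fmt"
        "strings"
    )

    func main() {
        // Input: ` [ "a" , "b" ] `; '[' sits at offset 1 and "a" starts at offset 3.
        dec := json.NewDecoder(strings.NewReader(` [ "a" , "b" ] `))

        dec.Token()                    // consume '['
        fmt.Println(dec.InputOffset()) // 2: end of the most recently returned token, '['

        dec.More()                     // peek at the next array element
        fmt.Println(dec.InputOffset()) // 3: start of the next token "a", because More was called
    }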
@@ -76,6 +80,7 @@ func (dec *Decoder) Decode(v any) error { } return dec.err } + dec.hadPeeked = false return jsonv2.Unmarshal(b, v, dec.opts) } @@ -206,6 +211,7 @@ func (dec *Decoder) Token() (Token, error) { } return nil, transformSyntacticError(err) } + dec.hadPeeked = false switch k := tok.Kind(); k { case 'n': return nil, nil @@ -230,6 +236,7 @@ func (dec *Decoder) Token() (Token, error) { // More reports whether there is another element in the // current array or object being parsed. func (dec *Decoder) More() bool { + dec.hadPeeked = true k := dec.dec.PeekKind() return k > 0 && k != ']' && k != '}' } @@ -238,5 +245,18 @@ func (dec *Decoder) More() bool { // The offset gives the location of the end of the most recently returned token // and the beginning of the next token. func (dec *Decoder) InputOffset() int64 { - return dec.dec.InputOffset() + offset := dec.dec.InputOffset() + if dec.hadPeeked { + // Historically, InputOffset reported the location of + // the end of the most recently returned token + // unless [Decoder.More] is called, in which case, it reported + // the beginning of the next token. + unreadBuffer := dec.dec.UnreadBuffer() + trailingTokens := bytes.TrimLeft(unreadBuffer, " \n\r\t") + if len(trailingTokens) > 0 { + leadingWhitespace := len(unreadBuffer) - len(trailingTokens) + offset += int64(leadingWhitespace) + } + } + return offset } diff --git a/src/encoding/json/v2_stream_test.go b/src/encoding/json/v2_stream_test.go index b8951f82054e96..0885631fb5937f 100644 --- a/src/encoding/json/v2_stream_test.go +++ b/src/encoding/json/v2_stream_test.go @@ -537,3 +537,67 @@ func TestTokenTruncation(t *testing.T) { } } } + +func TestDecoderInputOffset(t *testing.T) { + const input = ` [ + [ ] , [ "one" ] , [ "one" , "two" ] , + { } , { "alpha" : "bravo" } , { "alpha" : "bravo" , "fizz" : "buzz" } + ] ` + wantOffsets := []int64{ + 0, 1, 2, 5, 6, 7, 8, 9, 12, 13, 18, 19, 20, 21, 24, 25, 30, 31, + 38, 39, 40, 41, 46, 47, 48, 49, 52, 53, 60, 61, 70, 71, 72, 73, + 76, 77, 84, 85, 94, 95, 103, 104, 112, 113, 114, 116, 117, 117, + 117, 117, + } + wantMores := []bool{ + true, true, false, true, true, false, true, true, true, false, + true, false, true, true, true, false, true, true, true, true, + true, false, false, false, false, + } + + d := NewDecoder(strings.NewReader(input)) + checkOffset := func() { + t.Helper() + got := d.InputOffset() + if len(wantOffsets) == 0 { + t.Fatalf("InputOffset = %d, want nil", got) + } + want := wantOffsets[0] + if got != want { + t.Fatalf("InputOffset = %d, want %d", got, want) + } + wantOffsets = wantOffsets[1:] + } + checkMore := func() { + t.Helper() + got := d.More() + if len(wantMores) == 0 { + t.Fatalf("More = %v, want nil", got) + } + want := wantMores[0] + if got != want { + t.Fatalf("More = %v, want %v", got, want) + } + wantMores = wantMores[1:] + } + checkOffset() + checkMore() + checkOffset() + for { + if _, err := d.Token(); err == io.EOF { + break + } else if err != nil { + t.Fatalf("Token error: %v", err) + } + checkOffset() + checkMore() + checkOffset() + } + checkOffset() + checkMore() + checkOffset() + + if len(wantOffsets)+len(wantMores) > 0 { + t.Fatal("unconsumed testdata") + } +} From 5799c139a77e9c3a5750c90ebda538131f4517d6 Mon Sep 17 00:00:00 2001 From: Daniel McCarney Date: Mon, 29 Sep 2025 14:31:38 -0400 Subject: [PATCH 032/152] crypto/tls: rm marshalEncryptedClientHelloConfigList dead code This package internal function has no call sites. 
Change-Id: I262058199fd2f387ef3b5e21099421720cc5413e Reviewed-on: https://go-review.googlesource.com/c/go/+/707815 TryBot-Bypass: Daniel McCarney Auto-Submit: Roland Shoemaker Auto-Submit: Daniel McCarney Reviewed-by: Carlos Amedee Reviewed-by: Roland Shoemaker --- src/crypto/tls/ech.go | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/src/crypto/tls/ech.go b/src/crypto/tls/ech.go index 76727a890896a0..d3472d8dc4aafb 100644 --- a/src/crypto/tls/ech.go +++ b/src/crypto/tls/ech.go @@ -568,16 +568,6 @@ func parseECHExt(ext []byte) (echType echExtType, cs echCipher, configID uint8, return echType, cs, configID, bytes.Clone(encap), bytes.Clone(payload), nil } -func marshalEncryptedClientHelloConfigList(configs []EncryptedClientHelloKey) ([]byte, error) { - builder := cryptobyte.NewBuilder(nil) - builder.AddUint16LengthPrefixed(func(builder *cryptobyte.Builder) { - for _, c := range configs { - builder.AddBytes(c.Config) - } - }) - return builder.Bytes() -} - func (c *Conn) processECHClientHello(outer *clientHelloMsg, echKeys []EncryptedClientHelloKey) (*clientHelloMsg, *echServerContext, error) { echType, echCiphersuite, configID, encap, payload, err := parseECHExt(outer.encryptedClientHello) if err != nil { From 84db201ae18c889acdefe20c8a903b188328f16d Mon Sep 17 00:00:00 2001 From: Mateusz Poliwczak Date: Fri, 26 Sep 2025 20:47:45 +0200 Subject: [PATCH 033/152] cmd/compile: propagate len([]T{}) to make builtin to allow stack allocation Updates #75620 Change-Id: I6a6a6964af4512e30eb4806e1dc7b0fd0835744f Reviewed-on: https://go-review.googlesource.com/c/go/+/707255 Reviewed-by: Keith Randall Reviewed-by: Keith Randall LUCI-TryBot-Result: Go LUCI Auto-Submit: Keith Randall Reviewed-by: Carlos Amedee --- src/cmd/compile/internal/escape/escape.go | 13 ++++++++++++- test/escape_make_non_const.go | 15 +++++++++++++++ 2 files changed, 27 insertions(+), 1 deletion(-) diff --git a/src/cmd/compile/internal/escape/escape.go b/src/cmd/compile/internal/escape/escape.go index 6b34830b3dd5e6..59250edfef0078 100644 --- a/src/cmd/compile/internal/escape/escape.go +++ b/src/cmd/compile/internal/escape/escape.go @@ -563,7 +563,10 @@ func (b *batch) rewriteWithLiterals(n ir.Node, fn *ir.Func) { if ro == nil { base.Fatalf("no ReassignOracle for function %v with closure parent %v", fn, fn.ClosureParent) } - if s := ro.StaticValue(*r); s.Op() == ir.OLITERAL { + + s := ro.StaticValue(*r) + switch s.Op() { + case ir.OLITERAL: lit, ok := s.(*ir.BasicLit) if !ok || lit.Val().Kind() != constant.Int { base.Fatalf("unexpected BasicLit Kind") @@ -577,6 +580,14 @@ func (b *batch) rewriteWithLiterals(n ir.Node, fn *ir.Func) { assignTemp(n.Pos(), *r, n.PtrInit()) *r = ir.NewBasicLit(n.Pos(), (*r).Type(), lit.Val()) } + case ir.OLEN: + x := ro.StaticValue(s.(*ir.UnaryExpr).X) + if x.Op() == ir.OSLICELIT { + x := x.(*ir.CompLitExpr) + // Preserve any side effects of the original expression, then update the value. 
+ assignTemp(n.Pos(), *r, n.PtrInit()) + *r = ir.NewBasicLit(n.Pos(), types.Types[types.TINT], constant.MakeInt64(x.Len)) + } } } case ir.OCONVIFACE: diff --git a/test/escape_make_non_const.go b/test/escape_make_non_const.go index 7a9b28d5e37a7e..11854ac4f4731a 100644 --- a/test/escape_make_non_const.go +++ b/test/escape_make_non_const.go @@ -106,3 +106,18 @@ type m struct { func newM(l int) m { // ERROR "can inline" return m{make(map[string]int, l)} // ERROR "make.*escapes to heap" } + +//go:noinline +func testLenOfSliceLit() { + ints := []int{0, 1, 2, 3, 4, 5} // ERROR "\[\]int\{\.\.\.\} does not escape"' + _ = make([]int, len(ints)) // ERROR "make\(\[\]int, 6\) does not escape" + _ = allocLenOf(ints) // ERROR "inlining call", "make\(\[\]int, 6\) does not escape" + + _ = make([]int, 2, len(ints)) // ERROR "make\(\[\]int, 2, 6\) does not escape" + _ = make([]int, len(ints), 2) // ERROR "make\(\[\]int, len\(ints\), 2\) does not escape" + _ = make([]int, 10, len(ints)) // ERROR "make\(\[\]int, 10, 6\) does not escape" +} + +func allocLenOf(s []int) []int { // ERROR "can inline" "s does not escape" + return make([]int, len(s)) // ERROR "escapes to heap" +} From 8c68a1c1abade565c6719159858e76f9b122ddc8 Mon Sep 17 00:00:00 2001 From: Vlad Saioc Date: Thu, 2 Oct 2025 11:57:58 +0000 Subject: [PATCH 034/152] runtime,net/http/pprof: goroutine leak detection by using the garbage collector Proposal #74609 Change-Id: I97a754b128aac1bc5b7b9ab607fcd5bb390058c8 GitHub-Last-Rev: 60f2a192badf415112246de8bc6c0084085314f6 GitHub-Pull-Request: golang/go#74622 Reviewed-on: https://go-review.googlesource.com/c/go/+/688335 LUCI-TryBot-Result: Go LUCI Reviewed-by: t hepudds Auto-Submit: Michael Knyszek Reviewed-by: Michael Knyszek Reviewed-by: Carlos Amedee --- src/cmd/link/internal/loader/loader.go | 3 + .../exp_goroutineleakprofile_off.go | 8 + .../exp_goroutineleakprofile_on.go | 8 + src/internal/goexperiment/flags.go | 3 + src/net/http/pprof/pprof.go | 8 + src/runtime/chan.go | 32 +- src/runtime/crash_test.go | 16 + src/runtime/goroutineleakprofile_test.go | 595 ++++++ src/runtime/mbitmap.go | 22 + src/runtime/mgc.go | 306 ++- src/runtime/mgcmark.go | 105 +- src/runtime/mgcmark_greenteagc.go | 2 +- src/runtime/mgcmark_nogreenteagc.go | 2 +- src/runtime/mprof.go | 56 + src/runtime/pprof/pprof.go | 45 +- src/runtime/pprof/runtime.go | 6 + src/runtime/preempt.go | 3 +- src/runtime/proc.go | 16 +- src/runtime/runtime2.go | 121 +- src/runtime/select.go | 12 +- src/runtime/sema.go | 27 +- src/runtime/sizeof_test.go | 2 +- src/runtime/stack.go | 17 +- .../commonpatterns.go | 277 +++ .../testgoroutineleakprofile/goker/LICENSE | 21 + .../testgoroutineleakprofile/goker/README.md | 1847 +++++++++++++++++ .../goker/cockroach10214.go | 145 ++ .../goker/cockroach1055.go | 115 + .../goker/cockroach10790.go | 98 + .../goker/cockroach13197.go | 82 + .../goker/cockroach13755.go | 66 + .../goker/cockroach1462.go | 167 ++ .../goker/cockroach16167.go | 108 + .../goker/cockroach18101.go | 60 + .../goker/cockroach2448.go | 125 ++ .../goker/cockroach24808.go | 78 + .../goker/cockroach25456.go | 92 + .../goker/cockroach35073.go | 124 ++ .../goker/cockroach35931.go | 135 ++ .../goker/cockroach3710.go | 122 ++ .../goker/cockroach584.go | 62 + .../goker/cockroach6181.go | 87 + .../goker/cockroach7504.go | 183 ++ .../goker/cockroach9935.go | 77 + .../goker/etcd10492.go | 72 + .../goker/etcd5509.go | 126 ++ .../goker/etcd6708.go | 100 + .../goker/etcd6857.go | 81 + .../goker/etcd6873.go | 98 + .../goker/etcd7492.go | 163 ++ 
.../goker/etcd7902.go | 87 + .../goker/grpc1275.go | 111 + .../goker/grpc1424.go | 105 + .../goker/grpc1460.go | 84 + .../goker/grpc3017.go | 123 ++ .../testgoroutineleakprofile/goker/grpc660.go | 65 + .../testgoroutineleakprofile/goker/grpc795.go | 74 + .../testgoroutineleakprofile/goker/grpc862.go | 105 + .../goker/hugo3251.go | 81 + .../goker/hugo5379.go | 317 +++ .../goker/istio16224.go | 129 ++ .../goker/istio17860.go | 144 ++ .../goker/istio18454.go | 154 ++ .../goker/kubernetes10182.go | 95 + .../goker/kubernetes11298.go | 118 ++ .../goker/kubernetes13135.go | 166 ++ .../goker/kubernetes1321.go | 100 + .../goker/kubernetes25331.go | 72 + .../goker/kubernetes26980.go | 87 + .../goker/kubernetes30872.go | 223 ++ .../goker/kubernetes38669.go | 83 + .../goker/kubernetes5316.go | 68 + .../goker/kubernetes58107.go | 108 + .../goker/kubernetes62464.go | 120 ++ .../goker/kubernetes6632.go | 86 + .../goker/kubernetes70277.go | 97 + .../testgoroutineleakprofile/goker/main.go | 39 + .../goker/moby17176.go | 68 + .../goker/moby21233.go | 146 ++ .../goker/moby25384.go | 59 + .../goker/moby27782.go | 242 +++ .../goker/moby28462.go | 125 ++ .../goker/moby30408.go | 67 + .../goker/moby33781.go | 71 + .../goker/moby36114.go | 53 + .../goker/moby4951.go | 101 + .../goker/moby7559.go | 55 + .../goker/serving2137.go | 122 ++ .../goker/syncthing4829.go | 87 + .../goker/syncthing5795.go | 103 + .../testdata/testgoroutineleakprofile/main.go | 39 + .../testgoroutineleakprofile/simple.go | 253 +++ .../testgoroutineleakprofile/stresstests.go | 89 + src/runtime/traceback.go | 6 +- src/runtime/tracestatus.go | 2 +- 95 files changed, 10766 insertions(+), 89 deletions(-) create mode 100644 src/internal/goexperiment/exp_goroutineleakprofile_off.go create mode 100644 src/internal/goexperiment/exp_goroutineleakprofile_on.go create mode 100644 src/runtime/goroutineleakprofile_test.go create mode 100644 src/runtime/testdata/testgoroutineleakprofile/commonpatterns.go create mode 100644 src/runtime/testdata/testgoroutineleakprofile/goker/LICENSE create mode 100644 src/runtime/testdata/testgoroutineleakprofile/goker/README.md create mode 100644 src/runtime/testdata/testgoroutineleakprofile/goker/cockroach10214.go create mode 100644 src/runtime/testdata/testgoroutineleakprofile/goker/cockroach1055.go create mode 100644 src/runtime/testdata/testgoroutineleakprofile/goker/cockroach10790.go create mode 100644 src/runtime/testdata/testgoroutineleakprofile/goker/cockroach13197.go create mode 100644 src/runtime/testdata/testgoroutineleakprofile/goker/cockroach13755.go create mode 100644 src/runtime/testdata/testgoroutineleakprofile/goker/cockroach1462.go create mode 100644 src/runtime/testdata/testgoroutineleakprofile/goker/cockroach16167.go create mode 100644 src/runtime/testdata/testgoroutineleakprofile/goker/cockroach18101.go create mode 100644 src/runtime/testdata/testgoroutineleakprofile/goker/cockroach2448.go create mode 100644 src/runtime/testdata/testgoroutineleakprofile/goker/cockroach24808.go create mode 100644 src/runtime/testdata/testgoroutineleakprofile/goker/cockroach25456.go create mode 100644 src/runtime/testdata/testgoroutineleakprofile/goker/cockroach35073.go create mode 100644 src/runtime/testdata/testgoroutineleakprofile/goker/cockroach35931.go create mode 100644 src/runtime/testdata/testgoroutineleakprofile/goker/cockroach3710.go create mode 100644 src/runtime/testdata/testgoroutineleakprofile/goker/cockroach584.go create mode 100644 src/runtime/testdata/testgoroutineleakprofile/goker/cockroach6181.go 
create mode 100644 src/runtime/testdata/testgoroutineleakprofile/goker/cockroach7504.go create mode 100644 src/runtime/testdata/testgoroutineleakprofile/goker/cockroach9935.go create mode 100644 src/runtime/testdata/testgoroutineleakprofile/goker/etcd10492.go create mode 100644 src/runtime/testdata/testgoroutineleakprofile/goker/etcd5509.go create mode 100644 src/runtime/testdata/testgoroutineleakprofile/goker/etcd6708.go create mode 100644 src/runtime/testdata/testgoroutineleakprofile/goker/etcd6857.go create mode 100644 src/runtime/testdata/testgoroutineleakprofile/goker/etcd6873.go create mode 100644 src/runtime/testdata/testgoroutineleakprofile/goker/etcd7492.go create mode 100644 src/runtime/testdata/testgoroutineleakprofile/goker/etcd7902.go create mode 100644 src/runtime/testdata/testgoroutineleakprofile/goker/grpc1275.go create mode 100644 src/runtime/testdata/testgoroutineleakprofile/goker/grpc1424.go create mode 100644 src/runtime/testdata/testgoroutineleakprofile/goker/grpc1460.go create mode 100644 src/runtime/testdata/testgoroutineleakprofile/goker/grpc3017.go create mode 100644 src/runtime/testdata/testgoroutineleakprofile/goker/grpc660.go create mode 100644 src/runtime/testdata/testgoroutineleakprofile/goker/grpc795.go create mode 100644 src/runtime/testdata/testgoroutineleakprofile/goker/grpc862.go create mode 100644 src/runtime/testdata/testgoroutineleakprofile/goker/hugo3251.go create mode 100644 src/runtime/testdata/testgoroutineleakprofile/goker/hugo5379.go create mode 100644 src/runtime/testdata/testgoroutineleakprofile/goker/istio16224.go create mode 100644 src/runtime/testdata/testgoroutineleakprofile/goker/istio17860.go create mode 100644 src/runtime/testdata/testgoroutineleakprofile/goker/istio18454.go create mode 100644 src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes10182.go create mode 100644 src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes11298.go create mode 100644 src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes13135.go create mode 100644 src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes1321.go create mode 100644 src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes25331.go create mode 100644 src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes26980.go create mode 100644 src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes30872.go create mode 100644 src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes38669.go create mode 100644 src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes5316.go create mode 100644 src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes58107.go create mode 100644 src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes62464.go create mode 100644 src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes6632.go create mode 100644 src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes70277.go create mode 100644 src/runtime/testdata/testgoroutineleakprofile/goker/main.go create mode 100644 src/runtime/testdata/testgoroutineleakprofile/goker/moby17176.go create mode 100644 src/runtime/testdata/testgoroutineleakprofile/goker/moby21233.go create mode 100644 src/runtime/testdata/testgoroutineleakprofile/goker/moby25384.go create mode 100644 src/runtime/testdata/testgoroutineleakprofile/goker/moby27782.go create mode 100644 src/runtime/testdata/testgoroutineleakprofile/goker/moby28462.go create mode 100644 src/runtime/testdata/testgoroutineleakprofile/goker/moby30408.go create mode 100644 
src/runtime/testdata/testgoroutineleakprofile/goker/moby33781.go create mode 100644 src/runtime/testdata/testgoroutineleakprofile/goker/moby36114.go create mode 100644 src/runtime/testdata/testgoroutineleakprofile/goker/moby4951.go create mode 100644 src/runtime/testdata/testgoroutineleakprofile/goker/moby7559.go create mode 100644 src/runtime/testdata/testgoroutineleakprofile/goker/serving2137.go create mode 100644 src/runtime/testdata/testgoroutineleakprofile/goker/syncthing4829.go create mode 100644 src/runtime/testdata/testgoroutineleakprofile/goker/syncthing5795.go create mode 100644 src/runtime/testdata/testgoroutineleakprofile/main.go create mode 100644 src/runtime/testdata/testgoroutineleakprofile/simple.go create mode 100644 src/runtime/testdata/testgoroutineleakprofile/stresstests.go diff --git a/src/cmd/link/internal/loader/loader.go b/src/cmd/link/internal/loader/loader.go index 103dad03001263..0ed20d1becbb03 100644 --- a/src/cmd/link/internal/loader/loader.go +++ b/src/cmd/link/internal/loader/loader.go @@ -2450,6 +2450,9 @@ var blockedLinknames = map[string][]string{ "sync_test.runtime_blockUntilEmptyCleanupQueue": {"sync_test"}, "time.runtimeIsBubbled": {"time"}, "unique.runtime_blockUntilEmptyCleanupQueue": {"unique"}, + // Experimental features + "runtime.goroutineLeakGC": {"runtime/pprof"}, + "runtime.goroutineleakcount": {"runtime/pprof"}, // Others "net.newWindowsFile": {"net"}, // pushed from os "testing/synctest.testingSynctestTest": {"testing/synctest"}, // pushed from testing diff --git a/src/internal/goexperiment/exp_goroutineleakprofile_off.go b/src/internal/goexperiment/exp_goroutineleakprofile_off.go new file mode 100644 index 00000000000000..63eafe9e6c74db --- /dev/null +++ b/src/internal/goexperiment/exp_goroutineleakprofile_off.go @@ -0,0 +1,8 @@ +// Code generated by mkconsts.go. DO NOT EDIT. + +//go:build !goexperiment.goroutineleakprofile + +package goexperiment + +const GoroutineLeakProfile = false +const GoroutineLeakProfileInt = 0 diff --git a/src/internal/goexperiment/exp_goroutineleakprofile_on.go b/src/internal/goexperiment/exp_goroutineleakprofile_on.go new file mode 100644 index 00000000000000..28a662eceb46f8 --- /dev/null +++ b/src/internal/goexperiment/exp_goroutineleakprofile_on.go @@ -0,0 +1,8 @@ +// Code generated by mkconsts.go. DO NOT EDIT. + +//go:build goexperiment.goroutineleakprofile + +package goexperiment + +const GoroutineLeakProfile = true +const GoroutineLeakProfileInt = 1 diff --git a/src/internal/goexperiment/flags.go b/src/internal/goexperiment/flags.go index 84dbf594b8792a..232a17135d2cc5 100644 --- a/src/internal/goexperiment/flags.go +++ b/src/internal/goexperiment/flags.go @@ -118,4 +118,7 @@ type Flags struct { // SizeSpecializedMalloc enables malloc implementations that are specialized per size class. SizeSpecializedMalloc bool + + // GoroutineLeakProfile enables the collection of goroutine leak profiles. 
+ GoroutineLeakProfile bool } diff --git a/src/net/http/pprof/pprof.go b/src/net/http/pprof/pprof.go index 635d3ad9d9f132..e5a46ed253cf8b 100644 --- a/src/net/http/pprof/pprof.go +++ b/src/net/http/pprof/pprof.go @@ -77,6 +77,7 @@ import ( "fmt" "html" "internal/godebug" + "internal/goexperiment" "internal/profile" "io" "log" @@ -353,6 +354,7 @@ func collectProfile(p *pprof.Profile) (*profile.Profile, error) { var profileSupportsDelta = map[handler]bool{ "allocs": true, "block": true, + "goroutineleak": true, "goroutine": true, "heap": true, "mutex": true, @@ -372,6 +374,12 @@ var profileDescriptions = map[string]string{ "trace": "A trace of execution of the current program. You can specify the duration in the seconds GET parameter. After you get the trace file, use the go tool trace command to investigate the trace.", } +func init() { + if goexperiment.GoroutineLeakProfile { + profileDescriptions["goroutineleak"] = "Stack traces of all leaked goroutines. Use debug=2 as a query parameter to export in the same format as an unrecovered panic." + } +} + type profileEntry struct { Name string Href string diff --git a/src/runtime/chan.go b/src/runtime/chan.go index 639d29dc8337f0..3320c248b458b6 100644 --- a/src/runtime/chan.go +++ b/src/runtime/chan.go @@ -263,11 +263,11 @@ func chansend(c *hchan, ep unsafe.Pointer, block bool, callerpc uintptr) bool { } // No stack splits between assigning elem and enqueuing mysg // on gp.waiting where copystack can find it. - mysg.elem = ep + mysg.elem.set(ep) mysg.waitlink = nil mysg.g = gp mysg.isSelect = false - mysg.c = c + mysg.c.set(c) gp.waiting = mysg gp.param = nil c.sendq.enqueue(mysg) @@ -298,7 +298,7 @@ func chansend(c *hchan, ep unsafe.Pointer, block bool, callerpc uintptr) bool { if mysg.releasetime > 0 { blockevent(mysg.releasetime-t0, 2) } - mysg.c = nil + mysg.c.set(nil) releaseSudog(mysg) if closed { if c.closed == 0 { @@ -336,9 +336,9 @@ func send(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func(), skip int) { c.sendx = c.recvx // c.sendx = (c.sendx+1) % c.dataqsiz } } - if sg.elem != nil { + if sg.elem.get() != nil { sendDirect(c.elemtype, sg, ep) - sg.elem = nil + sg.elem.set(nil) } gp := sg.g unlockf() @@ -395,7 +395,7 @@ func sendDirect(t *_type, sg *sudog, src unsafe.Pointer) { // Once we read sg.elem out of sg, it will no longer // be updated if the destination's stack gets copied (shrunk). // So make sure that no preemption points can happen between read & use. - dst := sg.elem + dst := sg.elem.get() typeBitsBulkBarrier(t, uintptr(dst), uintptr(src), t.Size_) // No need for cgo write barrier checks because dst is always // Go memory. @@ -406,7 +406,7 @@ func recvDirect(t *_type, sg *sudog, dst unsafe.Pointer) { // dst is on our stack or the heap, src is on another stack. // The channel is locked, so src will not move during this // operation. 
- src := sg.elem + src := sg.elem.get() typeBitsBulkBarrier(t, uintptr(dst), uintptr(src), t.Size_) memmove(dst, src, t.Size_) } @@ -441,9 +441,9 @@ func closechan(c *hchan) { if sg == nil { break } - if sg.elem != nil { - typedmemclr(c.elemtype, sg.elem) - sg.elem = nil + if sg.elem.get() != nil { + typedmemclr(c.elemtype, sg.elem.get()) + sg.elem.set(nil) } if sg.releasetime != 0 { sg.releasetime = cputicks() @@ -463,7 +463,7 @@ func closechan(c *hchan) { if sg == nil { break } - sg.elem = nil + sg.elem.set(nil) if sg.releasetime != 0 { sg.releasetime = cputicks() } @@ -642,13 +642,13 @@ func chanrecv(c *hchan, ep unsafe.Pointer, block bool) (selected, received bool) } // No stack splits between assigning elem and enqueuing mysg // on gp.waiting where copystack can find it. - mysg.elem = ep + mysg.elem.set(ep) mysg.waitlink = nil gp.waiting = mysg mysg.g = gp mysg.isSelect = false - mysg.c = c + mysg.c.set(c) gp.param = nil c.recvq.enqueue(mysg) if c.timer != nil { @@ -680,7 +680,7 @@ func chanrecv(c *hchan, ep unsafe.Pointer, block bool) (selected, received bool) } success := mysg.success gp.param = nil - mysg.c = nil + mysg.c.set(nil) releaseSudog(mysg) return true, success } @@ -727,14 +727,14 @@ func recv(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func(), skip int) { typedmemmove(c.elemtype, ep, qp) } // copy data from sender to queue - typedmemmove(c.elemtype, qp, sg.elem) + typedmemmove(c.elemtype, qp, sg.elem.get()) c.recvx++ if c.recvx == c.dataqsiz { c.recvx = 0 } c.sendx = c.recvx // c.sendx = (c.sendx+1) % c.dataqsiz } - sg.elem = nil + sg.elem.set(nil) gp := sg.g unlockf() gp.param = unsafe.Pointer(sg) diff --git a/src/runtime/crash_test.go b/src/runtime/crash_test.go index 2db86e0562d6ae..2b8ca549ad84f2 100644 --- a/src/runtime/crash_test.go +++ b/src/runtime/crash_test.go @@ -186,6 +186,22 @@ func buildTestProg(t *testing.T, binary string, flags ...string) (string, error) t.Logf("running %v", cmd) cmd.Dir = "testdata/" + binary cmd = testenv.CleanCmdEnv(cmd) + + // If tests need any experimental flags, add them here. + // + // TODO(vsaioc): Remove `goroutineleakprofile` once the feature is no longer experimental. + edited := false + for i := range cmd.Env { + e := cmd.Env[i] + if _, vars, ok := strings.Cut(e, "GOEXPERIMENT="); ok { + cmd.Env[i] = "GOEXPERIMENT=" + vars + ",goroutineleakprofile" + edited, _ = true, vars + } + } + if !edited { + cmd.Env = append(cmd.Env, "GOEXPERIMENT=goroutineleakprofile") + } + out, err := cmd.CombinedOutput() if err != nil { target.err = fmt.Errorf("building %s %v: %v\n%s", binary, flags, err, out) diff --git a/src/runtime/goroutineleakprofile_test.go b/src/runtime/goroutineleakprofile_test.go new file mode 100644 index 00000000000000..9857d725deef32 --- /dev/null +++ b/src/runtime/goroutineleakprofile_test.go @@ -0,0 +1,595 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime_test + +import ( + "fmt" + "regexp" + "strings" + "testing" +) + +func TestGoroutineLeakProfile(t *testing.T) { + // Goroutine leak test case. + // + // Test cases can be configured with test name, the name of the entry point function, + // a set of expected leaks identified by regular expressions, and the number of times + // the test should be repeated. + // + // Repeated runs reduce flakiness in some tests. 
+ type testCase struct { + name string + simple bool + repetitions int + expectedLeaks map[*regexp.Regexp]bool + + // flakyLeaks are goroutine leaks that are too flaky to be reliably detected. + // Still, they might pop up every once in a while. The test will pass regardless + // if they occur or nor, as they are not unexpected. + // + // Note that all flaky leaks are true positives, i.e. real goroutine leaks, + // and it is only their detection that is unreliable due to scheduling + // non-determinism. + flakyLeaks map[*regexp.Regexp]struct{} + } + + // makeAnyTest is a short-hand for creating test cases. + // Each of the leaks in the list is identified by a regular expression. + // If a leak is flaky, it is added to the flakyLeaks map. + makeAnyTest := func(name string, flaky bool, repetitions int, leaks ...string) testCase { + tc := testCase{ + name: name, + expectedLeaks: make(map[*regexp.Regexp]bool, len(leaks)), + flakyLeaks: make(map[*regexp.Regexp]struct{}, len(leaks)), + // Make sure the test is repeated at least once. + repetitions: repetitions | 1, + } + + for _, leak := range leaks { + if !flaky { + tc.expectedLeaks[regexp.MustCompile(leak)] = false + } else { + tc.flakyLeaks[regexp.MustCompile(leak)] = struct{}{} + } + } + + return tc + } + + // makeTest is a short-hand for creating non-flaky test cases. + makeTest := func(name string, leaks ...string) testCase { + tcase := makeAnyTest(name, false, 2, leaks...) + tcase.simple = true + return tcase + } + + // makeFlakyTest is a short-hand for creating flaky test cases. + makeFlakyTest := func(name string, leaks ...string) testCase { + if testing.Short() { + return makeAnyTest(name, true, 2, leaks...) + } + return makeAnyTest(name, true, 10, leaks...) + } + + goroutineHeader := regexp.MustCompile(`goroutine \d+ \[`) + + // extractLeaks takes the output of a test and splits it into a + // list of strings denoting goroutine leaks. + // + // If the input is: + // + // goroutine 1 [wait reason (leaked)]: + // main.leaked() + // ./testdata/testgoroutineleakprofile/foo.go:37 +0x100 + // created by main.main() + // ./testdata/testgoroutineleakprofile/main.go:10 +0x20 + // + // goroutine 2 [wait reason (leaked)]: + // main.leaked2() + // ./testdata/testgoroutineleakprofile/foo.go:37 +0x100 + // created by main.main() + // ./testdata/testgoroutineleakprofile/main.go:10 +0x20 + // + // The output is (as a list of strings): + // + // leaked() [wait reason] + // leaked2() [wait reason] + extractLeaks := func(output string) []string { + stacks := strings.Split(output, "\n\ngoroutine") + var leaks []string + for _, stack := range stacks { + lines := strings.Split(stack, "\n") + if len(lines) < 5 { + // Expecting at least the following lines (where n=len(lines)-1): + // + // [0] goroutine n [wait reason (leaked)] + // ... + // [n-3] bottom.leak.frame(...) + // [n-2] ./bottom/leak/frame/source.go:line + // [n-1] created by go.instruction() + // [n] ./go/instruction/source.go:line + continue + } + + if !strings.Contains(lines[0], "(leaked)") { + // Ignore non-leaked goroutines. + continue + } + + // Get the wait reason from the goroutine header. + header := lines[0] + waitReason := goroutineHeader.ReplaceAllString(header, "[") + waitReason = strings.ReplaceAll(waitReason, " (leaked)", "") + + // Get the function name from the stack trace (should be two lines above `created by`). 
+ var funcName string + for i := len(lines) - 1; i >= 0; i-- { + if strings.Contains(lines[i], "created by") { + funcName = strings.TrimPrefix(lines[i-2], "main.") + break + } + } + if funcName == "" { + t.Fatalf("failed to extract function name from stack trace: %s", lines) + } + + leaks = append(leaks, funcName+" "+waitReason) + } + return leaks + } + + // Micro tests involve very simple leaks for each type of concurrency primitive operation. + microTests := []testCase{ + makeTest("NilRecv", + `NilRecv\.func1\(.* \[chan receive \(nil chan\)\]`, + ), + makeTest("NilSend", + `NilSend\.func1\(.* \[chan send \(nil chan\)\]`, + ), + makeTest("SelectNoCases", + `SelectNoCases\.func1\(.* \[select \(no cases\)\]`, + ), + makeTest("ChanRecv", + `ChanRecv\.func1\(.* \[chan receive\]`, + ), + makeTest("ChanSend", + `ChanSend\.func1\(.* \[chan send\]`, + ), + makeTest("Select", + `Select\.func1\(.* \[select\]`, + ), + makeTest("WaitGroup", + `WaitGroup\.func1\(.* \[sync\.WaitGroup\.Wait\]`, + ), + makeTest("MutexStack", + `MutexStack\.func1\(.* \[sync\.Mutex\.Lock\]`, + ), + makeTest("MutexHeap", + `MutexHeap\.func1.1\(.* \[sync\.Mutex\.Lock\]`, + ), + makeTest("Cond", + `Cond\.func1\(.* \[sync\.Cond\.Wait\]`, + ), + makeTest("RWMutexRLock", + `RWMutexRLock\.func1\(.* \[sync\.RWMutex\.RLock\]`, + ), + makeTest("RWMutexLock", + `RWMutexLock\.func1\(.* \[sync\.(RW)?Mutex\.Lock\]`, + ), + makeTest("Mixed", + `Mixed\.func1\(.* \[sync\.WaitGroup\.Wait\]`, + `Mixed\.func1.1\(.* \[chan send\]`, + ), + makeTest("NoLeakGlobal"), + } + + // Stress tests are flaky and we do not strictly care about their output. + // They are only intended to stress the goroutine leak detector and profiling + // infrastructure in interesting ways. + stressTestCases := []testCase{ + makeFlakyTest("SpawnGC", + `spawnGC.func1\(.* \[chan receive\]`, + ), + makeTest("DaisyChain"), + } + + // Common goroutine leak patterns. + // Extracted from "Unveiling and Vanquishing Goroutine Leaks in Enterprise Microservices: A Dynamic Analysis Approach" + // doi:10.1109/CGO57630.2024.10444835 + patternTestCases := []testCase{ + makeTest("NoCloseRange", + `noCloseRange\(.* \[chan send\]`, + `noCloseRange\.func1\(.* \[chan receive\]`, + ), + makeTest("MethodContractViolation", + `worker\.Start\.func1\(.* \[select\]`, + ), + makeTest("DoubleSend", + `DoubleSend\.func3\(.* \[chan send\]`, + ), + makeTest("EarlyReturn", + `earlyReturn\.func1\(.* \[chan send\]`, + ), + makeTest("NCastLeak", + `nCastLeak\.func1\(.* \[chan send\]`, + `NCastLeak\.func2\(.* \[chan receive\]`, + ), + makeTest("Timeout", + // (vsaioc): Timeout is *theoretically* flaky, but the + // pseudo-random choice for select case branches makes it + // practically impossible for it to fail. + `timeout\.func1\(.* \[chan send\]`, + ), + } + + // GoKer tests from "GoBench: A Benchmark Suite of Real-World Go Concurrency Bugs". + // Refer to testdata/testgoroutineleakprofile/goker/README.md. + // + // This list is curated for tests that are not excessively flaky. + // Some tests are also excluded because they are redundant. + // + // TODO(vsaioc): Some of these might be removable (their patterns may overlap). 
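To make the expected-leak patterns above concrete, here is a hypothetical leak string in the "function [wait reason]" shape that extractLeaks produces, and how one of the micro-test regular expressions matches it; the exact argument list and trailing punctuation vary between runs, which is why the patterns use `.*` and rely on substring matching:

	leak := "NilRecv.func1() [chan receive (nil chan)]:"
	pat := regexp.MustCompile(`NilRecv\.func1\(.* \[chan receive \(nil chan\)\]`)
	fmt.Println(pat.MatchString(leak)) // true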
+ gokerTestCases := []testCase{ + makeFlakyTest("Cockroach584", + `Cockroach584\.func2\(.* \[sync\.Mutex\.Lock\]`, + ), + makeFlakyTest("Cockroach1055", + `Cockroach1055\.func2\(.* \[chan receive\]`, + `Cockroach1055\.func2\.2\(.* \[sync\.WaitGroup\.Wait\]`, + `Cockroach1055\.func2\.1\(.* \[chan receive\]`, + `Cockroach1055\.func2\.1\(.* \[sync\.Mutex\.Lock\]`, + ), + makeFlakyTest("Cockroach1462", + `\(\*Stopper_cockroach1462\)\.RunWorker\.func1\(.* \[chan send\]`, + `Cockroach1462\.func2\(.* \[sync\.WaitGroup\.Wait\]`, + ), + makeFlakyTest("Cockroach2448", + `\(\*Store_cockroach2448\)\.processRaft\(.* \[select\]`, + `\(\*state_cockroach2448\)\.start\(.* \[select\]`, + ), + makeFlakyTest("Cockroach3710", + `\(\*Store_cockroach3710\)\.ForceRaftLogScanAndProcess\(.* \[sync\.RWMutex\.RLock\]`, + `\(\*Store_cockroach3710\)\.processRaft\.func1\(.* \[sync\.RWMutex\.Lock\]`, + ), + makeFlakyTest("Cockroach6181", + `testRangeCacheCoalescedRequests_cockroach6181\(.* \[sync\.WaitGroup\.Wait\]`, + `testRangeCacheCoalescedRequests_cockroach6181\.func1\.1\(.* \[sync\.(RW)?Mutex\.Lock\]`, + `testRangeCacheCoalescedRequests_cockroach6181\.func1\.1\(.* \[sync\.RWMutex\.RLock\]`, + ), + makeTest("Cockroach7504", + `Cockroach7504\.func2\.1.* \[sync\.Mutex\.Lock\]`, + `Cockroach7504\.func2\.2.* \[sync\.Mutex\.Lock\]`, + ), + makeFlakyTest("Cockroach9935", + `\(\*loggingT_cockroach9935\)\.outputLogEntry\(.* \[sync\.Mutex\.Lock\]`, + ), + makeFlakyTest("Cockroach10214", + `\(*Store_cockroach10214\)\.sendQueuedHeartbeats\(.* \[sync\.Mutex\.Lock\]`, + `\(*Replica_cockroach10214\)\.tick\(.* \[sync\.Mutex\.Lock\]`, + ), + makeFlakyTest("Cockroach10790", + `\(\*Replica_cockroach10790\)\.beginCmds\.func1\(.* \[chan receive\]`, + ), + makeTest("Cockroach13197", + `\(\*Tx_cockroach13197\)\.awaitDone\(.* \[chan receive\]`, + ), + makeTest("Cockroach13755", + `\(\*Rows_cockroach13755\)\.awaitDone\(.* \[chan receive\]`, + ), + makeFlakyTest("Cockroach16167", + `Cockroach16167\.func2\(.* \[sync\.RWMutex\.RLock\]`, + `\(\*Executor_cockroach16167\)\.Start\(.* \[sync\.RWMutex\.Lock\]`, + ), + makeFlakyTest("Cockroach18101", + `restore_cockroach18101\.func1\(.* \[chan send\]`, + ), + makeTest("Cockroach24808", + `Cockroach24808\.func2\(.* \[chan send\]`, + ), + makeTest("Cockroach25456", + `Cockroach25456\.func2\(.* \[chan receive\]`, + ), + makeTest("Cockroach35073", + `Cockroach35073\.func2.1\(.* \[chan send\]`, + `Cockroach35073\.func2\(.* \[chan send\]`, + ), + makeTest("Cockroach35931", + `Cockroach35931\.func2\(.* \[chan send\]`, + ), + makeTest("Etcd5509", + `Etcd5509\.func2\(.* \[sync\.RWMutex\.Lock\]`, + ), + makeTest("Etcd6708", + `Etcd6708\.func2\(.* \[sync\.RWMutex\.RLock\]`, + ), + makeFlakyTest("Etcd6857", + `\(\*node_etcd6857\)\.Status\(.* \[chan send\]`, + ), + makeFlakyTest("Etcd6873", + `\(\*watchBroadcasts_etcd6873\)\.stop\(.* \[chan receive\]`, + `newWatchBroadcasts_etcd6873\.func1\(.* \[sync\.Mutex\.Lock\]`, + ), + makeFlakyTest("Etcd7492", + `Etcd7492\.func2\(.* \[sync\.WaitGroup\.Wait\]`, + `Etcd7492\.func2\.1\(.* \[chan send\]`, + `\(\*simpleTokenTTLKeeper_etcd7492\)\.run\(.* \[sync\.Mutex\.Lock\]`, + ), + makeFlakyTest("Etcd7902", + `doRounds_etcd7902\.func1\(.* \[chan receive\]`, + `doRounds_etcd7902\.func1\(.* \[sync\.Mutex\.Lock\]`, + `runElectionFunc_etcd7902\(.* \[sync\.WaitGroup\.Wait\]`, + ), + makeTest("Etcd10492", + `Etcd10492\.func2\(.* \[sync\.Mutex\.Lock\]`, + ), + makeTest("Grpc660", + `\(\*benchmarkClient_grpc660\)\.doCloseLoopUnary\.func1\(.* \[chan send\]`, + ), + 
makeFlakyTest("Grpc795", + `\(\*Server_grpc795\)\.Serve\(.* \[sync\.Mutex\.Lock\]`, + `testServerGracefulStopIdempotent_grpc795\(.* \[sync\.Mutex\.Lock\]`, + ), + makeTest("Grpc862", + `DialContext_grpc862\.func2\(.* \[chan receive\]`), + makeTest("Grpc1275", + `testInflightStreamClosing_grpc1275\.func1\(.* \[chan receive\]`), + makeTest("Grpc1424", + `DialContext_grpc1424\.func1\(.* \[chan receive\]`), + makeFlakyTest("Grpc1460", + `\(\*http2Client_grpc1460\)\.keepalive\(.* \[chan receive\]`, + `\(\*http2Client_grpc1460\)\.NewStream\(.* \[sync\.Mutex\.Lock\]`, + ), + makeFlakyTest("Grpc3017", + // grpc/3017 involves a goroutine leak that also simultaneously engages many GC assists. + `Grpc3017\.func2\(.* \[chan receive\]`, + `Grpc3017\.func2\.1\(.* \[sync\.Mutex\.Lock\]`, + `\(\*lbCacheClientConn_grpc3017\)\.RemoveSubConn\.func1\(.* \[sync\.Mutex\.Lock\]`, + ), + makeFlakyTest("Hugo3251", + `Hugo3251\.func2\(.* \[sync\.WaitGroup\.Wait\]`, + `Hugo3251\.func2\.1\(.* \[sync\.Mutex\.Lock\]`, + `Hugo3251\.func2\.1\(.* \[sync\.RWMutex\.RLock\]`, + ), + makeFlakyTest("Hugo5379", + `\(\*Page_hugo5379\)\.initContent\.func1\.1\(.* \[sync\.Mutex\.Lock\]`, + `pageRenderer_hugo5379\(.* \[sync\.Mutex\.Lock\]`, + `Hugo5379\.func2\(.* \[sync\.WaitGroup\.Wait\]`, + ), + makeFlakyTest("Istio16224", + `Istio16224\.func2\(.* \[sync\.Mutex\.Lock\]`, + `\(\*controller_istio16224\)\.Run\(.* \[chan send\]`, + `\(\*controller_istio16224\)\.Run\(.* \[chan receive\]`, + ), + makeFlakyTest("Istio17860", + `\(\*agent_istio17860\)\.runWait\(.* \[chan send\]`, + ), + makeFlakyTest("Istio18454", + `\(\*Worker_istio18454\)\.Start\.func1\(.* \[chan receive\]`, + `\(\*Worker_istio18454\)\.Start\.func1\(.* \[chan send\]`, + ), + // NOTE(vsaioc): + // Kubernetes/1321 is excluded due to a race condition in the original program + // that may, in extremely rare cases, lead to nil pointer dereference crashes. + // (Reproducible even with regular GC). Only kept here for posterity. 
+ // + // makeTest(testCase{name: "Kubernetes1321"}, + // `NewMux_kubernetes1321\.gowrap1\(.* \[chan send\]`, + // `testMuxWatcherClose_kubernetes1321\(.* \[sync\.Mutex\.Lock\]`), + makeTest("Kubernetes5316", + `finishRequest_kubernetes5316\.func1\(.* \[chan send\]`, + ), + makeFlakyTest("Kubernetes6632", + `\(\*idleAwareFramer_kubernetes6632\)\.monitor\(.* \[sync\.Mutex\.Lock\]`, + `\(\*idleAwareFramer_kubernetes6632\)\.WriteFrame\(.* \[chan send\]`, + ), + makeFlakyTest("Kubernetes10182", + `\(\*statusManager_kubernetes10182\)\.Start\.func1\(.* \[sync\.Mutex\.Lock\]`, + `\(\*statusManager_kubernetes10182\)\.SetPodStatus\(.* \[chan send\]`, + ), + makeFlakyTest("Kubernetes11298", + `After_kubernetes11298\.func1\(.* \[chan receive\]`, + `After_kubernetes11298\.func1\(.* \[sync\.Cond\.Wait\]`, + `Kubernetes11298\.func2\(.* \[chan receive\]`, + ), + makeFlakyTest("Kubernetes13135", + `Util_kubernetes13135\(.* \[sync\.Mutex\.Lock\]`, + `\(\*WatchCache_kubernetes13135\)\.Add\(.* \[sync\.Mutex\.Lock\]`, + ), + makeTest("Kubernetes25331", + `\(\*watchChan_kubernetes25331\)\.run\(.* \[chan send\]`, + ), + makeFlakyTest("Kubernetes26980", + `Kubernetes26980\.func2\(.* \[chan receive\]`, + `Kubernetes26980\.func2\.1\(.* \[sync\.Mutex\.Lock\]`, + `\(\*processorListener_kubernetes26980\)\.pop\(.* \[chan receive\]`, + ), + makeFlakyTest("Kubernetes30872", + `\(\*DelayingDeliverer_kubernetes30872\)\.StartWithHandler\.func1\(.* \[sync\.Mutex\.Lock\]`, + `\(\*Controller_kubernetes30872\)\.Run\(.* \[sync\.Mutex\.Lock\]`, + `\(\*NamespaceController_kubernetes30872\)\.Run\.func1\(.* \[sync\.Mutex\.Lock\]`, + ), + makeTest("Kubernetes38669", + `\(\*cacheWatcher_kubernetes38669\)\.process\(.* \[chan send\]`, + ), + makeFlakyTest("Kubernetes58107", + `\(\*ResourceQuotaController_kubernetes58107\)\.worker\(.* \[sync\.Cond\.Wait\]`, + `\(\*ResourceQuotaController_kubernetes58107\)\.worker\(.* \[sync\.RWMutex\.RLock\]`, + `\(\*ResourceQuotaController_kubernetes58107\)\.Sync\(.* \[sync\.RWMutex\.Lock\]`, + ), + makeFlakyTest("Kubernetes62464", + `\(\*manager_kubernetes62464\)\.reconcileState\(.* \[sync\.RWMutex\.RLock\]`, + `\(\*staticPolicy_kubernetes62464\)\.RemoveContainer\(.* \[sync\.(RW)?Mutex\.Lock\]`, + ), + makeFlakyTest("Kubernetes70277", + `Kubernetes70277\.func2\(.* \[chan receive\]`, + ), + makeFlakyTest("Moby4951", + `\(\*DeviceSet_moby4951\)\.DeleteDevice\(.* \[sync\.Mutex\.Lock\]`, + ), + makeTest("Moby7559", + `\(\*UDPProxy_moby7559\)\.Run\(.* \[sync\.Mutex\.Lock\]`, + ), + makeTest("Moby17176", + `testDevmapperLockReleasedDeviceDeletion_moby17176\.func1\(.* \[sync\.Mutex\.Lock\]`, + ), + makeFlakyTest("Moby21233", + `\(\*Transfer_moby21233\)\.Watch\.func1\(.* \[chan send\]`, + `\(\*Transfer_moby21233\)\.Watch\.func1\(.* \[select\]`, + `testTransfer_moby21233\(.* \[chan receive\]`, + ), + makeTest("Moby25348", + `\(\*Manager_moby25348\)\.init\(.* \[sync\.WaitGroup\.Wait\]`, + ), + makeFlakyTest("Moby27782", + `\(\*JSONFileLogger_moby27782\)\.readLogs\(.* \[sync\.Cond\.Wait\]`, + `\(\*Watcher_moby27782\)\.readEvents\(.* \[select\]`, + ), + makeFlakyTest("Moby28462", + `monitor_moby28462\(.* \[sync\.Mutex\.Lock\]`, + `\(\*Daemon_moby28462\)\.StateChanged\(.* \[chan send\]`, + ), + makeTest("Moby30408", + `Moby30408\.func2\(.* \[chan receive\]`, + `testActive_moby30408\.func1\(.* \[sync\.Cond\.Wait\]`, + ), + makeFlakyTest("Moby33781", + `monitor_moby33781\.func1\(.* \[chan send\]`, + ), + makeFlakyTest("Moby36114", + `\(\*serviceVM_moby36114\)\.hotAddVHDsAtStart\(.* \[sync\.Mutex\.Lock\]`, + ), + 
makeFlakyTest("Serving2137", + `\(\*Breaker_serving2137\)\.concurrentRequest\.func1\(.* \[chan send\]`, + `\(\*Breaker_serving2137\)\.concurrentRequest\.func1\(.* \[sync\.Mutex\.Lock\]`, + `Serving2137\.func2\(.* \[chan receive\]`, + ), + makeTest("Syncthing4829", + `Syncthing4829\.func2\(.* \[sync\.RWMutex\.RLock\]`, + ), + makeTest("Syncthing5795", + `\(\*rawConnection_syncthing5795\)\.dispatcherLoop\(.* \[chan receive\]`, + `Syncthing5795\.func2.* \[chan receive\]`, + ), + } + + // Combine all test cases into a single list. + testCases := append(microTests, stressTestCases...) + testCases = append(testCases, patternTestCases...) + + // Test cases must not panic or cause fatal exceptions. + failStates := regexp.MustCompile(`fatal|panic`) + + testApp := func(exepath string, testCases []testCase) { + + // Build the test program once. + exe, err := buildTestProg(t, exepath) + if err != nil { + t.Fatal(fmt.Sprintf("building testgoroutineleakprofile failed: %v", err)) + } + + for _, tcase := range testCases { + t.Run(tcase.name, func(t *testing.T) { + t.Parallel() + + cmdEnv := []string{ + "GODEBUG=asyncpreemptoff=1", + "GOEXPERIMENT=greenteagc,goroutineleakprofile", + } + + if tcase.simple { + // If the test is simple, set GOMAXPROCS=1 in order to better + // control the behavior of the scheduler. + cmdEnv = append(cmdEnv, "GOMAXPROCS=1") + } + + var output string + for i := 0; i < tcase.repetitions; i++ { + // Run program for one repetition and get runOutput trace. + runOutput := runBuiltTestProg(t, exe, tcase.name, cmdEnv...) + if len(runOutput) == 0 { + t.Errorf("Test %s produced no output. Is the goroutine leak profile collected?", tcase.name) + } + + // Zero tolerance policy for fatal exceptions or panics. + if failStates.MatchString(runOutput) { + t.Errorf("unexpected fatal exception or panic!\noutput:\n%s\n\n", runOutput) + } + + output += runOutput + "\n\n" + } + + // Extract all the goroutine leaks + foundLeaks := extractLeaks(output) + + // If the test case was not expected to produce leaks, but some were reported, + // stop the test immediately. Zero tolerance policy for false positives. + if len(tcase.expectedLeaks)+len(tcase.flakyLeaks) == 0 && len(foundLeaks) > 0 { + t.Errorf("output:\n%s\n\ngoroutines leaks detected in case with no leaks", output) + } + + unexpectedLeaks := make([]string, 0, len(foundLeaks)) + + // Parse every leak and check if it is expected (maybe as a flaky leak). + LEAKS: + for _, leak := range foundLeaks { + // Check if the leak is expected. + // If it is, check whether it has been encountered before. + var foundNew bool + var leakPattern *regexp.Regexp + + for expectedLeak, ok := range tcase.expectedLeaks { + if expectedLeak.MatchString(leak) { + if !ok { + foundNew = true + } + + leakPattern = expectedLeak + break + } + } + + if foundNew { + // Only bother writing if we found a new leak. + tcase.expectedLeaks[leakPattern] = true + } + + if leakPattern == nil { + // We are dealing with a leak not marked as expected. + // Check if it is a flaky leak. + for flakyLeak := range tcase.flakyLeaks { + if flakyLeak.MatchString(leak) { + // The leak is flaky. Carry on to the next line. 
+ continue LEAKS + } + } + + unexpectedLeaks = append(unexpectedLeaks, leak) + } + } + + missingLeakStrs := make([]string, 0, len(tcase.expectedLeaks)) + for expectedLeak, found := range tcase.expectedLeaks { + if !found { + missingLeakStrs = append(missingLeakStrs, expectedLeak.String()) + } + } + + var errors []error + if len(unexpectedLeaks) > 0 { + errors = append(errors, fmt.Errorf("unexpected goroutine leaks:\n%s\n", strings.Join(unexpectedLeaks, "\n"))) + } + if len(missingLeakStrs) > 0 { + errors = append(errors, fmt.Errorf("missing expected leaks:\n%s\n", strings.Join(missingLeakStrs, ", "))) + } + if len(errors) > 0 { + t.Fatalf("Failed with the following errors:\n%s\n\noutput:\n%s", errors, output) + } + }) + } + } + + testApp("testgoroutineleakprofile", testCases) + testApp("testgoroutineleakprofile/goker", gokerTestCases) +} diff --git a/src/runtime/mbitmap.go b/src/runtime/mbitmap.go index 508de9a115723c..37a92c64bc6ecc 100644 --- a/src/runtime/mbitmap.go +++ b/src/runtime/mbitmap.go @@ -1267,6 +1267,28 @@ func markBitsForSpan(base uintptr) (mbits markBits) { return mbits } +// isMarkedOrNotInHeap returns true if a pointer is in the heap and marked, +// or if the pointer is not in the heap. Used by goroutine leak detection +// to determine if concurrency resources are reachable in memory. +func isMarkedOrNotInHeap(p unsafe.Pointer) bool { + obj, span, objIndex := findObject(uintptr(p), 0, 0) + if obj != 0 { + mbits := span.markBitsForIndex(objIndex) + return mbits.isMarked() + } + + // If we fall through to get here, the object is not in the heap. + // In this case, it is either a pointer to a stack object or a global resource. + // Treat it as reachable in memory by default, to be safe. + // + // TODO(vsaioc): we could be more precise by checking against the stacks + // of runnable goroutines. I don't think this is necessary, based on what we've seen, but + // let's keep the option open in case the runtime evolves. + // This will (naively) lead to quadratic blow-up for goroutine leak detection, + // but if it is only run on demand, maybe the extra cost is not a show-stopper. + return true +} + // advance advances the markBits to the next object in the span. func (m *markBits) advance() { if m.mask == 1<<7 { diff --git a/src/runtime/mgc.go b/src/runtime/mgc.go index 68cbfda5000573..b13ec845fc401f 100644 --- a/src/runtime/mgc.go +++ b/src/runtime/mgc.go @@ -376,8 +376,8 @@ type workType struct { // (and thus 8-byte alignment even on 32-bit architectures). bytesMarked uint64 - markrootNext uint32 // next markroot job - markrootJobs uint32 // number of markroot jobs + markrootNext atomic.Uint32 // next markroot job + markrootJobs atomic.Uint32 // number of markroot jobs nproc uint32 tstart int64 @@ -385,17 +385,44 @@ type workType struct { // Number of roots of various root types. Set by gcPrepareMarkRoots. // - // nStackRoots == len(stackRoots), but we have nStackRoots for - // consistency. - nDataRoots, nBSSRoots, nSpanRoots, nStackRoots int + // During normal GC cycle, nStackRoots == nMaybeRunnableStackRoots == len(stackRoots); + // during goroutine leak detection, nMaybeRunnableStackRoots is the number of stackRoots + // scheduled for marking. + // In both variants, nStackRoots == len(stackRoots). + nDataRoots, nBSSRoots, nSpanRoots, nStackRoots, nMaybeRunnableStackRoots int + + // The following fields monitor the GC phase of the current cycle during + // goroutine leak detection. 
+ goroutineLeak struct { + // Once set, it indicates that the GC will perform goroutine leak detection during + // the next GC cycle; it is set by goroutineLeakGC and unset during gcStart. + pending atomic.Bool + // Once set, it indicates that the GC has started a goroutine leak detection run; + // it is set during gcStart and unset during gcMarkTermination; + // + // Protected by STW. + enabled bool + // Once set, it indicates that the GC has performed goroutine leak detection during + // the current GC cycle; it is set during gcMarkDone, right after goroutine leak detection, + // and unset during gcMarkTermination; + // + // Protected by STW. + done bool + // The number of leaked goroutines during the last leak detection GC cycle. + // + // Write-protected by STW in findGoroutineLeaks. + count int + } // Base indexes of each root type. Set by gcPrepareMarkRoots. baseData, baseBSS, baseSpans, baseStacks, baseEnd uint32 - // stackRoots is a snapshot of all of the Gs that existed - // before the beginning of concurrent marking. The backing - // store of this must not be modified because it might be - // shared with allgs. + // stackRoots is a snapshot of all of the Gs that existed before the + // beginning of concurrent marking. During goroutine leak detection, stackRoots + // is partitioned into two sets; to the left of nMaybeRunnableStackRoots are stackRoots + // of running / runnable goroutines and to the right of nMaybeRunnableStackRoots are + // stackRoots of unmarked / not runnable goroutines + // The stackRoots array is re-partitioned after each marking phase iteration. stackRoots []*g // Each type of GC state transition is protected by a lock. @@ -562,6 +589,55 @@ func GC() { releasem(mp) } +// goroutineLeakGC runs a GC cycle that performs goroutine leak detection. +// +//go:linkname goroutineLeakGC runtime/pprof.runtime_goroutineLeakGC +func goroutineLeakGC() { + // Set the pending flag to true, instructing the next GC cycle to + // perform goroutine leak detection. + work.goroutineLeak.pending.Store(true) + + // Spin GC cycles until the pending flag is unset. + // This ensures that goroutineLeakGC waits for a GC cycle that + // actually performs goroutine leak detection. + // + // This is needed in case multiple concurrent calls to GC + // are simultaneously fired by the system, wherein some + // of them are dropped. + // + // In the vast majority of cases, only one loop iteration is needed; + // however, multiple concurrent calls to goroutineLeakGC could lead to + // the execution of additional GC cycles. + // + // Examples: + // + // pending? | G1 | G2 + // ---------|-------------------------|----------------------- + // - | goroutineLeakGC() | goroutineLeakGC() + // - | pending.Store(true) | . + // X | for pending.Load() | . + // X | GC() | . + // X | > gcStart() | . + // X | pending.Store(false) | . + // ... + // - | > gcMarkDone() | . + // - | . | pending.Store(true) + // ... + // X | > gcMarkTermination() | . + // X | ... + // X | < GC returns | . + // X | for pending.Load | . + // X | GC() | . + // X | . | for pending.Load() + // X | . | GC() + // ... + // The first to pick up the pending flag will start a + // leak detection cycle. + for work.goroutineLeak.pending.Load() { + GC() + } +} + // gcWaitOnMark blocks until GC finishes the Nth mark phase. If GC has // already completed this mark phase, it returns immediately. 
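The hand-off in the table above is the same request-coalescing shape shown in isolation below, as an editorial sketch (names are illustrative, sync/atomic is assumed; the real runtime serializes cycle starts, so the Load/Store pair in gcStart is not racing with another cycle start):

	var pending atomic.Bool // analogue of work.goroutineLeak.pending

	// Caller side: analogue of goroutineLeakGC.
	func requestLeakCycle(gc func()) {
		pending.Store(true)
		for pending.Load() { // run cycles until one of them consumed the request
			gc()
		}
	}

	// Cycle side: analogue of the check in gcStart.
	func startCycle() (leakDetection bool) {
		if pending.Load() {
			pending.Store(false) // this cycle owns the request
			return true
		}
		return false
	}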
func gcWaitOnMark(n uint32) { @@ -785,6 +861,15 @@ func gcStart(trigger gcTrigger) { schedEnableUser(false) } + // If goroutine leak detection is pending, enable it for this GC cycle. + if work.goroutineLeak.pending.Load() { + work.goroutineLeak.enabled = true + work.goroutineLeak.pending.Store(false) + // Set all sync objects of blocked goroutines as untraceable + // by the GC. Only set as traceable at the end of the GC cycle. + setSyncObjectsUntraceable() + } + // Enter concurrent mark phase and enable // write barriers. // @@ -995,8 +1080,20 @@ top: } } }) - if restart { - gcDebugMarkDone.restartedDueTo27993 = true + + // Check whether we need to resume the marking phase because of issue #27993 + // or because of goroutine leak detection. + if restart || (work.goroutineLeak.enabled && !work.goroutineLeak.done) { + if restart { + // Restart because of issue #27993. + gcDebugMarkDone.restartedDueTo27993 = true + } else { + // Marking has reached a fixed-point. Attempt to detect goroutine leaks. + // + // If the returned value is true, then detection already concluded for this cycle. + // Otherwise, more runnable goroutines were discovered, requiring additional mark work. + work.goroutineLeak.done = findGoroutineLeaks() + } getg().m.preemptoff = "" systemstack(func() { @@ -1047,6 +1144,172 @@ top: gcMarkTermination(stw) } +// isMaybeRunnable checks whether a goroutine may still be semantically runnable. +// For goroutines which are semantically runnable, this will eventually return true +// as the GC marking phase progresses. It returns false for leaked goroutines, or for +// goroutines which are not yet computed as possibly runnable by the GC. +func (gp *g) isMaybeRunnable() bool { + // Check whether the goroutine is actually in a waiting state first. + if readgstatus(gp) != _Gwaiting { + // If the goroutine is not waiting, then clearly it is maybe runnable. + return true + } + + switch gp.waitreason { + case waitReasonSelectNoCases, + waitReasonChanSendNilChan, + waitReasonChanReceiveNilChan: + // Select with no cases or communicating on nil channels + // make goroutines unrunnable by definition. + return false + case waitReasonChanReceive, + waitReasonSelect, + waitReasonChanSend: + // Cycle all through all *sudog to check whether + // the goroutine is waiting on a marked channel. + for sg := gp.waiting; sg != nil; sg = sg.waitlink { + if isMarkedOrNotInHeap(unsafe.Pointer(sg.c.get())) { + return true + } + } + return false + case waitReasonSyncCondWait, + waitReasonSyncWaitGroupWait, + waitReasonSyncMutexLock, + waitReasonSyncRWMutexLock, + waitReasonSyncRWMutexRLock: + // If waiting on mutexes, wait groups, or condition variables, + // check if the synchronization primitive attached to the sudog is marked. + if gp.waiting != nil { + return isMarkedOrNotInHeap(gp.waiting.elem.get()) + } + } + return true +} + +// findMaybeRunnableGoroutines checks to see if more blocked but maybe-runnable goroutines exist. +// If so, it adds them into root set and increments work.markrootJobs accordingly. +// Returns true if we need to run another phase of markroots; returns false otherwise. +func findMaybeRunnableGoroutines() (moreWork bool) { + oldRootJobs := work.markrootJobs.Load() + + // To begin with we have a set of unchecked stackRoots between + // vIndex and ivIndex. During the loop, anything < vIndex should be + // valid stackRoots and anything >= ivIndex should be invalid stackRoots. + // The loop terminates when the two indices meet. 
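As a self-contained sketch of the reordering loop that follows (generic element type, with an arbitrary keep predicate standing in for isMaybeRunnable; only the shape is meant to match):

	// partitionLeft moves every element satisfying keep to the front of xs and
	// returns the boundary index b: xs[:b] satisfy keep, xs[b:] do not.
	func partitionLeft[T any](xs []T, keep func(T) bool) (b int) {
		v, iv := 0, len(xs)
		for v < iv {
			if keep(xs[v]) {
				v++
				continue
			}
			for iv--; iv != v; iv-- {
				if keep(xs[iv]) {
					xs[v], xs[iv] = xs[iv], xs[v]
					v++
					break
				}
			}
		}
		return v
	}

In findMaybeRunnableGoroutines the scan starts at the previous boundary (work.nMaybeRunnableStackRoots) rather than at zero, so each marking iteration only re-examines the still-unproven tail of stackRoots.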
+ var vIndex, ivIndex int = work.nMaybeRunnableStackRoots, work.nStackRoots + // Reorder goroutine list + for vIndex < ivIndex { + if work.stackRoots[vIndex].isMaybeRunnable() { + vIndex = vIndex + 1 + continue + } + for ivIndex = ivIndex - 1; ivIndex != vIndex; ivIndex = ivIndex - 1 { + if gp := work.stackRoots[ivIndex]; gp.isMaybeRunnable() { + work.stackRoots[ivIndex] = work.stackRoots[vIndex] + work.stackRoots[vIndex] = gp + vIndex = vIndex + 1 + break + } + } + } + + newRootJobs := work.baseStacks + uint32(vIndex) + if newRootJobs > oldRootJobs { + work.nMaybeRunnableStackRoots = vIndex + work.markrootJobs.Store(newRootJobs) + } + return newRootJobs > oldRootJobs +} + +// setSyncObjectsUntraceable scans allgs and sets the elem and c fields of all sudogs to +// an untrackable pointer. This prevents the GC from marking these objects as live in memory +// by following these pointers when runnning deadlock detection. +func setSyncObjectsUntraceable() { + assertWorldStopped() + + forEachGRace(func(gp *g) { + // Set as untraceable all synchronization objects of goroutines + // blocked at concurrency operations that could leak. + switch { + case gp.waitreason.isSyncWait(): + // Synchronization primitives are reachable from the *sudog via + // via the elem field. + for sg := gp.waiting; sg != nil; sg = sg.waitlink { + sg.elem.setUntraceable() + } + case gp.waitreason.isChanWait(): + // Channels and select statements are reachable from the *sudog via the c field. + for sg := gp.waiting; sg != nil; sg = sg.waitlink { + sg.c.setUntraceable() + } + } + }) +} + +// gcRestoreSyncObjects restores the elem and c fields of all sudogs to their original values. +// Should be invoked after the goroutine leak detection phase. +func gcRestoreSyncObjects() { + assertWorldStopped() + + forEachGRace(func(gp *g) { + for sg := gp.waiting; sg != nil; sg = sg.waitlink { + sg.elem.setTraceable() + sg.c.setTraceable() + } + }) +} + +// findGoroutineLeaks scans the remaining stackRoots and marks any which are +// blocked over exclusively unreachable concurrency primitives as leaked (deadlocked). +// Returns true if the goroutine leak check was performed (or unnecessary). +// Returns false if the GC cycle has not yet computed all maybe-runnable goroutines. +func findGoroutineLeaks() bool { + assertWorldStopped() + + // Report goroutine leaks and mark them unreachable, and resume marking + // we still need to mark these unreachable *g structs as they + // get reused, but their stack won't get scanned + if work.nMaybeRunnableStackRoots == work.nStackRoots { + // nMaybeRunnableStackRoots == nStackRoots means that all goroutines are marked. + return true + } + + // Check whether any more maybe-runnable goroutines can be found by the GC. + if findMaybeRunnableGoroutines() { + // We found more work, so we need to resume the marking phase. + return false + } + + // For the remaining goroutines, mark them as unreachable and leaked. + work.goroutineLeak.count = work.nStackRoots - work.nMaybeRunnableStackRoots + + for i := work.nMaybeRunnableStackRoots; i < work.nStackRoots; i++ { + gp := work.stackRoots[i] + casgstatus(gp, _Gwaiting, _Gleaked) + + // Add the primitives causing the goroutine leaks + // to the GC work queue, to ensure they are marked. + // + // NOTE(vsaioc): these primitives should also be reachable + // from the goroutine's stack, but let's play it safe. 
+ switch { + case gp.waitreason.isChanWait(): + for sg := gp.waiting; sg != nil; sg = sg.waitlink { + shade(sg.c.uintptr()) + } + case gp.waitreason.isSyncWait(): + for sg := gp.waiting; sg != nil; sg = sg.waitlink { + shade(sg.elem.uintptr()) + } + } + } + // Put the remaining roots as ready for marking and drain them. + work.markrootJobs.Add(int32(work.nStackRoots - work.nMaybeRunnableStackRoots)) + work.nMaybeRunnableStackRoots = work.nStackRoots + return true +} + // World must be stopped and mark assists and background workers must be // disabled. func gcMarkTermination(stw worldStop) { @@ -1199,7 +1462,18 @@ func gcMarkTermination(stw worldStop) { throw("non-concurrent sweep failed to drain all sweep queues") } + if work.goroutineLeak.enabled { + // Restore the elem and c fields of all sudogs to their original values. + gcRestoreSyncObjects() + } + + var goroutineLeakDone bool systemstack(func() { + // Pull the GC out of goroutine leak detection mode. + work.goroutineLeak.enabled = false + goroutineLeakDone = work.goroutineLeak.done + work.goroutineLeak.done = false + // The memstats updated above must be updated with the world // stopped to ensure consistency of some values, such as // sched.idleTime and sched.totaltime. memstats also include @@ -1273,7 +1547,11 @@ func gcMarkTermination(stw worldStop) { printlock() print("gc ", memstats.numgc, " @", string(itoaDiv(sbuf[:], uint64(work.tSweepTerm-runtimeInitTime)/1e6, 3)), "s ", - util, "%: ") + util, "%") + if goroutineLeakDone { + print(" (checking for goroutine leaks)") + } + print(": ") prev := work.tSweepTerm for i, ns := range []int64{work.tMark, work.tMarkTerm, work.tEnd} { if i != 0 { @@ -1647,8 +1925,8 @@ func gcMark(startTime int64) { work.tstart = startTime // Check that there's no marking work remaining. - if work.full != 0 || work.markrootNext < work.markrootJobs { - print("runtime: full=", hex(work.full), " next=", work.markrootNext, " jobs=", work.markrootJobs, " nDataRoots=", work.nDataRoots, " nBSSRoots=", work.nBSSRoots, " nSpanRoots=", work.nSpanRoots, " nStackRoots=", work.nStackRoots, "\n") + if next, jobs := work.markrootNext.Load(), work.markrootJobs.Load(); work.full != 0 || next < jobs { + print("runtime: full=", hex(work.full), " next=", next, " jobs=", jobs, " nDataRoots=", work.nDataRoots, " nBSSRoots=", work.nBSSRoots, " nSpanRoots=", work.nSpanRoots, " nStackRoots=", work.nStackRoots, "\n") panic("non-empty mark queue after concurrent mark") } diff --git a/src/runtime/mgcmark.go b/src/runtime/mgcmark.go index bb5fcabab37db8..ba3824f00dc9cb 100644 --- a/src/runtime/mgcmark.go +++ b/src/runtime/mgcmark.go @@ -53,6 +53,55 @@ const ( pagesPerSpanRoot = min(512, pagesPerArena) ) +// internalBlocked returns true if the goroutine is blocked due to an +// internal (non-leaking) waitReason, e.g. waiting for the netpoller or garbage collector. +// Such goroutines are never leak detection candidates according to the GC. +// +//go:nosplit +func (gp *g) internalBlocked() bool { + reason := gp.waitreason + return reason < waitReasonChanReceiveNilChan || waitReasonSyncWaitGroupWait < reason +} + +// allGsSnapshotSortedForGC takes a snapshot of allgs and returns a sorted +// array of Gs. The array is sorted by the G's status, with running Gs +// first, followed by blocked Gs. The returned index indicates the cutoff +// between runnable and blocked Gs. +// +// The world must be stopped or allglock must be held. 
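An editorial note on internalBlocked above (not part of this CL): the single range comparison only works because the waitReason constants are reshuffled later in this patch so that every leak-candidate reason forms one contiguous block, from waitReasonChanReceiveNilChan through waitReasonSyncWaitGroupWait. The detector's own predicate is just its negation:

	// Sketch: which blocked goroutines are even considered by leak detection.
	func isLeakCandidate(r waitReason) bool {
		return waitReasonChanReceiveNilChan <= r && r <= waitReasonSyncWaitGroupWait
	}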
+func allGsSnapshotSortedForGC() ([]*g, int) { + assertWorldStoppedOrLockHeld(&allglock) + + // Reset the status of leaked goroutines in order to improve + // the precision of goroutine leak detection. + for _, gp := range allgs { + gp.atomicstatus.CompareAndSwap(_Gleaked, _Gwaiting) + } + + allgsSorted := make([]*g, len(allgs)) + + // Indices cutting off runnable and blocked Gs. + var currIndex, blockedIndex = 0, len(allgsSorted) - 1 + for _, gp := range allgs { + // not sure if we need atomic load because we are stopping the world, + // but do it just to be safe for now + if status := readgstatus(gp); status != _Gwaiting || gp.internalBlocked() { + allgsSorted[currIndex] = gp + currIndex++ + } else { + allgsSorted[blockedIndex] = gp + blockedIndex-- + } + } + + // Because the world is stopped or allglock is held, allgadd + // cannot happen concurrently with this. allgs grows + // monotonically and existing entries never change, so we can + // simply return a copy of the slice header. For added safety, + // we trim everything past len because that can still change. + return allgsSorted, blockedIndex + 1 +} + // gcPrepareMarkRoots queues root scanning jobs (stacks, globals, and // some miscellany) and initializes scanning-related state. // @@ -102,11 +151,20 @@ func gcPrepareMarkRoots() { // ignore them because they begin life without any roots, so // there's nothing to scan, and any roots they create during // the concurrent phase will be caught by the write barrier. - work.stackRoots = allGsSnapshot() + if work.goroutineLeak.enabled { + // goroutine leak finder GC --- only prepare runnable + // goroutines for marking. + work.stackRoots, work.nMaybeRunnableStackRoots = allGsSnapshotSortedForGC() + } else { + // regular GC --- scan every goroutine + work.stackRoots = allGsSnapshot() + work.nMaybeRunnableStackRoots = len(work.stackRoots) + } + work.nStackRoots = len(work.stackRoots) - work.markrootNext = 0 - work.markrootJobs = uint32(fixedRootCount + work.nDataRoots + work.nBSSRoots + work.nSpanRoots + work.nStackRoots) + work.markrootNext.Store(0) + work.markrootJobs.Store(uint32(fixedRootCount + work.nDataRoots + work.nBSSRoots + work.nSpanRoots + work.nMaybeRunnableStackRoots)) // Calculate base indexes of each root type work.baseData = uint32(fixedRootCount) @@ -119,8 +177,8 @@ func gcPrepareMarkRoots() { // gcMarkRootCheck checks that all roots have been scanned. It is // purely for debugging. func gcMarkRootCheck() { - if work.markrootNext < work.markrootJobs { - print(work.markrootNext, " of ", work.markrootJobs, " markroot jobs done\n") + if next, jobs := work.markrootNext.Load(), work.markrootJobs.Load(); next < jobs { + print(next, " of ", jobs, " markroot jobs done\n") throw("left over markroot jobs") } @@ -858,7 +916,7 @@ func scanstack(gp *g, gcw *gcWork) int64 { case _Grunning: print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n") throw("scanstack: goroutine not stopped") - case _Grunnable, _Gsyscall, _Gwaiting: + case _Grunnable, _Gsyscall, _Gwaiting, _Gleaked: // ok } @@ -1126,6 +1184,28 @@ func gcDrainMarkWorkerFractional(gcw *gcWork) { gcDrain(gcw, gcDrainFractional|gcDrainUntilPreempt|gcDrainFlushBgCredit) } +// gcNextMarkRoot safely increments work.markrootNext and returns the +// index of the next root job. The returned boolean is true if the root job +// is valid, and false if there are no more root jobs to be claimed, +// i.e. work.markrootNext >= work.markrootJobs. 
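Editorial note on the CAS loop in gcNextMarkRoot below: the apparent reason it cannot reuse the plain atomic add from the normal path is that the add lets markrootNext overshoot markrootJobs when several workers race past the end of the queue, e.g.

	// jobs == 10, three workers race on the normal path:
	job := work.markrootNext.Add(1) - 1 // returns 10, 11, 12; markrootNext is now 13
	// job >= jobs, so each caller simply reports "no work" and moves on

which is harmless in an ordinary cycle. During leak detection, however, findMaybeRunnableGoroutines later raises markrootJobs (say to 12), and an already-overshot markrootNext would leave the newly exposed stack-root jobs unclaimed. The CAS loop never advances markrootNext past markrootJobs, so a later increase of the job count takes effect.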
+func gcNextMarkRoot() (uint32, bool) { + if !work.goroutineLeak.enabled { + // If not running goroutine leak detection, assume regular GC behavior. + job := work.markrootNext.Add(1) - 1 + return job, job < work.markrootJobs.Load() + } + + // Otherwise, use a CAS loop to increment markrootNext. + for next, jobs := work.markrootNext.Load(), work.markrootJobs.Load(); next < jobs; next = work.markrootNext.Load() { + // There is still work available at the moment. + if work.markrootNext.CompareAndSwap(next, next+1) { + // We manage to snatch a root job. Return the root index. + return next, true + } + } + return 0, false +} + // gcDrain scans roots and objects in work buffers, blackening grey // objects until it is unable to get more work. It may return before // GC is done; it's the caller's responsibility to balance work from @@ -1184,13 +1264,12 @@ func gcDrain(gcw *gcWork, flags gcDrainFlags) { } } - // Drain root marking jobs. - if work.markrootNext < work.markrootJobs { + if work.markrootNext.Load() < work.markrootJobs.Load() { // Stop if we're preemptible, if someone wants to STW, or if // someone is calling forEachP. for !(gp.preempt && (preemptible || sched.gcwaiting.Load() || pp.runSafePointFn != 0)) { - job := atomic.Xadd(&work.markrootNext, +1) - 1 - if job >= work.markrootJobs { + job, ok := gcNextMarkRoot() + if !ok { break } markroot(gcw, job, flushBgCredit) @@ -1342,9 +1421,9 @@ func gcDrainN(gcw *gcWork, scanWork int64) int64 { if b = gcw.tryGetObj(); b == 0 { if s = gcw.tryGetSpan(); s == 0 { // Try to do a root job. - if work.markrootNext < work.markrootJobs { - job := atomic.Xadd(&work.markrootNext, +1) - 1 - if job < work.markrootJobs { + if work.markrootNext.Load() < work.markrootJobs.Load() { + job, ok := gcNextMarkRoot() + if ok { workFlushed += markroot(gcw, job, false) continue } diff --git a/src/runtime/mgcmark_greenteagc.go b/src/runtime/mgcmark_greenteagc.go index 53fcd3d966ec3b..3975e1e76b501c 100644 --- a/src/runtime/mgcmark_greenteagc.go +++ b/src/runtime/mgcmark_greenteagc.go @@ -1143,7 +1143,7 @@ func gcMarkWorkAvailable() bool { if !work.full.empty() { return true // global work available } - if work.markrootNext < work.markrootJobs { + if work.markrootNext.Load() < work.markrootJobs.Load() { return true // root scan work available } if work.spanqMask.any() { diff --git a/src/runtime/mgcmark_nogreenteagc.go b/src/runtime/mgcmark_nogreenteagc.go index e4505032917210..9838887f7be008 100644 --- a/src/runtime/mgcmark_nogreenteagc.go +++ b/src/runtime/mgcmark_nogreenteagc.go @@ -124,7 +124,7 @@ func gcMarkWorkAvailable() bool { if !work.full.empty() { return true // global work available } - if work.markrootNext < work.markrootJobs { + if work.markrootNext.Load() < work.markrootJobs.Load() { return true // root scan work available } return false diff --git a/src/runtime/mprof.go b/src/runtime/mprof.go index 97b29076523d5d..0957e67b50fa2c 100644 --- a/src/runtime/mprof.go +++ b/src/runtime/mprof.go @@ -1259,6 +1259,20 @@ func goroutineProfileWithLabels(p []profilerecord.StackRecord, labels []unsafe.P return goroutineProfileWithLabelsConcurrent(p, labels) } +//go:linkname pprof_goroutineLeakProfileWithLabels +func pprof_goroutineLeakProfileWithLabels(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool) { + return goroutineLeakProfileWithLabelsConcurrent(p, labels) +} + +// labels may be nil. If labels is non-nil, it must have the same length as p. 
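For context, a sketch of how the (n, ok) contract of the profile fetchers below is normally consumed on the pprof side; this mirrors the existing size-probe-then-fill loop in writeRuntimeProfile, and the slack of 10 records is that function's convention, not something introduced here:

	n, ok := pprof_goroutineLeakProfileWithLabels(nil, nil) // probe: returns count, false
	var records []profilerecord.StackRecord
	for !ok {
		records = make([]profilerecord.StackRecord, n+10) // room for a few late arrivals
		n, ok = pprof_goroutineLeakProfileWithLabels(records, nil)
	}
	records = records[:n]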
+func goroutineLeakProfileWithLabels(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool) { + if labels != nil && len(labels) != len(p) { + labels = nil + } + + return goroutineLeakProfileWithLabelsConcurrent(p, labels) +} + var goroutineProfile = struct { sema uint32 active bool @@ -1302,6 +1316,48 @@ func (p *goroutineProfileStateHolder) CompareAndSwap(old, new goroutineProfileSt return (*atomic.Uint32)(p).CompareAndSwap(uint32(old), uint32(new)) } +func goroutineLeakProfileWithLabelsConcurrent(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool) { + if len(p) == 0 { + // An empty slice is obviously too small. Return a rough + // allocation estimate. + return work.goroutineLeak.count, false + } + + // Use the same semaphore as goroutineProfileWithLabelsConcurrent, + // because ultimately we still use goroutine profiles. + semacquire(&goroutineProfile.sema) + + // Unlike in goroutineProfileWithLabelsConcurrent, we don't need to + // save the current goroutine stack, because it is obviously not leaked. + + pcbuf := makeProfStack() // see saveg() for explanation + + // Prepare a profile large enough to store all leaked goroutines. + n = work.goroutineLeak.count + + if n > len(p) { + // There's not enough space in p to store the whole profile, so (per the + // contract of runtime.GoroutineProfile) we're not allowed to write to p + // at all and must return n, false. + semrelease(&goroutineProfile.sema) + return n, false + } + + // Visit each leaked goroutine and try to record its stack. + forEachGRace(func(gp1 *g) { + if readgstatus(gp1) == _Gleaked { + doRecordGoroutineProfile(gp1, pcbuf) + } + }) + + if raceenabled { + raceacquire(unsafe.Pointer(&labelSync)) + } + + semrelease(&goroutineProfile.sema) + return n, true +} + func goroutineProfileWithLabelsConcurrent(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool) { if len(p) == 0 { // An empty slice is obviously too small. Return a rough diff --git a/src/runtime/pprof/pprof.go b/src/runtime/pprof/pprof.go index 55563009b3a2c1..b524e992b8b209 100644 --- a/src/runtime/pprof/pprof.go +++ b/src/runtime/pprof/pprof.go @@ -80,6 +80,7 @@ import ( "cmp" "fmt" "internal/abi" + "internal/goexperiment" "internal/profilerecord" "io" "runtime" @@ -105,12 +106,13 @@ import ( // // Each Profile has a unique name. A few profiles are predefined: // -// goroutine - stack traces of all current goroutines -// heap - a sampling of memory allocations of live objects -// allocs - a sampling of all past memory allocations -// threadcreate - stack traces that led to the creation of new OS threads -// block - stack traces that led to blocking on synchronization primitives -// mutex - stack traces of holders of contended mutexes +// goroutine - stack traces of all current goroutines +// goroutineleak - stack traces of all leaked goroutines +// allocs - a sampling of all past memory allocations +// heap - a sampling of memory allocations of live objects +// threadcreate - stack traces that led to the creation of new OS threads +// block - stack traces that led to blocking on synchronization primitives +// mutex - stack traces of holders of contended mutexes // // These predefined profiles maintain themselves and panic on an explicit // [Profile.Add] or [Profile.Remove] method call. @@ -169,6 +171,7 @@ import ( // holds a lock for 1s while 5 other goroutines are waiting for the entire // second to acquire the lock, its unlock call stack will report 5s of // contention. 
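From the user's point of view, the new entry in the profile table above is consumed like any other named profile; a hypothetical program built with GOEXPERIMENT=goroutineleakprofile might do:

	package main

	import (
		"os"
		"runtime/pprof"
	)

	func main() {
		// ... run a workload that may leak goroutines ...
		if p := pprof.Lookup("goroutineleak"); p != nil { // nil when the experiment is off
			// debug 0/1 report only leaked goroutines; debug >= 2 first runs the
			// leak-detection GC and then dumps all goroutine stacks, with leaked
			// ones annotated "(leaked)".
			p.WriteTo(os.Stdout, 1)
		}
	}

Since net/http/pprof serves every registered profile by name, the same data should also be reachable at /debug/pprof/goroutineleak when the experiment is enabled.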
+ type Profile struct { name string mu sync.Mutex @@ -189,6 +192,12 @@ var goroutineProfile = &Profile{ write: writeGoroutine, } +var goroutineLeakProfile = &Profile{ + name: "goroutineleak", + count: runtime_goroutineleakcount, + write: writeGoroutineLeak, +} + var threadcreateProfile = &Profile{ name: "threadcreate", count: countThreadCreate, @@ -231,6 +240,9 @@ func lockProfiles() { "block": blockProfile, "mutex": mutexProfile, } + if goexperiment.GoroutineLeakProfile { + profiles.m["goroutineleak"] = goroutineLeakProfile + } } } @@ -275,6 +287,7 @@ func Profiles() []*Profile { all := make([]*Profile, 0, len(profiles.m)) for _, p := range profiles.m { + all = append(all, p) } @@ -747,6 +760,23 @@ func writeGoroutine(w io.Writer, debug int) error { return writeRuntimeProfile(w, debug, "goroutine", pprof_goroutineProfileWithLabels) } +// writeGoroutineLeak first invokes a GC cycle that performs goroutine leak detection. +// It then writes the goroutine profile, filtering for leaked goroutines. +func writeGoroutineLeak(w io.Writer, debug int) error { + // Run the GC with leak detection first so that leaked goroutines + // may transition to the leaked state. + runtime_goroutineLeakGC() + + // If the debug flag is set sufficiently high, just defer to writing goroutine stacks + // like in a regular goroutine profile. Include non-leaked goroutines, too. + if debug >= 2 { + return writeGoroutineStacks(w) + } + + // Otherwise, write the goroutine leak profile. + return writeRuntimeProfile(w, debug, "goroutineleak", pprof_goroutineLeakProfileWithLabels) +} + func writeGoroutineStacks(w io.Writer) error { // We don't know how big the buffer needs to be to collect // all the goroutines. Start with 1 MB and try a few times, doubling each time. @@ -969,6 +999,9 @@ func writeProfileInternal(w io.Writer, debug int, name string, runtimeProfile fu //go:linkname pprof_goroutineProfileWithLabels runtime.pprof_goroutineProfileWithLabels func pprof_goroutineProfileWithLabels(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool) +//go:linkname pprof_goroutineLeakProfileWithLabels runtime.pprof_goroutineLeakProfileWithLabels +func pprof_goroutineLeakProfileWithLabels(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool) + //go:linkname pprof_cyclesPerSecond runtime/pprof.runtime_cyclesPerSecond func pprof_cyclesPerSecond() int64 diff --git a/src/runtime/pprof/runtime.go b/src/runtime/pprof/runtime.go index 8d37c7d3add146..690cab81ab5a5a 100644 --- a/src/runtime/pprof/runtime.go +++ b/src/runtime/pprof/runtime.go @@ -29,6 +29,12 @@ func runtime_setProfLabel(labels unsafe.Pointer) // runtime_getProfLabel is defined in runtime/proflabel.go. func runtime_getProfLabel() unsafe.Pointer +// runtime_goroutineleakcount is defined in runtime/proc.go. +func runtime_goroutineleakcount() int + +// runtime_goroutineLeakGC is defined in runtime/mgc.go. +func runtime_goroutineLeakGC() + // SetGoroutineLabels sets the current goroutine's labels to match ctx. // A new goroutine inherits the labels of the goroutine that created it. // This is a lower-level API than [Do], which should be used instead when possible. diff --git a/src/runtime/preempt.go b/src/runtime/preempt.go index 22727df74eead2..5367f66213804b 100644 --- a/src/runtime/preempt.go +++ b/src/runtime/preempt.go @@ -160,7 +160,7 @@ func suspendG(gp *g) suspendGState { s = _Gwaiting fallthrough - case _Grunnable, _Gsyscall, _Gwaiting: + case _Grunnable, _Gsyscall, _Gwaiting, _Gleaked: // Claim goroutine by setting scan bit. 
// This may race with execution or readying of gp. // The scan bit keeps it from transition state. @@ -269,6 +269,7 @@ func resumeG(state suspendGState) { case _Grunnable | _Gscan, _Gwaiting | _Gscan, + _Gleaked | _Gscan, _Gsyscall | _Gscan: casfrom_Gscanstatus(gp, s, s&^_Gscan) } diff --git a/src/runtime/proc.go b/src/runtime/proc.go index 4b1ab1af6a5075..e5686705293e8a 100644 --- a/src/runtime/proc.go +++ b/src/runtime/proc.go @@ -517,7 +517,7 @@ func acquireSudog() *sudog { s := pp.sudogcache[n-1] pp.sudogcache[n-1] = nil pp.sudogcache = pp.sudogcache[:n-1] - if s.elem != nil { + if s.elem.get() != nil { throw("acquireSudog: found s.elem != nil in cache") } releasem(mp) @@ -526,7 +526,7 @@ func acquireSudog() *sudog { //go:nosplit func releaseSudog(s *sudog) { - if s.elem != nil { + if s.elem.get() != nil { throw("runtime: sudog with non-nil elem") } if s.isSelect { @@ -541,7 +541,7 @@ func releaseSudog(s *sudog) { if s.waitlink != nil { throw("runtime: sudog with non-nil waitlink") } - if s.c != nil { + if s.c.get() != nil { throw("runtime: sudog with non-nil c") } gp := getg() @@ -1222,6 +1222,7 @@ func casfrom_Gscanstatus(gp *g, oldval, newval uint32) { _Gscanwaiting, _Gscanrunning, _Gscansyscall, + _Gscanleaked, _Gscanpreempted: if newval == oldval&^_Gscan { success = gp.atomicstatus.CompareAndSwap(oldval, newval) @@ -1242,6 +1243,7 @@ func castogscanstatus(gp *g, oldval, newval uint32) bool { case _Grunnable, _Grunning, _Gwaiting, + _Gleaked, _Gsyscall: if newval == oldval|_Gscan { r := gp.atomicstatus.CompareAndSwap(oldval, newval) @@ -5565,6 +5567,14 @@ func gcount(includeSys bool) int32 { return n } +// goroutineleakcount returns the number of leaked goroutines last reported by +// the runtime. +// +//go:linkname goroutineleakcount runtime/pprof.runtime_goroutineleakcount +func goroutineleakcount() int { + return work.goroutineLeak.count +} + func mcount() int32 { return int32(sched.mnext - sched.nmfreed) } diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go index 042c3137cd03df..6016c6fde054fb 100644 --- a/src/runtime/runtime2.go +++ b/src/runtime/runtime2.go @@ -87,6 +87,9 @@ const ( // ready()ing this G. _Gpreempted // 9 + // _Gleaked represents a leaked goroutine caught by the GC. + _Gleaked // 10 + // _Gscan combined with one of the above states other than // _Grunning indicates that GC is scanning the stack. The // goroutine is not executing user code and the stack is owned @@ -104,6 +107,7 @@ const ( _Gscansyscall = _Gscan + _Gsyscall // 0x1003 _Gscanwaiting = _Gscan + _Gwaiting // 0x1004 _Gscanpreempted = _Gscan + _Gpreempted // 0x1009 + _Gscanleaked = _Gscan + _Gleaked // 0x100a ) const ( @@ -315,6 +319,78 @@ type gobuf struct { bp uintptr // for framepointer-enabled architectures } +// maybeTraceablePtr is a special pointer that is conditionally trackable +// by the GC. It consists of an address as a uintptr (vu) and a pointer +// to a data element (vp). +// +// maybeTraceablePtr values can be in one of three states: +// 1. Unset: vu == 0 && vp == nil +// 2. Untracked: vu != 0 && vp == nil +// 3. Tracked: vu != 0 && vp != nil +// +// Do not set fields manually. Use methods instead. +// Extend this type with additional methods if needed. +type maybeTraceablePtr struct { + vp unsafe.Pointer // For liveness only. + vu uintptr // Source of truth. +} + +// untrack unsets the pointer but preserves the address. +// This is used to hide the pointer from the GC. 
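The three states listed in the maybeTraceablePtr comment above are easiest to see in a standalone copy, renamed hiddenPtr here to make clear it is an illustration rather than the runtime type; outside the runtime, round-tripping a pointer through uintptr like this is only safe because the demo keeps the object alive through a normal reference:

	package main

	import (
		"fmt"
		"unsafe"
	)

	type hiddenPtr struct {
		vp unsafe.Pointer // what the GC is allowed to see (liveness only)
		vu uintptr        // source of truth
	}

	func (p *hiddenPtr) set(v unsafe.Pointer) { p.vp, p.vu = v, uintptr(v) }    // tracked
	func (p *hiddenPtr) get() unsafe.Pointer  { return unsafe.Pointer(p.vu) }
	func (p *hiddenPtr) setUntraceable()      { p.vp = nil }                    // untracked: hidden, address kept
	func (p *hiddenPtr) setTraceable()        { p.vp = unsafe.Pointer(p.vu) }   // tracked again

	func main() {
		x := 42
		var p hiddenPtr
		p.set(unsafe.Pointer(&x))
		p.setUntraceable()
		fmt.Println(*(*int)(p.get())) // 42: hiding the pointer does not lose the address
		p.setTraceable()
	}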
+// +//go:nosplit +func (p *maybeTraceablePtr) setUntraceable() { + p.vp = nil +} + +// setTraceable resets the pointer to the stored address. +// This is used to make the pointer visible to the GC. +// +//go:nosplit +func (p *maybeTraceablePtr) setTraceable() { + p.vp = unsafe.Pointer(p.vu) +} + +// set sets the pointer to the data element and updates the address. +// +//go:nosplit +func (p *maybeTraceablePtr) set(v unsafe.Pointer) { + p.vp = v + p.vu = uintptr(v) +} + +// get retrieves the pointer to the data element. +// +//go:nosplit +func (p *maybeTraceablePtr) get() unsafe.Pointer { + return unsafe.Pointer(p.vu) +} + +// uintptr returns the uintptr address of the pointer. +// +//go:nosplit +func (p *maybeTraceablePtr) uintptr() uintptr { + return p.vu +} + +// maybeTraceableChan extends conditionally trackable pointers (maybeTraceablePtr) +// to track hchan pointers. +// +// Do not set fields manually. Use methods instead. +type maybeTraceableChan struct { + maybeTraceablePtr +} + +//go:nosplit +func (p *maybeTraceableChan) set(c *hchan) { + p.maybeTraceablePtr.set(unsafe.Pointer(c)) +} + +//go:nosplit +func (p *maybeTraceableChan) get() *hchan { + return (*hchan)(p.maybeTraceablePtr.get()) +} + // sudog (pseudo-g) represents a g in a wait list, such as for sending/receiving // on a channel. // @@ -334,7 +410,8 @@ type sudog struct { next *sudog prev *sudog - elem unsafe.Pointer // data element (may point to stack) + + elem maybeTraceablePtr // data element (may point to stack) // The following fields are never accessed concurrently. // For channels, waitlink is only accessed by g. @@ -362,10 +439,10 @@ type sudog struct { // in the second entry in the list.) waiters uint16 - parent *sudog // semaRoot binary tree - waitlink *sudog // g.waiting list or semaRoot - waittail *sudog // semaRoot - c *hchan // channel + parent *sudog // semaRoot binary tree + waitlink *sudog // g.waiting list or semaRoot + waittail *sudog // semaRoot + c maybeTraceableChan // channel } type libcall struct { @@ -1072,24 +1149,24 @@ const ( waitReasonZero waitReason = iota // "" waitReasonGCAssistMarking // "GC assist marking" waitReasonIOWait // "IO wait" - waitReasonChanReceiveNilChan // "chan receive (nil chan)" - waitReasonChanSendNilChan // "chan send (nil chan)" waitReasonDumpingHeap // "dumping heap" waitReasonGarbageCollection // "garbage collection" waitReasonGarbageCollectionScan // "garbage collection scan" waitReasonPanicWait // "panicwait" - waitReasonSelect // "select" - waitReasonSelectNoCases // "select (no cases)" waitReasonGCAssistWait // "GC assist wait" waitReasonGCSweepWait // "GC sweep wait" waitReasonGCScavengeWait // "GC scavenge wait" - waitReasonChanReceive // "chan receive" - waitReasonChanSend // "chan send" waitReasonFinalizerWait // "finalizer wait" waitReasonForceGCIdle // "force gc (idle)" waitReasonUpdateGOMAXPROCSIdle // "GOMAXPROCS updater (idle)" waitReasonSemacquire // "semacquire" waitReasonSleep // "sleep" + waitReasonChanReceiveNilChan // "chan receive (nil chan)" + waitReasonChanSendNilChan // "chan send (nil chan)" + waitReasonSelectNoCases // "select (no cases)" + waitReasonSelect // "select" + waitReasonChanReceive // "chan receive" + waitReasonChanSend // "chan send" waitReasonSyncCondWait // "sync.Cond.Wait" waitReasonSyncMutexLock // "sync.Mutex.Lock" waitReasonSyncRWMutexRLock // "sync.RWMutex.RLock" @@ -1175,12 +1252,34 @@ func (w waitReason) String() string { return waitReasonStrings[w] } +// isMutexWait returns true if the goroutine is blocked because of +// 
sync.Mutex.Lock or sync.RWMutex.[R]Lock. +// +//go:nosplit func (w waitReason) isMutexWait() bool { return w == waitReasonSyncMutexLock || w == waitReasonSyncRWMutexRLock || w == waitReasonSyncRWMutexLock } +// isSyncWait returns true if the goroutine is blocked because of +// sync library primitive operations. +// +//go:nosplit +func (w waitReason) isSyncWait() bool { + return waitReasonSyncCondWait <= w && w <= waitReasonSyncWaitGroupWait +} + +// isChanWait is true if the goroutine is blocked because of non-nil +// channel operations or a select statement with at least one case. +// +//go:nosplit +func (w waitReason) isChanWait() bool { + return w == waitReasonSelect || + w == waitReasonChanReceive || + w == waitReasonChanSend +} + func (w waitReason) isWaitingForSuspendG() bool { return isWaitingForSuspendG[w] } diff --git a/src/runtime/select.go b/src/runtime/select.go index 113dc8ad19e984..553f6960eb1286 100644 --- a/src/runtime/select.go +++ b/src/runtime/select.go @@ -83,7 +83,7 @@ func selparkcommit(gp *g, _ unsafe.Pointer) bool { // channels in lock order. var lastc *hchan for sg := gp.waiting; sg != nil; sg = sg.waitlink { - if sg.c != lastc && lastc != nil { + if sg.c.get() != lastc && lastc != nil { // As soon as we unlock the channel, fields in // any sudog with that channel may change, // including c and waitlink. Since multiple @@ -92,7 +92,7 @@ func selparkcommit(gp *g, _ unsafe.Pointer) bool { // of a channel. unlock(&lastc.lock) } - lastc = sg.c + lastc = sg.c.get() } if lastc != nil { unlock(&lastc.lock) @@ -320,12 +320,12 @@ func selectgo(cas0 *scase, order0 *uint16, pc0 *uintptr, nsends, nrecvs int, blo sg.isSelect = true // No stack splits between assigning elem and enqueuing // sg on gp.waiting where copystack can find it. - sg.elem = cas.elem + sg.elem.set(cas.elem) sg.releasetime = 0 if t0 != 0 { sg.releasetime = -1 } - sg.c = c + sg.c.set(c) // Construct waiting list in lock order. *nextp = sg nextp = &sg.waitlink @@ -368,8 +368,8 @@ func selectgo(cas0 *scase, order0 *uint16, pc0 *uintptr, nsends, nrecvs int, blo // Clear all elem before unlinking from gp.waiting. for sg1 := gp.waiting; sg1 != nil; sg1 = sg1.waitlink { sg1.isSelect = false - sg1.elem = nil - sg1.c = nil + sg1.elem.set(nil) + sg1.c.set(nil) } gp.waiting = nil diff --git a/src/runtime/sema.go b/src/runtime/sema.go index 6af49b1b0c42d9..0fe0f2a2c20739 100644 --- a/src/runtime/sema.go +++ b/src/runtime/sema.go @@ -303,7 +303,10 @@ func cansemacquire(addr *uint32) bool { // queue adds s to the blocked goroutines in semaRoot. func (root *semaRoot) queue(addr *uint32, s *sudog, lifo bool) { s.g = getg() - s.elem = unsafe.Pointer(addr) + s.elem.set(unsafe.Pointer(addr)) + // Storing this pointer so that we can trace the semaphore address + // from the blocked goroutine when checking for goroutine leaks. + s.g.waiting = s s.next = nil s.prev = nil s.waiters = 0 @@ -311,7 +314,7 @@ func (root *semaRoot) queue(addr *uint32, s *sudog, lifo bool) { var last *sudog pt := &root.treap for t := *pt; t != nil; t = *pt { - if t.elem == unsafe.Pointer(addr) { + if uintptr(unsafe.Pointer(addr)) == t.elem.uintptr() { // Already have addr in list. if lifo { // Substitute s in t's place in treap. 
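The extra bookkeeping in queue above (recording the semaphore address in s.elem and pointing s.g.waiting at the sudog) is what lets the detector reason about sync primitives at all. A hypothetical leak it makes visible, assuming the usual sync import:

	func leakyMutex() {
		var mu sync.Mutex
		mu.Lock()
		go func() {
			mu.Lock() // parks in semacquire; the sudog records the semaphore address, g.waiting records the sudog
		}()
		// Returns without unlocking. Once this frame is gone, mu is reachable only
		// from the blocked goroutine itself, so isMaybeRunnable never sees it marked
		// and the goroutine is reported as leaked at sync.Mutex.Lock.
	}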
@@ -357,7 +360,7 @@ func (root *semaRoot) queue(addr *uint32, s *sudog, lifo bool) { return } last = t - if uintptr(unsafe.Pointer(addr)) < uintptr(t.elem) { + if uintptr(unsafe.Pointer(addr)) < t.elem.uintptr() { pt = &t.prev } else { pt = &t.next @@ -402,11 +405,13 @@ func (root *semaRoot) queue(addr *uint32, s *sudog, lifo bool) { func (root *semaRoot) dequeue(addr *uint32) (found *sudog, now, tailtime int64) { ps := &root.treap s := *ps + for ; s != nil; s = *ps { - if s.elem == unsafe.Pointer(addr) { + if uintptr(unsafe.Pointer(addr)) == s.elem.uintptr() { goto Found } - if uintptr(unsafe.Pointer(addr)) < uintptr(s.elem) { + + if uintptr(unsafe.Pointer(addr)) < s.elem.uintptr() { ps = &s.prev } else { ps = &s.next @@ -470,8 +475,10 @@ Found: } tailtime = s.acquiretime } + // Goroutine is no longer blocked. Clear the waiting pointer. + s.g.waiting = nil s.parent = nil - s.elem = nil + s.elem.set(nil) s.next = nil s.prev = nil s.ticket = 0 @@ -590,6 +597,10 @@ func notifyListWait(l *notifyList, t uint32) { // Enqueue itself. s := acquireSudog() s.g = getg() + // Storing this pointer so that we can trace the condvar address + // from the blocked goroutine when checking for goroutine leaks. + s.elem.set(unsafe.Pointer(l)) + s.g.waiting = s s.ticket = t s.releasetime = 0 t0 := int64(0) @@ -607,6 +618,10 @@ func notifyListWait(l *notifyList, t uint32) { if t0 != 0 { blockevent(s.releasetime-t0, 2) } + // Goroutine is no longer blocked. Clear up its waiting pointer, + // and clean up the sudog before releasing it. + s.g.waiting = nil + s.elem.set(nil) releaseSudog(s) } diff --git a/src/runtime/sizeof_test.go b/src/runtime/sizeof_test.go index de859866a5adb2..5888177f0ea7a1 100644 --- a/src/runtime/sizeof_test.go +++ b/src/runtime/sizeof_test.go @@ -22,7 +22,7 @@ func TestSizeof(t *testing.T) { _64bit uintptr // size on 64bit platforms }{ {runtime.G{}, 280 + xreg, 440 + xreg}, // g, but exported for testing - {runtime.Sudog{}, 56, 88}, // sudog, but exported for testing + {runtime.Sudog{}, 64, 104}, // sudog, but exported for testing } if xreg > runtime.PtrSize { diff --git a/src/runtime/stack.go b/src/runtime/stack.go index d809820b076dfd..55e97e77afa957 100644 --- a/src/runtime/stack.go +++ b/src/runtime/stack.go @@ -821,7 +821,8 @@ func adjustsudogs(gp *g, adjinfo *adjustinfo) { // the data elements pointed to by a SudoG structure // might be in the stack. for s := gp.waiting; s != nil; s = s.waitlink { - adjustpointer(adjinfo, unsafe.Pointer(&s.elem)) + adjustpointer(adjinfo, unsafe.Pointer(&s.elem.vu)) + adjustpointer(adjinfo, unsafe.Pointer(&s.elem.vp)) } } @@ -834,7 +835,7 @@ func fillstack(stk stack, b byte) { func findsghi(gp *g, stk stack) uintptr { var sghi uintptr for sg := gp.waiting; sg != nil; sg = sg.waitlink { - p := uintptr(sg.elem) + uintptr(sg.c.elemsize) + p := sg.elem.uintptr() + uintptr(sg.c.get().elemsize) if stk.lo <= p && p < stk.hi && p > sghi { sghi = p } @@ -853,7 +854,7 @@ func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr { // Lock channels to prevent concurrent send/receive. var lastc *hchan for sg := gp.waiting; sg != nil; sg = sg.waitlink { - if sg.c != lastc { + if sg.c.get() != lastc { // There is a ranking cycle here between gscan bit and // hchan locks. Normally, we only allow acquiring hchan // locks and then getting a gscan bit. In this case, we @@ -863,9 +864,9 @@ func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr { // suspended. 
So, we get a special hchan lock rank here // that is lower than gscan, but doesn't allow acquiring // any other locks other than hchan. - lockWithRank(&sg.c.lock, lockRankHchanLeaf) + lockWithRank(&sg.c.get().lock, lockRankHchanLeaf) } - lastc = sg.c + lastc = sg.c.get() } // Adjust sudogs. @@ -885,10 +886,10 @@ func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr { // Unlock channels. lastc = nil for sg := gp.waiting; sg != nil; sg = sg.waitlink { - if sg.c != lastc { - unlock(&sg.c.lock) + if sg.c.get() != lastc { + unlock(&sg.c.get().lock) } - lastc = sg.c + lastc = sg.c.get() } return sgsize diff --git a/src/runtime/testdata/testgoroutineleakprofile/commonpatterns.go b/src/runtime/testdata/testgoroutineleakprofile/commonpatterns.go new file mode 100644 index 00000000000000..353e48ee7034f7 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/commonpatterns.go @@ -0,0 +1,277 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "context" + "fmt" + "os" + "runtime" + "runtime/pprof" + "time" +) + +// Common goroutine leak patterns. Extracted from: +// "Unveiling and Vanquishing Goroutine Leaks in Enterprise Microservices: A Dynamic Analysis Approach" +// doi:10.1109/CGO57630.2024.10444835 +// +// Tests in this file are not flaky iff. the test is run with GOMAXPROCS=1. +// The main goroutine forcefully yields via `runtime.Gosched()` before +// running the profiler. This moves them to the back of the run queue, +// allowing the leaky goroutines to be scheduled beforehand and get stuck. + +func init() { + register("NoCloseRange", NoCloseRange) + register("MethodContractViolation", MethodContractViolation) + register("DoubleSend", DoubleSend) + register("EarlyReturn", EarlyReturn) + register("NCastLeak", NCastLeak) + register("Timeout", Timeout) +} + +// Incoming list of items and the number of workers. +func noCloseRange(list []any, workers int) { + ch := make(chan any) + + // Create each worker + for i := 0; i < workers; i++ { + go func() { + + // Each worker waits for an item and processes it. + for item := range ch { + // Process each item + _ = item + } + }() + } + + // Send each item to one of the workers. + for _, item := range list { + // Sending can leak if workers == 0 or if one of the workers panics + ch <- item + } + // The channel is never closed, so workers leak once there are no more + // items left to process. +} + +func NoCloseRange() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + + go noCloseRange([]any{1, 2, 3}, 0) + go noCloseRange([]any{1, 2, 3}, 3) +} + +// A worker processes items pushed to `ch` one by one in the background. +// When the worker is no longer needed, it must be closed with `Stop`. +// +// Specifications: +// +// A worker may be started any number of times, but must be stopped only once. +// Stopping a worker multiple times will lead to a close panic. +// Any worker that is started must eventually be stopped. +// Failing to stop a worker results in a goroutine leak +type worker struct { + ch chan any + done chan any +} + +// Start spawns a background goroutine that extracts items pushed to the queue. 
+func (w worker) Start() { + go func() { + + for { + select { + case <-w.ch: // Normal workflow + case <-w.done: + return // Shut down + } + } + }() +} + +func (w worker) Stop() { + // Allows goroutine created by Start to terminate + close(w.done) +} + +func (w worker) AddToQueue(item any) { + w.ch <- item +} + +// worker limited in scope by workerLifecycle +func workerLifecycle(items []any) { + // Create a new worker + w := worker{ + ch: make(chan any), + done: make(chan any), + } + // Start worker + w.Start() + + // Operate on worker + for _, item := range items { + w.AddToQueue(item) + } + + runtime.Gosched() + // Exits without calling ’Stop’. Goroutine created by `Start` eventually leaks. +} + +func MethodContractViolation() { + prof := pprof.Lookup("goroutineleak") + defer func() { + runtime.Gosched() + prof.WriteTo(os.Stdout, 2) + }() + + workerLifecycle(make([]any, 10)) + runtime.Gosched() +} + +// doubleSend incoming channel must send a message (incoming error simulates an error generated internally). +func doubleSend(ch chan any, err error) { + if err != nil { + // In case of an error, send nil. + ch <- nil + // Return is missing here. + } + // Otherwise, continue with normal behaviour + // This send is still executed in the error case, which may lead to a goroutine leak. + ch <- struct{}{} +} + +func DoubleSend() { + prof := pprof.Lookup("goroutineleak") + ch := make(chan any) + defer func() { + runtime.Gosched() + prof.WriteTo(os.Stdout, 2) + }() + + go func() { + doubleSend(ch, nil) + }() + <-ch + + go func() { + doubleSend(ch, fmt.Errorf("error")) + }() + <-ch + + ch1 := make(chan any, 1) + go func() { + doubleSend(ch1, fmt.Errorf("error")) + }() + <-ch1 +} + +// earlyReturn demonstrates a common pattern of goroutine leaks. +// A return statement interrupts the evaluation of the parent goroutine before it can consume a message. +// Incoming error simulates an error produced internally. +func earlyReturn(err error) { + // Create a synchronous channel + ch := make(chan any) + + go func() { + + // Send something to the channel. + // Leaks if the parent goroutine terminates early. + ch <- struct{}{} + }() + + if err != nil { + // Interrupt evaluation of parent early in case of error. + // Sender leaks. + return + } + + // Only receive if there is no error. + <-ch +} + +func EarlyReturn() { + prof := pprof.Lookup("goroutineleak") + defer func() { + runtime.Gosched() + prof.WriteTo(os.Stdout, 2) + }() + + go earlyReturn(fmt.Errorf("error")) +} + +// nCastLeak processes a number of items. First result to pass the post is retrieved from the channel queue. +func nCastLeak(items []any) { + // Channel is synchronous. + ch := make(chan any) + + // Iterate over every item + for range items { + go func() { + + // Process item and send result to channel + ch <- struct{}{} + // Channel is synchronous: only one sender will synchronise + }() + } + // Retrieve first result. All other senders block. + // Receiver blocks if there are no senders. + <-ch +} + +func NCastLeak() { + prof := pprof.Lookup("goroutineleak") + defer func() { + for i := 0; i < yieldCount; i++ { + // Yield enough times to allow all the leaky goroutines to + // reach the execution point. + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) + }() + + go func() { + nCastLeak(nil) + }() + + go func() { + nCastLeak(make([]any, 5)) + }() +} + +// A context is provided to short-circuit evaluation, leading +// the sender goroutine to leak. 
+func timeout(ctx context.Context) { + ch := make(chan any) + + go func() { + ch <- struct{}{} + }() + + select { + case <-ch: // Receive message + // Sender is released + case <-ctx.Done(): // Context was cancelled or timed out + // Sender is leaked + } +} + +func Timeout() { + prof := pprof.Lookup("goroutineleak") + defer func() { + runtime.Gosched() + prof.WriteTo(os.Stdout, 2) + }() + + ctx, cancel := context.WithCancel(context.Background()) + cancel() + + for i := 0; i < 100; i++ { + go timeout(ctx) + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/LICENSE b/src/runtime/testdata/testgoroutineleakprofile/goker/LICENSE new file mode 100644 index 00000000000000..f4b4b8abc4b372 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) +Copyright © 2021 Institute of Computing Technology, University of New South Wales + + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the “Software”), +to deal in the Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, sublicense, +and/or sell copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS +OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/README.md b/src/runtime/testdata/testgoroutineleakprofile/goker/README.md new file mode 100644 index 00000000000000..88c50e1e480a08 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/README.md @@ -0,0 +1,1847 @@ +# GoKer + +The following examples are obtained from the publication +"GoBench: A Benchmark Suite of Real-World Go Concurrency Bugs" +(doi:10.1109/CGO51591.2021.9370317). 
+ +**Authors** +Ting Yuan (yuanting@ict.ac.cn): + State Key Laboratory of Computer Architecture, Institute of Computing Technology, Chinese Academy of Sciences, + University of Chinese Academy of Sciences, Beijing, China; +Guangwei Li (liguangwei@ict.ac.cn): + State Key Laboratory of Computer Architecture, Institute of Computing Technology, Chinese Academy of Sciences, + University of Chinese Academy of Sciences, Beijing, China; +Jie Lu† (lujie@ict.ac.an): + State Key Laboratory of Computer Architecture, Institute of Computing Technology, Chinese Academy of Sciences; +Chen Liu (liuchen17z@ict.ac.cn): + State Key Laboratory of Computer Architecture, Institute of Computing Technology, Chinese Academy of Sciences, + University of Chinese Academy of Sciences, Beijing, China +Lian Li (lianli@ict.ac.cn): + State Key Laboratory of Computer Architecture, Institute of Computing Technology, Chinese Academy of Sciences, + University of Chinese Academy of Sciences, Beijing, China; +Jingling Xue (jingling@cse.unsw.edu.au): + University of New South Wales, School of Computer Science and Engineering, Sydney, Australia + +White paper: https://lujie.ac.cn/files/papers/GoBench.pdf + +The examples have been modified in order to run the goroutine leak +profiler. Buggy snippets are moved from within a unit test to separate +applications. Each is then independently executed, possibly as multiple +copies within the same application in order to exercise more interleavings. +Concurrently, the main program sets up a waiting period (typically 1ms), followed +by a goroutine leak profile request. Other modifications may involve injecting calls +to `runtime.Gosched()`, to more reliably exercise buggy interleavings, or reductions +in waiting periods when calling `time.Sleep`, in order to reduce overall testing time. + +The resulting goroutine leak profile is analyzed to ensure that no unexpected leaks occurred, +and that the expected leaks did occur. If the leak is flaky, the only purpose of the expected +leak list is to protect against unexpected leaks. + +The entries below document each of the corresponding leaks. + +## Cockroach/10214 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[cockroach#10214]|[pull request]|[patch]| Resource | AB-BA leak | + +[cockroach#10214]:(cockroach10214_test.go) +[patch]:https://github.com/cockroachdb/cockroach/pull/10214/files +[pull request]:https://github.com/cockroachdb/cockroach/pull/10214 + +### Description + +This goroutine leak is caused by different order when acquiring +coalescedMu.Lock() and raftMu.Lock(). The fix is to refactor sendQueuedHeartbeats() +so that cockroachdb can unlock coalescedMu before locking raftMu. + +### Example execution + +```go +G1 G2 +------------------------------------------------------------------------------------ +s.sendQueuedHeartbeats() . +s.coalescedMu.Lock() [L1] . +s.sendQueuedHeartbeatsToNode() . +s.mu.replicas[0].reportUnreachable() . +s.mu.replicas[0].raftMu.Lock() [L2] . +. s.mu.replicas[0].tick() +. s.mu.replicas[0].raftMu.Lock() [L2] +. s.mu.replicas[0].tickRaftMuLocked() +. s.mu.replicas[0].mu.Lock() [L3] +. s.mu.replicas[0].maybeQuiesceLocked() +. s.mu.replicas[0].maybeCoalesceHeartbeat() +. 
s.coalescedMu.Lock() [L1] +--------------------------------G1,G2 leak------------------------------------------ +``` + +## Cockroach/1055 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[cockroach#1055]|[pull request]|[patch]| Mixed | Channel & WaitGroup | + +[cockroach#1055]:(cockroach1055_test.go) +[patch]:https://github.com/cockroachdb/cockroach/pull/1055/files +[pull request]:https://github.com/cockroachdb/cockroach/pull/1055 + +### Description + +1. `Stop()` is called and blocked at `s.stop.Wait()` after acquiring the lock. +2. `StartTask()` is called and attempts to acquire the lock. It is then blocked. +3. `Stop()` never finishes since the task doesn't call SetStopped. + +### Example execution + +```go +G1 G2.0 G2.1 G2.2 G3 +------------------------------------------------------------------------------------------------------------------------------- +s[0].stop.Add(1) [1] +go func() [G2.0] +s[1].stop.Add(1) [1] . +go func() [G2.1] . +s[2].stop.Add(1) [1] . . +go func() [G2.2] . . +go func() [G3] . . . +<-done . . . . +. s[0].StartTask() . . . +. s[0].draining == 0 . . . +. . s[1].StartTask() . . +. . s[1].draining == 0 . . +. . . s[2].StartTask() . +. . . s[2].draining == 0 . +. . . . s[0].Quiesce() +. . . . s[0].mu.Lock() [L1[0]] +. s[0].mu.Lock() [L1[0]] . . . +. s[0].drain.Add(1) [1] . . . +. s[0].mu.Unlock() [L1[0]] . . . +. <-s[0].ShouldStop() . . . +. . . . s[0].draining = 1 +. . . . s[0].drain.Wait() +. . s[0].mu.Lock() [L1[1]] . . +. . s[1].drain.Add(1) [1] . . +. . s[1].mu.Unlock() [L1[1]] . . +. . <-s[1].ShouldStop() . . +. . . s[2].mu.Lock() [L1[2]] . +. . . s[2].drain.Add() [1] . +. . . s[2].mu.Unlock() [L1[2]] . +. . . <-s[2].ShouldStop() . +----------------------------------------------------G1, G2.[0..2], G3 leak----------------------------------------------------- +``` + +## Cockroach/10790 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[cockroach#10790]|[pull request]|[patch]| Communication | Channel & Context | + +[cockroach#10790]:(cockroach10790_test.go) +[patch]:https://github.com/cockroachdb/cockroach/pull/10790/files +[pull request]:https://github.com/cockroachdb/cockroach/pull/10790 + +### Description + +It is possible that a message from `ctxDone` will make `beginCmds` +return without draining the channel `ch`, so that anonymous function +goroutines will leak. + +### Example execution + +```go +G1 G2 helper goroutine +----------------------------------------------------- +. . r.sendChans() +r.beginCmds() . . +. . ch1 <- true +<- ch1 . . +. . ch2 <- true +... +. cancel() +<- ch1 +------------------G1 leak---------------------------- +``` + +## Cockroach/13197 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[cockroach#13197]|[pull request]|[patch]| Communication | Channel & Context | + +[cockroach#13197]:(cockroach13197_test.go) +[patch]:https://github.com/cockroachdb/cockroach/pull/13197/files +[pull request]:https://github.com/cockroachdb/cockroach/pull/13197 + +### Description + +One goroutine executing `(*Tx).awaitDone()` blocks and +waiting for a signal `context.Done()`. + +### Example execution + +```go +G1 G2 +------------------------------- +begin() +. awaitDone() +return . +. 
<-tx.ctx.Done() +-----------G2 leaks------------ +``` + +## Cockroach/13755 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[cockroach#13755]|[pull request]|[patch]| Communication | Channel & Context | + +[cockroach#13755]:(cockroach13755_test.go) +[patch]:https://github.com/cockroachdb/cockroach/pull/13755/files +[pull request]:https://github.com/cockroachdb/cockroach/pull/13755 + +### Description + +The buggy code does not close the db query result (rows), +so that one goroutine running `(*Rows).awaitDone` is blocked forever. +The blocking goroutine is waiting for cancel signal from context. + +### Example execution + +```go +G1 G2 +--------------------------------------- +initContextClose() +. awaitDone() +return . +. <-tx.ctx.Done() +---------------G2 leaks---------------- +``` + +## Cockroach/1462 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[cockroach#1462]|[pull request]|[patch]| Mixed | Channel & WaitGroup | + +[cockroach#1462]:(cockroach1462_test.go) +[patch]:https://github.com/cockroachdb/cockroach/pull/1462/files +[pull request]:https://github.com/cockroachdb/cockroach/pull/1462 + +### Description + +Executing `<-stopper.ShouldStop()` in `processEventsUntil` may cause +goroutines created by `lt.RunWorker` in `lt.start` to be stuck sending +a message over `lt.Events`. The main thread is then stuck at `s.stop.Wait()`, +since the sender goroutines cannot call `s.stop.Done()`. + +### Example execution + +```go +G1 G2 G3 +------------------------------------------------------------------------------------------------------- +NewLocalInterceptableTransport() +lt.start() +lt.stopper.RunWorker() +s.AddWorker() +s.stop.Add(1) [1] +go func() [G2] +stopper.RunWorker() . +s.AddWorker() . +s.stop.Add(1) [2] . +go func() [G3] . +s.Stop() . . +s.Quiesce() . . +. select [default] . +. lt.Events <- interceptMessage(0) . +close(s.stopper) . . +. . select [<-stopper.ShouldStop()] +. . <<>> +s.stop.Wait() . +----------------------------------------------G1,G2 leak----------------------------------------------- +``` + +## Cockroach/16167 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[cockroach#16167]|[pull request]|[patch]| Resource | Double Locking | + +[cockroach#16167]:(cockroach16167_test.go) +[patch]:https://github.com/cockroachdb/cockroach/pull/16167/files +[pull request]:https://github.com/cockroachdb/cockroach/pull/16167 + +### Description + +This is another example of goroutine leaks caused by recursively +acquiring `RWLock`. +There are two lock variables (`systemConfigCond` and `systemConfigMu`) +which refer to the same underlying lock. The leak invovlves two goroutines. +The first acquires `systemConfigMu.Lock()`, then tries to acquire `systemConfigMu.RLock()`. +The second acquires `systemConfigMu.Lock()`. +If the second goroutine interleaves in between the two lock operations of the +first goroutine, both goroutines will leak. + +### Example execution + +```go +G1 G2 +--------------------------------------------------------------- +. e.Start() +. e.updateSystemConfig() +e.execParsed() . +e.systemConfigCond.L.Lock() [L1] . +. e.systemConfigMu.Lock() [L1] +e.systemConfigMu.RLock() [L1] . 
+------------------------G1,G2 leak----------------------------- +``` + +## Cockroach/18101 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[cockroach#18101]|[pull request]|[patch]| Resource | Double Locking | + +[cockroach#18101]:(cockroach18101_test.go) +[patch]:https://github.com/cockroachdb/cockroach/pull/18101/files +[pull request]:https://github.com/cockroachdb/cockroach/pull/18101 + +### Description + +The `context.Done()` signal short-circuits the reader goroutine, but not +the senders, leading them to leak. + +### Example execution + +```go +G1 G2 helper goroutine +-------------------------------------------------------------- +restore() +. splitAndScatter() +<-readyForImportCh . +<-readyForImportCh <==> readyForImportCh<- +... +. . cancel() +<> . <> + readyForImportCh<- +-----------------------G2 leaks-------------------------------- +``` + +## Cockroach/2448 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[cockroach#2448]|[pull request]|[patch]| Communication | Channel | + +[cockroach#2448]:(cockroach2448_test.go) +[patch]:https://github.com/cockroachdb/cockroach/pull/2448/files +[pull request]:https://github.com/cockroachdb/cockroach/pull/2448 + +### Description + +This bug is caused by two goroutines waiting for each other +to unblock their channels: + +1) `MultiRaft` sends the commit event for the Membership change +2) `store.processRaft` takes it and begins processing +3) another command commits and triggers another `sendEvent`, but + this blocks since `store.processRaft` isn't ready for another + `select`. Consequently the main `MultiRaft` loop is waiting for + that as well. +4) the `Membership` change was applied to the range, and the store + now tries to execute the callback +5) the callback tries to write to `callbackChan`, but that is + consumed by the `MultiRaft` loop, which is currently waiting + for `store.processRaft` to consume from the events channel, + which it will only do after the callback has completed. + +### Example execution + +```go +G1 G2 +-------------------------------------------------------------------------- +s.processRaft() st.start() +select . +. select [default] +. s.handleWriteResponse() +. s.sendEvent() +. select +<-s.multiraft.Events <----> m.Events <- event +. select [default] +. s.handleWriteResponse() +. s.sendEvent() +. select [m.Events<-, <-s.stopper.ShouldStop()] +callback() . +select [ + m.callbackChan<-, + <-s.stopper.ShouldStop() +] . +------------------------------G1,G2 leak---------------------------------- +``` + +## Cockroach/24808 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[cockroach#24808]|[pull request]|[patch]| Communication | Channel | + +[cockroach#24808]:(cockroach24808_test.go) +[patch]:https://github.com/cockroachdb/cockroach/pull/24808/files +[pull request]:https://github.com/cockroachdb/cockroach/pull/24808 + +### Description + +When we `Start` the `Compactor`, it may already have received +`Suggestions`, leaking the previously blocking write to a full channel. + +### Example execution + +```go +G1 +------------------------------------------------ +... 
+compactor.ch <- +compactor.Start() +compactor.ch <- +--------------------G1 leaks-------------------- +``` + +## Cockroach/25456 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[cockroach#25456]|[pull request]|[patch]| Communication | Channel | + +[cockroach#25456]:(cockroach25456_test.go) +[patch]:https://github.com/cockroachdb/cockroach/pull/25456/files +[pull request]:https://github.com/cockroachdb/cockroach/pull/25456 + +### Description + +When `CheckConsistency` (in the complete code) returns an error, the queue +checks whether the store is draining to decide whether the error is worth +logging. This check was incorrect and would block until the store actually +started draining. + +### Example execution + +```go +G1 +--------------------------------------- +... +<-repl.store.Stopper().ShouldQuiesce() +---------------G1 leaks---------------- +``` + +## Cockroach/35073 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[cockroach#35073]|[pull request]|[patch]| Communication | Channel | + +[cockroach#35073]:(cockroach35073_test.go) +[patch]:https://github.com/cockroachdb/cockroach/pull/35073/files +[pull request]:https://github.com/cockroachdb/cockroach/pull/35073 + +### Description + +Previously, the outbox could fail during startup without closing its +`RowChannel`. This could lead to goroutine leaks in rare cases due +to channel communication mismatch. + +### Example execution + +## Cockroach/35931 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[cockroach#35931]|[pull request]|[patch]| Communication | Channel | + +[cockroach#35931]:(cockroach35931_test.go) +[patch]:https://github.com/cockroachdb/cockroach/pull/35931/files +[pull request]:https://github.com/cockroachdb/cockroach/pull/35931 + +### Description + +Previously, if a processor that reads from multiple inputs was waiting +on one input to provide more data, and the other input was full, and +both inputs were connected to inbound streams, it was possible to +cause goroutine leaks during flow cancellation when trying to propagate +the cancellation metadata messages into the flow. The cancellation method +wrote metadata messages to each inbound stream one at a time, so if the +first one was full, the canceller would block and never send a cancellation +message to the second stream, which was the one actually being read from. + +### Example execution + +## Cockroach/3710 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[cockroach#3710]|[pull request]|[patch]| Resource | RWR Deadlock | + +[cockroach#3710]:(cockroach3710_test.go) +[patch]:https://github.com/cockroachdb/cockroach/pull/3710/files +[pull request]:https://github.com/cockroachdb/cockroach/pull/3710 + +### Description + +The goroutine leak is caused by acquiring a RLock twice in a call chain. +`ForceRaftLogScanAndProcess(acquire s.mu.RLock())` +`-> MaybeAdd()` +`-> shouldQueue()` +`-> getTruncatableIndexes()` +`->RaftStatus(acquire s.mu.Rlock())` + +### Example execution + +```go +G1 G2 +------------------------------------------------------------ +store.ForceRaftLogScanAndProcess() +s.mu.RLock() +s.raftLogQueue.MaybeAdd() +bq.impl.shouldQueue() +getTruncatableIndexes() +r.store.RaftStatus() +. store.processRaft() +. 
s.mu.Lock() +s.mu.RLock() +----------------------G1,G2 leak----------------------------- +``` + +## Cockroach/584 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[cockroach#584]|[pull request]|[patch]| Resource | Double Locking | + +[cockroach#584]:(cockroach584_test.go) +[patch]:https://github.com/cockroachdb/cockroach/pull/584/files +[pull request]:https://github.com/cockroachdb/cockroach/pull/584 + +### Description + +Missing call to `mu.Unlock()` before the `break` in the loop. + +### Example execution + +```go +G1 +--------------------------- +g.bootstrap() +g.mu.Lock() [L1] +if g.closed { ==> break +g.manage() +g.mu.Lock() [L1] +----------G1 leaks--------- +``` + +## Cockroach/6181 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[cockroach#6181]|[pull request]|[patch]| Resource | RWR Deadlock | + +[cockroach#6181]:(cockroach6181_test.go) +[patch]:https://github.com/cockroachdb/cockroach/pull/6181/files +[pull request]:https://github.com/cockroachdb/cockroach/pull/6181 + +### Description + +The same `RWMutex` may be recursively acquired for both reading and writing. + +### Example execution + +```go +G1 G2 G3 ... +----------------------------------------------------------------------------------------------- +testRangeCacheCoalescedRquests() +initTestDescriptorDB() +pauseLookupResumeAndAssert() +return +. doLookupWithToken() +. . doLookupWithToken() +. rc.LookupRangeDescriptor() . +. . rc.LookupRangeDescriptor() +. rdc.rangeCacheMu.RLock() . +. rdc.String() . +. . rdc.rangeCacheMu.RLock() +. . fmt.Printf() +. . rdc.rangeCacheMu.RUnlock() +. . rdc.rangeCacheMu.Lock() +. rdc.rangeCacheMu.RLock() . +-----------------------------------G2,G3,... leak---------------------------------------------- +``` + +## Cockroach/7504 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[cockroach#7504]|[pull request]|[patch]| Resource | AB-BA Deadlock | + +[cockroach#7504]:(cockroach7504_test.go) +[patch]:https://github.com/cockroachdb/cockroach/pull/7504/files +[pull request]:https://github.com/cockroachdb/cockroach/pull/7504 + +### Description + +The locks are acquired as `leaseState` and `tableNameCache` in `Release()`, but +as `tableNameCache` and `leaseState` in `AcquireByName`, leading to an AB-BA deadlock. + +### Example execution + +```go +G1 G2 +----------------------------------------------------- +mgr.AcquireByName() mgr.Release() +m.tableNames.get(id) . +c.mu.Lock() [L2] . +. t.release(lease) +. t.mu.Lock() [L3] +. s.mu.Lock() [L1] +lease.mu.Lock() [L1] . +. t.removeLease(s) +. t.tableNameCache.remove() +. c.mu.Lock() [L2] +---------------------G1, G2 leak--------------------- +``` + +## Cockroach/9935 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[cockroach#9935]|[pull request]|[patch]| Resource | Double Locking | + +[cockroach#9935]:(cockroach9935_test.go) +[patch]:https://github.com/cockroachdb/cockroach/pull/9935/files +[pull request]:https://github.com/cockroachdb/cockroach/pull/9935 + +### Description + +This bug is caused by acquiring `l.mu.Lock()` twice. 
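+
+A minimal sketch of this double-locking shape, using hypothetical names rather
+than the actual cockroachdb code, is:
+
+```go
+package main
+
+import (
+	"sync"
+	"time"
+)
+
+type limiter struct {
+	mu sync.Mutex
+}
+
+func (l *limiter) start() {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	l.refresh() // re-enters the same mutex
+}
+
+func (l *limiter) refresh() {
+	l.mu.Lock() // never acquired: start() still holds l.mu
+	defer l.mu.Unlock()
+}
+
+func main() {
+	go new(limiter).start() // leaks, blocked on the second Lock
+	// A goroutine leak profile taken here would report the blocked goroutine.
+	time.Sleep(100 * time.Millisecond)
+}
+```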
+ +### Example execution + +## Etcd/10492 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[etcd#10492]|[pull request]|[patch]| Resource | Double locking | + +[etcd#10492]:(etcd10492_test.go) +[patch]:https://github.com/etcd-io/etcd/pull/10492/files +[pull request]:https://github.com/etcd-io/etcd/pull/10492 + +### Description + +A simple double locking case for lines 19, 31. + +## Etcd/5509 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[etcd#5509]|[pull request]|[patch]| Resource | Double locking | + +[etcd#5509]:(etcd5509_test.go) +[patch]:https://github.com/etcd-io/etcd/pull/5509/files +[pull request]:https://github.com/etcd-io/etcd/pull/5509 + +### Description + +`r.acquire()` returns holding `r.client.mu.RLock()` on a failure path (line 42). +This causes any call to `client.Close()` to leak goroutines. + +## Etcd/6708 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[etcd#6708]|[pull request]|[patch]| Resource | Double locking | + +[etcd#6708]:(etcd6708_test.go) +[patch]:https://github.com/etcd-io/etcd/pull/6708/files +[pull request]:https://github.com/etcd-io/etcd/pull/6708 + +### Description + +Line 54, 49 double locking + +## Etcd/6857 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[etcd#6857]|[pull request]|[patch]| Communication | Channel | + +[etcd#6857]:(etcd6857_test.go) +[patch]:https://github.com/etcd-io/etcd/pull/6857/files +[pull request]:https://github.com/etcd-io/etcd/pull/6857 + +### Description + +Choosing a different case in a `select` statement (`n.stop`) will +lead to goroutine leaks when sending over `n.status`. + +### Example execution + +```go +G1 G2 G3 +------------------------------------------- +n.run() . . +. . n.Stop() +. . n.stop<- +<-n.stop . . +. . <-n.done +close(n.done) . . +return . . +. . return +. n.Status() +. n.status<- +----------------G2 leaks------------------- +``` + +## Etcd/6873 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[etcd#6873]|[pull request]|[patch]| Mixed | Channel & Lock | + +[etcd#6873]:(etcd6873_test.go) +[patch]:https://github.com/etcd-io/etcd/pull/6873/files +[pull request]:https://github.com/etcd-io/etcd/pull/6873 + +### Description + +This goroutine leak involves a goroutine acquiring a lock and being +blocked over a channel operation with no partner, while another tries +to acquire the same lock. + +### Example execution + +```go +G1 G2 G3 +-------------------------------------------------------------- +newWatchBroadcasts() +wbs.update() +wbs.updatec <- +return +. <-wbs.updatec . +. wbs.coalesce() . +. . wbs.stop() +. . wbs.mu.Lock() +. . close(wbs.updatec) +. . <-wbs.donec +. wbs.mu.Lock() . +---------------------G2,G3 leak-------------------------------- +``` + +## Etcd/7492 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[etcd#7492]|[pull request]|[patch]| Mixed | Channel & Lock | + +[etcd#7492]:(etcd7492_test.go) +[patch]:https://github.com/etcd-io/etcd/pull/7492/files +[pull request]:https://github.com/etcd-io/etcd/pull/7492 + +### Description + +This goroutine leak involves a goroutine acquiring a lock and being +blocked over a channel operation with no partner, while another tries +to acquire the same lock. + +### Example execution + +```go +G2 G1 +--------------------------------------------------------------- +. stk.run() +ts.assignSimpleTokenToUser() . +t.simpleTokensMu.Lock() . 
+t.simpleTokenKeeper.addSimpleToken() .
+tm.addSimpleTokenCh <- true .
+. <-tm.addSimpleTokenCh
+t.simpleTokensMu.Unlock() .
+ts.assignSimpleTokenToUser() .
+...
+t.simpleTokensMu.Lock()
+. <-tokenTicker.C
+tm.addSimpleTokenCh <- true .
+. tm.deleteTokenFunc()
+. t.simpleTokensMu.Lock()
+---------------------------G1,G2 leak--------------------------
+```
+
+## Etcd/7902
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[etcd#7902]|[pull request]|[patch]| Mixed | Channel & Lock |
+
+[etcd#7902]:(etcd7902_test.go)
+[patch]:https://github.com/etcd-io/etcd/pull/7902/files
+[pull request]:https://github.com/etcd-io/etcd/pull/7902
+
+### Description
+
+If the follower goroutine acquires `mu.Lock()` first and calls
+`rc.release()`, it will be blocked sending over `rcNextc`.
+Only the leader can `close(nextc)` to unblock the follower.
+However, in order to invoke `rc.release()`, the leader needs
+to acquire `mu.Lock()`.
+The fix is to remove the lock and unlock around `rc.release()`.
+
+### Example execution
+
+```go
+G1 G2 (leader) G3 (follower)
+---------------------------------------------------------------------
+runElectionFunc()
+doRounds()
+wg.Wait()
+. ...
+. mu.Lock()
+. rc.validate()
+. rcNextc = nextc
+. mu.Unlock() ...
+. . mu.Lock()
+. . rc.validate()
+. . mu.Unlock()
+. . mu.Lock()
+. . rc.release()
+. . <-rcNextc
+. mu.Lock()
+-------------------------G1,G2,G3 leak--------------------------
+```
+
+## Grpc/1275
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[grpc#1275]|[pull request]|[patch]| Communication | Channel |
+
+[grpc#1275]:(grpc1275_test.go)
+[patch]:https://github.com/grpc/grpc-go/pull/1275/files
+[pull request]:https://github.com/grpc/grpc-go/pull/1275
+
+### Description
+
+Two goroutines are involved in this leak. The main goroutine
+is blocked at `case <- donec`, and is waiting for the second goroutine
+to close the channel.
+The second goroutine is created by the main goroutine. It is blocked
+when calling `stream.Read()`, which invokes `recvBufferRead.Read()`.
+The second goroutine is blocked at case `i := r.recv.get()`, and it is
+waiting for someone to send a message to this channel.
+It is the `client.CloseStream()` method called by the main goroutine that
+should send the message, but it is not. The patch is to send out this message.
+
+### Example execution
+
+```go
+G1 G2
+-----------------------------------------------------
+testInflightStreamClosing()
+. stream.Read()
+. io.ReadFull()
+. <-r.recv.get()
+CloseStream()
+<-donec
+---------------------G1, G2 leak---------------------
+```
+
+## Grpc/1424
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[grpc#1424]|[pull request]|[patch]| Communication | Channel |
+
+[grpc#1424]:(grpc1424_test.go)
+[patch]:https://github.com/grpc/grpc-go/pull/1424/files
+[pull request]:https://github.com/grpc/grpc-go/pull/1424
+
+### Description
+
+The goroutine running `cc.lbWatcher` returns without
+draining the `done` channel.
+
+### Example execution
+
+```go
+G1 G2 G3
+-----------------------------------------------------------------
+DialContext() . .
+. cc.dopts.balancer.Notify() .
+. . cc.lbWatcher()
+.
<-doneChan +close() +---------------------------G2 leaks------------------------------- +``` + +## Grpc/1460 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[grpc#1460]|[pull request]|[patch]| Mixed | Channel & Lock | + +[grpc#1460]:(grpc1460_test.go) +[patch]:https://github.com/grpc/grpc-go/pull/1460/files +[pull request]:https://github.com/grpc/grpc-go/pull/1460 + +### Description + +When gRPC keepalives are enabled (which isn't the case +by default at this time) and PermitWithoutStream is false +(the default), the client can leak goroutines when transitioning +between having no active stream and having one active +stream.The keepalive() goroutine is stuck at “<-t.awakenKeepalive”, +while the main goroutine is stuck in NewStream() on t.mu.Lock(). + +### Example execution + +```go +G1 G2 +-------------------------------------------- +client.keepalive() +. client.NewStream() +t.mu.Lock() +<-t.awakenKeepalive +. t.mu.Lock() +---------------G1,G2 leak------------------- +``` + +## Grpc/3017 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[grpc#3017]|[pull request]|[patch]| Resource | Missing unlock | + +[grpc#3017]:(grpc3017_test.go) +[patch]:https://github.com/grpc/grpc-go/pull/3017/files +[pull request]:https://github.com/grpc/grpc-go/pull/3017 + +### Description + +Line 65 is an execution path with a missing unlock. + +### Example execution + +```go +G1 G2 G3 +------------------------------------------------------------------------------------------------ +NewSubConn([1]) +ccc.mu.Lock() [L1] +sc = 1 +ccc.subConnToAddr[1] = 1 +go func() [G2] +<-done . +. ccc.RemoveSubConn(1) +. ccc.mu.Lock() +. addr = 1 +. entry = &subConnCacheEntry_grpc3017{} +. cc.subConnCache[1] = entry +. timer = time.AfterFunc() [G3] +. entry.cancel = func() +. sc = ccc.NewSubConn([1]) +. ccc.mu.Lock() [L1] +. entry.cancel() +. !timer.Stop() [true] +. entry.abortDeleting = true +. . ccc.mu.Lock() +. . <<>> +. ccc.RemoveSubConn(1) +. ccc.mu.Lock() [L1] +-------------------------------------------G1, G2 leak----------------------------------------- +``` + +## Grpc/660 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[grpc#660]|[pull request]|[patch]| Communication | Channel | + +[grpc#660]:(grpc660_test.go) +[patch]:https://github.com/grpc/grpc-go/pull/660/files +[pull request]:https://github.com/grpc/grpc-go/pull/660 + +### Description + +The parent function could return without draining the done channel. + +### Example execution + +```go +G1 G2 helper goroutine +------------------------------------------------------------- +doCloseLoopUnary() +. bc.stop <- true +<-bc.stop +return +. done <- +----------------------G2 leak-------------------------------- +``` + +## Grpc/795 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[grpc#795]|[pull request]|[patch]| Resource | Double locking | + +[grpc#795]:(grpc795_test.go) +[patch]:https://github.com/grpc/grpc-go/pull/795/files +[pull request]:https://github.com/grpc/grpc-go/pull/795 + +### Description + +Line 20 is an execution path with a missing unlock. 
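+
+A minimal sketch of a missing-unlock path of this kind, with hypothetical
+names rather than the actual grpc-go code, is:
+
+```go
+package main
+
+import (
+	"errors"
+	"sync"
+	"time"
+)
+
+type picker struct {
+	mu sync.Mutex
+}
+
+func (p *picker) pick(fail bool) error {
+	p.mu.Lock()
+	if fail {
+		// Error path returns without p.mu.Unlock().
+		return errors.New("transient error")
+	}
+	p.mu.Unlock()
+	return nil
+}
+
+func main() {
+	p := &picker{}
+	_ = p.pick(true) // leaves p.mu locked
+	go p.pick(false) // leaks: blocked acquiring p.mu
+	time.Sleep(100 * time.Millisecond)
+}
+```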
+
+## Grpc/862
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[grpc#862]|[pull request]|[patch]| Communication | Channel & Context |
+
+[grpc#862]:(grpc862_test.go)
+[patch]:https://github.com/grpc/grpc-go/pull/862/files
+[pull request]:https://github.com/grpc/grpc-go/pull/862
+
+### Description
+
+When the return value `conn` is `nil`, `cc` (a `ClientConn`) is not closed.
+The goroutine executing `resetAddrConn` is leaked. The patch is to
+close the `ClientConn` in a `defer func()`.
+
+### Example execution
+
+```go
+G1 G2
+---------------------------------------
+DialContext()
+. cc.resetAddrConn()
+. resetTransport()
+. <-ac.ctx.Done()
+--------------G2 leak------------------
+```
+
+## Hugo/3251
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[hugo#3251]|[pull request]|[patch]| Resource | RWR deadlock |
+
+[hugo#3251]:(hugo3251_test.go)
+[patch]:https://github.com/gohugoio/hugo/pull/3251/files
+[pull request]:https://github.com/gohugoio/hugo/pull/3251
+
+### Description
+
+A goroutine can hold `Lock()` at line 20 and then acquire `RLock()` at
+line 29. `RLock()` at line 29 will never be acquired because `Lock()`
+at line 20 will never be released.
+
+### Example execution
+
+```go
+G1 G2 G3
+------------------------------------------------------------------------------------------
+wg.Add(1) [W1: 1]
+go func() [G2]
+go func() [G3]
+. resGetRemote()
+. remoteURLLock.URLLock(url)
+. l.Lock() [L1]
+. l.m[url] = &sync.Mutex{} [L2]
+. l.m[url].Lock() [L2]
+. l.Unlock() [L1]
+. . resGetRemote()
+. . remoteURLLock.URLLock(url)
+. . l.Lock() [L1]
+. . l.m[url].Lock() [L2]
+. remoteURLLock.URLUnlock(url)
+. l.RLock() [L1]
+...
+wg.Wait() [W1]
+----------------------------------------G1,G2,G3 leak--------------------------------------
+```
+
+## Hugo/5379
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[hugo#5379]|[pull request]|[patch]| Resource | Double locking |
+
+[hugo#5379]:(hugo5379_test.go)
+[patch]:https://github.com/gohugoio/hugo/pull/5379/files
+[pull request]:https://github.com/gohugoio/hugo/pull/5379
+
+### Description
+
+A goroutine first acquires `contentInitMu` at line 99 and then
+acquires the same `Mutex` at line 66.
+
+## Istio/16224
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[istio#16224]|[pull request]|[patch]| Mixed | Channel & Lock |
+
+[istio#16224]:(istio16224_test.go)
+[patch]:https://github.com/istio/istio/pull/16224/files
+[pull request]:https://github.com/istio/istio/pull/16224
+
+### Description
+
+A goroutine holds a `Mutex` at line 91 and is then blocked at line 93.
+Another goroutine attempts to acquire the same `Mutex` at line 101 to
+further drain the same channel at line 103.
+
+## Istio/17860
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[istio#17860]|[pull request]|[patch]| Communication | Channel |
+
+[istio#17860]:(istio17860_test.go)
+[patch]:https://github.com/istio/istio/pull/17860/files
+[pull request]:https://github.com/istio/istio/pull/17860
+
+### Description
+
+`a.statusCh` can't be drained at line 70.
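+
+A minimal sketch of an undrained channel of this kind, with hypothetical names
+rather than the actual istio code, is:
+
+```go
+package main
+
+import "time"
+
+type agent struct {
+	statusCh chan string
+	done     chan struct{}
+}
+
+func (a *agent) run() {
+	for {
+		select {
+		case <-a.done:
+			return // stops draining statusCh
+		case <-a.statusCh:
+		}
+	}
+}
+
+func main() {
+	a := &agent{statusCh: make(chan string), done: make(chan struct{})}
+	go a.run()
+	close(a.done)
+	time.Sleep(10 * time.Millisecond) // let run() observe done and exit
+	go func() {
+		a.statusCh <- "status" // leaks: nobody drains statusCh anymore
+	}()
+	time.Sleep(100 * time.Millisecond)
+}
+```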
+
+## Istio/18454
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[istio#18454]|[pull request]|[patch]| Communication | Channel & Context |
+
+[istio#18454]:(istio18454_test.go)
+[patch]:https://github.com/istio/istio/pull/18454/files
+[pull request]:https://github.com/istio/istio/pull/18454
+
+### Description
+
+`s.timer.Stop()` at lines 56 and 61 can be called concurrently
+(i.e. from their entry points at line 104 and line 66).
+See [Timer](https://golang.org/pkg/time/#Timer).
+
+## Kubernetes/10182
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[kubernetes#10182]|[pull request]|[patch]| Mixed | Channel & Lock |
+
+[kubernetes#10182]:(kubernetes10182_test.go)
+[patch]:https://github.com/kubernetes/kubernetes/pull/10182/files
+[pull request]:https://github.com/kubernetes/kubernetes/pull/10182
+
+### Description
+
+Goroutine 1 is blocked on a lock held by goroutine 3,
+while goroutine 3 is blocked on sending a message to `ch`,
+which is read by goroutine 1.
+
+### Example execution
+
+```go
+G1 G2 G3
+-------------------------------------------------------------------------------
+s.Start()
+s.syncBatch()
+. s.SetPodStatus()
+. s.podStatusesLock.Lock()
+<-s.podStatusChannel <===> s.podStatusChannel <- true
+. s.podStatusesLock.Unlock()
+. return
+s.DeletePodStatus() .
+. . s.podStatusesLock.Lock()
+. . s.podStatusChannel <- true
+s.podStatusesLock.Lock()
+-----------------------------G1,G3 leak-----------------------------------------
+```
+
+## Kubernetes/11298
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[kubernetes#11298]|[pull request]|[patch]| Communication | Channel & Condition Variable |
+
+[kubernetes#11298]:(kubernetes11298_test.go)
+[patch]:https://github.com/kubernetes/kubernetes/pull/11298/files
+[pull request]:https://github.com/kubernetes/kubernetes/pull/11298
+
+### Description
+
+`n.node` used `n.lock` as the underlying locker. The service loop initially
+locked it, and the `Notify` function tried to lock it before calling `n.node.Signal()`,
+leading to a goroutine leak. `n.cond.Signal()` at line 59 and line 81 are not
+guaranteed to unblock the `n.cond.Wait` at line 56.
+
+## Kubernetes/13135
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[kubernetes#13135]|[pull request]|[patch]| Resource | AB-BA deadlock |
+
+[kubernetes#13135]:(kubernetes13135_test.go)
+[patch]:https://github.com/kubernetes/kubernetes/pull/13135/files
+[pull request]:https://github.com/kubernetes/kubernetes/pull/13135
+
+### Description
+
+```go
+G1 G2 G3
+----------------------------------------------------------------------------------
+NewCacher()
+watchCache.SetOnReplace()
+watchCache.SetOnEvent()
+. cacher.startCaching()
+. c.Lock()
+. c.reflector.ListAndWatch()
+. r.syncWith()
+. r.store.Replace()
+. w.Lock()
+. w.onReplace()
+. cacher.initOnce.Do()
+. cacher.Unlock()
+return cacher .
+. . c.watchCache.Add()
+. . w.processEvent()
+. . w.Lock()
+. cacher.startCaching() .
+. c.Lock() .
+...
+. c.Lock()
+.
w.Lock() +--------------------------------G2,G3 leak----------------------------------------- +``` + +## Kubernetes/1321 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[kubernetes#1321]|[pull request]|[patch]| Mixed | Channel & Lock | + +[kubernetes#1321]:(kubernetes1321_test.go) +[patch]:https://github.com/kubernetes/kubernetes/pull/1321/files +[pull request]:https://github.com/kubernetes/kubernetes/pull/1321 + +### Description + +This is a lock-channel bug. The first goroutine invokes +`distribute()`, which holds `m.lock.Lock()`, while blocking +at sending message to `w.result`. The second goroutine +invokes `stopWatching()` function, which can unblock the first +goroutine by closing `w.result`. However, in order to close `w.result`, +`stopWatching()` function needs to acquire `m.lock.Lock()`. + +The fix is to introduce another channel and put receive message +from the second channel in the same `select` statement as the +`w.result`. Close the second channel can unblock the first +goroutine, while no need to hold `m.lock.Lock()`. + +### Example execution + +```go +G1 G2 +---------------------------------------------- +testMuxWatcherClose() +NewMux() +. m.loop() +. m.distribute() +. m.lock.Lock() +. w.result <- true +w := m.Watch() +w.Stop() +mw.m.stopWatching() +m.lock.Lock() +---------------G1,G2 leak--------------------- +``` + +## Kubernetes/25331 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[kubernetes#25331]|[pull request]|[patch]| Communication | Channel & Context | + +[kubernetes#25331]:(kubernetes25331_test.go) +[patch]:https://github.com/kubernetes/kubernetes/pull/25331/files +[pull request]:https://github.com/kubernetes/kubernetes/pull/25331 + +### Description + +A potential goroutine leak occurs when an error has happened, +blocking `resultChan`, while cancelling context in `Stop()`. + +### Example execution + +```go +G1 G2 +------------------------------------ +wc.run() +. wc.Stop() +. wc.errChan <- +. wc.cancel() +<-wc.errChan +wc.cancel() +wc.resultChan <- +-------------G1 leak---------------- +``` + +## Kubernetes/26980 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[kubernetes#26980]|[pull request]|[patch]| Mixed | Channel & Lock | + +[kubernetes#26980]:(kubernetes26980_test.go) +[patch]:https://github.com/kubernetes/kubernetes/pull/26980/files +[pull request]:https://github.com/kubernetes/kubernetes/pull/26980 + +### Description + +A goroutine holds a `Mutex` at line 24 and blocked at line 35. +Another goroutine blocked at line 58 by acquiring the same `Mutex`. + +## Kubernetes/30872 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[kubernetes#30872]|[pull request]|[patch]| Resource | AB-BA deadlock | + +[kubernetes#30872]:(kubernetes30872_test.go) +[patch]:https://github.com/kubernetes/kubernetes/pull/30872/files +[pull request]:https://github.com/kubernetes/kubernetes/pull/30872 + +### Description + +The lock is acquired both at lines 92 and 157. + +## Kubernetes/38669 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[kubernetes#38669]|[pull request]|[patch]| Communication | Channel | + +[kubernetes#38669]:(kubernetes38669_test.go) +[patch]:https://github.com/kubernetes/kubernetes/pull/38669/files +[pull request]:https://github.com/kubernetes/kubernetes/pull/38669 + +### Description + +No sender for line 33. 
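+
+A minimal sketch of a receive with no matching sender, with hypothetical names
+rather than the actual kubernetes code, is:
+
+```go
+package main
+
+import "time"
+
+func watchResult(resultCh <-chan struct{}) {
+	<-resultCh // leaks: no goroutine ever sends on resultCh
+}
+
+func main() {
+	resultCh := make(chan struct{})
+	go watchResult(resultCh)
+	time.Sleep(100 * time.Millisecond)
+}
+```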
+ +## Kubernetes/5316 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[kubernetes#5316]|[pull request]|[patch]| Communication | Channel | + +[kubernetes#5316]:(kubernetes5316_test.go) +[patch]:https://github.com/kubernetes/kubernetes/pull/5316/files +[pull request]:https://github.com/kubernetes/kubernetes/pull/5316 + +### Description + +If the main goroutine selects a case that doesn’t consumes +the channels, the anonymous goroutine will be blocked on sending +to channel. + +### Example execution + +```go +G1 G2 +-------------------------------------- +finishRequest() +. fn() +time.After() +. errCh<-/ch<- +--------------G2 leaks---------------- +``` + +## Kubernetes/58107 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[kubernetes#58107]|[pull request]|[patch]| Resource | RWR deadlock | + +[kubernetes#58107]:(kubernetes58107_test.go) +[patch]:https://github.com/kubernetes/kubernetes/pull/58107/files +[pull request]:https://github.com/kubernetes/kubernetes/pull/58107 + +### Description + +The rules for read and write lock: allows concurrent read lock; +write lock has higher priority than read lock. + +There are two queues (queue 1 and queue 2) involved in this bug, +and the two queues are protected by the same read-write lock +(`rq.workerLock.RLock()`). Before getting an element from queue 1 or +queue 2, `rq.workerLock.RLock()` is acquired. If the queue is empty, +`cond.Wait()` will be invoked. There is another goroutine (goroutine D), +which will periodically invoke `rq.workerLock.Lock()`. Under the following +situation, deadlock will happen. Queue 1 is empty, so that some goroutines +hold `rq.workerLock.RLock()`, and block at `cond.Wait()`. Goroutine D is +blocked when acquiring `rq.workerLock.Lock()`. Some goroutines try to process +jobs in queue 2, but they are blocked when acquiring `rq.workerLock.RLock()`, +since write lock has a higher priority. + +The fix is to not acquire `rq.workerLock.RLock()`, while pulling data +from any queue. Therefore, when a goroutine is blocked at `cond.Wait()`, +`rq.workLock.RLock()` is not held. + +### Example execution + +```go +G3 G4 G5 +-------------------------------------------------------------------- +. . Sync() +rq.workerLock.RLock() . . +q.cond.Wait() . . +. . rq.workerLock.Lock() +. rq.workerLock.RLock() +. q.cond.L.Lock() +-----------------------------G3,G4,G5 leak----------------------------- +``` + +## Kubernetes/62464 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[kubernetes#62464]|[pull request]|[patch]| Resource | RWR deadlock | + +[kubernetes#62464]:(kubernetes62464_test.go) +[patch]:https://github.com/kubernetes/kubernetes/pull/62464/files +[pull request]:https://github.com/kubernetes/kubernetes/pull/62464 + +### Description + +This is another example for recursive read lock bug. It has +been noticed by the go developers that RLock should not be +recursively used in the same thread. + +### Example execution + +```go +G1 G2 +-------------------------------------------------------- +m.reconcileState() +m.state.GetCPUSetOrDefault() +s.RLock() +s.GetCPUSet() +. p.RemoveContainer() +. s.GetDefaultCPUSet() +. s.SetDefaultCPUSet() +. 
s.Lock()
+s.RLock()
+---------------------G1,G2 leak--------------------------
+```
+
+## Kubernetes/6632
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[kubernetes#6632]|[pull request]|[patch]| Mixed | Channel & Lock |
+
+[kubernetes#6632]:(kubernetes6632_test.go)
+[patch]:https://github.com/kubernetes/kubernetes/pull/6632/files
+[pull request]:https://github.com/kubernetes/kubernetes/pull/6632
+
+### Description
+
+When `resetChan` is full, `WriteFrame` holds the lock and blocks
+on the channel. Then `monitor()` fails to close the `resetChan`
+because the lock is already held by `WriteFrame`.
+
+
+### Example execution
+
+```go
+G1 G2 helper goroutine
+----------------------------------------------------------------
+i.monitor()
+<-i.conn.closeChan
+. i.WriteFrame()
+. i.writeLock.Lock()
+. i.resetChan <-
+. . i.conn.closeChan<-
+i.writeLock.Lock()
+----------------------G1,G2 leak--------------------------------
+```
+
+## Kubernetes/70277
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[kubernetes#70277]|[pull request]|[patch]| Communication | Channel |
+
+[kubernetes#70277]:kubernetes70277_test.go
+[patch]:https://github.com/kubernetes/kubernetes/pull/70277/files
+[pull request]:https://github.com/kubernetes/kubernetes/pull/70277
+
+### Description
+
+`wait.poller()` returns a function of type `WaitFunc`.
+The function creates a goroutine that only
+quits when the `after` or `done` channel is closed.
+
+The `doneCh` defined at line 70 is never closed.
+
+## Moby/17176
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[moby#17176]|[pull request]|[patch]| Resource | Double locking |
+
+[moby#17176]:(moby17176_test.go)
+[patch]:https://github.com/moby/moby/pull/17176/files
+[pull request]:https://github.com/moby/moby/pull/17176
+
+### Description
+
+`devices.nrDeletedDevices` takes `devices.Lock()` but does
+not release it (line 36) if there are no deleted devices. This will block
+other goroutines trying to acquire `devices.Lock()`.
+
+## Moby/21233
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[moby#21233]|[pull request]|[patch]| Communication | Channel |
+
+[moby#21233]:(moby21233_test.go)
+[patch]:https://github.com/moby/moby/pull/21233/files
+[pull request]:https://github.com/moby/moby/pull/21233
+
+### Description
+
+This test was checking that it received every progress update that was
+produced. But delivery of these intermediate progress updates is not
+guaranteed. A new update can overwrite the previous one if the previous
+one hasn't been sent to the channel yet.
+
+The call to `t.Fatalf` terminated the current goroutine, which was consuming
+the channel, which caused a deadlock and eventual test timeout rather
+than a proper failure message.
+
+### Example execution
+
+```go
+G1 G2 G3
+----------------------------------------------------------
+testTransfer() . .
+tm.Transfer() . .
+t.Watch() . .
+. WriteProgress() .
+. ProgressChan<- .
+. . <-progressChan
+. ... ...
+. return .
+.
<-progressChan +<-watcher.running +----------------------G1,G3 leak-------------------------- +``` + +## Moby/25384 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[moby#25384]|[pull request]|[patch]| Mixed | Misuse WaitGroup | + +[moby#25384]:(moby25384_test.go) +[patch]:https://github.com/moby/moby/pull/25384/files +[pull request]:https://github.com/moby/moby/pull/25384 + +### Description + +When `n=1` (where `n` is `len(pm.plugins)`), the location of `group.Wait()` doesn’t matter. +When `n > 1`, `group.Wait()` is invoked in each iteration. Whenever +`group.Wait()` is invoked, it waits for `group.Done()` to be executed `n` times. +However, `group.Done()` is only executed once in one iteration. + +Misuse of sync.WaitGroup + +## Moby/27782 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[moby#27782]|[pull request]|[patch]| Communication | Channel & Condition Variable | + +[moby#27782]:(moby27782_test.go) +[patch]:https://github.com/moby/moby/pull/27782/files +[pull request]:https://github.com/moby/moby/pull/27782 + +### Description + +### Example execution + +```go +G1 G2 G3 +----------------------------------------------------------------------- +InitializeStdio() +startLogging() +l.ReadLogs() +NewLogWatcher() +. l.readLogs() +container.Reset() . +LogDriver.Close() . +r.Close() . +close(w.closeNotifier) . +. followLogs(logWatcher) +. watchFile() +. New() +. NewEventWatcher() +. NewWatcher() +. . w.readEvents() +. . event.ignoreLinux() +. . return false +. <-logWatcher.WatchClose() . +. fileWatcher.Remove() . +. w.cv.Wait() . +. . w.Events <- event +------------------------------G2,G3 leak------------------------------- +``` + +## Moby/28462 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[moby#28462]|[pull request]|[patch]| Mixed | Channel & Lock | + +[moby#28462]:(moby28462_test.go) +[patch]:https://github.com/moby/moby/pull/28462/files +[pull request]:https://github.com/moby/moby/pull/28462 + +### Description + +One goroutine may acquire a lock and try to send a message over channel `stop`, +while the other will try to acquire the same lock. With the wrong ordering, +both goroutines will leak. + +### Example execution + +```go +G1 G2 +-------------------------------------------------------------- +monitor() +handleProbeResult() +. d.StateChanged() +. c.Lock() +. d.updateHealthMonitorElseBranch() +. h.CloseMonitorChannel() +. s.stop <- struct{}{} +c.Lock() +----------------------G1,G2 leak------------------------------ +``` + +## Moby/30408 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[moby#30408]|[pull request]|[patch]| Communication | Condition Variable | + +[moby#30408]:(moby30408_test.go) +[patch]:https://github.com/moby/moby/pull/30408/files +[pull request]:https://github.com/moby/moby/pull/30408 + +### Description + +`Wait()` at line 22 has no corresponding `Signal()` or `Broadcast()`. + +### Example execution + +```go +G1 G2 +------------------------------------------ +testActive() +. p.waitActive() +. p.activateWait.L.Lock() +. 
p.activateWait.Wait() +<-done +-----------------G1,G2 leak--------------- +``` + +## Moby/33781 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[moby#33781]|[pull request]|[patch]| Communication | Channel & Context | + +[moby#33781]:(moby33781_test.go) +[patch]:https://github.com/moby/moby/pull/33781/files +[pull request]:https://github.com/moby/moby/pull/33781 + +### Description + +The goroutine created using an anonymous function is blocked +sending a message over an unbuffered channel. However there +exists a path in the parent goroutine where the parent function +will return without draining the channel. + +### Example execution + +```go +G1 G2 G3 +---------------------------------------- +monitor() . +<-time.After() . +. . +<-stop stop<- +. +cancelProbe() +return +. result<- +----------------G3 leak------------------ +``` + +## Moby/36114 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[moby#36114]|[pull request]|[patch]| Resource | Double locking | + +[moby#36114]:(moby36114_test.go) +[patch]:https://github.com/moby/moby/pull/36114/files +[pull request]:https://github.com/moby/moby/pull/36114 + +### Description + +The the lock for the struct svm has already been locked when calling +`svm.hotRemoveVHDsAtStart()`. + +## Moby/4951 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[moby#4951]|[pull request]|[patch]| Resource | AB-BA deadlock | + +[moby#4951]:(moby4951_test.go) +[patch]:https://github.com/moby/moby/pull/4951/files +[pull request]:https://github.com/moby/moby/pull/4951 + +### Description + +The root cause and patch is clearly explained in the commit +description. The global lock is `devices.Lock()`, and the device +lock is `baseInfo.lock.Lock()`. It is very likely that this bug +can be reproduced. + +## Moby/7559 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[moby#7559]|[pull request]|[patch]| Resource | Double locking | + +[moby#7559]:(moby7559_test.go) +[patch]:https://github.com/moby/moby/pull/7559/files +[pull request]:https://github.com/moby/moby/pull/7559 + +### Description + +Line 25 is missing a call to `.Unlock`. + +### Example execution + +```go +G1 +--------------------------- +proxy.connTrackLock.Lock() +if err != nil { continue } +proxy.connTrackLock.Lock() +-----------G1 leaks-------- +``` + +## Serving/2137 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[serving#2137]|[pull request]|[patch]| Mixed | Channel & Lock | + +[serving#2137]:(serving2137_test.go) +[patch]:https://github.com/ knative/serving/pull/2137/files +[pull request]:https://github.com/ knative/serving/pull/2137 + +### Description + +### Example execution + +```go +G1 G2 G3 +---------------------------------------------------------------------------------- +b.concurrentRequests(2) . . +b.concurrentRequest() . . +r.lock.Lock() . . +. start.Done() . +start.Wait() . . +b.concurrentRequest() . . +r.lock.Lock() . . +. . start.Done() +start.Wait() . . +unlockAll(locks) . . +unlock(lc) . . +req.lock.Unlock() . . +ok := <-req.accepted . . +. b.Maybe() . +. b.activeRequests <- t . +. thunk() . +. r.lock.Lock() . +. . b.Maybe() +. . 
b.activeRequests <- t +----------------------------G1,G2,G3 leak----------------------------------------- +``` + +## Syncthing/4829 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[syncthing#4829]|[pull request]|[patch]| Resource | Double locking | + +[syncthing#4829]:(syncthing4829_test.go) +[patch]:https://github.com/syncthing/syncthing/pull/4829/files +[pull request]:https://github.com/syncthing/syncthing/pull/4829 + +### Description + +Double locking at line 17 and line 30. + +### Example execution + +```go +G1 +--------------------------- +mapping.clearAddresses() +m.mut.Lock() [L2] +m.notify(...) +m.mut.RLock() [L2] +----------G1 leaks--------- +``` + +## Syncthing/5795 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[syncthing#5795]|[pull request]|[patch]| Communication | Channel | + +[syncthing#5795]:(syncthing5795_test.go) +[patch]:https://github.com/syncthing/syncthing/pull/5795/files +[pull request]:https://github.com/syncthing/syncthing/pull/5795 + +### Description + +`<-c.dispatcherLoopStopped` at line 82 is blocking forever because +`dispatcherLoop()` is blocking at line 72. + +### Example execution + +```go +G1 G2 +-------------------------------------------------------------- +c.Start() +go c.dispatcherLoop() [G3] +. select [<-c.inbox, <-c.closed] +c.inbox <- <================> [<-c.inbox] +<-c.dispatcherLoopStopped . +. default +. c.ccFn()/c.Close() +. close(c.closed) +. <-c.dispatcherLoopStopped +---------------------G1,G2 leak------------------------------- +``` \ No newline at end of file diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach10214.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach10214.go new file mode 100644 index 00000000000000..4f5ef3b0fc32fc --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach10214.go @@ -0,0 +1,145 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. + +/* + * Project: cockroach + * Issue or PR : https://github.com/cockroachdb/cockroach/pull/10214 + * Buggy version: 7207111aa3a43df0552509365fdec741a53f873f + * fix commit-id: 27e863d90ab0660494778f1c35966cc5ddc38e32 + * Flaky: 3/100 + * Description: This goroutine leak is caused by different order when acquiring + * coalescedMu.Lock() and raftMu.Lock(). The fix is to refactor sendQueuedHeartbeats() + * so that cockroachdb can unlock coalescedMu before locking raftMu. 
+ */ +package main + +import ( + "os" + "runtime/pprof" + "sync" + "time" + "unsafe" +) + +func init() { + register("Cockroach10214", Cockroach10214) +} + +type Store_cockroach10214 struct { + coalescedMu struct { + sync.Mutex // L1 + heartbeatResponses []int + } + mu struct { + replicas map[int]*Replica_cockroach10214 + } +} + +func (s *Store_cockroach10214) sendQueuedHeartbeats() { + s.coalescedMu.Lock() // L1 acquire + defer s.coalescedMu.Unlock() // L2 release + for i := 0; i < len(s.coalescedMu.heartbeatResponses); i++ { + s.sendQueuedHeartbeatsToNode() // L2 + } +} + +func (s *Store_cockroach10214) sendQueuedHeartbeatsToNode() { + for i := 0; i < len(s.mu.replicas); i++ { + r := s.mu.replicas[i] + r.reportUnreachable() // L2 + } +} + +type Replica_cockroach10214 struct { + raftMu sync.Mutex // L2 + mu sync.Mutex // L3 + store *Store_cockroach10214 +} + +func (r *Replica_cockroach10214) reportUnreachable() { + r.raftMu.Lock() // L2 acquire + time.Sleep(time.Millisecond) + defer r.raftMu.Unlock() // L2 release +} + +func (r *Replica_cockroach10214) tick() { + r.raftMu.Lock() // L2 acquire + defer r.raftMu.Unlock() // L2 release + r.tickRaftMuLocked() +} + +func (r *Replica_cockroach10214) tickRaftMuLocked() { + r.mu.Lock() // L3 acquire + defer r.mu.Unlock() // L3 release + if r.maybeQuiesceLocked() { + return + } +} + +func (r *Replica_cockroach10214) maybeQuiesceLocked() bool { + for i := 0; i < 2; i++ { + if !r.maybeCoalesceHeartbeat() { + return true + } + } + return false +} + +func (r *Replica_cockroach10214) maybeCoalesceHeartbeat() bool { + msgtype := uintptr(unsafe.Pointer(r)) % 3 + switch msgtype { + case 0, 1, 2: + r.store.coalescedMu.Lock() // L1 acquire + default: + return false + } + r.store.coalescedMu.Unlock() // L1 release + return true +} + +func Cockroach10214() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + for i := 0; i < 1000; i++ { + go func() { + store := &Store_cockroach10214{} + responses := &store.coalescedMu.heartbeatResponses + *responses = append(*responses, 1, 2) + store.mu.replicas = make(map[int]*Replica_cockroach10214) + + rp1 := &Replica_cockroach10214{ // L2,3[0] + store: store, + } + rp2 := &Replica_cockroach10214{ // L2,3[1] + store: store, + } + store.mu.replicas[0] = rp1 + store.mu.replicas[1] = rp2 + + go store.sendQueuedHeartbeats() // G1 + go rp1.tick() // G2 + }() + } +} + +// Example of goroutine leak trace: +// +// G1 G2 +//------------------------------------------------------------------------------------ +// s.sendQueuedHeartbeats() . +// s.coalescedMu.Lock() [L1] . +// s.sendQueuedHeartbeatsToNode() . +// s.mu.replicas[0].reportUnreachable() . +// s.mu.replicas[0].raftMu.Lock() [L2] . +// . s.mu.replicas[0].tick() +// . s.mu.replicas[0].raftMu.Lock() [L2] +// . s.mu.replicas[0].tickRaftMuLocked() +// . s.mu.replicas[0].mu.Lock() [L3] +// . s.mu.replicas[0].maybeQuiesceLocked() +// . s.mu.replicas[0].maybeCoalesceHeartbeat() +// . s.coalescedMu.Lock() [L1] +//--------------------------------G1,G2 leak------------------------------------------ \ No newline at end of file diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach1055.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach1055.go new file mode 100644 index 00000000000000..687baed25a2a44 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach1055.go @@ -0,0 +1,115 @@ +// Copyright 2025 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. + +package main + +import ( + "os" + "runtime/pprof" + "sync" + "sync/atomic" + "time" +) + +func init() { + register("Cockroach1055", Cockroach1055) +} + +type Stopper_cockroach1055 struct { + stopper chan struct{} + stop sync.WaitGroup + mu sync.Mutex + draining int32 + drain sync.WaitGroup +} + +func (s *Stopper_cockroach1055) AddWorker() { + s.stop.Add(1) +} + +func (s *Stopper_cockroach1055) ShouldStop() <-chan struct{} { + if s == nil { + return nil + } + return s.stopper +} + +func (s *Stopper_cockroach1055) SetStopped() { + if s != nil { + s.stop.Done() + } +} + +func (s *Stopper_cockroach1055) Quiesce() { + s.mu.Lock() + defer s.mu.Unlock() + s.draining = 1 + s.drain.Wait() + s.draining = 0 +} + +func (s *Stopper_cockroach1055) Stop() { + s.mu.Lock() // L1 + defer s.mu.Unlock() + atomic.StoreInt32(&s.draining, 1) + s.drain.Wait() + close(s.stopper) + s.stop.Wait() +} + +func (s *Stopper_cockroach1055) StartTask() bool { + if atomic.LoadInt32(&s.draining) == 0 { + s.mu.Lock() + defer s.mu.Unlock() + s.drain.Add(1) + return true + } + return false +} + +func NewStopper_cockroach1055() *Stopper_cockroach1055 { + return &Stopper_cockroach1055{ + stopper: make(chan struct{}), + } +} + +func Cockroach1055() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + + for i := 0; i <= 1000; i++ { + go func() { // G1 + var stoppers []*Stopper_cockroach1055 + for i := 0; i < 2; i++ { + stoppers = append(stoppers, NewStopper_cockroach1055()) + } + + for i := range stoppers { + s := stoppers[i] + s.AddWorker() + go func() { // G2 + s.StartTask() + <-s.ShouldStop() + s.SetStopped() + }() + } + + done := make(chan struct{}) + go func() { // G3 + for _, s := range stoppers { + s.Quiesce() + } + for _, s := range stoppers { + s.Stop() + } + close(done) + }() + + <-done + }() + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach10790.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach10790.go new file mode 100644 index 00000000000000..636f45b3e0e765 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach10790.go @@ -0,0 +1,98 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. + +/* + * Project: cockroach + * Issue or PR : https://github.com/cockroachdb/cockroach/pull/10790 + * Buggy version: 96b5452557ebe26bd9d85fe7905155009204d893 + * fix commit-id: f1a5c19125c65129b966fbdc0e6408e8df214aba + * Flaky: 28/100 + * Description: + * It is possible that a message from ctxDone will make the function beginCmds + * returns without draining the channel ch, so that goroutines created by anonymous + * function will leak. 
+ */ + +package main + +import ( + "context" + "os" + "runtime/pprof" + "time" +) + +func init() { + register("Cockroach10790", Cockroach10790) +} + +type Replica_cockroach10790 struct { + chans []chan bool +} + +func (r *Replica_cockroach10790) beginCmds(ctx context.Context) { + ctxDone := ctx.Done() + for _, ch := range r.chans { + select { + case <-ch: + case <-ctxDone: + go func() { // G3 + for _, ch := range r.chans { + <-ch + } + }() + } + } +} + +func (r *Replica_cockroach10790) sendChans(ctx context.Context) { + for _, ch := range r.chans { + select { + case ch <- true: + case <-ctx.Done(): + return + } + } +} + +func NewReplica_cockroach10790() *Replica_cockroach10790 { + r := &Replica_cockroach10790{} + r.chans = append(r.chans, make(chan bool), make(chan bool)) + return r +} + +// Example of goroutine leak trace: +// +// G1 G2 G3 helper goroutine +//-------------------------------------------------------------------------------------- +// . . r.sendChans() +// r.beginCmds() . . +// . . ch1 <- +// <-ch1 <================================================> ch1 <- +// . . select [ch2<-, <-ctx.Done()] +// . cancel() . +// . <> [<-ctx.Done()] ==> return +// . <> +// go func() [G3] . +// . <-ch1 +// ------------------------------G3 leaks---------------------------------------------- +// + +func Cockroach10790() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + + for i := 0; i < 100; i++ { + go func() { + r := NewReplica_cockroach10790() + ctx, cancel := context.WithCancel(context.Background()) + go r.sendChans(ctx) // helper goroutine + go r.beginCmds(ctx) // G1 + go cancel() // G2 + }() + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach13197.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach13197.go new file mode 100644 index 00000000000000..a0a9a792676692 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach13197.go @@ -0,0 +1,82 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. + +/* + * Project: cockroach + * Issue or PR : https://github.com/cockroachdb/cockroach/pull/13197 + * Buggy version: fff27aedabafe20cef57f75905fe340cab48c2a4 + * fix commit-id: 9bf770cd8f6eaff5441b80d3aec1a5614e8747e1 + * Flaky: 100/100 + * Description: One goroutine executing (*Tx).awaitDone() blocks + * waiting for a signal over context.Done() that never comes. + */ +package main + +import ( + "context" + "os" + "runtime" + "runtime/pprof" +) + +func init() { + register("Cockroach13197", Cockroach13197) +} + +type DB_cockroach13197 struct{} + +func (db *DB_cockroach13197) begin(ctx context.Context) *Tx_cockroach13197 { + ctx, cancel := context.WithCancel(ctx) + tx := &Tx_cockroach13197{ + cancel: cancel, + ctx: ctx, + } + go tx.awaitDone() // G2 + return tx +} + +type Tx_cockroach13197 struct { + cancel context.CancelFunc + ctx context.Context +} + +func (tx *Tx_cockroach13197) awaitDone() { + <-tx.ctx.Done() +} + +func (tx *Tx_cockroach13197) Rollback() { + tx.rollback() +} + +func (tx *Tx_cockroach13197) rollback() { + tx.close() +} + +func (tx *Tx_cockroach13197) close() { + tx.cancel() +} + +// Example of goroutine leak trace: +// +// G1 G2 +//-------------------------------- +// begin() +// . awaitDone() +// <> . 
+// <-tx.ctx.Done() +//------------G2 leak------------- + +func Cockroach13197() { + prof := pprof.Lookup("goroutineleak") + defer func() { + // Yield several times to allow the child goroutine to run. + for i := 0; i < yieldCount; i++ { + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) + }() + + db := &DB_cockroach13197{} + db.begin(context.Background()) // G1 +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach13755.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach13755.go new file mode 100644 index 00000000000000..5ef6fa1e28a06d --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach13755.go @@ -0,0 +1,66 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. + +/* + * Project: cockroach + * Issue or PR : https://github.com/cockroachdb/cockroach/pull/13755 + * Buggy version: 7acb881bbb8f23e87b69fce9568d9a3316b5259c + * fix commit-id: ef906076adc1d0e3721944829cfedfed51810088 + * Flaky: 100/100 + */ + +package main + +import ( + "context" + "os" + "runtime" + "runtime/pprof" +) + +func init() { + register("Cockroach13755", Cockroach13755) +} + +type Rows_cockroach13755 struct { + cancel context.CancelFunc +} + +func (rs *Rows_cockroach13755) initContextClose(ctx context.Context) { + ctx, rs.cancel = context.WithCancel(ctx) + go rs.awaitDone(ctx) +} + +func (rs *Rows_cockroach13755) awaitDone(ctx context.Context) { + <-ctx.Done() + rs.close(ctx.Err()) +} + +func (rs *Rows_cockroach13755) close(err error) { + rs.cancel() +} + +// Example of goroutine leak trace: +// +// G1 G2 +//---------------------------------------- +// initContextClose() +// . awaitDone() +// <> . +// <-tx.ctx.Done() +//----------------G2 leak----------------- + +func Cockroach13755() { + prof := pprof.Lookup("goroutineleak") + defer func() { + // Yield several times to allow the child goroutine to run. + for i := 0; i < yieldCount; i++ { + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) + }() + + rs := &Rows_cockroach13755{} + rs.initContextClose(context.Background()) +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach1462.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach1462.go new file mode 100644 index 00000000000000..108d7884a3d82b --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach1462.go @@ -0,0 +1,167 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
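+
+/*
+ * Project: cockroach
+ * Issue or PR : cockroachdb/cockroach#1462
+ * Description (summarized from the test code below, not the upstream report):
+ * The transport worker publishes to the unbuffered lt.Events channel from the
+ * default branch of its select loop. Once the consumer worker observes
+ * ShouldStop() and returns, the producer stays blocked on that send and never
+ * re-checks ShouldStop(), so Stopper.Stop() blocks in s.stop.Wait() and the
+ * goroutines leak.
+ */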
+ +package main + +import ( + "os" + "runtime" + "runtime/pprof" + "sync" + "time" +) + +func init() { + register("Cockroach1462", Cockroach1462) +} + +type Stopper_cockroach1462 struct { + stopper chan struct{} + stopped chan struct{} + stop sync.WaitGroup + mu sync.Mutex + drain *sync.Cond + draining bool + numTasks int +} + +func NewStopper_cockroach1462() *Stopper_cockroach1462 { + s := &Stopper_cockroach1462{ + stopper: make(chan struct{}), + stopped: make(chan struct{}), + } + s.drain = sync.NewCond(&s.mu) + return s +} + +func (s *Stopper_cockroach1462) RunWorker(f func()) { + s.AddWorker() + go func() { // G2, G3 + defer s.SetStopped() + f() + }() +} + +func (s *Stopper_cockroach1462) AddWorker() { + s.stop.Add(1) +} +func (s *Stopper_cockroach1462) StartTask() bool { + s.mu.Lock() + runtime.Gosched() + defer s.mu.Unlock() + if s.draining { + return false + } + s.numTasks++ + return true +} + +func (s *Stopper_cockroach1462) FinishTask() { + s.mu.Lock() + runtime.Gosched() + defer s.mu.Unlock() + s.numTasks-- + s.drain.Broadcast() +} +func (s *Stopper_cockroach1462) SetStopped() { + if s != nil { + s.stop.Done() + } +} +func (s *Stopper_cockroach1462) ShouldStop() <-chan struct{} { + if s == nil { + return nil + } + return s.stopper +} + +func (s *Stopper_cockroach1462) Quiesce() { + s.mu.Lock() + runtime.Gosched() + defer s.mu.Unlock() + s.draining = true + for s.numTasks > 0 { + // Unlock s.mu, wait for the signal, and lock s.mu. + s.drain.Wait() + } +} + +func (s *Stopper_cockroach1462) Stop() { + s.Quiesce() + close(s.stopper) + s.stop.Wait() + s.mu.Lock() + runtime.Gosched() + defer s.mu.Unlock() + close(s.stopped) +} + +type interceptMessage_cockroach1462 int + +type localInterceptableTransport_cockroach1462 struct { + mu sync.Mutex + Events chan interceptMessage_cockroach1462 + stopper *Stopper_cockroach1462 +} + +func (lt *localInterceptableTransport_cockroach1462) Close() {} + +type Transport_cockroach1462 interface { + Close() +} + +func NewLocalInterceptableTransport_cockroach1462(stopper *Stopper_cockroach1462) Transport_cockroach1462 { + lt := &localInterceptableTransport_cockroach1462{ + Events: make(chan interceptMessage_cockroach1462), + stopper: stopper, + } + lt.start() + return lt +} + +func (lt *localInterceptableTransport_cockroach1462) start() { + lt.stopper.RunWorker(func() { + for { + select { + case <-lt.stopper.ShouldStop(): + return + default: + lt.Events <- interceptMessage_cockroach1462(0) + } + } + }) +} + +func processEventsUntil_cockroach1462(ch <-chan interceptMessage_cockroach1462, stopper *Stopper_cockroach1462) { + for { + select { + case _, ok := <-ch: + runtime.Gosched() + if !ok { + return + } + case <-stopper.ShouldStop(): + return + } + } +} + +func Cockroach1462() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(2000 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + for i := 0; i <= 1000; i++ { + go func() { // G1 + stopper := NewStopper_cockroach1462() + transport := NewLocalInterceptableTransport_cockroach1462(stopper).(*localInterceptableTransport_cockroach1462) + stopper.RunWorker(func() { + processEventsUntil_cockroach1462(transport.Events, stopper) + }) + stopper.Stop() + }() + } +} + diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach16167.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach16167.go new file mode 100644 index 00000000000000..4cd14c7a5b3ac5 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach16167.go @@ -0,0 +1,108 @@ +// 
Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. + +/* + * Project: cockroach + * Issue or PR : https://github.com/cockroachdb/cockroach/pull/16167 + * Buggy version: 36fa784aa846b46c29e077634c4e362635f6e74a + * fix commit-id: d064942b067ab84628f79cbfda001fa3138d8d6e + * Flaky: 1/100 + */ + +package main + +import ( + "os" + "runtime" + "runtime/pprof" + "sync" + "time" +) + +func init() { + register("Cockroach16167", Cockroach16167) +} + +type PreparedStatements_cockroach16167 struct { + session *Session_cockroach16167 +} + +func (ps PreparedStatements_cockroach16167) New(e *Executor_cockroach16167) { + e.Prepare(ps.session) +} + +type Session_cockroach16167 struct { + PreparedStatements PreparedStatements_cockroach16167 +} + +func (s *Session_cockroach16167) resetForBatch(e *Executor_cockroach16167) { + e.getDatabaseCache() +} + +type Executor_cockroach16167 struct { + systemConfigCond *sync.Cond + systemConfigMu sync.RWMutex // L1 +} + +func (e *Executor_cockroach16167) Start() { + e.updateSystemConfig() +} + +func (e *Executor_cockroach16167) execParsed(session *Session_cockroach16167) { + e.systemConfigCond.L.Lock() // Same as e.systemConfigMu.RLock() + runtime.Gosched() + defer e.systemConfigCond.L.Unlock() + runTxnAttempt_cockroach16167(e, session) +} + +func (e *Executor_cockroach16167) execStmtsInCurrentTxn(session *Session_cockroach16167) { + e.execStmtInOpenTxn(session) +} + +func (e *Executor_cockroach16167) execStmtInOpenTxn(session *Session_cockroach16167) { + session.PreparedStatements.New(e) +} + +func (e *Executor_cockroach16167) Prepare(session *Session_cockroach16167) { + session.resetForBatch(e) +} + +func (e *Executor_cockroach16167) getDatabaseCache() { + e.systemConfigMu.RLock() + defer e.systemConfigMu.RUnlock() +} + +func (e *Executor_cockroach16167) updateSystemConfig() { + e.systemConfigMu.Lock() + runtime.Gosched() + defer e.systemConfigMu.Unlock() +} + +func runTxnAttempt_cockroach16167(e *Executor_cockroach16167, session *Session_cockroach16167) { + e.execStmtsInCurrentTxn(session) +} + +func NewExectorAndSession_cockroach16167() (*Executor_cockroach16167, *Session_cockroach16167) { + session := &Session_cockroach16167{} + session.PreparedStatements = PreparedStatements_cockroach16167{session} + e := &Executor_cockroach16167{} + return e, session +} + +func Cockroach16167() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + + for i := 0; i < 100; i++ { + go func() { // G1 + e, s := NewExectorAndSession_cockroach16167() + e.systemConfigCond = sync.NewCond(e.systemConfigMu.RLocker()) + go e.Start() // G2 + e.execParsed(s) + }() + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach18101.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach18101.go new file mode 100644 index 00000000000000..17b03203304ac7 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach18101.go @@ -0,0 +1,60 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
+ +/* + * Project: cockroach + * Issue or PR : https://github.com/cockroachdb/cockroach/pull/18101 + * Buggy version: f7a8e2f57b6bcf00b9abaf3da00598e4acd3a57f + * fix commit-id: 822bd176cc725c6b50905ea615023200b395e14f + * Flaky: 100/100 + */ + +package main + +import ( + "context" + "os" + "runtime/pprof" + "time" +) + +func init() { + register("Cockroach18101", Cockroach18101) +} + +const chanSize_cockroach18101 = 6 + +func restore_cockroach18101(ctx context.Context) bool { + readyForImportCh := make(chan bool, chanSize_cockroach18101) + go func() { // G2 + defer close(readyForImportCh) + splitAndScatter_cockroach18101(ctx, readyForImportCh) + }() + for readyForImportSpan := range readyForImportCh { + select { + case <-ctx.Done(): + return readyForImportSpan + } + } + return true +} + +func splitAndScatter_cockroach18101(ctx context.Context, readyForImportCh chan bool) { + for i := 0; i < chanSize_cockroach18101+2; i++ { + readyForImportCh <- (false || i != 0) + } +} + +func Cockroach18101() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + for i := 0; i < 100; i++ { + ctx, cancel := context.WithCancel(context.Background()) + go restore_cockroach18101(ctx) // G1 + go cancel() // helper goroutine + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach2448.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach2448.go new file mode 100644 index 00000000000000..a7544bc8a46a18 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach2448.go @@ -0,0 +1,125 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
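+
+/*
+ * Project: cockroach
+ * Issue or PR : cockroachdb/cockroach#2448
+ * Description (summarized from the test code below, not the upstream report):
+ * Store.processRaft() receives an event and runs its callback, which blocks
+ * sending on the unbuffered callbackChan, while state.start() has re-entered
+ * its default branch and is blocked in sendEvent() sending the next event on
+ * the unbuffered Events channel. Neither goroutine can reach the case that
+ * would unblock the other, so both leak.
+ */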
+ +package main + +import ( + "os" + "runtime" + "runtime/pprof" + "time" +) + +func init() { + register("Cockroach2448", Cockroach2448) +} + +type Stopper_cockroach2448 struct { + Done chan bool +} + +func (s *Stopper_cockroach2448) ShouldStop() <-chan bool { + return s.Done +} + +type EventMembershipChangeCommitted_cockroach2448 struct { + Callback func() +} + +type MultiRaft_cockroach2448 struct { + stopper *Stopper_cockroach2448 + Events chan interface{} + callbackChan chan func() +} + +// sendEvent can be invoked many times +func (m *MultiRaft_cockroach2448) sendEvent(event interface{}) { + select { + case m.Events <- event: // Waiting for events consumption + case <-m.stopper.ShouldStop(): + } +} + +type state_cockroach2448 struct { + *MultiRaft_cockroach2448 +} + +func (s *state_cockroach2448) start() { + for { + select { + case <-s.stopper.ShouldStop(): + return + case cb := <-s.callbackChan: + cb() + default: + s.handleWriteResponse() + time.Sleep(100 * time.Microsecond) + } + } +} + +func (s *state_cockroach2448) handleWriteResponse() { + s.sendEvent(&EventMembershipChangeCommitted_cockroach2448{ + Callback: func() { + select { + case s.callbackChan <- func() { // Waiting for callbackChan consumption + time.Sleep(time.Nanosecond) + }: + case <-s.stopper.ShouldStop(): + } + }, + }) +} + +type Store_cockroach2448 struct { + multiraft *MultiRaft_cockroach2448 +} + +func (s *Store_cockroach2448) processRaft() { + for { + select { + case e := <-s.multiraft.Events: + switch e := e.(type) { + case *EventMembershipChangeCommitted_cockroach2448: + callback := e.Callback + runtime.Gosched() + if callback != nil { + callback() // Waiting for callbackChan consumption + } + } + case <-s.multiraft.stopper.ShouldStop(): + return + } + } +} + +func NewStoreAndState_cockroach2448() (*Store_cockroach2448, *state_cockroach2448) { + stopper := &Stopper_cockroach2448{ + Done: make(chan bool), + } + mltrft := &MultiRaft_cockroach2448{ + stopper: stopper, + Events: make(chan interface{}), + callbackChan: make(chan func()), + } + st := &state_cockroach2448{mltrft} + s := &Store_cockroach2448{mltrft} + return s, st +} + +func Cockroach2448() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + for i := 0; i < 1000; i++ { + go func() { + s, st := NewStoreAndState_cockroach2448() + go s.processRaft() // G1 + go st.start() // G2 + }() + } +} + diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach24808.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach24808.go new file mode 100644 index 00000000000000..a916d3c928ee2e --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach24808.go @@ -0,0 +1,78 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
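+
+/*
+ * Project: cockroach
+ * Issue or PR : cockroachdb/cockroach#24808
+ * Description (summarized from the test code below, not the upstream report):
+ * The test fills compactor.ch (capacity 1) before calling Start(). Start()
+ * sends on c.ch again before launching the worker that drains the channel,
+ * so the send blocks forever and the goroutine leaks.
+ */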
+ +package main + +import ( + "context" + "os" + "runtime" + "runtime/pprof" + "sync" +) + +func init() { + register("Cockroach24808", Cockroach24808) +} + +type Compactor_cockroach24808 struct { + ch chan struct{} +} + +type Stopper_cockroach24808 struct { + stop sync.WaitGroup + stopper chan struct{} +} + +func (s *Stopper_cockroach24808) RunWorker(ctx context.Context, f func(context.Context)) { + s.stop.Add(1) + go func() { + defer s.stop.Done() + f(ctx) + }() +} + +func (s *Stopper_cockroach24808) ShouldStop() <-chan struct{} { + if s == nil { + return nil + } + return s.stopper +} + +func (s *Stopper_cockroach24808) Stop() { + close(s.stopper) +} + +func (c *Compactor_cockroach24808) Start(ctx context.Context, stopper *Stopper_cockroach24808) { + c.ch <- struct{}{} + stopper.RunWorker(ctx, func(ctx context.Context) { + for { + select { + case <-stopper.ShouldStop(): + return + case <-c.ch: + } + } + }) +} + +func Cockroach24808() { + prof := pprof.Lookup("goroutineleak") + defer func() { + // Yield several times to allow the child goroutine to run. + for i := 0; i < yieldCount; i++ { + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) + }() + go func() { // G1 + stopper := &Stopper_cockroach24808{stopper: make(chan struct{})} + defer stopper.Stop() + + compactor := &Compactor_cockroach24808{ch: make(chan struct{}, 1)} + compactor.ch <- struct{}{} + + compactor.Start(context.Background(), stopper) + }() +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach25456.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach25456.go new file mode 100644 index 00000000000000..b9259c9f918293 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach25456.go @@ -0,0 +1,92 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
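+
+/*
+ * Project: cockroach
+ * Issue or PR : cockroachdb/cockroach#25456
+ * Description (summarized from the test code below, not the upstream report):
+ * consistencyQueue.process() blocks receiving from Stopper.ShouldQuiesce(),
+ * and the quiescer channel is never closed, so the goroutine leaks on the
+ * first call to process().
+ */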
+ +package main + +import ( + "os" + "runtime" + "runtime/pprof" +) + +func init() { + register("Cockroach25456", Cockroach25456) +} + +type Stopper_cockroach25456 struct { + quiescer chan struct{} +} + +func (s *Stopper_cockroach25456) ShouldQuiesce() <-chan struct{} { + if s == nil { + return nil + } + return s.quiescer +} + +func NewStopper_cockroach25456() *Stopper_cockroach25456 { + return &Stopper_cockroach25456{quiescer: make(chan struct{})} +} + +type Store_cockroach25456 struct { + stopper *Stopper_cockroach25456 + consistencyQueue *consistencyQueue_cockroach25456 +} + +func (s *Store_cockroach25456) Stopper() *Stopper_cockroach25456 { + return s.stopper +} + +type Replica_cockroach25456 struct { + store *Store_cockroach25456 +} + +func NewReplica_cockroach25456(store *Store_cockroach25456) *Replica_cockroach25456 { + return &Replica_cockroach25456{store: store} +} + +type consistencyQueue_cockroach25456 struct{} + +func (q *consistencyQueue_cockroach25456) process(repl *Replica_cockroach25456) { + <-repl.store.Stopper().ShouldQuiesce() +} + +func newConsistencyQueue_cockroach25456() *consistencyQueue_cockroach25456 { + return &consistencyQueue_cockroach25456{} +} + +type testContext_cockroach25456 struct { + store *Store_cockroach25456 + repl *Replica_cockroach25456 +} + +func (tc *testContext_cockroach25456) StartWithStoreConfig(stopper *Stopper_cockroach25456) { + if tc.store == nil { + tc.store = &Store_cockroach25456{ + consistencyQueue: newConsistencyQueue_cockroach25456(), + } + } + tc.store.stopper = stopper + tc.repl = NewReplica_cockroach25456(tc.store) +} + +func Cockroach25456() { + prof := pprof.Lookup("goroutineleak") + defer func() { + // Yield several times to allow the child goroutine to run. + for i := 0; i < yieldCount; i++ { + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) + }() + go func() { // G1 + stopper := NewStopper_cockroach25456() + tc := testContext_cockroach25456{} + tc.StartWithStoreConfig(stopper) + + for i := 0; i < 2; i++ { + tc.store.consistencyQueue.process(tc.repl) + } + }() +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach35073.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach35073.go new file mode 100644 index 00000000000000..f00a7bd46259eb --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach35073.go @@ -0,0 +1,124 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
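+
+/*
+ * Project: cockroach
+ * Issue or PR : cockroachdb/cockroach#35073
+ * Description (summarized from the test code below, not the upstream report):
+ * The initial Push() calls fill the outbox's RowChannel buffer. Two further
+ * Push() calls, one from a helper goroutine and one from the spawning
+ * goroutine, then block on the full dataChan; nothing drains the channel or
+ * marks the consumer closed, so both goroutines leak.
+ */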
+ +package main + +import ( + "os" + "runtime" + "runtime/pprof" + "sync" + "sync/atomic" +) + +func init() { + register("Cockroach35073", Cockroach35073) +} + +type ConsumerStatus_cockroach35073 uint32 + +const ( + NeedMoreRows_cockroach35073 ConsumerStatus_cockroach35073 = iota + DrainRequested_cockroach35073 + ConsumerClosed_cockroach35073 +) + +const rowChannelBufSize_cockroach35073 = 16 +const outboxBufRows_cockroach35073 = 16 + +type rowSourceBase_cockroach35073 struct { + consumerStatus ConsumerStatus_cockroach35073 +} + +func (rb *rowSourceBase_cockroach35073) consumerClosed() { + atomic.StoreUint32((*uint32)(&rb.consumerStatus), uint32(ConsumerClosed_cockroach35073)) +} + +type RowChannelMsg_cockroach35073 int + +type RowChannel_cockroach35073 struct { + rowSourceBase_cockroach35073 + dataChan chan RowChannelMsg_cockroach35073 +} + +func (rc *RowChannel_cockroach35073) ConsumerClosed() { + rc.consumerClosed() + select { + case <-rc.dataChan: + default: + } +} + +func (rc *RowChannel_cockroach35073) Push() ConsumerStatus_cockroach35073 { + consumerStatus := ConsumerStatus_cockroach35073( + atomic.LoadUint32((*uint32)(&rc.consumerStatus))) + switch consumerStatus { + case NeedMoreRows_cockroach35073: + rc.dataChan <- RowChannelMsg_cockroach35073(0) + case DrainRequested_cockroach35073: + case ConsumerClosed_cockroach35073: + } + return consumerStatus +} + +func (rc *RowChannel_cockroach35073) InitWithNumSenders() { + rc.initWithBufSizeAndNumSenders(rowChannelBufSize_cockroach35073) +} + +func (rc *RowChannel_cockroach35073) initWithBufSizeAndNumSenders(chanBufSize int) { + rc.dataChan = make(chan RowChannelMsg_cockroach35073, chanBufSize) +} + +type outbox_cockroach35073 struct { + RowChannel_cockroach35073 +} + +func (m *outbox_cockroach35073) init() { + m.RowChannel_cockroach35073.InitWithNumSenders() +} + +func (m *outbox_cockroach35073) start(wg *sync.WaitGroup) { + if wg != nil { + wg.Add(1) + } + go m.run(wg) +} + +func (m *outbox_cockroach35073) run(wg *sync.WaitGroup) { + if wg != nil { + wg.Done() + } +} + +func Cockroach35073() { + prof := pprof.Lookup("goroutineleak") + defer func() { + // Yield several times to allow the child goroutine to run. + for i := 0; i < yieldCount; i++ { + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) + }() + go func() { + outbox := &outbox_cockroach35073{} + outbox.init() + + var wg sync.WaitGroup + for i := 0; i < outboxBufRows_cockroach35073; i++ { + outbox.Push() + } + + var blockedPusherWg sync.WaitGroup + blockedPusherWg.Add(1) + go func() { + outbox.Push() + blockedPusherWg.Done() + }() + + outbox.start(&wg) + + wg.Wait() + outbox.RowChannel_cockroach35073.Push() + }() +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach35931.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach35931.go new file mode 100644 index 00000000000000..9ddcda1b6242bf --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach35931.go @@ -0,0 +1,135 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
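+
+/*
+ * Project: cockroach
+ * Issue or PR : cockroachdb/cockroach#35931
+ * Description (summarized from the test code below, not the upstream report):
+ * Flow.cancel() calls Push() on every pending inbound stream receiver. The
+ * "left" RowChannel's single buffer slot is already occupied by the earlier
+ * left.Push(), and no consumer drains dataChan, so the Push inside cancel()
+ * blocks forever.
+ */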
+ +package main + +import ( + "os" + "runtime" + "runtime/pprof" + "sync" +) + +func init() { + register("Cockroach35931", Cockroach35931) +} + +type RowReceiver_cockroach35931 interface { + Push() +} + +type inboundStreamInfo_cockroach35931 struct { + receiver RowReceiver_cockroach35931 +} + +type RowChannel_cockroach35931 struct { + dataChan chan struct{} +} + +func (rc *RowChannel_cockroach35931) Push() { + // The buffer size can be either 0 or 1 when this function is entered. + // We need context sensitivity or a path-condition on the buffer size + // to find this bug. + rc.dataChan <- struct{}{} +} + +func (rc *RowChannel_cockroach35931) initWithBufSizeAndNumSenders(chanBufSize int) { + rc.dataChan = make(chan struct{}, chanBufSize) +} + +type flowEntry_cockroach35931 struct { + flow *Flow_cockroach35931 + inboundStreams map[int]*inboundStreamInfo_cockroach35931 +} + +type flowRegistry_cockroach35931 struct { + sync.Mutex + flows map[int]*flowEntry_cockroach35931 +} + +func (fr *flowRegistry_cockroach35931) getEntryLocked(id int) *flowEntry_cockroach35931 { + entry, ok := fr.flows[id] + if !ok { + entry = &flowEntry_cockroach35931{} + fr.flows[id] = entry + } + return entry +} + +func (fr *flowRegistry_cockroach35931) cancelPendingStreamsLocked(id int) []RowReceiver_cockroach35931 { + entry := fr.flows[id] + pendingReceivers := make([]RowReceiver_cockroach35931, 0) + for _, is := range entry.inboundStreams { + pendingReceivers = append(pendingReceivers, is.receiver) + } + return pendingReceivers +} + +type Flow_cockroach35931 struct { + id int + flowRegistry *flowRegistry_cockroach35931 + inboundStreams map[int]*inboundStreamInfo_cockroach35931 +} + +func (f *Flow_cockroach35931) cancel() { + f.flowRegistry.Lock() + timedOutReceivers := f.flowRegistry.cancelPendingStreamsLocked(f.id) + f.flowRegistry.Unlock() + + for _, receiver := range timedOutReceivers { + receiver.Push() + } +} + +func (fr *flowRegistry_cockroach35931) RegisterFlow(f *Flow_cockroach35931, inboundStreams map[int]*inboundStreamInfo_cockroach35931) { + entry := fr.getEntryLocked(f.id) + entry.flow = f + entry.inboundStreams = inboundStreams +} + +func makeFlowRegistry_cockroach35931() *flowRegistry_cockroach35931 { + return &flowRegistry_cockroach35931{ + flows: make(map[int]*flowEntry_cockroach35931), + } +} + +func Cockroach35931() { + prof := pprof.Lookup("goroutineleak") + defer func() { + // Yield several times to allow the child goroutine to run. + for i := 0; i < yieldCount; i++ { + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) + }() + go func() { + fr := makeFlowRegistry_cockroach35931() + + left := &RowChannel_cockroach35931{} + left.initWithBufSizeAndNumSenders(1) + right := &RowChannel_cockroach35931{} + right.initWithBufSizeAndNumSenders(1) + + inboundStreams := map[int]*inboundStreamInfo_cockroach35931{ + 0: { + receiver: left, + }, + 1: { + receiver: right, + }, + } + + left.Push() + + flow := &Flow_cockroach35931{ + id: 0, + flowRegistry: fr, + inboundStreams: inboundStreams, + } + + fr.RegisterFlow(flow, inboundStreams) + + flow.cancel() + }() +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach3710.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach3710.go new file mode 100644 index 00000000000000..e419cd2fc32019 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach3710.go @@ -0,0 +1,122 @@ +// Copyright 2025 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. + +/* + * Project: cockroach + * Issue or PR : https://github.com/cockroachdb/cockroach/pull/3710 + * Buggy version: 4afdd4860fd7c3bd9e92489f84a95e5cc7d11a0d + * fix commit-id: cb65190f9caaf464723e7d072b1f1b69a044ef7b + * Flaky: 2/100 + */ + +package main + +import ( + "os" + "runtime" + "runtime/pprof" + "sync" + "time" + "unsafe" +) + +func init() { + register("Cockroach3710", Cockroach3710) +} + +type Store_cockroach3710 struct { + raftLogQueue *baseQueue + replicas map[int]*Replica_cockroach3710 + + mu struct { + sync.RWMutex + } +} + +func (s *Store_cockroach3710) ForceRaftLogScanAndProcess() { + s.mu.RLock() + runtime.Gosched() + for _, r := range s.replicas { + s.raftLogQueue.MaybeAdd(r) + } + s.mu.RUnlock() +} + +func (s *Store_cockroach3710) RaftStatus() { + s.mu.RLock() + defer s.mu.RUnlock() +} + +func (s *Store_cockroach3710) processRaft() { + go func() { + for { + var replicas []*Replica_cockroach3710 + s.mu.Lock() + for _, r := range s.replicas { + replicas = append(replicas, r) + } + s.mu.Unlock() + break + } + }() +} + +type Replica_cockroach3710 struct { + store *Store_cockroach3710 +} + +type baseQueue struct { + sync.Mutex + impl *raftLogQueue +} + +func (bq *baseQueue) MaybeAdd(repl *Replica_cockroach3710) { + bq.Lock() + defer bq.Unlock() + bq.impl.shouldQueue(repl) +} + +type raftLogQueue struct{} + +func (*raftLogQueue) shouldQueue(r *Replica_cockroach3710) { + getTruncatableIndexes(r) +} + +func getTruncatableIndexes(r *Replica_cockroach3710) { + r.store.RaftStatus() +} + +func NewStore_cockroach3710() *Store_cockroach3710 { + rlq := &raftLogQueue{} + bq := &baseQueue{impl: rlq} + store := &Store_cockroach3710{ + raftLogQueue: bq, + replicas: make(map[int]*Replica_cockroach3710), + } + r1 := &Replica_cockroach3710{store} + r2 := &Replica_cockroach3710{store} + + makeKey := func(r *Replica_cockroach3710) int { + return int((uintptr(unsafe.Pointer(r)) >> 1) % 7) + } + store.replicas[makeKey(r1)] = r1 + store.replicas[makeKey(r2)] = r2 + + return store +} + +func Cockroach3710() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + for i := 0; i < 10000; i++ { + go func() { + store := NewStore_cockroach3710() + go store.ForceRaftLogScanAndProcess() // G1 + go store.processRaft() // G2 + }() + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach584.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach584.go new file mode 100644 index 00000000000000..33f7ba7a45ec9a --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach584.go @@ -0,0 +1,62 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
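+
+/*
+ * Project: cockroach
+ * Issue or PR : cockroachdb/cockroach#584
+ * Description (summarized from the test code below, not the upstream report):
+ * bootstrap() and manage() break out of their loops while still holding g.mu
+ * when g.closed is set (the Unlock on that path is missing). With closed
+ * already true, bootstrap() returns holding the mutex and the subsequent
+ * manage() call blocks forever trying to acquire it.
+ */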
+ +package main + +import ( + "os" + "runtime" + "runtime/pprof" + "sync" +) + +func init() { + register("Cockroach584", Cockroach584) +} + +type gossip_cockroach584 struct { + mu sync.Mutex // L1 + closed bool +} + +func (g *gossip_cockroach584) bootstrap() { + for { + g.mu.Lock() + if g.closed { + // Missing g.mu.Unlock + break + } + g.mu.Unlock() + } +} + +func (g *gossip_cockroach584) manage() { + for { + g.mu.Lock() + if g.closed { + // Missing g.mu.Unlock + break + } + g.mu.Unlock() + } +} + +func Cockroach584() { + prof := pprof.Lookup("goroutineleak") + defer func() { + for i := 0; i < yieldCount; i++ { + // Yield several times to allow the child goroutine to run. + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) + }() + + g := &gossip_cockroach584{ + closed: true, + } + go func() { // G1 + g.bootstrap() + g.manage() + }() +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach6181.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach6181.go new file mode 100644 index 00000000000000..80f1dd504daae5 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach6181.go @@ -0,0 +1,87 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. + +/* + * Project: cockroach + * Issue or PR : https://github.com/cockroachdb/cockroach/pull/6181 + * Buggy version: c0a232b5521565904b851699853bdbd0c670cf1e + * fix commit-id: d5814e4886a776bf7789b3c51b31f5206480d184 + * Flaky: 57/100 + */ +package main + +import ( + "io" + "os" + "runtime" + "runtime/pprof" + "sync" + "time" +) + +func init() { + register("Cockroach6181", Cockroach6181) +} + +type testDescriptorDB_cockroach6181 struct { + cache *rangeDescriptorCache_cockroach6181 +} + +func initTestDescriptorDB_cockroach6181() *testDescriptorDB_cockroach6181 { + return &testDescriptorDB_cockroach6181{&rangeDescriptorCache_cockroach6181{}} +} + +type rangeDescriptorCache_cockroach6181 struct { + rangeCacheMu sync.RWMutex +} + +func (rdc *rangeDescriptorCache_cockroach6181) LookupRangeDescriptor() { + rdc.rangeCacheMu.RLock() + runtime.Gosched() + io.Discard.Write([]byte(rdc.String())) + rdc.rangeCacheMu.RUnlock() + rdc.rangeCacheMu.Lock() + rdc.rangeCacheMu.Unlock() +} + +func (rdc *rangeDescriptorCache_cockroach6181) String() string { + rdc.rangeCacheMu.RLock() + defer rdc.rangeCacheMu.RUnlock() + return rdc.stringLocked() +} + +func (rdc *rangeDescriptorCache_cockroach6181) stringLocked() string { + return "something here" +} + +func doLookupWithToken_cockroach6181(rc *rangeDescriptorCache_cockroach6181) { + rc.LookupRangeDescriptor() +} + +func testRangeCacheCoalescedRequests_cockroach6181() { + db := initTestDescriptorDB_cockroach6181() + pauseLookupResumeAndAssert := func() { + var wg sync.WaitGroup + for i := 0; i < 3; i++ { + wg.Add(1) + go func() { // G2,G3,... 
+ doLookupWithToken_cockroach6181(db.cache) + wg.Done() + }() + } + wg.Wait() + } + pauseLookupResumeAndAssert() +} + +func Cockroach6181() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + for i := 0; i < 100; i++ { + go testRangeCacheCoalescedRequests_cockroach6181() // G1 + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach7504.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach7504.go new file mode 100644 index 00000000000000..945308a76f92b1 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach7504.go @@ -0,0 +1,183 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. + +/* + * Project: cockroach + * Issue or PR : https://github.com/cockroachdb/cockroach/pull/7504 + * Buggy version: bc963b438cdc3e0ad058a5282358e5aee0595e17 + * fix commit-id: cab761b9f5ee5dee1448bc5d6b1d9f5a0ff0bad5 + * Flaky: 1/100 + */ +package main + +import ( + "os" + "runtime" + "runtime/pprof" + "sync" + "time" +) + +func init() { + register("Cockroach7504", Cockroach7504) +} + +func MakeCacheKey_cockroach7504(lease *LeaseState_cockroach7504) int { + return lease.id +} + +type LeaseState_cockroach7504 struct { + mu sync.Mutex // L1 + id int +} +type LeaseSet_cockroach7504 struct { + data []*LeaseState_cockroach7504 +} + +func (l *LeaseSet_cockroach7504) find(id int) *LeaseState_cockroach7504 { + return l.data[id] +} + +func (l *LeaseSet_cockroach7504) remove(s *LeaseState_cockroach7504) { + for i := 0; i < len(l.data); i++ { + if s == l.data[i] { + l.data = append(l.data[:i], l.data[i+1:]...) + break + } + } +} + +type tableState_cockroach7504 struct { + tableNameCache *tableNameCache_cockroach7504 + mu sync.Mutex // L3 + active *LeaseSet_cockroach7504 +} + +func (t *tableState_cockroach7504) release(lease *LeaseState_cockroach7504) { + t.mu.Lock() // L3 + defer t.mu.Unlock() // L3 + + s := t.active.find(MakeCacheKey_cockroach7504(lease)) + s.mu.Lock() // L1 + runtime.Gosched() + defer s.mu.Unlock() // L1 + + t.removeLease(s) +} +func (t *tableState_cockroach7504) removeLease(lease *LeaseState_cockroach7504) { + t.active.remove(lease) + t.tableNameCache.remove(lease) // L1 acquire/release +} + +type tableNameCache_cockroach7504 struct { + mu sync.Mutex // L2 + tables map[int]*LeaseState_cockroach7504 +} + +func (c *tableNameCache_cockroach7504) get(id int) { + c.mu.Lock() // L2 + defer c.mu.Unlock() // L2 + lease, ok := c.tables[id] + if !ok { + return + } + if lease == nil { + panic("nil lease in name cache") + } + lease.mu.Lock() // L1 + defer lease.mu.Unlock() // L1 +} + +func (c *tableNameCache_cockroach7504) remove(lease *LeaseState_cockroach7504) { + c.mu.Lock() // L2 + runtime.Gosched() + defer c.mu.Unlock() // L2 + key := MakeCacheKey_cockroach7504(lease) + existing, ok := c.tables[key] + if !ok { + return + } + if existing == lease { + delete(c.tables, key) + } +} + +type LeaseManager_cockroach7504 struct { + _ [64]byte + tableNames *tableNameCache_cockroach7504 + tables map[int]*tableState_cockroach7504 +} + +func (m *LeaseManager_cockroach7504) AcquireByName(id int) { + m.tableNames.get(id) +} + +func (m *LeaseManager_cockroach7504) findTableState(lease *LeaseState_cockroach7504) *tableState_cockroach7504 { + existing, ok := m.tables[lease.id] + if !ok { + return nil + } + return existing +} + +func (m *LeaseManager_cockroach7504) Release(lease 
*LeaseState_cockroach7504) { + t := m.findTableState(lease) + t.release(lease) +} +func NewLeaseManager_cockroach7504(tname *tableNameCache_cockroach7504, ts *tableState_cockroach7504) *LeaseManager_cockroach7504 { + mgr := &LeaseManager_cockroach7504{ + tableNames: tname, + tables: make(map[int]*tableState_cockroach7504), + } + mgr.tables[0] = ts + return mgr +} +func NewLeaseSet_cockroach7504(n int) *LeaseSet_cockroach7504 { + lset := &LeaseSet_cockroach7504{} + for i := 0; i < n; i++ { + lease := new(LeaseState_cockroach7504) + lset.data = append(lset.data, lease) + } + return lset +} + +func Cockroach7504() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + for i := 0; i < 100; i++ { + go func() { + leaseNum := 2 + lset := NewLeaseSet_cockroach7504(leaseNum) + + nc := &tableNameCache_cockroach7504{ + tables: make(map[int]*LeaseState_cockroach7504), + } + for i := 0; i < leaseNum; i++ { + nc.tables[i] = lset.find(i) + } + + ts := &tableState_cockroach7504{ + tableNameCache: nc, + active: lset, + } + + mgr := NewLeaseManager_cockroach7504(nc, ts) + + // G1 + go func() { + // lock L2-L1 + mgr.AcquireByName(0) + }() + + // G2 + go func() { + // lock L1-L2 + mgr.Release(lset.find(0)) + }() + }() + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach9935.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach9935.go new file mode 100644 index 00000000000000..e143a6670d8ff2 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach9935.go @@ -0,0 +1,77 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. + +/* + * Project: cockroach + * Issue or PR : https://github.com/cockroachdb/cockroach/pull/9935 + * Buggy version: 4df302cc3f03328395dc3fefbfba58b7718e4f2f + * fix commit-id: ed6a100ba38dd51b0888b9a3d3ac6bdbb26c528c + * Flaky: 100/100 + * Description: This leak is caused by acquiring l.mu.Lock() twice. The fix is + * to release l.mu.Lock() before acquiring l.mu.Lock for the second time. 
+ */ +package main + +import ( + "errors" + "math/rand" + "os" + "runtime/pprof" + "sync" + "time" +) + +func init() { + register("Cockroach9935", Cockroach9935) +} + +type loggingT_cockroach9935 struct { + mu sync.Mutex +} + +func (l *loggingT_cockroach9935) outputLogEntry() { + l.mu.Lock() + if err := l.createFile(); err != nil { + l.exit(err) + } + l.mu.Unlock() +} + +func (l *loggingT_cockroach9935) createFile() error { + if rand.Intn(8)%4 > 0 { + return errors.New("") + } + return nil +} + +func (l *loggingT_cockroach9935) exit(err error) { + l.mu.Lock() // Blocked forever + defer l.mu.Unlock() +} + +// Example of goroutine leak trace: +// +// G1 +//---------------------------- +// l.outputLogEntry() +// l.mu.Lock() +// l.createFile() +// l.exit() +// l.mu.Lock() +//-----------G1 leaks--------- + +func Cockroach9935() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + + for i := 0; i < 100; i++ { + go func() { + l := &loggingT_cockroach9935{} + go l.outputLogEntry() // G1 + }() + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/etcd10492.go b/src/runtime/testdata/testgoroutineleakprofile/goker/etcd10492.go new file mode 100644 index 00000000000000..7d56642d5e9ed9 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/etcd10492.go @@ -0,0 +1,72 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. + +package main + +import ( + "context" + "os" + "runtime" + "runtime/pprof" + "sync" + "time" +) + +func init() { + register("Etcd10492", Etcd10492) +} + +type Checkpointer_etcd10492 func(ctx context.Context) + +type lessor_etcd10492 struct { + mu sync.RWMutex + cp Checkpointer_etcd10492 + checkpointInterval time.Duration +} + +func (le *lessor_etcd10492) Checkpoint() { + le.mu.Lock() // Lock acquired twice here + defer le.mu.Unlock() +} + +func (le *lessor_etcd10492) SetCheckpointer(cp Checkpointer_etcd10492) { + le.mu.Lock() + defer le.mu.Unlock() + + le.cp = cp +} + +func (le *lessor_etcd10492) Renew() { + le.mu.Lock() + unlock := func() { le.mu.Unlock() } + defer func() { unlock() }() + + if le.cp != nil { + le.cp(context.Background()) + } +} + +func Etcd10492() { + prof := pprof.Lookup("goroutineleak") + defer func() { + // Yield several times to allow the child goroutine to run. + for i := 0; i < yieldCount; i++ { + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) + }() + + go func() { // G1 + le := &lessor_etcd10492{ + checkpointInterval: 0, + } + fakerCheckerpointer_etcd10492 := func(ctx context.Context) { + le.Checkpoint() + } + le.SetCheckpointer(fakerCheckerpointer_etcd10492) + le.mu.Lock() + le.mu.Unlock() + le.Renew() + }() +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/etcd5509.go b/src/runtime/testdata/testgoroutineleakprofile/goker/etcd5509.go new file mode 100644 index 00000000000000..868e926e66949a --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/etcd5509.go @@ -0,0 +1,126 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
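+
+/*
+ * Project: etcd
+ * Issue or PR : etcd-io/etcd#5509
+ * Description (summarized from the test code below, not the upstream report):
+ * remoteClient.acquire() returns on the connection-closed path while still
+ * holding client.mu.RLock() (the RUnlock is missing), so the second
+ * c.mu.Lock() in Client.Close() blocks forever and the goroutine calling
+ * Close() leaks.
+ */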
+ +package main + +import ( + "context" + "io" + "os" + "runtime" + "runtime/pprof" + "sync" +) + +func init() { + register("Etcd5509", Etcd5509) +} + +var ErrConnClosed_etcd5509 error + +type Client_etcd5509 struct { + mu sync.RWMutex + ctx context.Context + cancel context.CancelFunc +} + +func (c *Client_etcd5509) Close() { + c.mu.Lock() + defer c.mu.Unlock() + if c.cancel == nil { + return + } + c.cancel() + c.cancel = nil + c.mu.Unlock() + c.mu.Lock() +} + +type remoteClient_etcd5509 struct { + client *Client_etcd5509 + mu sync.Mutex +} + +func (r *remoteClient_etcd5509) acquire(ctx context.Context) error { + for { + r.client.mu.RLock() + closed := r.client.cancel == nil + r.mu.Lock() + r.mu.Unlock() + if closed { + return ErrConnClosed_etcd5509 // Missing RUnlock before return + } + r.client.mu.RUnlock() + } +} + +type kv_etcd5509 struct { + rc *remoteClient_etcd5509 +} + +func (kv *kv_etcd5509) Get(ctx context.Context) error { + return kv.Do(ctx) +} + +func (kv *kv_etcd5509) Do(ctx context.Context) error { + for { + err := kv.do(ctx) + if err == nil { + return nil + } + return err + } +} + +func (kv *kv_etcd5509) do(ctx context.Context) error { + err := kv.getRemote(ctx) + return err +} + +func (kv *kv_etcd5509) getRemote(ctx context.Context) error { + return kv.rc.acquire(ctx) +} + +type KV interface { + Get(ctx context.Context) error + Do(ctx context.Context) error +} + +func NewKV_etcd5509(c *Client_etcd5509) KV { + return &kv_etcd5509{rc: &remoteClient_etcd5509{ + client: c, + }} +} + +func Etcd5509() { + prof := pprof.Lookup("goroutineleak") + defer func() { + // Yield several times to allow the child goroutine to run. + for i := 0; i < yieldCount; i++ { + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) + }() + + go func() { + ctx, _ := context.WithCancel(context.TODO()) + cli := &Client_etcd5509{ + ctx: ctx, + } + kv := NewKV_etcd5509(cli) + donec := make(chan struct{}) + go func() { + defer close(donec) + err := kv.Get(context.TODO()) + if err != nil && err != ErrConnClosed_etcd5509 { + io.Discard.Write([]byte("Expect ErrConnClosed")) + } + }() + + runtime.Gosched() + cli.Close() + + <-donec + }() +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/etcd6708.go b/src/runtime/testdata/testgoroutineleakprofile/goker/etcd6708.go new file mode 100644 index 00000000000000..afbbe35104bbb8 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/etcd6708.go @@ -0,0 +1,100 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
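+
+/*
+ * Project: etcd
+ * Issue or PR : etcd-io/etcd#6708
+ * Description (summarized from the test code below, not the upstream report):
+ * Sync() takes the write lock on the httpClusterClient and, in
+ * EndpointSelectionPrioritizeLeader mode, calls SetEndpoints() ->
+ * getLeaderEndpoint() -> Leader() -> Do(), which tries to RLock() the same
+ * RWMutex. The goroutine deadlocks on the lock it already holds.
+ */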
+ +package main + +import ( + "context" + "os" + "runtime" + "runtime/pprof" + "sync" +) + +func init() { + register("Etcd6708", Etcd6708) +} + +type EndpointSelectionMode_etcd6708 int + +const ( + EndpointSelectionRandom_etcd6708 EndpointSelectionMode_etcd6708 = iota + EndpointSelectionPrioritizeLeader_etcd6708 +) + +type MembersAPI_etcd6708 interface { + Leader(ctx context.Context) +} + +type Client_etcd6708 interface { + Sync(ctx context.Context) + SetEndpoints() + httpClient_etcd6708 +} + +type httpClient_etcd6708 interface { + Do(context.Context) +} + +type httpClusterClient_etcd6708 struct { + sync.RWMutex + selectionMode EndpointSelectionMode_etcd6708 +} + +func (c *httpClusterClient_etcd6708) getLeaderEndpoint() { + mAPI := NewMembersAPI_etcd6708(c) + mAPI.Leader(context.Background()) +} + +func (c *httpClusterClient_etcd6708) SetEndpoints() { + switch c.selectionMode { + case EndpointSelectionRandom_etcd6708: + case EndpointSelectionPrioritizeLeader_etcd6708: + c.getLeaderEndpoint() + } +} + +func (c *httpClusterClient_etcd6708) Do(ctx context.Context) { + c.RLock() + c.RUnlock() +} + +func (c *httpClusterClient_etcd6708) Sync(ctx context.Context) { + c.Lock() + defer c.Unlock() + + c.SetEndpoints() +} + +type httpMembersAPI_etcd6708 struct { + client httpClient_etcd6708 +} + +func (m *httpMembersAPI_etcd6708) Leader(ctx context.Context) { + m.client.Do(ctx) +} + +func NewMembersAPI_etcd6708(c Client_etcd6708) MembersAPI_etcd6708 { + return &httpMembersAPI_etcd6708{ + client: c, + } +} + +func Etcd6708() { + prof := pprof.Lookup("goroutineleak") + defer func() { + // Yield several times to allow the child goroutine to run. + for i := 0; i < yieldCount; i++ { + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) + }() + + go func() { + hc := &httpClusterClient_etcd6708{ + selectionMode: EndpointSelectionPrioritizeLeader_etcd6708, + } + hc.Sync(context.Background()) + }() +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/etcd6857.go b/src/runtime/testdata/testgoroutineleakprofile/goker/etcd6857.go new file mode 100644 index 00000000000000..0798ab23d3b588 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/etcd6857.go @@ -0,0 +1,81 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
+ +/* + * Project: etcd + * Issue or PR : https://github.com/etcd-io/etcd/pull/6857 + * Buggy version: 7c8f13aed7fe251e7066ed6fc1a090699c2cae0e + * fix commit-id: 7afc490c95789c408fbc256d8e790273d331c984 + * Flaky: 19/100 + */ +package main + +import ( + "os" + "runtime/pprof" + "time" +) + +func init() { + register("Etcd6857", Etcd6857) +} + +type Status_etcd6857 struct{} + +type node_etcd6857 struct { + status chan chan Status_etcd6857 + stop chan struct{} + done chan struct{} +} + +func (n *node_etcd6857) Status() Status_etcd6857 { + c := make(chan Status_etcd6857) + n.status <- c + return <-c +} + +func (n *node_etcd6857) run() { + for { + select { + case c := <-n.status: + c <- Status_etcd6857{} + case <-n.stop: + close(n.done) + return + } + } +} + +func (n *node_etcd6857) Stop() { + select { + case n.stop <- struct{}{}: + case <-n.done: + return + } + <-n.done +} + +func NewNode_etcd6857() *node_etcd6857 { + return &node_etcd6857{ + status: make(chan chan Status_etcd6857), + stop: make(chan struct{}), + done: make(chan struct{}), + } +} + +func Etcd6857() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + for i := 0; i <= 100; i++ { + go func() { + n := NewNode_etcd6857() + go n.run() // G1 + go n.Status() // G2 + go n.Stop() // G3 + }() + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/etcd6873.go b/src/runtime/testdata/testgoroutineleakprofile/goker/etcd6873.go new file mode 100644 index 00000000000000..1846d0f260c2f6 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/etcd6873.go @@ -0,0 +1,98 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
+ +/* + * Project: etcd + * Issue or PR : https://github.com/etcd-io/etcd/commit/7618fdd1d642e47cac70c03f637b0fd798a53a6e + * Buggy version: 377f19b0031f9c0aafe2aec28b6f9019311f52f9 + * fix commit-id: 7618fdd1d642e47cac70c03f637b0fd798a53a6e + * Flaky: 9/100 + */ +package main + +import ( + "os" + "runtime/pprof" + "sync" + "time" +) + +func init() { + register("Etcd6873", Etcd6873) +} + +type watchBroadcast_etcd6873 struct{} + +type watchBroadcasts_etcd6873 struct { + mu sync.Mutex + updatec chan *watchBroadcast_etcd6873 + donec chan struct{} +} + +func newWatchBroadcasts_etcd6873() *watchBroadcasts_etcd6873 { + wbs := &watchBroadcasts_etcd6873{ + updatec: make(chan *watchBroadcast_etcd6873, 1), + donec: make(chan struct{}), + } + go func() { // G2 + defer close(wbs.donec) + for wb := range wbs.updatec { + wbs.coalesce(wb) + } + }() + return wbs +} + +func (wbs *watchBroadcasts_etcd6873) coalesce(wb *watchBroadcast_etcd6873) { + wbs.mu.Lock() + wbs.mu.Unlock() +} + +func (wbs *watchBroadcasts_etcd6873) stop() { + wbs.mu.Lock() + defer wbs.mu.Unlock() + close(wbs.updatec) + <-wbs.donec +} + +func (wbs *watchBroadcasts_etcd6873) update(wb *watchBroadcast_etcd6873) { + select { + case wbs.updatec <- wb: + default: + } +} + +// Example of goroutine leak trace: +// +// G1 G2 G3 +//--------------------------------------------------------- +// newWatchBroadcasts() +// wbs.update() +// wbs.updatec <- +// return +// <-wbs.updatec +// wbs.coalesce() +// wbs.stop() +// wbs.mu.Lock() +// close(wbs.updatec) +// <-wbs.donec +// wbs.mu.Lock() +//---------------------G2,G3 leak------------------------- +// + +func Etcd6873() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + + for i := 0; i < 100; i++ { + go func() { + wbs := newWatchBroadcasts_etcd6873() // G1 + wbs.update(&watchBroadcast_etcd6873{}) + go wbs.stop() // G3 + }() + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/etcd7492.go b/src/runtime/testdata/testgoroutineleakprofile/goker/etcd7492.go new file mode 100644 index 00000000000000..3c8d58a221cac5 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/etcd7492.go @@ -0,0 +1,163 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
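+
+// Leak sketch (illustrative): assignSimpleTokenToUser holds simpleTokensMu while
+// sending on the keeper's addSimpleTokenCh (capacity 1). If the keeper goroutine
+// is meanwhile blocked in deleteTokenFunc waiting for simpleTokensMu, the channel
+// is never drained: the Authenticate goroutines block on the send, the keeper
+// blocks on the mutex, and the later stop() blocks as well.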
+ +/* + * Project: etcd + * Issue or PR : https://github.com/etcd-io/etcd/pull/7492 + * Buggy version: 51939650057d602bb5ab090633138fffe36854dc + * fix commit-id: 1b1fabef8ffec606909f01c3983300fff539f214 + * Flaky: 40/100 + */ +package main + +import ( + "os" + "runtime" + "runtime/pprof" + "sync" + "time" +) + +func init() { + register("Etcd7492", Etcd7492) +} + +type TokenProvider_etcd7492 interface { + assign() + enable() + disable() +} + +type simpleTokenTTLKeeper_etcd7492 struct { + tokens map[string]time.Time + addSimpleTokenCh chan struct{} + stopCh chan chan struct{} + deleteTokenFunc func(string) +} + +type authStore_etcd7492 struct { + tokenProvider TokenProvider_etcd7492 +} + +func (as *authStore_etcd7492) Authenticate() { + as.tokenProvider.assign() +} + +func NewSimpleTokenTTLKeeper_etcd7492(deletefunc func(string)) *simpleTokenTTLKeeper_etcd7492 { + stk := &simpleTokenTTLKeeper_etcd7492{ + tokens: make(map[string]time.Time), + addSimpleTokenCh: make(chan struct{}, 1), + stopCh: make(chan chan struct{}), + deleteTokenFunc: deletefunc, + } + go stk.run() // G1 + return stk +} + +func (tm *simpleTokenTTLKeeper_etcd7492) run() { + tokenTicker := time.NewTicker(time.Nanosecond) + defer tokenTicker.Stop() + for { + select { + case <-tm.addSimpleTokenCh: + runtime.Gosched() + /// Make tm.tokens not empty is enough + tm.tokens["1"] = time.Now() + case <-tokenTicker.C: + runtime.Gosched() + for t, _ := range tm.tokens { + tm.deleteTokenFunc(t) + delete(tm.tokens, t) + } + case waitCh := <-tm.stopCh: + waitCh <- struct{}{} + return + } + } +} + +func (tm *simpleTokenTTLKeeper_etcd7492) addSimpleToken() { + tm.addSimpleTokenCh <- struct{}{} + runtime.Gosched() +} + +func (tm *simpleTokenTTLKeeper_etcd7492) stop() { + waitCh := make(chan struct{}) + tm.stopCh <- waitCh + <-waitCh + close(tm.stopCh) +} + +type tokenSimple_etcd7492 struct { + simpleTokenKeeper *simpleTokenTTLKeeper_etcd7492 + simpleTokensMu sync.RWMutex +} + +func (t *tokenSimple_etcd7492) assign() { + t.assignSimpleTokenToUser() +} + +func (t *tokenSimple_etcd7492) assignSimpleTokenToUser() { + t.simpleTokensMu.Lock() + runtime.Gosched() + t.simpleTokenKeeper.addSimpleToken() + t.simpleTokensMu.Unlock() +} +func newDeleterFunc(t *tokenSimple_etcd7492) func(string) { + return func(tk string) { + t.simpleTokensMu.Lock() + defer t.simpleTokensMu.Unlock() + } +} + +func (t *tokenSimple_etcd7492) enable() { + t.simpleTokenKeeper = NewSimpleTokenTTLKeeper_etcd7492(newDeleterFunc(t)) +} + +func (t *tokenSimple_etcd7492) disable() { + if t.simpleTokenKeeper != nil { + t.simpleTokenKeeper.stop() + t.simpleTokenKeeper = nil + } + t.simpleTokensMu.Lock() + t.simpleTokensMu.Unlock() +} + +func newTokenProviderSimple_etcd7492() *tokenSimple_etcd7492 { + return &tokenSimple_etcd7492{} +} + +func setupAuthStore_etcd7492() (store *authStore_etcd7492, teardownfunc func()) { + as := &authStore_etcd7492{ + tokenProvider: newTokenProviderSimple_etcd7492(), + } + as.tokenProvider.enable() + tearDown := func() { + as.tokenProvider.disable() + } + return as, tearDown +} + +func Etcd7492() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + for i := 0; i < 100; i++ { + go func() { + as, tearDown := setupAuthStore_etcd7492() + defer tearDown() + var wg sync.WaitGroup + wg.Add(3) + for i := 0; i < 3; i++ { + go func() { // G2 + as.Authenticate() + defer wg.Done() + }() + } + wg.Wait() + }() + } +} diff --git 
a/src/runtime/testdata/testgoroutineleakprofile/goker/etcd7902.go b/src/runtime/testdata/testgoroutineleakprofile/goker/etcd7902.go new file mode 100644 index 00000000000000..0a96d7f0472d9e --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/etcd7902.go @@ -0,0 +1,87 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. + +/* + * Project: etcd + * Issue or PR : https://github.com/coreos/etcd/pull/7902 + * Buggy version: dfdaf082c51ba14861267f632f6af795a27eb4ef + * fix commit-id: 87d99fe0387ee1df1cf1811d88d37331939ef4ae + * Flaky: 100/100 + */ +package main + +import ( + "os" + "runtime/pprof" + "sync" + "time" +) + +func init() { + register("Etcd7902", Etcd7902) +} + +type roundClient_etcd7902 struct { + progress int + acquire func() + validate func() + release func() +} + +func runElectionFunc_etcd7902() { + rcs := make([]roundClient_etcd7902, 3) + nextc := make(chan bool) + for i := range rcs { + var rcNextc chan bool + setRcNextc := func() { + rcNextc = nextc + } + rcs[i].acquire = func() {} + rcs[i].validate = func() { + setRcNextc() + } + rcs[i].release = func() { + if i == 0 { // Assume the first roundClient is the leader + close(nextc) + nextc = make(chan bool) + } + <-rcNextc // Follower is blocking here + } + } + doRounds_etcd7902(rcs, 100) +} + +func doRounds_etcd7902(rcs []roundClient_etcd7902, rounds int) { + var mu sync.Mutex + var wg sync.WaitGroup + wg.Add(len(rcs)) + for i := range rcs { + go func(rc *roundClient_etcd7902) { // G2,G3 + defer wg.Done() + for rc.progress < rounds || rounds <= 0 { + rc.acquire() + mu.Lock() + rc.validate() + mu.Unlock() + time.Sleep(10 * time.Millisecond) + rc.progress++ + mu.Lock() + rc.release() + mu.Unlock() + } + }(&rcs[i]) + } + wg.Wait() +} + +func Etcd7902() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + for i := 0; i < 100; i++ { + go runElectionFunc_etcd7902() // G1 + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/grpc1275.go b/src/runtime/testdata/testgoroutineleakprofile/goker/grpc1275.go new file mode 100644 index 00000000000000..ec5491e438f74a --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/grpc1275.go @@ -0,0 +1,111 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
+ +/* + * Project: grpc-go + * Issue or PR : https://github.com/grpc/grpc-go/pull/1275 + * Buggy version: (missing) + * fix commit-id: 0669f3f89e0330e94bb13fa1ce8cc704aab50c9c + * Flaky: 100/100 + */ +package main + +import ( + "io" + "os" + "runtime/pprof" + "time" +) + +func init() { + register("Grpc1275", Grpc1275) +} + +type recvBuffer_grpc1275 struct { + c chan bool +} + +func (b *recvBuffer_grpc1275) get() <-chan bool { + return b.c +} + +type recvBufferReader_grpc1275 struct { + recv *recvBuffer_grpc1275 +} + +func (r *recvBufferReader_grpc1275) Read(p []byte) (int, error) { + select { + case <-r.recv.get(): + } + return 0, nil +} + +type Stream_grpc1275 struct { + trReader io.Reader +} + +func (s *Stream_grpc1275) Read(p []byte) (int, error) { + return io.ReadFull(s.trReader, p) +} + +type http2Client_grpc1275 struct{} + +func (t *http2Client_grpc1275) CloseStream(s *Stream_grpc1275) { + // It is the client.CloseSream() method called by the + // main goroutine that should send the message, but it + // is not. The patch is to send out this message. +} + +func (t *http2Client_grpc1275) NewStream() *Stream_grpc1275 { + return &Stream_grpc1275{ + trReader: &recvBufferReader_grpc1275{ + recv: &recvBuffer_grpc1275{ + c: make(chan bool), + }, + }, + } +} + +func testInflightStreamClosing_grpc1275() { + client := &http2Client_grpc1275{} + stream := client.NewStream() + donec := make(chan bool) + go func() { // G2 + defer close(donec) + stream.Read([]byte{1}) + }() + + client.CloseStream(stream) + + timeout := time.NewTimer(300 * time.Nanosecond) + select { + case <-donec: + if !timeout.Stop() { + <-timeout.C + } + case <-timeout.C: + } +} + +/// +/// G1 G2 +/// testInflightStreamClosing() +/// stream.Read() +/// io.ReadFull() +/// <- r.recv.get() +/// CloseStream() +/// <- donec +/// ------------G1 timeout, G2 leak--------------------- +/// + +func Grpc1275() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + go func() { + testInflightStreamClosing_grpc1275() // G1 + }() +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/grpc1424.go b/src/runtime/testdata/testgoroutineleakprofile/goker/grpc1424.go new file mode 100644 index 00000000000000..777534a788163c --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/grpc1424.go @@ -0,0 +1,105 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
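+
+// Leak sketch (illustrative): lbWatcher is handed doneChan but never sends on or
+// closes it; once the balancer's addrCh is closed, lbWatcher simply returns, so
+// the goroutine spawned in DialContext stays blocked on <-doneChan forever.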
+ +/* + * Project: grpc-go + * Issue or PR : https://github.com/grpc/grpc-go/pull/1424 + * Buggy version: 39c8c3866d926d95e11c03508bf83d00f2963f91 + * fix commit-id: 64bd0b04a7bb1982078bae6a2ab34c226125fbc1 + * Flaky: 100/100 + */ +package main + +import ( + "os" + "runtime/pprof" + "sync" + "time" +) + +func init() { + register("Grpc1424", Grpc1424) +} + +type Balancer_grpc1424 interface { + Notify() <-chan bool +} + +type roundRobin_grpc1424 struct { + mu sync.Mutex + addrCh chan bool +} + +func (rr *roundRobin_grpc1424) Notify() <-chan bool { + return rr.addrCh +} + +type addrConn_grpc1424 struct { + mu sync.Mutex +} + +func (ac *addrConn_grpc1424) tearDown() { + ac.mu.Lock() + defer ac.mu.Unlock() +} + +type dialOption_grpc1424 struct { + balancer Balancer_grpc1424 +} + +type ClientConn_grpc1424 struct { + dopts dialOption_grpc1424 + conns []*addrConn_grpc1424 +} + +func (cc *ClientConn_grpc1424) lbWatcher(doneChan chan bool) { + for addr := range cc.dopts.balancer.Notify() { + if addr { + // nop, make compiler happy + } + var ( + del []*addrConn_grpc1424 + ) + for _, a := range cc.conns { + del = append(del, a) + } + for _, c := range del { + c.tearDown() + } + } +} + +func NewClientConn_grpc1424() *ClientConn_grpc1424 { + cc := &ClientConn_grpc1424{ + dopts: dialOption_grpc1424{ + &roundRobin_grpc1424{addrCh: make(chan bool)}, + }, + } + return cc +} + +func DialContext_grpc1424() { + cc := NewClientConn_grpc1424() + waitC := make(chan error, 1) + go func() { // G2 + defer close(waitC) + ch := cc.dopts.balancer.Notify() + if ch != nil { + doneChan := make(chan bool) + go cc.lbWatcher(doneChan) // G3 + <-doneChan + } + }() + /// close addrCh + close(cc.dopts.balancer.(*roundRobin_grpc1424).addrCh) +} + +func Grpc1424() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + go DialContext_grpc1424() // G1 +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/grpc1460.go b/src/runtime/testdata/testgoroutineleakprofile/goker/grpc1460.go new file mode 100644 index 00000000000000..bc658b408d8c6b --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/grpc1460.go @@ -0,0 +1,84 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
+ +/* + * Project: grpc + * Issue or PR : https://github.com/grpc/grpc-go/pull/1460 + * Buggy version: 7db1564ba1229bc42919bb1f6d9c4186f3aa8678 + * fix commit-id: e605a1ecf24b634f94f4eefdab10a9ada98b70dd + * Flaky: 100/100 + */ +package main + +import ( + "os" + "runtime" + "runtime/pprof" + "sync" + "time" +) + +func init() { + register("Grpc1460", Grpc1460) +} + +type Stream_grpc1460 struct{} + +type http2Client_grpc1460 struct { + mu sync.Mutex + awakenKeepalive chan struct{} + activeStream []*Stream_grpc1460 +} + +func (t *http2Client_grpc1460) keepalive() { + t.mu.Lock() + if len(t.activeStream) < 1 { + <-t.awakenKeepalive + runtime.Gosched() + t.mu.Unlock() + } else { + t.mu.Unlock() + } +} + +func (t *http2Client_grpc1460) NewStream() { + t.mu.Lock() + runtime.Gosched() + t.activeStream = append(t.activeStream, &Stream_grpc1460{}) + if len(t.activeStream) == 1 { + select { + case t.awakenKeepalive <- struct{}{}: + default: + } + } + t.mu.Unlock() +} + +/// +/// G1 G2 +/// client.keepalive() +/// client.NewStream() +/// t.mu.Lock() +/// <-t.awakenKeepalive +/// t.mu.Lock() +/// ---------------G1, G2 deadlock-------------- +/// + +func Grpc1460() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + + for i := 0; i < 1000; i++ { + go func() { + client := &http2Client_grpc1460{ + awakenKeepalive: make(chan struct{}), + } + go client.keepalive() //G1 + go client.NewStream() //G2 + }() + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/grpc3017.go b/src/runtime/testdata/testgoroutineleakprofile/goker/grpc3017.go new file mode 100644 index 00000000000000..0523b9509fdcbe --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/grpc3017.go @@ -0,0 +1,123 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. + +package main + +import ( + "os" + "runtime" + "runtime/pprof" + "sync" + "time" +) + +// This test case is a reproduction of grpc/3017. +// +// It is a goroutine leak that also simultaneously engages many GC assists. +// Testing runtime behaviour when pivoting between regular and goroutine leak detection modes. 
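+//
+// Leak sketch (illustrative): the time.AfterFunc callback in RemoveSubConn can
+// return while still holding ccc.mu when entry.abortDeleting is set (see the
+// "Missing unlock" comment below). Subsequent NewSubConn/RemoveSubConn calls
+// then block on ccc.mu, so G2 never closes done and G1 leaks on <-done.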
+ +func init() { + register("Grpc3017", Grpc3017) +} + +type Address_grpc3017 int +type SubConn_grpc3017 int + +type subConnCacheEntry_grpc3017 struct { + sc SubConn_grpc3017 + cancel func() + abortDeleting bool +} + +type lbCacheClientConn_grpc3017 struct { + mu sync.Mutex // L1 + timeout time.Duration + subConnCache map[Address_grpc3017]*subConnCacheEntry_grpc3017 + subConnToAddr map[SubConn_grpc3017]Address_grpc3017 +} + +func (ccc *lbCacheClientConn_grpc3017) NewSubConn(addrs []Address_grpc3017) SubConn_grpc3017 { + if len(addrs) != 1 { + return SubConn_grpc3017(1) + } + addrWithoutMD := addrs[0] + ccc.mu.Lock() // L1 + defer ccc.mu.Unlock() + if entry, ok := ccc.subConnCache[addrWithoutMD]; ok { + entry.cancel() + delete(ccc.subConnCache, addrWithoutMD) + return entry.sc + } + scNew := SubConn_grpc3017(1) + ccc.subConnToAddr[scNew] = addrWithoutMD + return scNew +} + +func (ccc *lbCacheClientConn_grpc3017) RemoveSubConn(sc SubConn_grpc3017) { + ccc.mu.Lock() // L1 + defer ccc.mu.Unlock() + addr, ok := ccc.subConnToAddr[sc] + if !ok { + return + } + + if entry, ok := ccc.subConnCache[addr]; ok { + if entry.sc != sc { + delete(ccc.subConnToAddr, sc) + } + return + } + + entry := &subConnCacheEntry_grpc3017{ + sc: sc, + } + ccc.subConnCache[addr] = entry + + timer := time.AfterFunc(ccc.timeout, func() { // G3 + runtime.Gosched() + ccc.mu.Lock() // L1 + if entry.abortDeleting { + return // Missing unlock + } + delete(ccc.subConnToAddr, sc) + delete(ccc.subConnCache, addr) + ccc.mu.Unlock() + }) + + entry.cancel = func() { + if !timer.Stop() { + entry.abortDeleting = true + } + } +} + +func Grpc3017() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + + for i := 0; i < 100; i++ { + go func() { //G1 + done := make(chan struct{}) + + ccc := &lbCacheClientConn_grpc3017{ + timeout: time.Nanosecond, + subConnCache: make(map[Address_grpc3017]*subConnCacheEntry_grpc3017), + subConnToAddr: make(map[SubConn_grpc3017]Address_grpc3017), + } + + sc := ccc.NewSubConn([]Address_grpc3017{Address_grpc3017(1)}) + go func() { // G2 + for i := 0; i < 10000; i++ { + ccc.RemoveSubConn(sc) + sc = ccc.NewSubConn([]Address_grpc3017{Address_grpc3017(1)}) + } + close(done) + }() + <-done + }() + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/grpc660.go b/src/runtime/testdata/testgoroutineleakprofile/goker/grpc660.go new file mode 100644 index 00000000000000..5f6201ec8062d9 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/grpc660.go @@ -0,0 +1,65 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
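+
+// Leak sketch (illustrative): doCloseLoopUnary spawns a sender on the unbuffered
+// done channel each iteration. If the select picks <-bc.stop and the loop
+// returns before that send is received, the sender goroutine blocks forever on
+// done <-.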
+ +/* + * Project: grpc-go + * Issue or PR : https://github.com/grpc/grpc-go/pull/660 + * Buggy version: db85417dd0de6cc6f583672c6175a7237e5b5dd2 + * fix commit-id: ceacfbcbc1514e4e677932fd55938ac455d182fb + * Flaky: 100/100 + */ +package main + +import ( + "math/rand" + "os" + "runtime" + "runtime/pprof" +) + +func init() { + register("Grpc660", Grpc660) +} + +type benchmarkClient_grpc660 struct { + stop chan bool +} + +func (bc *benchmarkClient_grpc660) doCloseLoopUnary() { + for { + done := make(chan bool) + go func() { // G2 + if rand.Intn(10) > 7 { + done <- false + return + } + done <- true + }() + select { + case <-bc.stop: + return + case <-done: + } + } +} + +func Grpc660() { + prof := pprof.Lookup("goroutineleak") + defer func() { + // Yield several times to allow the child goroutine to run. + for i := 0; i < yieldCount; i++ { + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) + }() + go func() { + bc := &benchmarkClient_grpc660{ + stop: make(chan bool), + } + go bc.doCloseLoopUnary() // G1 + go func() { // helper goroutine + bc.stop <- true + }() + }() +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/grpc795.go b/src/runtime/testdata/testgoroutineleakprofile/goker/grpc795.go new file mode 100644 index 00000000000000..72005cc844e225 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/grpc795.go @@ -0,0 +1,74 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. + +package main + +import ( + "os" + "runtime/pprof" + "sync" + "time" +) + +func init() { + register("Grpc795", Grpc795) +} + +type Server_grpc795 struct { + mu sync.Mutex + drain bool +} + +func (s *Server_grpc795) GracefulStop() { + s.mu.Lock() + if s.drain { + s.mu.Lock() + return + } + s.drain = true + s.mu.Unlock() +} +func (s *Server_grpc795) Serve() { + s.mu.Lock() + s.mu.Unlock() +} + +func NewServer_grpc795() *Server_grpc795 { + return &Server_grpc795{} +} + +type test_grpc795 struct { + srv *Server_grpc795 +} + +func (te *test_grpc795) startServer() { + s := NewServer_grpc795() + te.srv = s + go s.Serve() +} + +func newTest_grpc795() *test_grpc795 { + return &test_grpc795{} +} + +func testServerGracefulStopIdempotent_grpc795() { + te := newTest_grpc795() + + te.startServer() + + for i := 0; i < 3; i++ { + te.srv.GracefulStop() + } +} + +func Grpc795() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + for i := 0; i < 100; i++ { + go testServerGracefulStopIdempotent_grpc795() + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/grpc862.go b/src/runtime/testdata/testgoroutineleakprofile/goker/grpc862.go new file mode 100644 index 00000000000000..188b3b88ba5f8a --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/grpc862.go @@ -0,0 +1,105 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
+ +/* + * Project: grpc-go + * Issue or PR : https://github.com/grpc/grpc-go/pull/862 + * Buggy version: d8f4ebe77f6b7b6403d7f98626de8a534f9b93a7 + * fix commit-id: dd5645bebff44f6b88780bb949022a09eadd7dae + * Flaky: 100/100 + */ +package main + +import ( + "context" + "os" + "runtime" + "runtime/pprof" + "time" +) + +func init() { + register("Grpc862", Grpc862) +} + +type ClientConn_grpc862 struct { + ctx context.Context + cancel context.CancelFunc + conns []*addrConn_grpc862 +} + +func (cc *ClientConn_grpc862) Close() { + cc.cancel() + conns := cc.conns + cc.conns = nil + for _, ac := range conns { + ac.tearDown() + } +} + +func (cc *ClientConn_grpc862) resetAddrConn() { + ac := &addrConn_grpc862{ + cc: cc, + } + cc.conns = append(cc.conns, ac) + ac.ctx, ac.cancel = context.WithCancel(cc.ctx) + ac.resetTransport() +} + +type addrConn_grpc862 struct { + cc *ClientConn_grpc862 + ctx context.Context + cancel context.CancelFunc +} + +func (ac *addrConn_grpc862) resetTransport() { + for retries := 1; ; retries++ { + _ = 2 * time.Nanosecond * time.Duration(retries) + timeout := 10 * time.Nanosecond + _, cancel := context.WithTimeout(ac.ctx, timeout) + _ = time.Now() + cancel() + <-ac.ctx.Done() + return + } +} + +func (ac *addrConn_grpc862) tearDown() { + ac.cancel() +} + +func DialContext_grpc862(ctx context.Context) (conn *ClientConn_grpc862) { + cc := &ClientConn_grpc862{} + cc.ctx, cc.cancel = context.WithCancel(context.Background()) + defer func() { + select { + case <-ctx.Done(): + if conn != nil { + conn.Close() + } + conn = nil + default: + } + }() + go func() { // G2 + cc.resetAddrConn() + }() + return conn +} + +func Grpc862() { + prof := pprof.Lookup("goroutineleak") + defer func() { + // Yield several times to allow the child goroutine to run. + for i := 0; i < yieldCount; i++ { + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) + }() + go func() { + ctx, cancel := context.WithCancel(context.Background()) + go DialContext_grpc862(ctx) // G1 + go cancel() // helper goroutine + }() +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/hugo3251.go b/src/runtime/testdata/testgoroutineleakprofile/goker/hugo3251.go new file mode 100644 index 00000000000000..3804692a8bf6bd --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/hugo3251.go @@ -0,0 +1,81 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
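+
+// Leak sketch, in the style of the trace comments used elsewhere in this package
+// (goroutine labels illustrative): one goroutine returns from URLLock holding
+// only the per-URL mutex (L2); a second goroutine then enters URLLock, takes the
+// outer lock (L1) and blocks on L2; the first goroutine's URLUnlock in turn
+// blocks on RLock(L1). Neither lock is ever released, and both goroutines leak.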
+ +package main + +import ( + "fmt" + "os" + "runtime" + "runtime/pprof" + "sync" + "time" +) + +func init() { + register("Hugo3251", Hugo3251) +} + +type remoteLock_hugo3251 struct { + sync.RWMutex // L1 + m map[string]*sync.Mutex // L2 +} + +func (l *remoteLock_hugo3251) URLLock(url string) { + l.Lock() // L1 + if _, ok := l.m[url]; !ok { + l.m[url] = &sync.Mutex{} + } + l.m[url].Lock() // L2 + runtime.Gosched() + l.Unlock() // L1 + // runtime.Gosched() +} + +func (l *remoteLock_hugo3251) URLUnlock(url string) { + l.RLock() // L1 + defer l.RUnlock() // L1 + if um, ok := l.m[url]; ok { + um.Unlock() // L2 + } +} + +func resGetRemote_hugo3251(remoteURLLock *remoteLock_hugo3251, url string) error { + remoteURLLock.URLLock(url) + defer func() { remoteURLLock.URLUnlock(url) }() + + return nil +} + +func Hugo3251() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(time.Second) + prof.WriteTo(os.Stdout, 2) + }() + + for i := 0; i < 11; i++ { + go func() { // G1 + url := "http://Foo.Bar/foo_Bar-Foo" + remoteURLLock := &remoteLock_hugo3251{m: make(map[string]*sync.Mutex)} + for range []bool{false, true} { + var wg sync.WaitGroup + for i := 0; i < 100; i++ { + wg.Add(1) + go func(gor int) { // G2 + defer wg.Done() + for j := 0; j < 200; j++ { + err := resGetRemote_hugo3251(remoteURLLock, url) + if err != nil { + fmt.Errorf("Error getting resource content: %s", err) + } + time.Sleep(300 * time.Nanosecond) + } + }(i) + } + wg.Wait() + } + }() + } +} \ No newline at end of file diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/hugo5379.go b/src/runtime/testdata/testgoroutineleakprofile/goker/hugo5379.go new file mode 100644 index 00000000000000..6a1bbe9a3f7767 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/hugo5379.go @@ -0,0 +1,317 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. + +package main + +import ( + "context" + "log" + "os" + "runtime/pprof" + "sync" + "time" +) + +func init() { + register("Hugo5379", Hugo5379) +} + +type shortcodeHandler_hugo5379 struct { + p *PageWithoutContent_hugo5379 + contentShortcodes map[int]func() error + contentShortcodesDelta map[int]func() error + init sync.Once // O1 +} + +func (s *shortcodeHandler_hugo5379) executeShortcodesForDelta(p *PageWithoutContent_hugo5379) error { + for k, _ := range s.contentShortcodesDelta { + render := s.contentShortcodesDelta[k] + if err := render(); err != nil { + continue + } + } + return nil +} + +func (s *shortcodeHandler_hugo5379) updateDelta() { + s.init.Do(func() { + s.contentShortcodes = createShortcodeRenderers_hugo5379(s.p.withoutContent()) + }) + + delta := make(map[int]func() error) + + for k, v := range s.contentShortcodes { + if _, ok := delta[k]; !ok { + delta[k] = v + } + } + + s.contentShortcodesDelta = delta +} + +type Page_hugo5379 struct { + *pageInit_hugo5379 + *pageContentInit_hugo5379 + pageWithoutContent *PageWithoutContent_hugo5379 + contentInit sync.Once // O2 + contentInitMu sync.Mutex // L1 + shortcodeState *shortcodeHandler_hugo5379 +} + +func (p *Page_hugo5379) WordCount() { + p.initContentPlainAndMeta() +} + +func (p *Page_hugo5379) initContentPlainAndMeta() { + p.initContent() + p.initPlain(true) +} + +func (p *Page_hugo5379) initPlain(lock bool) { + p.plainInit.Do(func() { + if lock { + /// Double locking here. 
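+ /// (Sketch, added for clarity: the goroutine spawned in initContent already
+ /// holds contentInitMu while prepareForRender may re-enter
+ /// WordCount -> initContentPlainAndMeta -> initPlain, so this second Lock can
+ /// never be acquired and that goroutine leaks after the context times out.)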
+ p.contentInitMu.Lock() + defer p.contentInitMu.Unlock() + } + }) +} + +func (p *Page_hugo5379) withoutContent() *PageWithoutContent_hugo5379 { + p.pageInit_hugo5379.withoutContentInit.Do(func() { + p.pageWithoutContent = &PageWithoutContent_hugo5379{Page_hugo5379: p} + }) + return p.pageWithoutContent +} + +func (p *Page_hugo5379) prepareForRender() error { + var err error + if err = handleShortcodes_hugo5379(p.withoutContent()); err != nil { + return err + } + return nil +} + +func (p *Page_hugo5379) setContentInit() { + p.shortcodeState.updateDelta() +} + +func (p *Page_hugo5379) initContent() { + p.contentInit.Do(func() { + ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) + defer cancel() + c := make(chan error, 1) + + go func() { // G2 + var err error + p.contentInitMu.Lock() // first lock here + defer p.contentInitMu.Unlock() + + err = p.prepareForRender() + if err != nil { + c <- err + return + } + c <- err + }() + + select { + case <-ctx.Done(): + case <-c: + } + }) +} + +type PageWithoutContent_hugo5379 struct { + *Page_hugo5379 +} + +type pageInit_hugo5379 struct { + withoutContentInit sync.Once +} + +type pageContentInit_hugo5379 struct { + contentInit sync.Once // O3 + plainInit sync.Once // O4 +} + +type HugoSites_hugo5379 struct { + Sites []*Site_hugo5379 +} + +func (h *HugoSites_hugo5379) render() { + for _, s := range h.Sites { + for _, s2 := range h.Sites { + s2.preparePagesForRender() + } + s.renderPages() + } +} + +func (h *HugoSites_hugo5379) Build() { + h.render() +} + +type Pages_hugo5379 []*Page_hugo5379 + +type PageCollections_hugo5379 struct { + Pages Pages_hugo5379 +} + +type Site_hugo5379 struct { + *PageCollections_hugo5379 +} + +func (s *Site_hugo5379) preparePagesForRender() { + for _, p := range s.Pages { + p.setContentInit() + } +} + +func (s *Site_hugo5379) renderForLayouts() { + /// Omit reflections + for _, p := range s.Pages { + p.WordCount() + } +} + +func (s *Site_hugo5379) renderAndWritePage() { + s.renderForLayouts() +} + +func (s *Site_hugo5379) renderPages() { + numWorkers := 2 + wg := &sync.WaitGroup{} + + for i := 0; i < numWorkers; i++ { + wg.Add(1) + go pageRenderer_hugo5379(s, wg) // G3 + } + + wg.Wait() +} + +type sitesBuilder_hugo5379 struct { + H *HugoSites_hugo5379 +} + +func (s *sitesBuilder_hugo5379) Build() *sitesBuilder_hugo5379 { + return s.build() +} + +func (s *sitesBuilder_hugo5379) build() *sitesBuilder_hugo5379 { + s.H.Build() + return s +} + +func (s *sitesBuilder_hugo5379) CreateSitesE() error { + sites, err := NewHugoSites_hugo5379() + if err != nil { + return err + } + s.H = sites + return nil +} + +func (s *sitesBuilder_hugo5379) CreateSites() *sitesBuilder_hugo5379 { + if err := s.CreateSitesE(); err != nil { + log.Fatalf("Failed to create sites: %s", err) + } + return s +} + +func newHugoSites_hugo5379(sites ...*Site_hugo5379) (*HugoSites_hugo5379, error) { + h := &HugoSites_hugo5379{Sites: sites} + return h, nil +} + +func newSite_hugo5379() *Site_hugo5379 { + c := &PageCollections_hugo5379{} + s := &Site_hugo5379{ + PageCollections_hugo5379: c, + } + return s +} + +func createSitesFromConfig_hugo5379() []*Site_hugo5379 { + var ( + sites []*Site_hugo5379 + ) + + var s *Site_hugo5379 = newSite_hugo5379() + sites = append(sites, s) + return sites +} + +func NewHugoSites_hugo5379() (*HugoSites_hugo5379, error) { + sites := createSitesFromConfig_hugo5379() + return newHugoSites_hugo5379(sites...) 
+} + +func prepareShortcodeForPage_hugo5379(p *PageWithoutContent_hugo5379) map[int]func() error { + m := make(map[int]func() error) + m[0] = func() error { + return renderShortcode_hugo5379(p) + } + return m +} + +func renderShortcode_hugo5379(p *PageWithoutContent_hugo5379) error { + return renderShortcodeWithPage_hugo5379(p) +} + +func renderShortcodeWithPage_hugo5379(p *PageWithoutContent_hugo5379) error { + /// Omit reflections + p.WordCount() + return nil +} + +func createShortcodeRenderers_hugo5379(p *PageWithoutContent_hugo5379) map[int]func() error { + return prepareShortcodeForPage_hugo5379(p) +} + +func newShortcodeHandler_hugo5379(p *Page_hugo5379) *shortcodeHandler_hugo5379 { + return &shortcodeHandler_hugo5379{ + p: p.withoutContent(), + contentShortcodes: make(map[int]func() error), + contentShortcodesDelta: make(map[int]func() error), + } +} + +func handleShortcodes_hugo5379(p *PageWithoutContent_hugo5379) error { + return p.shortcodeState.executeShortcodesForDelta(p) +} + +func pageRenderer_hugo5379(s *Site_hugo5379, wg *sync.WaitGroup) { + defer wg.Done() + s.renderAndWritePage() +} + +func Hugo5379() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + + for i := 0; i < 100; i++ { + go func() { // G1 + b := &sitesBuilder_hugo5379{} + s := b.CreateSites() + for _, site := range s.H.Sites { + p := &Page_hugo5379{ + pageInit_hugo5379: &pageInit_hugo5379{}, + pageContentInit_hugo5379: &pageContentInit_hugo5379{}, + pageWithoutContent: &PageWithoutContent_hugo5379{}, + contentInit: sync.Once{}, + contentInitMu: sync.Mutex{}, + shortcodeState: nil, + } + p.shortcodeState = newShortcodeHandler_hugo5379(p) + site.Pages = append(site.Pages, p) + } + s.Build() + }() + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/istio16224.go b/src/runtime/testdata/testgoroutineleakprofile/goker/istio16224.go new file mode 100644 index 00000000000000..839051cc64a18d --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/istio16224.go @@ -0,0 +1,129 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. + +package main + +import ( + "os" + "runtime/pprof" + "sync" + "time" +) + +func init() { + register("Istio16224", Istio16224) +} + +type ConfigStoreCache_istio16224 interface { + RegisterEventHandler(handler func()) + Run() +} + +type Event_istio16224 int + +type Handler_istio16224 func(Event_istio16224) + +type configstoreMonitor_istio16224 struct { + handlers []Handler_istio16224 + eventCh chan Event_istio16224 +} + +func (m *configstoreMonitor_istio16224) Run(stop <-chan struct{}) { + for { + select { + case <-stop: + // This bug is not descibed, but is a true positive (in our eyes) + // In a real run main exits when the goro is blocked here. 
+ if _, ok := <-m.eventCh; ok { + close(m.eventCh) + } + return + case ce, ok := <-m.eventCh: + if ok { + m.processConfigEvent(ce) + } + } + } +} + +func (m *configstoreMonitor_istio16224) processConfigEvent(ce Event_istio16224) { + m.applyHandlers(ce) +} + +func (m *configstoreMonitor_istio16224) AppendEventHandler(h Handler_istio16224) { + m.handlers = append(m.handlers, h) +} + +func (m *configstoreMonitor_istio16224) applyHandlers(e Event_istio16224) { + for _, f := range m.handlers { + f(e) + } +} +func (m *configstoreMonitor_istio16224) ScheduleProcessEvent(configEvent Event_istio16224) { + m.eventCh <- configEvent +} + +type Monitor_istio16224 interface { + Run(<-chan struct{}) + AppendEventHandler(Handler_istio16224) + ScheduleProcessEvent(Event_istio16224) +} + +type controller_istio16224 struct { + monitor Monitor_istio16224 +} + +func (c *controller_istio16224) RegisterEventHandler(f func(Event_istio16224)) { + c.monitor.AppendEventHandler(f) +} + +func (c *controller_istio16224) Run(stop <-chan struct{}) { + c.monitor.Run(stop) +} + +func (c *controller_istio16224) Create() { + c.monitor.ScheduleProcessEvent(Event_istio16224(0)) +} + +func NewMonitor_istio16224() Monitor_istio16224 { + return NewBufferedMonitor_istio16224() +} + +func NewBufferedMonitor_istio16224() Monitor_istio16224 { + return &configstoreMonitor_istio16224{ + eventCh: make(chan Event_istio16224), + } +} + +func Istio16224() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + + for i := 0; i < 100; i++ { + go func() { + controller := &controller_istio16224{monitor: NewMonitor_istio16224()} + done := make(chan bool) + lock := sync.Mutex{} + controller.RegisterEventHandler(func(event Event_istio16224) { + lock.Lock() + defer lock.Unlock() + done <- true + }) + + stop := make(chan struct{}) + go controller.Run(stop) + + controller.Create() + + lock.Lock() // blocks + lock.Unlock() + <-done + + close(stop) + }() + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/istio17860.go b/src/runtime/testdata/testgoroutineleakprofile/goker/istio17860.go new file mode 100644 index 00000000000000..aa8317c6d5f41b --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/istio17860.go @@ -0,0 +1,144 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
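+
+// Leak sketch (illustrative): Run returns as soon as activeEpochs becomes empty.
+// A second Restart can then spawn runWait, whose send on the unbuffered statusCh
+// has no receiver, so that goroutine blocks and leaks.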
+ +package main + +import ( + "context" + "os" + "runtime/pprof" + + "sync" + "time" +) + +func init() { + register("Istio17860", Istio17860) +} + +type Proxy_istio17860 interface { + IsLive() bool +} + +type TestProxy_istio17860 struct { + live func() bool +} + +func (tp TestProxy_istio17860) IsLive() bool { + if tp.live == nil { + return true + } + return tp.live() +} + +type Agent_istio17860 interface { + Run(ctx context.Context) + Restart() +} + +type exitStatus_istio17860 int + +type agent_istio17860 struct { + proxy Proxy_istio17860 + mu *sync.Mutex + statusCh chan exitStatus_istio17860 + currentEpoch int + activeEpochs map[int]struct{} +} + +func (a *agent_istio17860) Run(ctx context.Context) { + for { + select { + case status := <-a.statusCh: + a.mu.Lock() + delete(a.activeEpochs, int(status)) + active := len(a.activeEpochs) + a.mu.Unlock() + if active == 0 { + return + } + case <-ctx.Done(): + return + } + } +} + +func (a *agent_istio17860) Restart() { + a.mu.Lock() + defer a.mu.Unlock() + + a.waitUntilLive() + a.currentEpoch++ + a.activeEpochs[a.currentEpoch] = struct{}{} + + go a.runWait(a.currentEpoch) +} + +func (a *agent_istio17860) runWait(epoch int) { + a.statusCh <- exitStatus_istio17860(epoch) +} + +func (a *agent_istio17860) waitUntilLive() { + if len(a.activeEpochs) == 0 { + return + } + + interval := time.NewTicker(30 * time.Nanosecond) + timer := time.NewTimer(100 * time.Nanosecond) + defer func() { + interval.Stop() + timer.Stop() + }() + + if a.proxy.IsLive() { + return + } + + for { + select { + case <-timer.C: + return + case <-interval.C: + if a.proxy.IsLive() { + return + } + } + } +} + +func NewAgent_istio17860(proxy Proxy_istio17860) Agent_istio17860 { + return &agent_istio17860{ + proxy: proxy, + mu: &sync.Mutex{}, + statusCh: make(chan exitStatus_istio17860), + activeEpochs: make(map[int]struct{}), + } +} + +func Istio17860() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + + for i := 0; i < 100; i++ { + go func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + neverLive := func() bool { + return false + } + + a := NewAgent_istio17860(TestProxy_istio17860{live: neverLive}) + go func() { a.Run(ctx) }() + + a.Restart() + go a.Restart() + + time.Sleep(200 * time.Nanosecond) + }() + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/istio18454.go b/src/runtime/testdata/testgoroutineleakprofile/goker/istio18454.go new file mode 100644 index 00000000000000..b410c490322440 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/istio18454.go @@ -0,0 +1,154 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
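+
+// Leak sketch (illustrative, one plausible interleaving): in the strategy's event
+// loop, if the timer has already fired and its value was consumed, timer.Stop()
+// returns false and <-s.timer.C blocks forever. resetChan (capacity 1) then fills
+// up, OnChange blocks on its send, and the processor's run goroutine can no
+// longer observe ctx cancellation, so both goroutines leak.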
+ +package main + +import ( + "context" + "os" + "runtime/pprof" + + "sync" + "time" +) + +func init() { + register("Istio18454", Istio18454) +} + +const eventChCap_istio18454 = 1024 + +type Worker_istio18454 struct { + ctx context.Context + ctxCancel context.CancelFunc +} + +func (w *Worker_istio18454) Start(setupFn func(), runFn func(c context.Context)) { + if setupFn != nil { + setupFn() + } + go func() { + runFn(w.ctx) + }() +} + +func (w *Worker_istio18454) Stop() { + w.ctxCancel() +} + +type Strategy_istio18454 struct { + timer *time.Timer + timerFrequency time.Duration + stateLock sync.Mutex + resetChan chan struct{} + worker *Worker_istio18454 + startTimerFn func() +} + +func (s *Strategy_istio18454) OnChange() { + s.stateLock.Lock() + if s.timer != nil { + s.stateLock.Unlock() + s.resetChan <- struct{}{} + return + } + s.startTimerFn() + s.stateLock.Unlock() +} + +func (s *Strategy_istio18454) startTimer() { + s.timer = time.NewTimer(s.timerFrequency) + eventLoop := func(ctx context.Context) { + for { + select { + case <-s.timer.C: + case <-s.resetChan: + if !s.timer.Stop() { + <-s.timer.C + } + s.timer.Reset(s.timerFrequency) + case <-ctx.Done(): + s.timer.Stop() + return + } + } + } + s.worker.Start(nil, eventLoop) +} + +func (s *Strategy_istio18454) Close() { + s.worker.Stop() +} + +type Event_istio18454 int + +type Processor_istio18454 struct { + stateStrategy *Strategy_istio18454 + worker *Worker_istio18454 + eventCh chan Event_istio18454 +} + +func (p *Processor_istio18454) processEvent() { + p.stateStrategy.OnChange() +} + +func (p *Processor_istio18454) Start() { + setupFn := func() { + for i := 0; i < eventChCap_istio18454; i++ { + p.eventCh <- Event_istio18454(0) + } + } + runFn := func(ctx context.Context) { + defer func() { + p.stateStrategy.Close() + }() + for { + select { + case <-ctx.Done(): + return + case <-p.eventCh: + p.processEvent() + } + } + } + p.worker.Start(setupFn, runFn) +} + +func (p *Processor_istio18454) Stop() { + p.worker.Stop() +} + +func NewWorker_istio18454() *Worker_istio18454 { + worker := &Worker_istio18454{} + worker.ctx, worker.ctxCancel = context.WithCancel(context.Background()) + return worker +} + +func Istio18454() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + + for i := 0; i < 100; i++ { + go func() { + stateStrategy := &Strategy_istio18454{ + timerFrequency: time.Nanosecond, + resetChan: make(chan struct{}, 1), + worker: NewWorker_istio18454(), + } + stateStrategy.startTimerFn = stateStrategy.startTimer + + p := &Processor_istio18454{ + stateStrategy: stateStrategy, + worker: NewWorker_istio18454(), + eventCh: make(chan Event_istio18454, eventChCap_istio18454), + } + + p.Start() + defer p.Stop() + }() + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes10182.go b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes10182.go new file mode 100644 index 00000000000000..0eca5f41fbfab4 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes10182.go @@ -0,0 +1,95 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
+ +/* + * Project: kubernetes + * Issue or PR : https://github.com/kubernetes/kubernetes/pull/10182 + * Buggy version: 4b990d128a17eea9058d28a3b3688ab8abafbd94 + * fix commit-id: 64ad3e17ad15cd0f9a4fd86706eec1c572033254 + * Flaky: 15/100 + */ +package main + +import ( + "os" + "runtime" + "runtime/pprof" + "sync" + "time" +) + +func init() { + register("Kubernetes10182", Kubernetes10182) +} + +type statusManager_kubernetes10182 struct { + podStatusesLock sync.RWMutex + podStatusChannel chan bool +} + +func (s *statusManager_kubernetes10182) Start() { + go func() { + for i := 0; i < 2; i++ { + s.syncBatch() + } + }() +} + +func (s *statusManager_kubernetes10182) syncBatch() { + runtime.Gosched() + <-s.podStatusChannel + s.DeletePodStatus() +} + +func (s *statusManager_kubernetes10182) DeletePodStatus() { + s.podStatusesLock.Lock() + defer s.podStatusesLock.Unlock() +} + +func (s *statusManager_kubernetes10182) SetPodStatus() { + s.podStatusesLock.Lock() + defer s.podStatusesLock.Unlock() + s.podStatusChannel <- true +} + +func NewStatusManager_kubernetes10182() *statusManager_kubernetes10182 { + return &statusManager_kubernetes10182{ + podStatusChannel: make(chan bool), + } +} + +// Example of deadlock trace: +// +// G1 G2 G3 +// -------------------------------------------------------------------------------- +// s.Start() +// s.syncBatch() +// s.SetPodStatus() +// <-s.podStatusChannel +// s.podStatusesLock.Lock() +// s.podStatusChannel <- true +// s.podStatusesLock.Unlock() +// return +// s.DeletePodStatus() +// s.podStatusesLock.Lock() +// s.podStatusChannel <- true +// s.podStatusesLock.Lock() +// -----------------------------------G1,G3 leak------------------------------------- + +func Kubernetes10182() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + + for i := 0; i < 1000; i++ { + go func() { + s := NewStatusManager_kubernetes10182() + go s.Start() + go s.SetPodStatus() + go s.SetPodStatus() + }() + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes11298.go b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes11298.go new file mode 100644 index 00000000000000..36405f240a6612 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes11298.go @@ -0,0 +1,118 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
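+
+// Leak sketch (illustrative): serviceLoop signals the condition variable exactly
+// once when abort fires and then waits on <-ch. If the freshly spawned goroutine
+// has not yet entered n.cond.Wait at that point, the signal is lost, the waiter
+// blocks forever, ch is never closed, and serviceLoop leaks as well.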
+ +package main + +import ( + "os" + "runtime/pprof" + "sync" + "time" +) + +func init() { + register("Kubernetes11298", Kubernetes11298) +} + +type Signal_kubernetes11298 <-chan struct{} + +func After_kubernetes11298(f func()) Signal_kubernetes11298 { + ch := make(chan struct{}) + go func() { + defer close(ch) + if f != nil { + f() + } + }() + return Signal_kubernetes11298(ch) +} + +func Until_kubernetes11298(f func(), period time.Duration, stopCh <-chan struct{}) { + if f == nil { + return + } + for { + select { + case <-stopCh: + return + default: + } + f() + select { + case <-stopCh: + case <-time.After(period): + } + } + +} + +type notifier_kubernetes11298 struct { + lock sync.Mutex + cond *sync.Cond +} + +// abort will be closed no matter what +func (n *notifier_kubernetes11298) serviceLoop(abort <-chan struct{}) { + n.lock.Lock() + defer n.lock.Unlock() + for { + select { + case <-abort: + return + default: + ch := After_kubernetes11298(func() { + n.cond.Wait() + }) + select { + case <-abort: + n.cond.Signal() + <-ch + return + case <-ch: + } + } + } +} + +// abort will be closed no matter what +func Notify_kubernetes11298(abort <-chan struct{}) { + n := ¬ifier_kubernetes11298{} + n.cond = sync.NewCond(&n.lock) + finished := After_kubernetes11298(func() { + Until_kubernetes11298(func() { + for { + select { + case <-abort: + return + default: + func() { + n.lock.Lock() + defer n.lock.Unlock() + n.cond.Signal() + }() + } + } + }, 0, abort) + }) + Until_kubernetes11298(func() { n.serviceLoop(finished) }, 0, abort) +} +func Kubernetes11298() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + + for i := 0; i < 1000; i++ { + go func() { + done := make(chan struct{}) + notifyDone := After_kubernetes11298(func() { Notify_kubernetes11298(done) }) + go func() { + defer close(done) + time.Sleep(300 * time.Nanosecond) + }() + <-notifyDone + }() + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes13135.go b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes13135.go new file mode 100644 index 00000000000000..f6aa8b9ddfe178 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes13135.go @@ -0,0 +1,166 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
+ +/* + * Project: kubernetes + * Issue or PR : https://github.com/kubernetes/kubernetes/pull/13135 + * Buggy version: 6ced66249d4fd2a81e86b4a71d8df0139fe5ceae + * fix commit-id: a12b7edc42c5c06a2e7d9f381975658692951d5a + * Flaky: 93/100 + */ +package main + +import ( + "os" + "runtime/pprof" + "sync" + "time" +) + +func init() { + register("Kubernetes13135", Kubernetes13135) +} + +var ( + StopChannel_kubernetes13135 chan struct{} +) + +func Util_kubernetes13135(f func(), period time.Duration, stopCh <-chan struct{}) { + for { + select { + case <-stopCh: + return + default: + } + func() { + f() + }() + time.Sleep(period) + } +} + +type Store_kubernetes13135 interface { + Add(obj interface{}) + Replace(obj interface{}) +} + +type Reflector_kubernetes13135 struct { + store Store_kubernetes13135 +} + +func (r *Reflector_kubernetes13135) ListAndWatch(stopCh <-chan struct{}) error { + r.syncWith() + return nil +} + +func NewReflector_kubernetes13135(store Store_kubernetes13135) *Reflector_kubernetes13135 { + return &Reflector_kubernetes13135{ + store: store, + } +} + +func (r *Reflector_kubernetes13135) syncWith() { + r.store.Replace(nil) +} + +type Cacher_kubernetes13135 struct { + sync.Mutex + initialized sync.WaitGroup + initOnce sync.Once + watchCache *WatchCache_kubernetes13135 + reflector *Reflector_kubernetes13135 +} + +func (c *Cacher_kubernetes13135) processEvent() { + c.Lock() + defer c.Unlock() +} + +func (c *Cacher_kubernetes13135) startCaching(stopChannel <-chan struct{}) { + c.Lock() + for { + err := c.reflector.ListAndWatch(stopChannel) + if err == nil { + break + } + } +} + +type WatchCache_kubernetes13135 struct { + sync.RWMutex + onReplace func() + onEvent func() +} + +func (w *WatchCache_kubernetes13135) SetOnEvent(onEvent func()) { + w.Lock() + defer w.Unlock() + w.onEvent = onEvent +} + +func (w *WatchCache_kubernetes13135) SetOnReplace(onReplace func()) { + w.Lock() + defer w.Unlock() + w.onReplace = onReplace +} + +func (w *WatchCache_kubernetes13135) processEvent() { + w.Lock() + defer w.Unlock() + if w.onEvent != nil { + w.onEvent() + } +} + +func (w *WatchCache_kubernetes13135) Add(obj interface{}) { + w.processEvent() +} + +func (w *WatchCache_kubernetes13135) Replace(obj interface{}) { + w.Lock() + defer w.Unlock() + if w.onReplace != nil { + w.onReplace() + } +} + +func NewCacher_kubernetes13135(stopCh <-chan struct{}) *Cacher_kubernetes13135 { + watchCache := &WatchCache_kubernetes13135{} + cacher := &Cacher_kubernetes13135{ + initialized: sync.WaitGroup{}, + watchCache: watchCache, + reflector: NewReflector_kubernetes13135(watchCache), + } + cacher.initialized.Add(1) + watchCache.SetOnReplace(func() { + cacher.initOnce.Do(func() { cacher.initialized.Done() }) + cacher.Unlock() + }) + watchCache.SetOnEvent(cacher.processEvent) + go Util_kubernetes13135(func() { cacher.startCaching(stopCh) }, 0, stopCh) // G2 + cacher.initialized.Wait() + return cacher +} + +func Kubernetes13135() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + + StopChannel_kubernetes13135 = make(chan struct{}) + for i := 0; i < 50; i++ { + go func() { + // Should create a local channel. Using a single global channel + // concurrently will cause a deadlock which does not actually exist + // in the original microbenchmark. 
+ StopChannel_kubernetes13135 := make(chan struct{}) + + c := NewCacher_kubernetes13135(StopChannel_kubernetes13135) // G1 + go c.watchCache.Add(nil) // G3 + go close(StopChannel_kubernetes13135) + }() + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes1321.go b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes1321.go new file mode 100644 index 00000000000000..6c0139a9d24fcb --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes1321.go @@ -0,0 +1,100 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. + +/* + * Project: kubernetes + * Issue or PR : https://github.com/kubernetes/kubernetes/pull/1321 + * Buggy version: 9cd0fc70f1ca852c903b18b0933991036b3b2fa1 + * fix commit-id: 435e0b73bb99862f9dedf56a50260ff3dfef14ff + * Flaky: 1/100 + */ +package main + +import ( + "os" + "runtime" + "runtime/pprof" + "sync" + "time" +) + +func init() { + register("Kubernetes1321", Kubernetes1321) +} + +type muxWatcher_kubernetes1321 struct { + result chan struct{} + m *Mux_kubernetes1321 + id int64 +} + +func (mw *muxWatcher_kubernetes1321) Stop() { + mw.m.stopWatching(mw.id) +} + +type Mux_kubernetes1321 struct { + lock sync.Mutex + watchers map[int64]*muxWatcher_kubernetes1321 +} + +func NewMux_kubernetes1321() *Mux_kubernetes1321 { + m := &Mux_kubernetes1321{ + watchers: map[int64]*muxWatcher_kubernetes1321{}, + } + go m.loop() // G2 + return m +} + +func (m *Mux_kubernetes1321) Watch() *muxWatcher_kubernetes1321 { + mw := &muxWatcher_kubernetes1321{ + result: make(chan struct{}), + m: m, + id: int64(len(m.watchers)), + } + m.watchers[mw.id] = mw + runtime.Gosched() + return mw +} + +func (m *Mux_kubernetes1321) loop() { + for i := 0; i < 100; i++ { + m.distribute() + } +} + +func (m *Mux_kubernetes1321) distribute() { + m.lock.Lock() + defer m.lock.Unlock() + for _, w := range m.watchers { + w.result <- struct{}{} + runtime.Gosched() + } +} + +func (m *Mux_kubernetes1321) stopWatching(id int64) { + m.lock.Lock() + defer m.lock.Unlock() + w, ok := m.watchers[id] + if !ok { + return + } + delete(m.watchers, id) + close(w.result) +} + +func testMuxWatcherClose_kubernetes1321() { + m := NewMux_kubernetes1321() + m.watchers[m.Watch().id].Stop() +} + +func Kubernetes1321() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + for i := 0; i < 1000; i++ { + go testMuxWatcherClose_kubernetes1321() // G1 + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes25331.go b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes25331.go new file mode 100644 index 00000000000000..323cb236c0687a --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes25331.go @@ -0,0 +1,72 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
+ +/* + * Project: kubernetes + * Issue or PR : https://github.com/kubernetes/kubernetes/pull/25331 + * Buggy version: 5dd087040bb13434f1ddf2f0693d0203c30f28cb + * fix commit-id: 97f4647dc3d8cf46c2b66b89a31c758a6edfb57c + * Flaky: 100/100 + */ +package main + +import ( + "context" + "errors" + "os" + "runtime" + "runtime/pprof" +) + +func init() { + register("Kubernetes25331", Kubernetes25331) +} + +type watchChan_kubernetes25331 struct { + ctx context.Context + cancel context.CancelFunc + resultChan chan bool + errChan chan error +} + +func (wc *watchChan_kubernetes25331) Stop() { + wc.errChan <- errors.New("Error") + wc.cancel() +} + +func (wc *watchChan_kubernetes25331) run() { + select { + case err := <-wc.errChan: + errResult := len(err.Error()) != 0 + wc.cancel() // Removed in fix + wc.resultChan <- errResult + case <-wc.ctx.Done(): + } +} + +func NewWatchChan_kubernetes25331() *watchChan_kubernetes25331 { + ctx, cancel := context.WithCancel(context.Background()) + return &watchChan_kubernetes25331{ + ctx: ctx, + cancel: cancel, + resultChan: make(chan bool), + errChan: make(chan error), + } +} + +func Kubernetes25331() { + prof := pprof.Lookup("goroutineleak") + defer func() { + // Yield several times to allow the child goroutine to run. + for i := 0; i < yieldCount; i++ { + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) + }() + go func() { + wc := NewWatchChan_kubernetes25331() + go wc.run() // G1 + go wc.Stop() // G2 + }() +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes26980.go b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes26980.go new file mode 100644 index 00000000000000..38e53cf4ad7218 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes26980.go @@ -0,0 +1,87 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
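+
+// Leak scenario exercised below: pop() can block in a single-case select on
+// stopCh while still holding p.lock. The goroutine trying to take p.lock then
+// never closes resultCh, the test goroutine stays stuck at <-resultCh, and its
+// deferred close(stopCh) never runs, so all three goroutines leak.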
+ +package main + +import ( + "os" + "runtime" + "runtime/pprof" + "sync" + "time" +) + +func init() { + register("Kubernetes26980", Kubernetes26980) +} + +type processorListener_kubernetes26980 struct { + lock sync.RWMutex + cond sync.Cond + + pendingNotifications []interface{} +} + +func (p *processorListener_kubernetes26980) add(notification interface{}) { + p.lock.Lock() + defer p.lock.Unlock() + + p.pendingNotifications = append(p.pendingNotifications, notification) + p.cond.Broadcast() +} + +func (p *processorListener_kubernetes26980) pop(stopCh <-chan struct{}) { + p.lock.Lock() + runtime.Gosched() + defer p.lock.Unlock() + for { + for len(p.pendingNotifications) == 0 { + select { + case <-stopCh: + return + default: + } + p.cond.Wait() + } + select { + case <-stopCh: + return + } + } +} + +func newProcessListener_kubernetes26980() *processorListener_kubernetes26980 { + ret := &processorListener_kubernetes26980{ + pendingNotifications: []interface{}{}, + } + ret.cond.L = &ret.lock + return ret +} +func Kubernetes26980() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + + for i := 0; i < 3000; i++ { + go func() { + pl := newProcessListener_kubernetes26980() + stopCh := make(chan struct{}) + defer close(stopCh) + pl.add(1) + runtime.Gosched() + go pl.pop(stopCh) + + resultCh := make(chan struct{}) + go func() { + pl.lock.Lock() + close(resultCh) + }() + runtime.Gosched() + <-resultCh + pl.lock.Unlock() + }() + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes30872.go b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes30872.go new file mode 100644 index 00000000000000..00cdcf2b678692 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes30872.go @@ -0,0 +1,223 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
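+
+// Leak scenario exercised below: the controller goroutine (G2) holds the
+// DeltaFIFO lock (L2) in Pop and calls back into addCluster, which needs the
+// federated informer lock (L1), while ClustersSynced (G4) holds L1 and calls
+// HasSynced, which needs L2. The opposite acquisition orders can deadlock both
+// goroutines, after which Stop (G3) also blocks on L1.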
+ +package main + +import ( + "os" + "runtime" + "runtime/pprof" + "sync" + "time" +) + +func init() { + register("Kubernetes30872", Kubernetes30872) +} + +type PopProcessFunc_kubernetes30872 func() + +type ProcessFunc_kubernetes30872 func() + +func Util_kubernetes30872(f func(), stopCh <-chan struct{}) { + JitterUntil_kubernetes30872(f, stopCh) +} + +func JitterUntil_kubernetes30872(f func(), stopCh <-chan struct{}) { + for { + select { + case <-stopCh: + return + default: + } + func() { + f() + }() + } +} + +type Queue_kubernetes30872 interface { + HasSynced() + Pop(PopProcessFunc_kubernetes30872) +} + +type Config_kubernetes30872 struct { + Queue Queue_kubernetes30872 + Process ProcessFunc_kubernetes30872 +} + +type Controller_kubernetes30872 struct { + config Config_kubernetes30872 +} + +func (c *Controller_kubernetes30872) Run(stopCh <-chan struct{}) { + Util_kubernetes30872(c.processLoop, stopCh) +} + +func (c *Controller_kubernetes30872) HasSynced() { + c.config.Queue.HasSynced() +} + +func (c *Controller_kubernetes30872) processLoop() { + c.config.Queue.Pop(PopProcessFunc_kubernetes30872(c.config.Process)) +} + +type ControllerInterface_kubernetes30872 interface { + Run(<-chan struct{}) + HasSynced() +} + +type ResourceEventHandler_kubernetes30872 interface { + OnAdd() +} + +type ResourceEventHandlerFuncs_kubernetes30872 struct { + AddFunc func() +} + +func (r ResourceEventHandlerFuncs_kubernetes30872) OnAdd() { + if r.AddFunc != nil { + r.AddFunc() + } +} + +type informer_kubernetes30872 struct { + controller ControllerInterface_kubernetes30872 + + stopChan chan struct{} +} + +type federatedInformerImpl_kubernetes30872 struct { + sync.Mutex + clusterInformer informer_kubernetes30872 +} + +func (f *federatedInformerImpl_kubernetes30872) ClustersSynced() { + f.Lock() // L1 + defer f.Unlock() + f.clusterInformer.controller.HasSynced() +} + +func (f *federatedInformerImpl_kubernetes30872) addCluster() { + f.Lock() // L1 + defer f.Unlock() +} + +func (f *federatedInformerImpl_kubernetes30872) Start() { + f.Lock() // L1 + defer f.Unlock() + + f.clusterInformer.stopChan = make(chan struct{}) + go f.clusterInformer.controller.Run(f.clusterInformer.stopChan) // G2 + runtime.Gosched() +} + +func (f *federatedInformerImpl_kubernetes30872) Stop() { + f.Lock() // L1 + defer f.Unlock() + close(f.clusterInformer.stopChan) +} + +type DelayingDeliverer_kubernetes30872 struct{} + +func (d *DelayingDeliverer_kubernetes30872) StartWithHandler(handler func()) { + go func() { // G4 + handler() + }() +} + +type FederationView_kubernetes30872 interface { + ClustersSynced() +} + +type FederatedInformer_kubernetes30872 interface { + FederationView_kubernetes30872 + Start() + Stop() +} + +type NamespaceController_kubernetes30872 struct { + namespaceDeliverer *DelayingDeliverer_kubernetes30872 + namespaceFederatedInformer FederatedInformer_kubernetes30872 +} + +func (nc *NamespaceController_kubernetes30872) isSynced() { + nc.namespaceFederatedInformer.ClustersSynced() +} + +func (nc *NamespaceController_kubernetes30872) reconcileNamespace() { + nc.isSynced() +} + +func (nc *NamespaceController_kubernetes30872) Run(stopChan <-chan struct{}) { + nc.namespaceFederatedInformer.Start() + go func() { // G3 + <-stopChan + nc.namespaceFederatedInformer.Stop() + }() + nc.namespaceDeliverer.StartWithHandler(func() { + nc.reconcileNamespace() + }) +} + +type DeltaFIFO_kubernetes30872 struct { + lock sync.RWMutex +} + +func (f *DeltaFIFO_kubernetes30872) HasSynced() { + f.lock.Lock() // L2 + defer f.lock.Unlock() +} + 
+func (f *DeltaFIFO_kubernetes30872) Pop(process PopProcessFunc_kubernetes30872) { + f.lock.Lock() // L2 + defer f.lock.Unlock() + process() +} + +func NewFederatedInformer_kubernetes30872() FederatedInformer_kubernetes30872 { + federatedInformer := &federatedInformerImpl_kubernetes30872{} + federatedInformer.clusterInformer.controller = NewInformer_kubernetes30872( + ResourceEventHandlerFuncs_kubernetes30872{ + AddFunc: func() { + federatedInformer.addCluster() + }, + }) + return federatedInformer +} + +func NewInformer_kubernetes30872(h ResourceEventHandler_kubernetes30872) *Controller_kubernetes30872 { + fifo := &DeltaFIFO_kubernetes30872{} + cfg := &Config_kubernetes30872{ + Queue: fifo, + Process: func() { + h.OnAdd() + }, + } + return &Controller_kubernetes30872{config: *cfg} +} + +func NewNamespaceController_kubernetes30872() *NamespaceController_kubernetes30872 { + nc := &NamespaceController_kubernetes30872{} + nc.namespaceDeliverer = &DelayingDeliverer_kubernetes30872{} + nc.namespaceFederatedInformer = NewFederatedInformer_kubernetes30872() + return nc +} + +func Kubernetes30872() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + + for i := 0; i < 100; i++ { + go func() { // G1 + namespaceController := NewNamespaceController_kubernetes30872() + stop := make(chan struct{}) + namespaceController.Run(stop) + close(stop) + }() + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes38669.go b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes38669.go new file mode 100644 index 00000000000000..27020d580493bf --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes38669.go @@ -0,0 +1,83 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. + +package main + +import ( + "os" + "runtime" + "runtime/pprof" + "sync" +) + +func init() { + register("Kubernetes38669", Kubernetes38669) +} + +type Event_kubernetes38669 int +type watchCacheEvent_kubernetes38669 int + +type cacheWatcher_kubernetes38669 struct { + sync.Mutex + input chan watchCacheEvent_kubernetes38669 + result chan Event_kubernetes38669 + stopped bool +} + +func (c *cacheWatcher_kubernetes38669) process(initEvents []watchCacheEvent_kubernetes38669) { + for _, event := range initEvents { + c.sendWatchCacheEvent(&event) + } + defer close(c.result) + defer c.Stop() + for { + _, ok := <-c.input + if !ok { + return + } + } +} + +func (c *cacheWatcher_kubernetes38669) sendWatchCacheEvent(event *watchCacheEvent_kubernetes38669) { + c.result <- Event_kubernetes38669(*event) +} + +func (c *cacheWatcher_kubernetes38669) Stop() { + c.stop() +} + +func (c *cacheWatcher_kubernetes38669) stop() { + c.Lock() + defer c.Unlock() + if !c.stopped { + c.stopped = true + close(c.input) + } +} + +func newCacheWatcher_kubernetes38669(chanSize int, initEvents []watchCacheEvent_kubernetes38669) *cacheWatcher_kubernetes38669 { + watcher := &cacheWatcher_kubernetes38669{ + input: make(chan watchCacheEvent_kubernetes38669, chanSize), + result: make(chan Event_kubernetes38669, chanSize), + stopped: false, + } + go watcher.process(initEvents) + return watcher +} + +func Kubernetes38669() { + prof := pprof.Lookup("goroutineleak") + defer func() { + // Yield several times to allow the child goroutine to run. 
+ for i := 0; i < yieldCount; i++ { + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) + }() + go func() { + initEvents := []watchCacheEvent_kubernetes38669{1, 2} + w := newCacheWatcher_kubernetes38669(0, initEvents) + w.Stop() + }() +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes5316.go b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes5316.go new file mode 100644 index 00000000000000..fd51484a0f773b --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes5316.go @@ -0,0 +1,68 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. + +/* + * Project: kubernetes + * Issue or PR : https://github.com/kubernetes/kubernetes/pull/5316 + * Buggy version: c868b0bbf09128960bc7c4ada1a77347a464d876 + * fix commit-id: cc3a433a7abc89d2f766d4c87eaae9448e3dc091 + * Flaky: 100/100 + */ + +package main + +import ( + "errors" + "math/rand" + "os" + "runtime" + "runtime/pprof" + "time" +) + +func init() { + register("Kubernetes5316", Kubernetes5316) +} + +func finishRequest_kubernetes5316(timeout time.Duration, fn func() error) { + ch := make(chan bool) + errCh := make(chan error) + go func() { // G2 + if err := fn(); err != nil { + errCh <- err + } else { + ch <- true + } + }() + + select { + case <-ch: + case <-errCh: + case <-time.After(timeout): + } +} + +func Kubernetes5316() { + prof := pprof.Lookup("goroutineleak") + defer func() { + // Wait a bit because the child goroutine relies on timed operations. + time.Sleep(100 * time.Millisecond) + + // Yield several times to allow the child goroutine to run + for i := 0; i < yieldCount; i++ { + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) + }() + go func() { + fn := func() error { + time.Sleep(2 * time.Millisecond) + if rand.Intn(10) > 5 { + return errors.New("Error") + } + return nil + } + go finishRequest_kubernetes5316(time.Microsecond, fn) // G1 + }() +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes58107.go b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes58107.go new file mode 100644 index 00000000000000..0ca707e981693a --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes58107.go @@ -0,0 +1,108 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
+ +/* + * Project: kubernetes + * Tag: Reproduce misbehavior + * Issue or PR : https://github.com/kubernetes/kubernetes/pull/58107 + * Buggy version: 2f17d782eb2772d6401da7ddced9ac90656a7a79 + * fix commit-id: 010a127314a935d8d038f8dd4559fc5b249813e4 + * Flaky: 53/100 + */ + +package main + +import ( + "os" + "runtime" + "runtime/pprof" + "sync" + "time" +) + +func init() { + register("Kubernetes58107", Kubernetes58107) +} + +type RateLimitingInterface_kubernetes58107 interface { + Get() + Put() +} + +type Type_kubernetes58107 struct { + cond *sync.Cond +} + +func (q *Type_kubernetes58107) Get() { + q.cond.L.Lock() + defer q.cond.L.Unlock() + q.cond.Wait() +} + +func (q *Type_kubernetes58107) Put() { + q.cond.Signal() +} + +type ResourceQuotaController_kubernetes58107 struct { + workerLock sync.RWMutex + queue RateLimitingInterface_kubernetes58107 + missingUsageQueue RateLimitingInterface_kubernetes58107 +} + +func (rq *ResourceQuotaController_kubernetes58107) worker(queue RateLimitingInterface_kubernetes58107, _ string) { + workFunc := func() bool { + rq.workerLock.RLock() + defer rq.workerLock.RUnlock() + queue.Get() + return true + } + for { + if quit := workFunc(); quit { + return + } + } +} + +func (rq *ResourceQuotaController_kubernetes58107) Run() { + go rq.worker(rq.queue, "G1") // G3 + go rq.worker(rq.missingUsageQueue, "G2") // G4 +} + +func (rq *ResourceQuotaController_kubernetes58107) Sync() { + for i := 0; i < 100000; i++ { + rq.workerLock.Lock() + runtime.Gosched() + rq.workerLock.Unlock() + } +} + +func (rq *ResourceQuotaController_kubernetes58107) HelperSignals() { + for i := 0; i < 100000; i++ { + rq.queue.Put() + rq.missingUsageQueue.Put() + } +} + +func startResourceQuotaController_kubernetes58107() { + resourceQuotaController := &ResourceQuotaController_kubernetes58107{ + queue: &Type_kubernetes58107{sync.NewCond(&sync.Mutex{})}, + missingUsageQueue: &Type_kubernetes58107{sync.NewCond(&sync.Mutex{})}, + } + + go resourceQuotaController.Run() // G2 + go resourceQuotaController.Sync() // G5 + resourceQuotaController.HelperSignals() +} + +func Kubernetes58107() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(1000 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + + for i := 0; i < 1000; i++ { + go startResourceQuotaController_kubernetes58107() // G1 + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes62464.go b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes62464.go new file mode 100644 index 00000000000000..0d07ebc4a9c451 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes62464.go @@ -0,0 +1,120 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
+ +/* + * Project: kubernetes + * Issue or PR : https://github.com/kubernetes/kubernetes/pull/62464 + * Buggy version: a048ca888ad27367b1a7b7377c67658920adbf5d + * fix commit-id: c1b19fce903675b82e9fdd1befcc5f5d658bfe78 + * Flaky: 8/100 + */ + +package main + +import ( + "math/rand" + "os" + "runtime" + "runtime/pprof" + "sync" + "time" +) + +func init() { + register("Kubernetes62464", Kubernetes62464) +} + +type State_kubernetes62464 interface { + GetCPUSetOrDefault() + GetCPUSet() bool + GetDefaultCPUSet() + SetDefaultCPUSet() +} + +type stateMemory_kubernetes62464 struct { + sync.RWMutex +} + +func (s *stateMemory_kubernetes62464) GetCPUSetOrDefault() { + s.RLock() + defer s.RUnlock() + if ok := s.GetCPUSet(); ok { + return + } + s.GetDefaultCPUSet() +} + +func (s *stateMemory_kubernetes62464) GetCPUSet() bool { + runtime.Gosched() + s.RLock() + defer s.RUnlock() + + if rand.Intn(10) > 5 { + return true + } + return false +} + +func (s *stateMemory_kubernetes62464) GetDefaultCPUSet() { + s.RLock() + defer s.RUnlock() +} + +func (s *stateMemory_kubernetes62464) SetDefaultCPUSet() { + s.Lock() + runtime.Gosched() + defer s.Unlock() +} + +type staticPolicy_kubernetes62464 struct{} + +func (p *staticPolicy_kubernetes62464) RemoveContainer(s State_kubernetes62464) { + s.GetDefaultCPUSet() + s.SetDefaultCPUSet() +} + +type manager_kubernetes62464 struct { + state *stateMemory_kubernetes62464 +} + +func (m *manager_kubernetes62464) reconcileState() { + m.state.GetCPUSetOrDefault() +} + +func NewPolicyAndManager_kubernetes62464() (*staticPolicy_kubernetes62464, *manager_kubernetes62464) { + s := &stateMemory_kubernetes62464{} + m := &manager_kubernetes62464{s} + p := &staticPolicy_kubernetes62464{} + return p, m +} + +/// +/// G1 G2 +/// m.reconcileState() +/// m.state.GetCPUSetOrDefault() +/// s.RLock() +/// s.GetCPUSet() +/// p.RemoveContainer() +/// s.GetDefaultCPUSet() +/// s.SetDefaultCPUSet() +/// s.Lock() +/// s.RLock() +/// ---------------------G1,G2 deadlock--------------------- +/// + +func Kubernetes62464() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + + for i := 0; i < 1000; i++ { + go func() { + p, m := NewPolicyAndManager_kubernetes62464() + go m.reconcileState() + go p.RemoveContainer(m.state) + }() + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes6632.go b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes6632.go new file mode 100644 index 00000000000000..a3af3b24ae81ac --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes6632.go @@ -0,0 +1,86 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
+ +/* + * Project: kubernetes + * Issue or PR : https://github.com/kubernetes/kubernetes/pull/6632 + * Buggy version: e597b41d939573502c8dda1dde7bf3439325fb5d + * fix commit-id: 82afb7ab1fe12cf2efceede2322d082eaf5d5adc + * Flaky: 4/100 + */ +package main + +import ( + "os" + "runtime/pprof" + "sync" + "time" +) + +func init() { + register("Kubernetes6632", Kubernetes6632) +} + +type Connection_kubernetes6632 struct { + closeChan chan bool +} + +type idleAwareFramer_kubernetes6632 struct { + resetChan chan bool + writeLock sync.Mutex + conn *Connection_kubernetes6632 +} + +func (i *idleAwareFramer_kubernetes6632) monitor() { + var resetChan = i.resetChan +Loop: + for { + select { + case <-i.conn.closeChan: + i.writeLock.Lock() + close(resetChan) + i.resetChan = nil + i.writeLock.Unlock() + break Loop + } + } +} + +func (i *idleAwareFramer_kubernetes6632) WriteFrame() { + i.writeLock.Lock() + defer i.writeLock.Unlock() + if i.resetChan == nil { + return + } + i.resetChan <- true +} + +func NewIdleAwareFramer_kubernetes6632() *idleAwareFramer_kubernetes6632 { + return &idleAwareFramer_kubernetes6632{ + resetChan: make(chan bool), + conn: &Connection_kubernetes6632{ + closeChan: make(chan bool), + }, + } +} + +func Kubernetes6632() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + + for i := 0; i < 100; i++ { + go func() { + i := NewIdleAwareFramer_kubernetes6632() + + go func() { // helper goroutine + i.conn.closeChan <- true + }() + go i.monitor() // G1 + go i.WriteFrame() // G2 + }() + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes70277.go b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes70277.go new file mode 100644 index 00000000000000..ae02ec83040b99 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes70277.go @@ -0,0 +1,97 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
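+
+// Leak scenario exercised below: doneCh aliases the stopCh passed to
+// WaitFor_kubernetes70277, and the test goroutine blocks on <-doneCh before it
+// can return, so its deferred close(stopCh) never runs and the goroutine leaks.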
+ +package main + +import ( + "os" + "runtime/pprof" + "time" +) + +func init() { + register("Kubernetes70277", Kubernetes70277) +} + +type WaitFunc_kubernetes70277 func(done <-chan struct{}) <-chan struct{} + +type ConditionFunc_kubernetes70277 func() (done bool, err error) + +func WaitFor_kubernetes70277(wait WaitFunc_kubernetes70277, fn ConditionFunc_kubernetes70277, done <-chan struct{}) error { + c := wait(done) + for { + _, open := <-c + ok, err := fn() + if err != nil { + return err + } + if ok { + return nil + } + if !open { + break + } + } + return nil +} + +func poller_kubernetes70277(interval, timeout time.Duration) WaitFunc_kubernetes70277 { + return WaitFunc_kubernetes70277(func(done <-chan struct{}) <-chan struct{} { + ch := make(chan struct{}) + go func() { + defer close(ch) + + tick := time.NewTicker(interval) + defer tick.Stop() + + var after <-chan time.Time + if timeout != 0 { + timer := time.NewTimer(timeout) + after = timer.C + defer timer.Stop() + } + for { + select { + case <-tick.C: + select { + case ch <- struct{}{}: + default: + } + case <-after: + return + case <-done: + return + } + } + }() + + return ch + }) +} + +func Kubernetes70277() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + for i := 0; i < 1000; i++ { + go func() { + stopCh := make(chan struct{}) + defer close(stopCh) + waitFunc := poller_kubernetes70277(time.Millisecond, 80*time.Millisecond) + var doneCh <-chan struct{} + + WaitFor_kubernetes70277(func(done <-chan struct{}) <-chan struct{} { + doneCh = done + return waitFunc(done) + }, func() (bool, error) { + time.Sleep(10 * time.Millisecond) + return true, nil + }, stopCh) + + <-doneCh // block here + }() + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/main.go b/src/runtime/testdata/testgoroutineleakprofile/goker/main.go new file mode 100644 index 00000000000000..5787c1e2b2bb90 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/main.go @@ -0,0 +1,39 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "os" + +// The number of times the main (profiling) goroutine should yield +// in order to allow the leaking goroutines to get stuck. +const yieldCount = 10 + +var cmds = map[string]func(){} + +func register(name string, f func()) { + if cmds[name] != nil { + panic("duplicate registration: " + name) + } + cmds[name] = f +} + +func registerInit(name string, f func()) { + if len(os.Args) >= 2 && os.Args[1] == name { + f() + } +} + +func main() { + if len(os.Args) < 2 { + println("usage: " + os.Args[0] + " name-of-test") + return + } + f := cmds[os.Args[1]] + if f == nil { + println("unknown function: " + os.Args[1]) + return + } + f() +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/moby17176.go b/src/runtime/testdata/testgoroutineleakprofile/goker/moby17176.go new file mode 100644 index 00000000000000..884d077550f6b8 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/moby17176.go @@ -0,0 +1,68 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
+ +/* + * Project: moby + * Issue or PR : https://github.com/moby/moby/pull/17176 + * Buggy version: d295dc66521e2734390473ec1f1da8a73ad3288a + * fix commit-id: 2f16895ee94848e2d8ad72bc01968b4c88d84cb8 + * Flaky: 100/100 + */ +package main + +import ( + "errors" + "os" + "runtime/pprof" + "sync" + "time" +) + +func init() { + register("Moby17176", Moby17176) +} + +type DeviceSet_moby17176 struct { + sync.Mutex + nrDeletedDevices int +} + +func (devices *DeviceSet_moby17176) cleanupDeletedDevices() error { + devices.Lock() + if devices.nrDeletedDevices == 0 { + /// Missing devices.Unlock() + return nil + } + devices.Unlock() + return errors.New("Error") +} + +func testDevmapperLockReleasedDeviceDeletion_moby17176() { + ds := &DeviceSet_moby17176{ + nrDeletedDevices: 0, + } + ds.cleanupDeletedDevices() + doneChan := make(chan bool) + go func() { + ds.Lock() + defer ds.Unlock() + doneChan <- true + }() + + select { + case <-time.After(time.Millisecond): + case <-doneChan: + } +} +func Moby17176() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + + for i := 0; i < 100; i++ { + go testDevmapperLockReleasedDeviceDeletion_moby17176() + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/moby21233.go b/src/runtime/testdata/testgoroutineleakprofile/goker/moby21233.go new file mode 100644 index 00000000000000..36017a9488cc06 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/moby21233.go @@ -0,0 +1,146 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. + +/* + * Project: moby + * Issue or PR : https://github.com/moby/moby/pull/21233 + * Buggy version: cc12d2bfaae135e63b1f962ad80e6943dd995337 + * fix commit-id: 2f4aa9658408ac72a598363c6e22eadf93dbb8a7 + * Flaky:100/100 + */ +package main + +import ( + "math/rand" + "os" + "runtime/pprof" + "sync" + "time" +) + +func init() { + register("Moby21233", Moby21233) +} + +type Progress_moby21233 struct{} + +type Output_moby21233 interface { + WriteProgress(Progress_moby21233) error +} + +type chanOutput_moby21233 chan<- Progress_moby21233 + +type TransferManager_moby21233 struct { + mu sync.Mutex +} + +type Transfer_moby21233 struct { + mu sync.Mutex +} + +type Watcher_moby21233 struct { + signalChan chan struct{} + releaseChan chan struct{} + running chan struct{} +} + +func ChanOutput_moby21233(progressChan chan<- Progress_moby21233) Output_moby21233 { + return chanOutput_moby21233(progressChan) +} +func (out chanOutput_moby21233) WriteProgress(p Progress_moby21233) error { + out <- p + return nil +} +func NewTransferManager_moby21233() *TransferManager_moby21233 { + return &TransferManager_moby21233{} +} +func NewTransfer_moby21233() *Transfer_moby21233 { + return &Transfer_moby21233{} +} +func (t *Transfer_moby21233) Release(watcher *Watcher_moby21233) { + t.mu.Lock() + t.mu.Unlock() + close(watcher.releaseChan) + <-watcher.running +} +func (t *Transfer_moby21233) Watch(progressOutput Output_moby21233) *Watcher_moby21233 { + t.mu.Lock() + defer t.mu.Unlock() + lastProgress := Progress_moby21233{} + w := &Watcher_moby21233{ + releaseChan: make(chan struct{}), + signalChan: make(chan struct{}), + running: make(chan struct{}), + } + go func() { // G2 + defer func() { + close(w.running) + }() + done := false + for { + t.mu.Lock() + t.mu.Unlock() + if rand.Int31n(2) >= 1 { + progressOutput.WriteProgress(lastProgress) + } + if done { 
+ return + } + select { + case <-w.signalChan: + case <-w.releaseChan: + done = true + } + } + }() + return w +} +func (tm *TransferManager_moby21233) Transfer(progressOutput Output_moby21233) (*Transfer_moby21233, *Watcher_moby21233) { + tm.mu.Lock() + defer tm.mu.Unlock() + t := NewTransfer_moby21233() + return t, t.Watch(progressOutput) +} + +func testTransfer_moby21233() { // G1 + tm := NewTransferManager_moby21233() + progressChan := make(chan Progress_moby21233) + progressDone := make(chan struct{}) + go func() { // G3 + time.Sleep(1 * time.Millisecond) + for p := range progressChan { /// Chan consumer + if rand.Int31n(2) >= 1 { + return + } + _ = p + } + close(progressDone) + }() + time.Sleep(1 * time.Millisecond) + ids := []string{"id1", "id2", "id3"} + xrefs := make([]*Transfer_moby21233, len(ids)) + watchers := make([]*Watcher_moby21233, len(ids)) + for i := range ids { + xrefs[i], watchers[i] = tm.Transfer(ChanOutput_moby21233(progressChan)) /// Chan producer + time.Sleep(2 * time.Millisecond) + } + + for i := range xrefs { + xrefs[i].Release(watchers[i]) + } + + close(progressChan) + <-progressDone +} + +func Moby21233() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + for i := 0; i < 100; i++ { + go testTransfer_moby21233() // G1 + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/moby25384.go b/src/runtime/testdata/testgoroutineleakprofile/goker/moby25384.go new file mode 100644 index 00000000000000..d653731f6caea8 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/moby25384.go @@ -0,0 +1,59 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. + +/* + * Project: moby + * Issue or PR : https://github.com/moby/moby/pull/25384 + * Buggy version: 58befe3081726ef74ea09198cd9488fb42c51f51 + * fix commit-id: 42360d164b9f25fb4b150ef066fcf57fa39559a7 + * Flaky: 100/100 + */ +package main + +import ( + "os" + "runtime" + "runtime/pprof" + "sync" +) + +func init() { + register("Moby25348", Moby25348) +} + +type plugin_moby25348 struct{} + +type Manager_moby25348 struct { + plugins []*plugin_moby25348 +} + +func (pm *Manager_moby25348) init() { + var group sync.WaitGroup + group.Add(len(pm.plugins)) + for _, p := range pm.plugins { + go func(p *plugin_moby25348) { + defer group.Done() + }(p) + group.Wait() // Block here + } +} + +func Moby25348() { + prof := pprof.Lookup("goroutineleak") + defer func() { + // Yield several times to allow the child goroutine to run. + for i := 0; i < yieldCount; i++ { + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) + }() + go func() { + p1 := &plugin_moby25348{} + p2 := &plugin_moby25348{} + pm := &Manager_moby25348{ + plugins: []*plugin_moby25348{p1, p2}, + } + go pm.init() + }() +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/moby27782.go b/src/runtime/testdata/testgoroutineleakprofile/goker/moby27782.go new file mode 100644 index 00000000000000..7b3398fd381210 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/moby27782.go @@ -0,0 +1,242 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
+ +/* + * Project: moby + * Issue or PR : https://github.com/moby/moby/pull/27782 + * Buggy version: 18768fdc2e76ec6c600c8ab57d2d487ee7877794 + * fix commit-id: a69a59ffc7e3d028a72d1195c2c1535f447eaa84 + * Flaky: 2/100 + */ +package main + +import ( + "errors" + "os" + "runtime" + "runtime/pprof" + "sync" + "time" +) + +func init() { + register("Moby27782", Moby27782) +} + +type Event_moby27782 struct { + Op Op_moby27782 +} + +type Op_moby27782 uint32 + +const ( + Create_moby27782 Op_moby27782 = 1 << iota + Write_moby27782 + Remove_moby27782 + Rename_moby27782 + Chmod_moby27782 +) + +func newEvent(op Op_moby27782) Event_moby27782 { + return Event_moby27782{op} +} + +func (e *Event_moby27782) ignoreLinux(w *Watcher_moby27782) bool { + if e.Op != Write_moby27782 { + w.mu.Lock() + defer w.mu.Unlock() + w.cv.Broadcast() + return true + } + runtime.Gosched() + return false +} + +type Watcher_moby27782 struct { + Events chan Event_moby27782 + mu sync.Mutex // L1 + cv *sync.Cond // C1 + done chan struct{} +} + +func NewWatcher_moby27782() *Watcher_moby27782 { + w := &Watcher_moby27782{ + Events: make(chan Event_moby27782), + done: make(chan struct{}), + } + w.cv = sync.NewCond(&w.mu) + go w.readEvents() // G3 + return w +} + +func (w *Watcher_moby27782) readEvents() { + defer close(w.Events) + for { + if w.isClosed() { + return + } + event := newEvent(Write_moby27782) // MODIFY event + if !event.ignoreLinux(w) { + runtime.Gosched() + select { + case w.Events <- event: + case <-w.done: + return + } + } + } +} + +func (w *Watcher_moby27782) isClosed() bool { + select { + case <-w.done: + return true + default: + return false + } +} + +func (w *Watcher_moby27782) Close() { + if w.isClosed() { + return + } + close(w.done) +} + +func (w *Watcher_moby27782) Remove() { + w.mu.Lock() + defer w.mu.Unlock() + exists := true + for exists { + w.cv.Wait() + runtime.Gosched() + } +} + +type FileWatcher_moby27782 interface { + Events() <-chan Event_moby27782 + Remove() + Close() +} + +func New_moby27782() FileWatcher_moby27782 { + return NewEventWatcher_moby27782() +} + +func NewEventWatcher_moby27782() FileWatcher_moby27782 { + return &fsNotifyWatcher_moby27782{NewWatcher_moby27782()} +} + +type fsNotifyWatcher_moby27782 struct { + *Watcher_moby27782 +} + +func (w *fsNotifyWatcher_moby27782) Events() <-chan Event_moby27782 { + return w.Watcher_moby27782.Events +} + +func watchFile_moby27782() FileWatcher_moby27782 { + fileWatcher := New_moby27782() + return fileWatcher +} + +type LogWatcher_moby27782 struct { + closeOnce sync.Once + closeNotifier chan struct{} +} + +func (w *LogWatcher_moby27782) Close() { + w.closeOnce.Do(func() { + close(w.closeNotifier) + }) +} + +func (w *LogWatcher_moby27782) WatchClose() <-chan struct{} { + return w.closeNotifier +} + +func NewLogWatcher_moby27782() *LogWatcher_moby27782 { + return &LogWatcher_moby27782{ + closeNotifier: make(chan struct{}), + } +} + +func followLogs_moby27782(logWatcher *LogWatcher_moby27782) { + fileWatcher := watchFile_moby27782() + defer func() { + fileWatcher.Close() + }() + waitRead := func() { + runtime.Gosched() + select { + case <-fileWatcher.Events(): + case <-logWatcher.WatchClose(): + fileWatcher.Remove() + return + } + } + handleDecodeErr := func() { + waitRead() + } + handleDecodeErr() +} + +type Container_moby27782 struct { + LogDriver *JSONFileLogger_moby27782 +} + +func (container *Container_moby27782) InitializeStdio() { + if err := container.startLogging(); err != nil { + container.Reset() + } +} + +func (container 
*Container_moby27782) startLogging() error { + l := &JSONFileLogger_moby27782{ + readers: make(map[*LogWatcher_moby27782]struct{}), + } + container.LogDriver = l + l.ReadLogs() + return errors.New("Some error") +} + +func (container *Container_moby27782) Reset() { + if container.LogDriver != nil { + container.LogDriver.Close() + } +} + +type JSONFileLogger_moby27782 struct { + readers map[*LogWatcher_moby27782]struct{} +} + +func (l *JSONFileLogger_moby27782) ReadLogs() *LogWatcher_moby27782 { + logWatcher := NewLogWatcher_moby27782() + go l.readLogs(logWatcher) // G2 + return logWatcher +} + +func (l *JSONFileLogger_moby27782) readLogs(logWatcher *LogWatcher_moby27782) { + l.readers[logWatcher] = struct{}{} + followLogs_moby27782(logWatcher) +} + +func (l *JSONFileLogger_moby27782) Close() { + for r := range l.readers { + r.Close() + delete(l.readers, r) + } +} + +func Moby27782() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + + for i := 0; i < 10000; i++ { + go (&Container_moby27782{}).InitializeStdio() // G1 + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/moby28462.go b/src/runtime/testdata/testgoroutineleakprofile/goker/moby28462.go new file mode 100644 index 00000000000000..56467e0b56f7cc --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/moby28462.go @@ -0,0 +1,125 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. + +/* + * Project: moby + * Issue or PR : https://github.com/moby/moby/pull/28462 + * Buggy version: b184bdabf7a01c4b802304ac64ac133743c484be + * fix commit-id: 89b123473774248fc3a0356dd3ce5b116cc69b29 + * Flaky: 69/100 + */ +package main + +import ( + "os" + "runtime" + "runtime/pprof" + "sync" + "time" +) + +func init() { + register("Moby28462", Moby28462) +} + +type State_moby28462 struct { + Health *Health_moby28462 +} + +type Container_moby28462 struct { + sync.Mutex + State *State_moby28462 +} + +func (ctr *Container_moby28462) start() { + go ctr.waitExit() +} +func (ctr *Container_moby28462) waitExit() { + +} + +type Store_moby28462 struct { + ctr *Container_moby28462 +} + +func (s *Store_moby28462) Get() *Container_moby28462 { + return s.ctr +} + +type Daemon_moby28462 struct { + containers Store_moby28462 +} + +func (d *Daemon_moby28462) StateChanged() { + c := d.containers.Get() + c.Lock() + d.updateHealthMonitorElseBranch(c) + defer c.Unlock() +} + +func (d *Daemon_moby28462) updateHealthMonitorIfBranch(c *Container_moby28462) { + h := c.State.Health + if stop := h.OpenMonitorChannel(); stop != nil { + go monitor_moby28462(c, stop) + } +} +func (d *Daemon_moby28462) updateHealthMonitorElseBranch(c *Container_moby28462) { + h := c.State.Health + h.CloseMonitorChannel() +} + +type Health_moby28462 struct { + stop chan struct{} +} + +func (s *Health_moby28462) OpenMonitorChannel() chan struct{} { + return s.stop +} + +func (s *Health_moby28462) CloseMonitorChannel() { + if s.stop != nil { + s.stop <- struct{}{} + } +} + +func monitor_moby28462(c *Container_moby28462, stop chan struct{}) { + for { + select { + case <-stop: + return + default: + handleProbeResult_moby28462(c) + } + } +} + +func handleProbeResult_moby28462(c *Container_moby28462) { + runtime.Gosched() + c.Lock() + defer c.Unlock() +} + +func NewDaemonAndContainer_moby28462() (*Daemon_moby28462, *Container_moby28462) { + c := &Container_moby28462{ + State: 
&State_moby28462{&Health_moby28462{make(chan struct{})}}, + } + d := &Daemon_moby28462{Store_moby28462{c}} + return d, c +} + +func Moby28462() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + + for i := 0; i < 100; i++ { + go func() { + d, c := NewDaemonAndContainer_moby28462() + go monitor_moby28462(c, c.State.Health.OpenMonitorChannel()) // G1 + go d.StateChanged() // G2 + }() + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/moby30408.go b/src/runtime/testdata/testgoroutineleakprofile/goker/moby30408.go new file mode 100644 index 00000000000000..561e2faf576ab0 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/moby30408.go @@ -0,0 +1,67 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. + +package main + +import ( + "errors" + "os" + "runtime" + "runtime/pprof" + "sync" +) + +func init() { + register("Moby30408", Moby30408) +} + +type Manifest_moby30408 struct { + Implements []string +} + +type Plugin_moby30408 struct { + activateWait *sync.Cond + activateErr error + Manifest *Manifest_moby30408 +} + +func (p *Plugin_moby30408) waitActive() error { + p.activateWait.L.Lock() + for !p.activated() { + p.activateWait.Wait() + } + p.activateWait.L.Unlock() + return p.activateErr +} + +func (p *Plugin_moby30408) activated() bool { + return p.Manifest != nil +} + +func testActive_moby30408(p *Plugin_moby30408) { + done := make(chan struct{}) + go func() { // G2 + p.waitActive() + close(done) + }() + <-done +} + +func Moby30408() { + prof := pprof.Lookup("goroutineleak") + defer func() { + // Yield several times to allow the child goroutine to run. + for i := 0; i < yieldCount; i++ { + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) + }() + + go func() { // G1 + p := &Plugin_moby30408{activateWait: sync.NewCond(&sync.Mutex{})} + p.activateErr = errors.New("some junk happened") + + testActive_moby30408(p) + }() +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/moby33781.go b/src/runtime/testdata/testgoroutineleakprofile/goker/moby33781.go new file mode 100644 index 00000000000000..4be50546e9be2c --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/moby33781.go @@ -0,0 +1,71 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
+ +/* + * Project: moby + * Issue or PR : https://github.com/moby/moby/pull/33781 + * Buggy version: 33fd3817b0f5ca4b87f0a75c2bd583b4425d392b + * fix commit-id: 67297ba0051d39be544009ba76abea14bc0be8a4 + * Flaky: 25/100 + */ + +package main + +import ( + "context" + "os" + "runtime/pprof" + "time" +) + +func init() { + register("Moby33781", Moby33781) +} + +func monitor_moby33781(stop chan bool) { + probeInterval := time.Millisecond + probeTimeout := time.Millisecond + for { + select { + case <-stop: + return + case <-time.After(probeInterval): + results := make(chan bool) + ctx, cancelProbe := context.WithTimeout(context.Background(), probeTimeout) + go func() { // G3 + results <- true + close(results) + }() + select { + case <-stop: + // results should be drained here + cancelProbe() + return + case <-results: + cancelProbe() + case <-ctx.Done(): + cancelProbe() + <-results + } + } + } +} + +func Moby33781() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + for i := 0; i < 100; i++ { + go func(i int) { + stop := make(chan bool) + go monitor_moby33781(stop) // G1 + go func() { // G2 + time.Sleep(time.Duration(i) * time.Millisecond) + stop <- true + }() + }(i) + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/moby36114.go b/src/runtime/testdata/testgoroutineleakprofile/goker/moby36114.go new file mode 100644 index 00000000000000..577c81651a0668 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/moby36114.go @@ -0,0 +1,53 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. + +/* + * Project: moby + * Issue or PR : https://github.com/moby/moby/pull/36114 + * Buggy version: 6d4d3c52ae7c3f910bfc7552a2a673a8338e5b9f + * fix commit-id: a44fcd3d27c06aaa60d8d1cbce169f0d982e74b1 + * Flaky: 100/100 + */ +package main + +import ( + "os" + "runtime/pprof" + "sync" + "time" +) + +func init() { + register("Moby36114", Moby36114) +} + +type serviceVM_moby36114 struct { + sync.Mutex +} + +func (svm *serviceVM_moby36114) hotAddVHDsAtStart() { + svm.Lock() + defer svm.Unlock() + svm.hotRemoveVHDsAtStart() +} + +func (svm *serviceVM_moby36114) hotRemoveVHDsAtStart() { + svm.Lock() + defer svm.Unlock() +} + +func Moby36114() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + + for i := 0; i < 100; i++ { + go func() { + s := &serviceVM_moby36114{} + go s.hotAddVHDsAtStart() + }() + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/moby4951.go b/src/runtime/testdata/testgoroutineleakprofile/goker/moby4951.go new file mode 100644 index 00000000000000..7f0497648cf606 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/moby4951.go @@ -0,0 +1,101 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
+ +/* + * Project: moby + * Issue or PR : https://github.com/moby/moby/pull/4951 + * Buggy version: 81f148be566ab2b17810ad4be61a5d8beac8330f + * fix commit-id: 2ffef1b7eb618162673c6ffabccb9ca57c7dfce3 + * Flaky: 100/100 + */ +package main + +import ( + "os" + "runtime" + "runtime/pprof" + "sync" + "time" +) + +func init() { + register("Moby4951", Moby4951) +} + +type DeviceSet_moby4951 struct { + sync.Mutex + infos map[string]*DevInfo_moby4951 + nrDeletedDevices int +} + +func (devices *DeviceSet_moby4951) DeleteDevice(hash string) { + devices.Lock() + defer devices.Unlock() + + info := devices.lookupDevice(hash) + + info.lock.Lock() + runtime.Gosched() + defer info.lock.Unlock() + + devices.deleteDevice(info) +} + +func (devices *DeviceSet_moby4951) lookupDevice(hash string) *DevInfo_moby4951 { + existing, ok := devices.infos[hash] + if !ok { + return nil + } + return existing +} + +func (devices *DeviceSet_moby4951) deleteDevice(info *DevInfo_moby4951) { + devices.removeDeviceAndWait(info.Name()) +} + +func (devices *DeviceSet_moby4951) removeDeviceAndWait(devname string) { + /// remove devices by devname + devices.Unlock() + time.Sleep(300 * time.Nanosecond) + devices.Lock() +} + +type DevInfo_moby4951 struct { + lock sync.Mutex + name string +} + +func (info *DevInfo_moby4951) Name() string { + return info.name +} + +func NewDeviceSet_moby4951() *DeviceSet_moby4951 { + devices := &DeviceSet_moby4951{ + infos: make(map[string]*DevInfo_moby4951), + } + info1 := &DevInfo_moby4951{ + name: "info1", + } + info2 := &DevInfo_moby4951{ + name: "info2", + } + devices.infos[info1.name] = info1 + devices.infos[info2.name] = info2 + return devices +} + +func Moby4951() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + + go func() { + ds := NewDeviceSet_moby4951() + /// Delete devices by the same info + go ds.DeleteDevice("info1") + go ds.DeleteDevice("info1") + }() +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/moby7559.go b/src/runtime/testdata/testgoroutineleakprofile/goker/moby7559.go new file mode 100644 index 00000000000000..212f65b1f3f2c6 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/moby7559.go @@ -0,0 +1,55 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
+ +/* + * Project: moby + * Issue or PR : https://github.com/moby/moby/pull/7559 + * Buggy version: 64579f51fcb439c36377c0068ccc9a007b368b5a + * fix commit-id: 6cbb8e070d6c3a66bf48fbe5cbf689557eee23db + * Flaky: 100/100 + */ +package main + +import ( + "net" + "os" + "runtime/pprof" + "sync" + "time" +) + +func init() { + register("Moby7559", Moby7559) +} + +type UDPProxy_moby7559 struct { + connTrackLock sync.Mutex +} + +func (proxy *UDPProxy_moby7559) Run() { + for i := 0; i < 2; i++ { + proxy.connTrackLock.Lock() + _, err := net.DialUDP("udp", nil, nil) + if err != nil { + continue + /// Missing unlock here + } + if i == 0 { + break + } + } + proxy.connTrackLock.Unlock() +} + +func Moby7559() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + + for i := 0; i < 20; i++ { + go (&UDPProxy_moby7559{}).Run() + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/serving2137.go b/src/runtime/testdata/testgoroutineleakprofile/goker/serving2137.go new file mode 100644 index 00000000000000..49905315a01b4c --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/serving2137.go @@ -0,0 +1,122 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. + +package main + +import ( + "os" + "runtime" + "runtime/pprof" + "sync" + "time" +) + +func init() { + register("Serving2137", Serving2137) +} + +type token_serving2137 struct{} + +type request_serving2137 struct { + lock *sync.Mutex + accepted chan bool +} + +type Breaker_serving2137 struct { + pendingRequests chan token_serving2137 + activeRequests chan token_serving2137 +} + +func (b *Breaker_serving2137) Maybe(thunk func()) bool { + var t token_serving2137 + select { + default: + // Pending request queue is full. Report failure. + return false + case b.pendingRequests <- t: + // Pending request has capacity. + // Wait for capacity in the active queue. + b.activeRequests <- t + // Defer releasing capacity in the active and pending request queue. + defer func() { + <-b.activeRequests + runtime.Gosched() + <-b.pendingRequests + }() + // Do the thing. + thunk() + // Report success + return true + } +} + +func (b *Breaker_serving2137) concurrentRequest() request_serving2137 { + r := request_serving2137{lock: &sync.Mutex{}, accepted: make(chan bool, 1)} + r.lock.Lock() + var start sync.WaitGroup + start.Add(1) + go func() { // G2, G3 + start.Done() + runtime.Gosched() + ok := b.Maybe(func() { + // Will block on locked mutex. + r.lock.Lock() + runtime.Gosched() + r.lock.Unlock() + }) + r.accepted <- ok + }() + start.Wait() // Ensure that the go func has had a chance to execute. + return r +} + +// Perform n requests against the breaker, returning mutexes for each +// request which succeeded, and a slice of bools for all requests. 
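+// In this reproduction each returned request instead carries its own lock and
+// an accepted channel.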
+func (b *Breaker_serving2137) concurrentRequests(n int) []request_serving2137 { + requests := make([]request_serving2137, n) + for i := range requests { + requests[i] = b.concurrentRequest() + } + return requests +} + +func NewBreaker_serving2137(queueDepth, maxConcurrency int32) *Breaker_serving2137 { + return &Breaker_serving2137{ + pendingRequests: make(chan token_serving2137, queueDepth+maxConcurrency), + activeRequests: make(chan token_serving2137, maxConcurrency), + } +} + +func unlock_serving2137(req request_serving2137) { + req.lock.Unlock() + runtime.Gosched() + // Verify that function has completed + ok := <-req.accepted + runtime.Gosched() + // Requeue for next usage + req.accepted <- ok +} + +func unlockAll_serving2137(requests []request_serving2137) { + for _, lc := range requests { + unlock_serving2137(lc) + } +} + +func Serving2137() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + + for i := 0; i < 1000; i++ { + go func() { + b := NewBreaker_serving2137(1, 1) + + locks := b.concurrentRequests(2) // G1 + unlockAll_serving2137(locks) + }() + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/syncthing4829.go b/src/runtime/testdata/testgoroutineleakprofile/goker/syncthing4829.go new file mode 100644 index 00000000000000..7967db7cfe083f --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/syncthing4829.go @@ -0,0 +1,87 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. + +package main + +import ( + "os" + "runtime" + "runtime/pprof" + "sync" +) + +func init() { + register("Syncthing4829", Syncthing4829) +} + +type Address_syncthing4829 int + +type Mapping_syncthing4829 struct { + mut sync.RWMutex // L2 + + extAddresses map[string]Address_syncthing4829 +} + +func (m *Mapping_syncthing4829) clearAddresses() { + m.mut.Lock() // L2 + var removed []Address_syncthing4829 + for id, addr := range m.extAddresses { + removed = append(removed, addr) + delete(m.extAddresses, id) + } + if len(removed) > 0 { + m.notify(nil, removed) + } + m.mut.Unlock() // L2 +} + +func (m *Mapping_syncthing4829) notify(added, remove []Address_syncthing4829) { + m.mut.RLock() // L2 + m.mut.RUnlock() // L2 +} + +type Service_syncthing4829 struct { + mut sync.RWMutex // L1 + + mappings []*Mapping_syncthing4829 +} + +func (s *Service_syncthing4829) NewMapping() *Mapping_syncthing4829 { + mapping := &Mapping_syncthing4829{ + extAddresses: make(map[string]Address_syncthing4829), + } + s.mut.Lock() // L1 + s.mappings = append(s.mappings, mapping) + s.mut.Unlock() // L1 + return mapping +} + +func (s *Service_syncthing4829) RemoveMapping(mapping *Mapping_syncthing4829) { + s.mut.Lock() // L1 + defer s.mut.Unlock() // L1 + for _, existing := range s.mappings { + if existing == mapping { + mapping.clearAddresses() + } + } +} + +func Syncthing4829() { + prof := pprof.Lookup("goroutineleak") + defer func() { + // Yield several times to allow the child goroutine to run. 
+ for i := 0; i < yieldCount; i++ { + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) + }() + + go func() { // G1 + natSvc := &Service_syncthing4829{} + m := natSvc.NewMapping() + m.extAddresses["test"] = 0 + + natSvc.RemoveMapping(m) + }() +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/syncthing5795.go b/src/runtime/testdata/testgoroutineleakprofile/goker/syncthing5795.go new file mode 100644 index 00000000000000..e25494a688dd5c --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/syncthing5795.go @@ -0,0 +1,103 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. + +package main + +import ( + "os" + "runtime" + "runtime/pprof" + "sync" +) + +func init() { + register("Syncthing5795", Syncthing5795) +} + +type message_syncthing5795 interface{} + +type ClusterConfig_syncthing5795 struct{} + +type Model_syncthing5795 interface { + ClusterConfig(message_syncthing5795) +} + +type TestModel_syncthing5795 struct { + ccFn func() +} + +func (t *TestModel_syncthing5795) ClusterConfig(msg message_syncthing5795) { + if t.ccFn != nil { + t.ccFn() + } +} + +type Connection_syncthing5795 interface { + Start() + Close() +} + +type rawConnection_syncthing5795 struct { + receiver Model_syncthing5795 + + inbox chan message_syncthing5795 + dispatcherLoopStopped chan struct{} + closed chan struct{} + closeOnce sync.Once +} + +func (c *rawConnection_syncthing5795) Start() { + go c.dispatcherLoop() // G2 +} + +func (c *rawConnection_syncthing5795) dispatcherLoop() { + defer close(c.dispatcherLoopStopped) + var msg message_syncthing5795 + for { + select { + case msg = <-c.inbox: + case <-c.closed: + return + } + switch msg := msg.(type) { + case *ClusterConfig_syncthing5795: + c.receiver.ClusterConfig(msg) + default: + return + } + } +} + +func (c *rawConnection_syncthing5795) Close() { + c.closeOnce.Do(func() { + close(c.closed) + <-c.dispatcherLoopStopped + }) +} + +func Syncthing5795() { + prof := pprof.Lookup("goroutineleak") + defer func() { + // Yield several times to allow the child goroutine to run. + for i := 0; i < yieldCount; i++ { + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) + }() + go func() { // G1 + m := &TestModel_syncthing5795{} + c := &rawConnection_syncthing5795{ + dispatcherLoopStopped: make(chan struct{}), + closed: make(chan struct{}), + inbox: make(chan message_syncthing5795), + receiver: m, + } + m.ccFn = c.Close + + c.Start() + c.inbox <- &ClusterConfig_syncthing5795{} + + <-c.dispatcherLoopStopped + }() +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/main.go b/src/runtime/testdata/testgoroutineleakprofile/main.go new file mode 100644 index 00000000000000..5787c1e2b2bb90 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/main.go @@ -0,0 +1,39 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "os" + +// The number of times the main (profiling) goroutine should yield +// in order to allow the leaking goroutines to get stuck. 
+const yieldCount = 10 + +var cmds = map[string]func(){} + +func register(name string, f func()) { + if cmds[name] != nil { + panic("duplicate registration: " + name) + } + cmds[name] = f +} + +func registerInit(name string, f func()) { + if len(os.Args) >= 2 && os.Args[1] == name { + f() + } +} + +func main() { + if len(os.Args) < 2 { + println("usage: " + os.Args[0] + " name-of-test") + return + } + f := cmds[os.Args[1]] + if f == nil { + println("unknown function: " + os.Args[1]) + return + } + f() +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/simple.go b/src/runtime/testdata/testgoroutineleakprofile/simple.go new file mode 100644 index 00000000000000..b8172cd6df8aa7 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/simple.go @@ -0,0 +1,253 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "os" + "runtime" + "runtime/pprof" + "sync" +) + +// This is a set of micro-tests with obvious goroutine leaks that +// ensures goroutine leak detection works. +// +// Tests in this file are not flaky iff. run with GOMAXPROCS=1. +// The main goroutine forcefully yields via `runtime.Gosched()` before +// running the profiler. This moves them to the back of the run queue, +// allowing the leaky goroutines to be scheduled beforehand and get stuck. + +func init() { + register("NilRecv", NilRecv) + register("NilSend", NilSend) + register("SelectNoCases", SelectNoCases) + register("ChanRecv", ChanRecv) + register("ChanSend", ChanSend) + register("Select", Select) + register("WaitGroup", WaitGroup) + register("MutexStack", MutexStack) + register("MutexHeap", MutexHeap) + register("RWMutexRLock", RWMutexRLock) + register("RWMutexLock", RWMutexLock) + register("Cond", Cond) + register("Mixed", Mixed) + register("NoLeakGlobal", NoLeakGlobal) +} + +func NilRecv() { + prof := pprof.Lookup("goroutineleak") + go func() { + var c chan int + <-c + panic("should not be reached") + }() + // Yield several times to allow the child goroutine to run. + for i := 0; i < yieldCount; i++ { + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) +} + +func NilSend() { + prof := pprof.Lookup("goroutineleak") + go func() { + var c chan int + c <- 0 + panic("should not be reached") + }() + // Yield several times to allow the child goroutine to run. + for i := 0; i < yieldCount; i++ { + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) +} + +func ChanRecv() { + prof := pprof.Lookup("goroutineleak") + go func() { + <-make(chan int) + panic("should not be reached") + }() + // Yield several times to allow the child goroutine to run. + for i := 0; i < yieldCount; i++ { + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) +} + +func SelectNoCases() { + prof := pprof.Lookup("goroutineleak") + go func() { + select {} + panic("should not be reached") + }() + // Yield several times to allow the child goroutine to run. + for i := 0; i < yieldCount; i++ { + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) +} + +func ChanSend() { + prof := pprof.Lookup("goroutineleak") + go func() { + make(chan int) <- 0 + panic("should not be reached") + }() + // Yield several times to allow the child goroutine to run. 
+ for i := 0; i < yieldCount; i++ { + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) +} + +func Select() { + prof := pprof.Lookup("goroutineleak") + go func() { + select { + case make(chan int) <- 0: + case <-make(chan int): + } + panic("should not be reached") + }() + // Yield several times to allow the child goroutine to run. + for i := 0; i < yieldCount; i++ { + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) +} + +func WaitGroup() { + prof := pprof.Lookup("goroutineleak") + go func() { + var wg sync.WaitGroup + wg.Add(1) + wg.Wait() + panic("should not be reached") + }() + // Yield several times to allow the child goroutine to run. + for i := 0; i < yieldCount; i++ { + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) +} + +func MutexStack() { + prof := pprof.Lookup("goroutineleak") + for i := 0; i < 1000; i++ { + go func() { + var mu sync.Mutex + mu.Lock() + mu.Lock() + panic("should not be reached") + }() + } + // Yield several times to allow the child goroutine to run. + for i := 0; i < yieldCount; i++ { + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) +} + +func MutexHeap() { + prof := pprof.Lookup("goroutineleak") + for i := 0; i < 1000; i++ { + go func() { + mu := &sync.Mutex{} + go func() { + mu.Lock() + mu.Lock() + panic("should not be reached") + }() + }() + } + // Yield several times to allow the child goroutine to run. + for i := 0; i < yieldCount; i++ { + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) +} + +func RWMutexRLock() { + prof := pprof.Lookup("goroutineleak") + go func() { + mu := &sync.RWMutex{} + mu.Lock() + mu.RLock() + panic("should not be reached") + }() + // Yield several times to allow the child goroutine to run. + for i := 0; i < yieldCount; i++ { + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) +} + +func RWMutexLock() { + prof := pprof.Lookup("goroutineleak") + go func() { + mu := &sync.RWMutex{} + mu.Lock() + mu.Lock() + panic("should not be reached") + }() + // Yield several times to allow the child goroutine to run. + for i := 0; i < yieldCount; i++ { + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) +} + +func Cond() { + prof := pprof.Lookup("goroutineleak") + go func() { + cond := sync.NewCond(&sync.Mutex{}) + cond.L.Lock() + cond.Wait() + panic("should not be reached") + }() + // Yield several times to allow the child goroutine to run. + for i := 0; i < yieldCount; i++ { + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) +} + +func Mixed() { + prof := pprof.Lookup("goroutineleak") + go func() { + ch := make(chan int) + wg := sync.WaitGroup{} + wg.Add(1) + go func() { + ch <- 0 + wg.Done() + panic("should not be reached") + }() + wg.Wait() + <-ch + panic("should not be reached") + }() + // Yield several times to allow the child goroutine to run. + for i := 0; i < yieldCount; i++ { + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) +} + +var ch = make(chan int) + +// No leak should be reported by this test +func NoLeakGlobal() { + prof := pprof.Lookup("goroutineleak") + go func() { + <-ch + }() + // Yield several times to allow the child goroutine to run. + for i := 0; i < yieldCount; i++ { + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/stresstests.go b/src/runtime/testdata/testgoroutineleakprofile/stresstests.go new file mode 100644 index 00000000000000..64b535f51c87ef --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/stresstests.go @@ -0,0 +1,89 @@ +// Copyright 2025 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "io" + "os" + "runtime" + "runtime/pprof" + "sync" + "time" +) + +const spawnGCMaxDepth = 5 + +func init() { + register("SpawnGC", SpawnGC) + register("DaisyChain", DaisyChain) +} + +func spawnGC(i int) { + prof := pprof.Lookup("goroutineleak") + if i == 0 { + return + } + wg := &sync.WaitGroup{} + wg.Add(i + 1) + go func() { + wg.Done() + <-make(chan int) + }() + for j := 0; j < i; j++ { + go func() { + wg.Done() + spawnGC(i - 1) + }() + } + wg.Wait() + runtime.Gosched() + if i == spawnGCMaxDepth { + prof.WriteTo(os.Stdout, 2) + } else { + // We want to concurrently trigger the profile in order to concurrently run + // the GC, but we don't want to stream all the profiles to standard output. + // + // Only output the profile for the root call to spawnGC, and otherwise stream + // the profile outputs to /dev/null to avoid jumbling. + prof.WriteTo(io.Discard, 2) + } +} + +// SpawnGC spawns a tree of goroutine leaks and calls the goroutine leak profiler +// for each node in the tree. It is supposed to stress the goroutine leak profiler +// under a heavily concurrent workload. +func SpawnGC() { + spawnGC(spawnGCMaxDepth) +} + +// DaisyChain spawns a daisy-chain of runnable goroutines. +// +// Each goroutine in the chain creates a new channel and goroutine. +// +// This illustrates a pathological worstcase for the goroutine leak GC complexity, +// as opposed to the regular GC, which is not negatively affected by this pattern. +func DaisyChain() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(time.Second) + prof.WriteTo(os.Stdout, 2) + }() + var chain func(i int, ch chan struct{}) + chain = func(i int, ch chan struct{}) { + if i <= 0 { + go func() { + time.Sleep(time.Hour) + ch <- struct{}{} + }() + return + } + ch2 := make(chan struct{}) + go chain(i-1, ch2) + <-ch2 + ch <- struct{}{} + } + // The channel buffer avoids goroutine leaks. + go chain(1000, make(chan struct{}, 1)) +} diff --git a/src/runtime/traceback.go b/src/runtime/traceback.go index 53f41bca0b11ff..8882c306edb736 100644 --- a/src/runtime/traceback.go +++ b/src/runtime/traceback.go @@ -1206,6 +1206,7 @@ var gStatusStrings = [...]string{ _Gwaiting: "waiting", _Gdead: "dead", _Gcopystack: "copystack", + _Gleaked: "leaked", _Gpreempted: "preempted", } @@ -1226,7 +1227,7 @@ func goroutineheader(gp *g) { } // Override. 
- if gpstatus == _Gwaiting && gp.waitreason != waitReasonZero { + if (gpstatus == _Gwaiting || gpstatus == _Gleaked) && gp.waitreason != waitReasonZero { status = gp.waitreason.String() } @@ -1245,6 +1246,9 @@ func goroutineheader(gp *g) { } } print(" [", status) + if gpstatus == _Gleaked { + print(" (leaked)") + } if isScan { print(" (scan)") } diff --git a/src/runtime/tracestatus.go b/src/runtime/tracestatus.go index 03ec81fc0262a1..8b5eafd170f488 100644 --- a/src/runtime/tracestatus.go +++ b/src/runtime/tracestatus.go @@ -122,7 +122,7 @@ func goStatusToTraceGoStatus(status uint32, wr waitReason) tracev2.GoStatus { tgs = tracev2.GoRunning case _Gsyscall: tgs = tracev2.GoSyscall - case _Gwaiting, _Gpreempted: + case _Gwaiting, _Gpreempted, _Gleaked: // There are a number of cases where a G might end up in // _Gwaiting but it's actually running in a non-preemptive // state but needs to present itself as preempted to the From 707454b41fa1fea7e0286b1370dea47d3422b2cb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Daniel=20Mart=C3=AD?= Date: Sat, 30 Aug 2025 21:13:12 +0100 Subject: [PATCH 035/152] cmd/go: update `go help mod edit` with the tool and ignore sections The types were added to the docs, but not the fields in GoMod. While here, I was initially confused about what is the top-level type given that `type Module` comes first. Move `type GoMod` to the top as it is the actual top-level type. Change-Id: I1270154837501f5c7f5b21959b2841fd4ac808d0 Reviewed-on: https://go-review.googlesource.com/c/go/+/700116 Reviewed-by: Sean Liao Reviewed-by: Carlos Amedee LUCI-TryBot-Result: Go LUCI Reviewed-by: Michael Matloob Reviewed-by: Junyang Shao --- src/cmd/go/alldocs.go | 12 +++++++----- src/cmd/go/internal/modcmd/edit.go | 12 +++++++----- 2 files changed, 14 insertions(+), 10 deletions(-) diff --git a/src/cmd/go/alldocs.go b/src/cmd/go/alldocs.go index f1e1b1c333542c..19b48f0579bb29 100644 --- a/src/cmd/go/alldocs.go +++ b/src/cmd/go/alldocs.go @@ -1280,11 +1280,6 @@ // The -json flag prints the final go.mod file in JSON format instead of // writing it back to go.mod. The JSON output corresponds to these Go types: // -// type Module struct { -// Path string -// Version string -// } -// // type GoMod struct { // Module ModPath // Go string @@ -1294,6 +1289,13 @@ // Exclude []Module // Replace []Replace // Retract []Retract +// Tool []Tool +// Ignore []Ignore +// } +// +// type Module struct { +// Path string +// Version string // } // // type ModPath struct { diff --git a/src/cmd/go/internal/modcmd/edit.go b/src/cmd/go/internal/modcmd/edit.go index aafd9752a8e487..041b4432bfd4f0 100644 --- a/src/cmd/go/internal/modcmd/edit.go +++ b/src/cmd/go/internal/modcmd/edit.go @@ -104,11 +104,6 @@ writing it back to go.mod. The -json flag prints the final go.mod file in JSON format instead of writing it back to go.mod. The JSON output corresponds to these Go types: - type Module struct { - Path string - Version string - } - type GoMod struct { Module ModPath Go string @@ -118,6 +113,13 @@ writing it back to go.mod. 
The JSON output corresponds to these Go types:
 	Exclude []Module
 	Replace []Replace
 	Retract []Retract
+	Tool []Tool
+	Ignore []Ignore
+	}
+
+	type Module struct {
+	Path string
+	Version string
 	}
 
 	type ModPath struct {
From f03c392295cfd57c29c92fcc300181f8016cf5ac Mon Sep 17 00:00:00 2001
From: qmuntal
Date: Wed, 1 Oct 2025 10:59:53 +0200
Subject: [PATCH 036/152] runtime: fix aix/ppc64 library initialization

AIX sets the argc and argv parameters in R14 and R15, but _rt0_ppc64x_lib expects them to be in R3 and R4. Also, call reginit in _rt0_ppc64x_lib.

These issues were oversights from CL 706395 which went unnoticed because there is no LUCI aix/ppc64 builder (see #67299).

Change-Id: I93a2798739935fbcead3e6162b4b90db7e740aa5
Reviewed-on: https://go-review.googlesource.com/c/go/+/708255
Reviewed-by: Carlos Amedee
LUCI-TryBot-Result: Go LUCI
Reviewed-by: Junyang Shao
Reviewed-by: Paul Murphy
---
 src/runtime/asm_ppc64x.s | 3 +++
 src/runtime/rt0_aix_ppc64.s | 2 ++
 2 files changed, 5 insertions(+)

diff --git a/src/runtime/asm_ppc64x.s b/src/runtime/asm_ppc64x.s
index 3fbf11b5e9a2ad..b42e0b62f850ff 100644
--- a/src/runtime/asm_ppc64x.s
+++ b/src/runtime/asm_ppc64x.s
@@ -21,6 +21,9 @@ TEXT _rt0_ppc64x_lib(SB),NOSPLIT|NOFRAME,$0
 	MOVD R4, _rt0_ppc64x_lib_argv<>(SB)
 
 	// Synchronous initialization.
+	MOVD $runtime·reginit(SB), R12
+	MOVD R12, CTR
+	BL (CTR)
 	MOVD $runtime·libpreinit(SB), R12
 	MOVD R12, CTR
 	BL (CTR)
diff --git a/src/runtime/rt0_aix_ppc64.s b/src/runtime/rt0_aix_ppc64.s
index 32f8c72156c9ee..518fcb3b88d670 100644
--- a/src/runtime/rt0_aix_ppc64.s
+++ b/src/runtime/rt0_aix_ppc64.s
@@ -42,4 +42,6 @@ TEXT _main(SB),NOSPLIT,$-8
 	BR (CTR)
 
 TEXT _rt0_ppc64_aix_lib(SB),NOSPLIT,$0
+	MOVD R14, R3 // argc
+	MOVD R15, R4 // argv
 	JMP _rt0_ppc64x_lib(SB)
From 0e4e2e68323df08d9e4c876e5abc5b549bd247f5 Mon Sep 17 00:00:00 2001
From: Michael Anthony Knyszek
Date: Thu, 2 Oct 2025 19:15:34 +0000
Subject: [PATCH 037/152] runtime: skip TestGoroutineLeakProfile under mayMoreStackPreempt

This may be the long-term fix, but we first need to understand if this just makes the tests flaky, or if it's revealing an actual underlying issue. I'm leaning toward the former. If it is the former, ideally we just make the tests robust (wait longer, maybe?).

For now, this change will make the longtest builders OK again.

For #75729.

Change-Id: If9b30107d04a8e5af5670850add3a53f9471eec6
Reviewed-on: https://go-review.googlesource.com/c/go/+/708715
Reviewed-by: Roland Shoemaker
Reviewed-by: Michael Pratt
Auto-Submit: Michael Knyszek
LUCI-TryBot-Result: Go LUCI
Reviewed-by: Carlos Amedee
---
 src/runtime/goroutineleakprofile_test.go | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/src/runtime/goroutineleakprofile_test.go b/src/runtime/goroutineleakprofile_test.go
index 9857d725deef32..6e26bcab132831 100644
--- a/src/runtime/goroutineleakprofile_test.go
+++ b/src/runtime/goroutineleakprofile_test.go
@@ -6,12 +6,20 @@ package runtime_test
 
 import (
 	"fmt"
+	"internal/testenv"
+	"os"
 	"regexp"
 	"strings"
 	"testing"
 )
 
 func TestGoroutineLeakProfile(t *testing.T) {
+	if strings.Contains(os.Getenv("GOFLAGS"), "mayMoreStackPreempt") {
+		// Some tests have false negatives under mayMoreStackPreempt. This may be a test-only issue,
+		// but needs more investigation.
+		testenv.SkipFlaky(t, 75729)
+	}
+
 	// Goroutine leak test case.
// // Test cases can be configured with test name, the name of the entry point function, @@ -356,7 +364,7 @@ func TestGoroutineLeakProfile(t *testing.T) { `\(\*Page_hugo5379\)\.initContent\.func1\.1\(.* \[sync\.Mutex\.Lock\]`, `pageRenderer_hugo5379\(.* \[sync\.Mutex\.Lock\]`, `Hugo5379\.func2\(.* \[sync\.WaitGroup\.Wait\]`, - ), + ), makeFlakyTest("Istio16224", `Istio16224\.func2\(.* \[sync\.Mutex\.Lock\]`, `\(\*controller_istio16224\)\.Run\(.* \[chan send\]`, From 4008e07080ef215e46f48e5e2f6b5d37d6d9cb9f Mon Sep 17 00:00:00 2001 From: Damien Neil Date: Thu, 2 Oct 2025 10:45:01 -0700 Subject: [PATCH 038/152] io/fs: move path name documentation up to the package doc comment Perhaps surprisingly to users, io/fs path names are slash-separated. Move the documentation for path names up to the top of the package rather than burying it in the ValidPath documentation. Change-Id: Id338df07c74a16be74c687ac4c45e0513ee40a8c Reviewed-on: https://go-review.googlesource.com/c/go/+/708616 LUCI-TryBot-Result: Go LUCI Reviewed-by: Alan Donovan Auto-Submit: Damien Neil --- src/io/fs/fs.go | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/src/io/fs/fs.go b/src/io/fs/fs.go index 8f693f2574a5b6..fca07b818c9a3b 100644 --- a/src/io/fs/fs.go +++ b/src/io/fs/fs.go @@ -6,6 +6,19 @@ // A file system can be provided by the host operating system // but also by other packages. // +// # Path Names +// +// The interfaces in this package all operate on the same +// path name syntax, regardless of the host operating system. +// +// Path names are UTF-8-encoded, +// unrooted, slash-separated sequences of path elements, like “x/y/z”. +// Path names must not contain an element that is “.” or “..” or the empty string, +// except for the special case that the name "." may be used for the root directory. +// Paths must not start or end with a slash: “/x” and “x/” are invalid. +// +// # Testing +// // See the [testing/fstest] package for support with testing // implementations of file systems. package fs @@ -41,16 +54,13 @@ type FS interface { // ValidPath reports whether the given path name // is valid for use in a call to Open. // -// Path names passed to open are UTF-8-encoded, -// unrooted, slash-separated sequences of path elements, like “x/y/z”. -// Path names must not contain an element that is “.” or “..” or the empty string, -// except for the special case that the name "." may be used for the root directory. -// Paths must not start or end with a slash: “/x” and “x/” are invalid. -// // Note that paths are slash-separated on all systems, even Windows. // Paths containing other characters such as backslash and colon // are accepted as valid, but those characters must never be // interpreted by an [FS] implementation as path element separators. +// See the [Path Names] section for more details. +// +// [Path Names]: https://pkg.go.dev/io/fs#hdr-Path_Names func ValidPath(name string) bool { if !utf8.ValidString(name) { return false From bbdff9e8e1fca772a13acb0c4c7828cfe246d403 Mon Sep 17 00:00:00 2001 From: "Nicholas S. Husin" Date: Thu, 2 Oct 2025 12:41:25 -0400 Subject: [PATCH 039/152] net/http: update bundled x/net/http2 and delete obsolete http2inTests http2inTests is no longer needed after go.dev/cl/708135 and should be deleted. To prevent errors in future vendored dependency updates, h2_bundle.go is also updated together in this change. 
Change-Id: I7b8c3f6854203fab4ec639a2a268df0cd2b1dee7 Reviewed-on: https://go-review.googlesource.com/c/go/+/708595 Reviewed-by: Damien Neil Reviewed-by: Nicholas Husin LUCI-TryBot-Result: Go LUCI --- src/go.mod | 2 +- src/go.sum | 4 +- src/net/http/h2_bundle.go | 415 ++++++++++++++---- src/net/http/http2_test.go | 12 - .../http/internal/httpcommon/httpcommon.go | 4 +- src/net/http/socks_bundle.go | 2 +- .../golang.org/x/net/nettest/conntest.go | 6 +- src/vendor/modules.txt | 2 +- 8 files changed, 336 insertions(+), 111 deletions(-) delete mode 100644 src/net/http/http2_test.go diff --git a/src/go.mod b/src/go.mod index 4b400fe87189d1..f134f0c7b571da 100644 --- a/src/go.mod +++ b/src/go.mod @@ -4,7 +4,7 @@ go 1.26 require ( golang.org/x/crypto v0.42.0 - golang.org/x/net v0.44.0 + golang.org/x/net v0.44.1-0.20251002015445-edb764c2296f ) require ( diff --git a/src/go.sum b/src/go.sum index c90970a9cbaaad..f24bea029a2239 100644 --- a/src/go.sum +++ b/src/go.sum @@ -1,7 +1,7 @@ golang.org/x/crypto v0.42.0 h1:chiH31gIWm57EkTXpwnqf8qeuMUi0yekh6mT2AvFlqI= golang.org/x/crypto v0.42.0/go.mod h1:4+rDnOTJhQCx2q7/j6rAN5XDw8kPjeaXEUR2eL94ix8= -golang.org/x/net v0.44.0 h1:evd8IRDyfNBMBTTY5XRF1vaZlD+EmWx6x8PkhR04H/I= -golang.org/x/net v0.44.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY= +golang.org/x/net v0.44.1-0.20251002015445-edb764c2296f h1:vNklv+oJQSYNGsWXHoCPi2MHMcpj9/Q7aBhvvfnJvGg= +golang.org/x/net v0.44.1-0.20251002015445-edb764c2296f/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY= golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k= golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk= diff --git a/src/net/http/h2_bundle.go b/src/net/http/h2_bundle.go index 3abd7476f04e73..f09e102efb7b1c 100644 --- a/src/net/http/h2_bundle.go +++ b/src/net/http/h2_bundle.go @@ -1047,6 +1047,7 @@ func http2shouldRetryDial(call *http2dialCall, req *Request) bool { // - If the resulting value is zero or out of range, use a default. type http2http2Config struct { MaxConcurrentStreams uint32 + StrictMaxConcurrentRequests bool MaxDecoderHeaderTableSize uint32 MaxEncoderHeaderTableSize uint32 MaxReadFrameSize uint32 @@ -1084,12 +1085,13 @@ func http2configFromServer(h1 *Server, h2 *http2Server) http2http2Config { // (the net/http Transport). 
func http2configFromTransport(h2 *http2Transport) http2http2Config { conf := http2http2Config{ - MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize, - MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize, - MaxReadFrameSize: h2.MaxReadFrameSize, - SendPingTimeout: h2.ReadIdleTimeout, - PingTimeout: h2.PingTimeout, - WriteByteTimeout: h2.WriteByteTimeout, + StrictMaxConcurrentRequests: h2.StrictMaxConcurrentStreams, + MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize, + MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize, + MaxReadFrameSize: h2.MaxReadFrameSize, + SendPingTimeout: h2.ReadIdleTimeout, + PingTimeout: h2.PingTimeout, + WriteByteTimeout: h2.WriteByteTimeout, } // Unlike most config fields, where out-of-range values revert to the default, @@ -1148,6 +1150,9 @@ func http2fillNetHTTPConfig(conf *http2http2Config, h2 *HTTP2Config) { if h2.MaxConcurrentStreams != 0 { conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams) } + if http2http2ConfigStrictMaxConcurrentRequests(h2) { + conf.StrictMaxConcurrentRequests = true + } if h2.MaxEncoderHeaderTableSize != 0 { conf.MaxEncoderHeaderTableSize = uint32(h2.MaxEncoderHeaderTableSize) } @@ -1183,6 +1188,10 @@ func http2fillNetHTTPConfig(conf *http2http2Config, h2 *HTTP2Config) { } } +func http2http2ConfigStrictMaxConcurrentRequests(h2 *HTTP2Config) bool { + return h2.StrictMaxConcurrentRequests +} + // Buffer chunks are allocated from a pool to reduce pressure on GC. // The maximum wasted space per dataBuffer is 2x the largest size class, // which happens when the dataBuffer has multiple chunks and there is @@ -1900,7 +1909,7 @@ func (fr *http2Framer) maxHeaderListSize() uint32 { func (f *http2Framer) startWrite(ftype http2FrameType, flags http2Flags, streamID uint32) { // Write the FrameHeader. f.wbuf = append(f.wbuf[:0], - 0, // 3 bytes of length, filled in in endWrite + 0, // 3 bytes of length, filled in endWrite 0, 0, byte(ftype), @@ -2708,6 +2717,15 @@ type http2PriorityFrame struct { http2PriorityParam } +var http2defaultRFC9218Priority = http2PriorityParam{ + incremental: 0, + urgency: 3, +} + +// Note that HTTP/2 has had two different prioritization schemes, and +// PriorityParam struct below is a superset of both schemes. The exported +// symbols are from RFC 7540 and the non-exported ones are from RFC 9218. + // PriorityParam are the stream prioritzation parameters. type http2PriorityParam struct { // StreamDep is a 31-bit stream identifier for the @@ -2723,6 +2741,20 @@ type http2PriorityParam struct { // the spec, "Add one to the value to obtain a weight between // 1 and 256." Weight uint8 + + // "The urgency (u) parameter value is Integer (see Section 3.3.1 of + // [STRUCTURED-FIELDS]), between 0 and 7 inclusive, in descending order of + // priority. The default is 3." + urgency uint8 + + // "The incremental (i) parameter value is Boolean (see Section 3.3.6 of + // [STRUCTURED-FIELDS]). It indicates if an HTTP response can be processed + // incrementally, i.e., provide some meaningful output as chunks of the + // response arrive." + // + // We use uint8 (i.e. 0 is false, 1 is true) instead of bool so we can + // avoid unnecessary type conversions and because either type takes 1 byte. + incremental uint8 } func (p http2PriorityParam) IsZero() bool { @@ -3423,7 +3455,6 @@ var ( http2VerboseLogs bool http2logFrameWrites bool http2logFrameReads bool - http2inTests bool // Enabling extended CONNECT by causes browsers to attempt to use // WebSockets-over-HTTP/2. 
This results in problems when the server's websocket @@ -4103,6 +4134,10 @@ type http2Server struct { type http2serverInternalState struct { mu sync.Mutex activeConns map[*http2serverConn]struct{} + + // Pool of error channels. This is per-Server rather than global + // because channels can't be reused across synctest bubbles. + errChanPool sync.Pool } func (s *http2serverInternalState) registerConn(sc *http2serverConn) { @@ -4134,6 +4169,27 @@ func (s *http2serverInternalState) startGracefulShutdown() { s.mu.Unlock() } +// Global error channel pool used for uninitialized Servers. +// We use a per-Server pool when possible to avoid using channels across synctest bubbles. +var http2errChanPool = sync.Pool{ + New: func() any { return make(chan error, 1) }, +} + +func (s *http2serverInternalState) getErrChan() chan error { + if s == nil { + return http2errChanPool.Get().(chan error) // Server used without calling ConfigureServer + } + return s.errChanPool.Get().(chan error) +} + +func (s *http2serverInternalState) putErrChan(ch chan error) { + if s == nil { + http2errChanPool.Put(ch) // Server used without calling ConfigureServer + return + } + s.errChanPool.Put(ch) +} + // ConfigureServer adds HTTP/2 support to a net/http Server. // // The configuration conf may be nil. @@ -4146,7 +4202,10 @@ func http2ConfigureServer(s *Server, conf *http2Server) error { if conf == nil { conf = new(http2Server) } - conf.state = &http2serverInternalState{activeConns: make(map[*http2serverConn]struct{})} + conf.state = &http2serverInternalState{ + activeConns: make(map[*http2serverConn]struct{}), + errChanPool: sync.Pool{New: func() any { return make(chan error, 1) }}, + } if h1, h2 := s, conf; h2.IdleTimeout == 0 { if h1.IdleTimeout != 0 { h2.IdleTimeout = h1.IdleTimeout @@ -5052,25 +5111,6 @@ func (sc *http2serverConn) readPreface() error { } } -var http2errChanPool = sync.Pool{ - New: func() interface{} { return make(chan error, 1) }, -} - -func http2getErrChan() chan error { - if http2inTests { - // Channels cannot be reused across synctest tests. - return make(chan error, 1) - } else { - return http2errChanPool.Get().(chan error) - } -} - -func http2putErrChan(ch chan error) { - if !http2inTests { - http2errChanPool.Put(ch) - } -} - var http2writeDataPool = sync.Pool{ New: func() interface{} { return new(http2writeData) }, } @@ -5078,7 +5118,7 @@ var http2writeDataPool = sync.Pool{ // writeDataFromHandler writes DATA response frames from a handler on // the given stream. func (sc *http2serverConn) writeDataFromHandler(stream *http2stream, data []byte, endStream bool) error { - ch := http2getErrChan() + ch := sc.srv.state.getErrChan() writeArg := http2writeDataPool.Get().(*http2writeData) *writeArg = http2writeData{stream.id, data, endStream} err := sc.writeFrameFromHandler(http2FrameWriteRequest{ @@ -5110,7 +5150,7 @@ func (sc *http2serverConn) writeDataFromHandler(stream *http2stream, data []byte return http2errStreamClosed } } - http2putErrChan(ch) + sc.srv.state.putErrChan(ch) if frameWriteDone { http2writeDataPool.Put(writeArg) } @@ -6364,7 +6404,7 @@ func (sc *http2serverConn) writeHeaders(st *http2stream, headerData *http2writeR // waiting for this frame to be written, so an http.Flush mid-handler // writes out the correct value of keys, before a handler later potentially // mutates it. 
- errc = http2getErrChan() + errc = sc.srv.state.getErrChan() } if err := sc.writeFrameFromHandler(http2FrameWriteRequest{ write: headerData, @@ -6376,7 +6416,7 @@ func (sc *http2serverConn) writeHeaders(st *http2stream, headerData *http2writeR if errc != nil { select { case err := <-errc: - http2putErrChan(errc) + sc.srv.state.putErrChan(errc) return err case <-sc.doneServing: return http2errClientDisconnected @@ -7057,7 +7097,7 @@ func (w *http2responseWriter) Push(target string, opts *PushOptions) error { method: opts.Method, url: u, header: http2cloneHeader(opts.Header), - done: http2getErrChan(), + done: sc.srv.state.getErrChan(), } select { @@ -7074,7 +7114,7 @@ func (w *http2responseWriter) Push(target string, opts *PushOptions) error { case <-st.cw: return http2errStreamClosed case err := <-msg.done: - http2putErrChan(msg.done) + sc.srv.state.putErrChan(msg.done) return err } } @@ -7577,6 +7617,7 @@ type http2ClientConn struct { readIdleTimeout time.Duration pingTimeout time.Duration extendedConnectAllowed bool + strictMaxConcurrentStreams bool // rstStreamPingsBlocked works around an unfortunate gRPC behavior. // gRPC strictly limits the number of PING frames that it will receive. @@ -8007,7 +8048,8 @@ func (t *http2Transport) newClientConn(c net.Conn, singleUse bool) (*http2Client initialWindowSize: 65535, // spec default initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream, maxConcurrentStreams: http2initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings. - peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. + strictMaxConcurrentStreams: conf.StrictMaxConcurrentRequests, + peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. streams: make(map[uint32]*http2clientStream), singleUse: singleUse, seenSettingsChan: make(chan struct{}), @@ -8241,7 +8283,7 @@ func (cc *http2ClientConn) idleStateLocked() (st http2clientConnIdleState) { return } var maxConcurrentOkay bool - if cc.t.StrictMaxConcurrentStreams { + if cc.strictMaxConcurrentStreams { // We'll tell the caller we can take a new request to // prevent the caller from dialing a new TCP // connection, but then we'll block later before @@ -10884,6 +10926,8 @@ type http2OpenStreamOptions struct { // PusherID is zero if the stream was initiated by the client. Otherwise, // PusherID names the stream that pushed the newly opened stream. PusherID uint32 + // priority is used to set the priority of the newly opened stream. + priority http2PriorityParam } // FrameWriteRequest is a request to write a frame. @@ -11095,7 +11139,7 @@ func (p *http2writeQueuePool) get() *http2writeQueue { } // RFC 7540, Section 5.3.5: the default weight is 16. -const http2priorityDefaultWeight = 15 // 16 = 15 + 1 +const http2priorityDefaultWeightRFC7540 = 15 // 16 = 15 + 1 // PriorityWriteSchedulerConfig configures a priorityWriteScheduler. 
type http2PriorityWriteSchedulerConfig struct { @@ -11150,8 +11194,8 @@ func http2NewPriorityWriteScheduler(cfg *http2PriorityWriteSchedulerConfig) http } } - ws := &http2priorityWriteScheduler{ - nodes: make(map[uint32]*http2priorityNode), + ws := &http2priorityWriteSchedulerRFC7540{ + nodes: make(map[uint32]*http2priorityNodeRFC7540), maxClosedNodesInTree: cfg.MaxClosedNodesInTree, maxIdleNodesInTree: cfg.MaxIdleNodesInTree, enableWriteThrottle: cfg.ThrottleOutOfOrderWrites, @@ -11165,32 +11209,32 @@ func http2NewPriorityWriteScheduler(cfg *http2PriorityWriteSchedulerConfig) http return ws } -type http2priorityNodeState int +type http2priorityNodeStateRFC7540 int const ( - http2priorityNodeOpen http2priorityNodeState = iota - http2priorityNodeClosed - http2priorityNodeIdle + http2priorityNodeOpenRFC7540 http2priorityNodeStateRFC7540 = iota + http2priorityNodeClosedRFC7540 + http2priorityNodeIdleRFC7540 ) -// priorityNode is a node in an HTTP/2 priority tree. +// priorityNodeRFC7540 is a node in an HTTP/2 priority tree. // Each node is associated with a single stream ID. // See RFC 7540, Section 5.3. -type http2priorityNode struct { - q http2writeQueue // queue of pending frames to write - id uint32 // id of the stream, or 0 for the root of the tree - weight uint8 // the actual weight is weight+1, so the value is in [1,256] - state http2priorityNodeState // open | closed | idle - bytes int64 // number of bytes written by this node, or 0 if closed - subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree +type http2priorityNodeRFC7540 struct { + q http2writeQueue // queue of pending frames to write + id uint32 // id of the stream, or 0 for the root of the tree + weight uint8 // the actual weight is weight+1, so the value is in [1,256] + state http2priorityNodeStateRFC7540 // open | closed | idle + bytes int64 // number of bytes written by this node, or 0 if closed + subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree // These links form the priority tree. - parent *http2priorityNode - kids *http2priorityNode // start of the kids list - prev, next *http2priorityNode // doubly-linked list of siblings + parent *http2priorityNodeRFC7540 + kids *http2priorityNodeRFC7540 // start of the kids list + prev, next *http2priorityNodeRFC7540 // doubly-linked list of siblings } -func (n *http2priorityNode) setParent(parent *http2priorityNode) { +func (n *http2priorityNodeRFC7540) setParent(parent *http2priorityNodeRFC7540) { if n == parent { panic("setParent to self") } @@ -11225,7 +11269,7 @@ func (n *http2priorityNode) setParent(parent *http2priorityNode) { } } -func (n *http2priorityNode) addBytes(b int64) { +func (n *http2priorityNodeRFC7540) addBytes(b int64) { n.bytes += b for ; n != nil; n = n.parent { n.subtreeBytes += b @@ -11238,7 +11282,7 @@ func (n *http2priorityNode) addBytes(b int64) { // // f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true // if any ancestor p of n is still open (ignoring the root node). 
-func (n *http2priorityNode) walkReadyInOrder(openParent bool, tmp *[]*http2priorityNode, f func(*http2priorityNode, bool) bool) bool { +func (n *http2priorityNodeRFC7540) walkReadyInOrder(openParent bool, tmp *[]*http2priorityNodeRFC7540, f func(*http2priorityNodeRFC7540, bool) bool) bool { if !n.q.empty() && f(n, openParent) { return true } @@ -11249,7 +11293,7 @@ func (n *http2priorityNode) walkReadyInOrder(openParent bool, tmp *[]*http2prior // Don't consider the root "open" when updating openParent since // we can't send data frames on the root stream (only control frames). if n.id != 0 { - openParent = openParent || (n.state == http2priorityNodeOpen) + openParent = openParent || (n.state == http2priorityNodeOpenRFC7540) } // Common case: only one kid or all kids have the same weight. @@ -11279,7 +11323,7 @@ func (n *http2priorityNode) walkReadyInOrder(openParent bool, tmp *[]*http2prior *tmp = append(*tmp, n.kids) n.kids.setParent(nil) } - sort.Sort(http2sortPriorityNodeSiblings(*tmp)) + sort.Sort(http2sortPriorityNodeSiblingsRFC7540(*tmp)) for i := len(*tmp) - 1; i >= 0; i-- { (*tmp)[i].setParent(n) // setParent inserts at the head of n.kids } @@ -11291,13 +11335,13 @@ func (n *http2priorityNode) walkReadyInOrder(openParent bool, tmp *[]*http2prior return false } -type http2sortPriorityNodeSiblings []*http2priorityNode +type http2sortPriorityNodeSiblingsRFC7540 []*http2priorityNodeRFC7540 -func (z http2sortPriorityNodeSiblings) Len() int { return len(z) } +func (z http2sortPriorityNodeSiblingsRFC7540) Len() int { return len(z) } -func (z http2sortPriorityNodeSiblings) Swap(i, k int) { z[i], z[k] = z[k], z[i] } +func (z http2sortPriorityNodeSiblingsRFC7540) Swap(i, k int) { z[i], z[k] = z[k], z[i] } -func (z http2sortPriorityNodeSiblings) Less(i, k int) bool { +func (z http2sortPriorityNodeSiblingsRFC7540) Less(i, k int) bool { // Prefer the subtree that has sent fewer bytes relative to its weight. // See sections 5.3.2 and 5.3.4. wi, bi := float64(z[i].weight+1), float64(z[i].subtreeBytes) @@ -11311,13 +11355,13 @@ func (z http2sortPriorityNodeSiblings) Less(i, k int) bool { return bi/bk <= wi/wk } -type http2priorityWriteScheduler struct { +type http2priorityWriteSchedulerRFC7540 struct { // root is the root of the priority tree, where root.id = 0. // The root queues control frames that are not associated with any stream. - root http2priorityNode + root http2priorityNodeRFC7540 // nodes maps stream ids to priority tree nodes. - nodes map[uint32]*http2priorityNode + nodes map[uint32]*http2priorityNodeRFC7540 // maxID is the maximum stream id in nodes. maxID uint32 @@ -11325,7 +11369,7 @@ type http2priorityWriteScheduler struct { // lists of nodes that have been closed or are idle, but are kept in // the tree for improved prioritization. When the lengths exceed either // maxClosedNodesInTree or maxIdleNodesInTree, old nodes are discarded. - closedNodes, idleNodes []*http2priorityNode + closedNodes, idleNodes []*http2priorityNodeRFC7540 // From the config. maxClosedNodesInTree int @@ -11334,19 +11378,19 @@ type http2priorityWriteScheduler struct { enableWriteThrottle bool // tmp is scratch space for priorityNode.walkReadyInOrder to reduce allocations. - tmp []*http2priorityNode + tmp []*http2priorityNodeRFC7540 // pool of empty queues for reuse. 
queuePool http2writeQueuePool } -func (ws *http2priorityWriteScheduler) OpenStream(streamID uint32, options http2OpenStreamOptions) { +func (ws *http2priorityWriteSchedulerRFC7540) OpenStream(streamID uint32, options http2OpenStreamOptions) { // The stream may be currently idle but cannot be opened or closed. if curr := ws.nodes[streamID]; curr != nil { - if curr.state != http2priorityNodeIdle { + if curr.state != http2priorityNodeIdleRFC7540 { panic(fmt.Sprintf("stream %d already opened", streamID)) } - curr.state = http2priorityNodeOpen + curr.state = http2priorityNodeOpenRFC7540 return } @@ -11358,11 +11402,11 @@ func (ws *http2priorityWriteScheduler) OpenStream(streamID uint32, options http2 if parent == nil { parent = &ws.root } - n := &http2priorityNode{ + n := &http2priorityNodeRFC7540{ q: *ws.queuePool.get(), id: streamID, - weight: http2priorityDefaultWeight, - state: http2priorityNodeOpen, + weight: http2priorityDefaultWeightRFC7540, + state: http2priorityNodeOpenRFC7540, } n.setParent(parent) ws.nodes[streamID] = n @@ -11371,19 +11415,19 @@ func (ws *http2priorityWriteScheduler) OpenStream(streamID uint32, options http2 } } -func (ws *http2priorityWriteScheduler) CloseStream(streamID uint32) { +func (ws *http2priorityWriteSchedulerRFC7540) CloseStream(streamID uint32) { if streamID == 0 { panic("violation of WriteScheduler interface: cannot close stream 0") } if ws.nodes[streamID] == nil { panic(fmt.Sprintf("violation of WriteScheduler interface: unknown stream %d", streamID)) } - if ws.nodes[streamID].state != http2priorityNodeOpen { + if ws.nodes[streamID].state != http2priorityNodeOpenRFC7540 { panic(fmt.Sprintf("violation of WriteScheduler interface: stream %d already closed", streamID)) } n := ws.nodes[streamID] - n.state = http2priorityNodeClosed + n.state = http2priorityNodeClosedRFC7540 n.addBytes(-n.bytes) q := n.q @@ -11396,7 +11440,7 @@ func (ws *http2priorityWriteScheduler) CloseStream(streamID uint32) { } } -func (ws *http2priorityWriteScheduler) AdjustStream(streamID uint32, priority http2PriorityParam) { +func (ws *http2priorityWriteSchedulerRFC7540) AdjustStream(streamID uint32, priority http2PriorityParam) { if streamID == 0 { panic("adjustPriority on root") } @@ -11410,11 +11454,11 @@ func (ws *http2priorityWriteScheduler) AdjustStream(streamID uint32, priority ht return } ws.maxID = streamID - n = &http2priorityNode{ + n = &http2priorityNodeRFC7540{ q: *ws.queuePool.get(), id: streamID, - weight: http2priorityDefaultWeight, - state: http2priorityNodeIdle, + weight: http2priorityDefaultWeightRFC7540, + state: http2priorityNodeIdleRFC7540, } n.setParent(&ws.root) ws.nodes[streamID] = n @@ -11426,7 +11470,7 @@ func (ws *http2priorityWriteScheduler) AdjustStream(streamID uint32, priority ht parent := ws.nodes[priority.StreamDep] if parent == nil { n.setParent(&ws.root) - n.weight = http2priorityDefaultWeight + n.weight = http2priorityDefaultWeightRFC7540 return } @@ -11467,8 +11511,8 @@ func (ws *http2priorityWriteScheduler) AdjustStream(streamID uint32, priority ht n.weight = priority.Weight } -func (ws *http2priorityWriteScheduler) Push(wr http2FrameWriteRequest) { - var n *http2priorityNode +func (ws *http2priorityWriteSchedulerRFC7540) Push(wr http2FrameWriteRequest) { + var n *http2priorityNodeRFC7540 if wr.isControl() { n = &ws.root } else { @@ -11487,8 +11531,8 @@ func (ws *http2priorityWriteScheduler) Push(wr http2FrameWriteRequest) { n.q.push(wr) } -func (ws *http2priorityWriteScheduler) Pop() (wr http2FrameWriteRequest, ok bool) { - 
ws.root.walkReadyInOrder(false, &ws.tmp, func(n *http2priorityNode, openParent bool) bool { +func (ws *http2priorityWriteSchedulerRFC7540) Pop() (wr http2FrameWriteRequest, ok bool) { + ws.root.walkReadyInOrder(false, &ws.tmp, func(n *http2priorityNodeRFC7540, openParent bool) bool { limit := int32(math.MaxInt32) if openParent { limit = ws.writeThrottleLimit @@ -11514,7 +11558,7 @@ func (ws *http2priorityWriteScheduler) Pop() (wr http2FrameWriteRequest, ok bool return wr, ok } -func (ws *http2priorityWriteScheduler) addClosedOrIdleNode(list *[]*http2priorityNode, maxSize int, n *http2priorityNode) { +func (ws *http2priorityWriteSchedulerRFC7540) addClosedOrIdleNode(list *[]*http2priorityNodeRFC7540, maxSize int, n *http2priorityNodeRFC7540) { if maxSize == 0 { return } @@ -11528,7 +11572,7 @@ func (ws *http2priorityWriteScheduler) addClosedOrIdleNode(list *[]*http2priorit *list = append(*list, n) } -func (ws *http2priorityWriteScheduler) removeNode(n *http2priorityNode) { +func (ws *http2priorityWriteSchedulerRFC7540) removeNode(n *http2priorityNodeRFC7540) { for n.kids != nil { n.kids.setParent(n.parent) } @@ -11536,6 +11580,199 @@ func (ws *http2priorityWriteScheduler) removeNode(n *http2priorityNode) { delete(ws.nodes, n.id) } +type http2streamMetadata struct { + location *http2writeQueue + priority http2PriorityParam +} + +type http2priorityWriteSchedulerRFC9218 struct { + // control contains control frames (SETTINGS, PING, etc.). + control http2writeQueue + + // heads contain the head of a circular list of streams. + // We put these heads within a nested array that represents urgency and + // incremental, as defined in + // https://www.rfc-editor.org/rfc/rfc9218.html#name-priority-parameters. + // 8 represents u=0 up to u=7, and 2 represents i=false and i=true. + heads [8][2]*http2writeQueue + + // streams contains a mapping between each stream ID and their metadata, so + // we can quickly locate them when needing to, for example, adjust their + // priority. + streams map[uint32]http2streamMetadata + + // queuePool are empty queues for reuse. + queuePool http2writeQueuePool + + // prioritizeIncremental is used to determine whether we should prioritize + // incremental streams or not, when urgency is the same in a given Pop() + // call. + prioritizeIncremental bool +} + +func http2newPriorityWriteSchedulerRFC9128() http2WriteScheduler { + ws := &http2priorityWriteSchedulerRFC9218{ + streams: make(map[uint32]http2streamMetadata), + } + return ws +} + +func (ws *http2priorityWriteSchedulerRFC9218) OpenStream(streamID uint32, opt http2OpenStreamOptions) { + if ws.streams[streamID].location != nil { + panic(fmt.Errorf("stream %d already opened", streamID)) + } + q := ws.queuePool.get() + ws.streams[streamID] = http2streamMetadata{ + location: q, + priority: opt.priority, + } + + u, i := opt.priority.urgency, opt.priority.incremental + if ws.heads[u][i] == nil { + ws.heads[u][i] = q + q.next = q + q.prev = q + } else { + // Queues are stored in a ring. + // Insert the new stream before ws.head, putting it at the end of the list. + q.prev = ws.heads[u][i].prev + q.next = ws.heads[u][i] + q.prev.next = q + q.next.prev = q + } +} + +func (ws *http2priorityWriteSchedulerRFC9218) CloseStream(streamID uint32) { + metadata := ws.streams[streamID] + q, u, i := metadata.location, metadata.priority.urgency, metadata.priority.incremental + if q == nil { + return + } + if q.next == q { + // This was the only open stream. 
+ ws.heads[u][i] = nil + } else { + q.prev.next = q.next + q.next.prev = q.prev + if ws.heads[u][i] == q { + ws.heads[u][i] = q.next + } + } + delete(ws.streams, streamID) + ws.queuePool.put(q) +} + +func (ws *http2priorityWriteSchedulerRFC9218) AdjustStream(streamID uint32, priority http2PriorityParam) { + metadata := ws.streams[streamID] + q, u, i := metadata.location, metadata.priority.urgency, metadata.priority.incremental + if q == nil { + return + } + + // Remove stream from current location. + if q.next == q { + // This was the only open stream. + ws.heads[u][i] = nil + } else { + q.prev.next = q.next + q.next.prev = q.prev + if ws.heads[u][i] == q { + ws.heads[u][i] = q.next + } + } + + // Insert stream to the new queue. + u, i = priority.urgency, priority.incremental + if ws.heads[u][i] == nil { + ws.heads[u][i] = q + q.next = q + q.prev = q + } else { + // Queues are stored in a ring. + // Insert the new stream before ws.head, putting it at the end of the list. + q.prev = ws.heads[u][i].prev + q.next = ws.heads[u][i] + q.prev.next = q + q.next.prev = q + } +} + +func (ws *http2priorityWriteSchedulerRFC9218) Push(wr http2FrameWriteRequest) { + if wr.isControl() { + ws.control.push(wr) + return + } + q := ws.streams[wr.StreamID()].location + if q == nil { + // This is a closed stream. + // wr should not be a HEADERS or DATA frame. + // We push the request onto the control queue. + if wr.DataSize() > 0 { + panic("add DATA on non-open stream") + } + ws.control.push(wr) + return + } + q.push(wr) +} + +func (ws *http2priorityWriteSchedulerRFC9218) Pop() (http2FrameWriteRequest, bool) { + // Control and RST_STREAM frames first. + if !ws.control.empty() { + return ws.control.shift(), true + } + + // On the next Pop(), we want to prioritize incremental if we prioritized + // non-incremental request of the same urgency this time. Vice-versa. + // i.e. when there are incremental and non-incremental requests at the same + // priority, we give 50% of our bandwidth to the incremental ones in + // aggregate and 50% to the first non-incremental one (since + // non-incremental streams do not use round-robin writes). + ws.prioritizeIncremental = !ws.prioritizeIncremental + + // Always prioritize lowest u (i.e. highest urgency level). + for u := range ws.heads { + for i := range ws.heads[u] { + // When we want to prioritize incremental, we try to pop i=true + // first before i=false when u is the same. + if ws.prioritizeIncremental { + i = (i + 1) % 2 + } + q := ws.heads[u][i] + if q == nil { + continue + } + for { + if wr, ok := q.consume(math.MaxInt32); ok { + if i == 1 { + // For incremental streams, we update head to q.next so + // we can round-robin between multiple streams that can + // immediately benefit from partial writes. + ws.heads[u][i] = q.next + } else { + // For non-incremental streams, we try to finish one to + // completion rather than doing round-robin. However, + // we update head here so that if q.consume() is !ok + // (e.g. the stream has no more frame to consume), head + // is updated to the next q that has frames to consume + // on future iterations. This way, we do not prioritize + // writing to unavailable stream on next Pop() calls, + // preventing head-of-line blocking. + ws.heads[u][i] = q + } + return wr, true + } + q = q.next + if q == ws.heads[u][i] { + break + } + } + + } + } + return http2FrameWriteRequest{}, false +} + // NewRandomWriteScheduler constructs a WriteScheduler that ignores HTTP/2 // priorities. 
Control frames like SETTINGS and PING are written before DATA // frames, but if no control frames are queued and multiple streams have queued @@ -11622,7 +11859,7 @@ type http2roundRobinWriteScheduler struct { } // newRoundRobinWriteScheduler constructs a new write scheduler. -// The round robin scheduler priorizes control frames +// The round robin scheduler prioritizes control frames // like SETTINGS and PING over DATA frames. // When there are no control frames to send, it performs a round-robin // selection from the ready streams. diff --git a/src/net/http/http2_test.go b/src/net/http/http2_test.go deleted file mode 100644 index 7841b05b593400..00000000000000 --- a/src/net/http/http2_test.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2025 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !nethttpomithttp2 - -package http - -func init() { - // Disable HTTP/2 internal channel pooling which interferes with synctest. - http2inTests = true -} diff --git a/src/net/http/internal/httpcommon/httpcommon.go b/src/net/http/internal/httpcommon/httpcommon.go index 5e0c24035aba20..e3f8ec79e094d3 100644 --- a/src/net/http/internal/httpcommon/httpcommon.go +++ b/src/net/http/internal/httpcommon/httpcommon.go @@ -202,7 +202,7 @@ type EncodeHeadersParam struct { DefaultUserAgent string } -// EncodeHeadersParam is the result of EncodeHeaders. +// EncodeHeadersResult is the result of EncodeHeaders. type EncodeHeadersResult struct { HasBody bool HasTrailers bool @@ -550,7 +550,7 @@ type ServerRequestResult struct { // If the request should be rejected, this is a short string suitable for passing // to the http2 package's CountError function. - // It might be a bit odd to return errors this way rather than returing an error, + // It might be a bit odd to return errors this way rather than returning an error, // but this ensures we don't forget to include a CountError reason. InvalidReason string } diff --git a/src/net/http/socks_bundle.go b/src/net/http/socks_bundle.go index 776b03d941a405..862e5fa2a5165d 100644 --- a/src/net/http/socks_bundle.go +++ b/src/net/http/socks_bundle.go @@ -453,7 +453,7 @@ func (up *socksUsernamePassword) Authenticate(ctx context.Context, rw io.ReadWri b = append(b, up.Username...) b = append(b, byte(len(up.Password))) b = append(b, up.Password...) - // TODO(mikio): handle IO deadlines and cancelation if + // TODO(mikio): handle IO deadlines and cancellation if // necessary if _, err := rw.Write(b); err != nil { return err diff --git a/src/vendor/golang.org/x/net/nettest/conntest.go b/src/vendor/golang.org/x/net/nettest/conntest.go index 4297d408c0477c..8b98dfe21c5d30 100644 --- a/src/vendor/golang.org/x/net/nettest/conntest.go +++ b/src/vendor/golang.org/x/net/nettest/conntest.go @@ -142,7 +142,7 @@ func testPingPong(t *testing.T, c1, c2 net.Conn) { } // testRacyRead tests that it is safe to mutate the input Read buffer -// immediately after cancelation has occurred. +// immediately after cancellation has occurred. func testRacyRead(t *testing.T, c1, c2 net.Conn) { go chunkedCopy(c2, rand.New(rand.NewSource(0))) @@ -170,7 +170,7 @@ func testRacyRead(t *testing.T, c1, c2 net.Conn) { } // testRacyWrite tests that it is safe to mutate the input Write buffer -// immediately after cancelation has occurred. +// immediately after cancellation has occurred. 
func testRacyWrite(t *testing.T, c1, c2 net.Conn) { go chunkedCopy(io.Discard, c2) @@ -318,7 +318,7 @@ func testCloseTimeout(t *testing.T, c1, c2 net.Conn) { defer wg.Wait() wg.Add(3) - // Test for cancelation upon connection closure. + // Test for cancellation upon connection closure. c1.SetDeadline(neverTimeout) go func() { defer wg.Done() diff --git a/src/vendor/modules.txt b/src/vendor/modules.txt index cf5c27c74579d0..a2a0c0b3e85f23 100644 --- a/src/vendor/modules.txt +++ b/src/vendor/modules.txt @@ -6,7 +6,7 @@ golang.org/x/crypto/cryptobyte golang.org/x/crypto/cryptobyte/asn1 golang.org/x/crypto/internal/alias golang.org/x/crypto/internal/poly1305 -# golang.org/x/net v0.44.0 +# golang.org/x/net v0.44.1-0.20251002015445-edb764c2296f ## explicit; go 1.24.0 golang.org/x/net/dns/dnsmessage golang.org/x/net/http/httpguts From 53845004d647e16b3de7c74e50cffaca77e028e9 Mon Sep 17 00:00:00 2001 From: Damien Neil Date: Thu, 2 Oct 2025 09:42:57 -0700 Subject: [PATCH 040/152] net/http/httputil: deprecate ReverseProxy.Director The Director function has been superseded by Rewrite. Rewrite avoids fundamental security issues with hop-by-hop header handling in the Director API and has better default handling of X-Forwarded-* headers. Fixes #73161 Change-Id: Iadaf3070e0082458f79fb892ade51cb7ce832802 Reviewed-on: https://go-review.googlesource.com/c/go/+/708615 Reviewed-by: Nicholas Husin LUCI-TryBot-Result: Go LUCI Reviewed-by: Nicholas Husin --- api/next/73161.txt | 1 + .../99-minor/net/http/httputil/73161.md | 11 ++ src/net/http/httputil/reverseproxy.go | 116 +++++++++++++----- 3 files changed, 98 insertions(+), 30 deletions(-) create mode 100644 api/next/73161.txt create mode 100644 doc/next/6-stdlib/99-minor/net/http/httputil/73161.md diff --git a/api/next/73161.txt b/api/next/73161.txt new file mode 100644 index 00000000000000..86526b597a9463 --- /dev/null +++ b/api/next/73161.txt @@ -0,0 +1 @@ +pkg net/http/httputil, type ReverseProxy struct, Director //deprecated #73161 diff --git a/doc/next/6-stdlib/99-minor/net/http/httputil/73161.md b/doc/next/6-stdlib/99-minor/net/http/httputil/73161.md new file mode 100644 index 00000000000000..f6318f85534746 --- /dev/null +++ b/doc/next/6-stdlib/99-minor/net/http/httputil/73161.md @@ -0,0 +1,11 @@ +The [ReverseProxy.Director] configuration field is deprecated +in favor of [ReverseProxy.Rewrite]. + +A malicious client can remove headers added by a `Director` function +by designating those headers as hop-by-hop. Since there is no way to address +this problem within the scope of the `Director` API, we added a new +`Rewrite` hook in Go 1.20. `Rewrite` hooks are provided with both the +unmodified inbound request received by the proxy and the outbound request +which will be sent by the proxy. + +Since the `Director` hook is fundamentally unsafe, we are now deprecating it. diff --git a/src/net/http/httputil/reverseproxy.go b/src/net/http/httputil/reverseproxy.go index 6ed4930727404b..9d8784cd2bc7a9 100644 --- a/src/net/http/httputil/reverseproxy.go +++ b/src/net/http/httputil/reverseproxy.go @@ -133,36 +133,6 @@ type ReverseProxy struct { // At most one of Rewrite or Director may be set. Rewrite func(*ProxyRequest) - // Director is a function which modifies - // the request into a new request to be sent - // using Transport. Its response is then copied - // back to the original client unmodified. - // Director must not access the provided Request - // after returning. 
- // - // By default, the X-Forwarded-For header is set to the - // value of the client IP address. If an X-Forwarded-For - // header already exists, the client IP is appended to the - // existing values. As a special case, if the header - // exists in the Request.Header map but has a nil value - // (such as when set by the Director func), the X-Forwarded-For - // header is not modified. - // - // To prevent IP spoofing, be sure to delete any pre-existing - // X-Forwarded-For header coming from the client or - // an untrusted proxy. - // - // Hop-by-hop headers are removed from the request after - // Director returns, which can remove headers added by - // Director. Use a Rewrite function instead to ensure - // modifications to the request are preserved. - // - // Unparsable query parameters are removed from the outbound - // request if Request.Form is set after Director returns. - // - // At most one of Rewrite or Director may be set. - Director func(*http.Request) - // The transport used to perform proxy requests. // If nil, http.DefaultTransport is used. Transport http.RoundTripper @@ -210,6 +180,88 @@ type ReverseProxy struct { // If nil, the default is to log the provided error and return // a 502 Status Bad Gateway response. ErrorHandler func(http.ResponseWriter, *http.Request, error) + + // Director is deprecated. Use Rewrite instead. + // + // This function is insecure: + // + // - Hop-by-hop headers are removed from the request after Director + // returns, which can remove headers added by Director. + // A client can designate headers as hop-by-hop by listing them + // in the Connection header, so this permits a malicious client + // to remove any headers that may be added by Director. + // + // - X-Forwarded-For, X-Forwarded-Host, and X-Forwarded-Proto + // headers in inbound requests are preserved by default, + // which can permit IP spoofing if the Director function is + // not careful to remove these headers. + // + // Rewrite addresses these issues. + // + // As an example of converting a Director function to Rewrite: + // + // // ReverseProxy with a Director function. + // proxy := &httputil.ReverseProxy{ + // Director: func(req *http.Request) { + // req.URL.Scheme = "https" + // req.URL.Host = proxyHost + // + // // A malicious client can remove this header. + // req.Header.Set("Some-Header", "some-header-value") + // + // // X-Forwarded-* headers sent by the client are preserved, + // // since Director did not remove them. + // }, + // } + // + // // ReverseProxy with a Rewrite function. + // proxy := &httputil.ReverseProxy{ + // Rewrite: func(preq *httputil.ProxyRequest) { + // // See also ProxyRequest.SetURL. + // preq.Out.URL.Scheme = "https" + // preq.Out.URL.Host = proxyHost + // + // // This header cannot be affected by a malicious client. + // preq.Out.Header.Set("Some-Header", "some-header-value") + // + // // X-Forwarded- headers sent by the client have been + // // removed from preq.Out. + // // ProxyRequest.SetXForwarded optionally adds new ones. + // preq.SetXForwarded() + // }, + // } + // + // Director is a function which modifies + // the request into a new request to be sent + // using Transport. Its response is then copied + // back to the original client unmodified. + // Director must not access the provided Request + // after returning. + // + // By default, the X-Forwarded-For header is set to the + // value of the client IP address. If an X-Forwarded-For + // header already exists, the client IP is appended to the + // existing values. 
As a special case, if the header
+	// exists in the Request.Header map but has a nil value
+	// (such as when set by the Director func), the X-Forwarded-For
+	// header is not modified.
+	//
+	// To prevent IP spoofing, be sure to delete any pre-existing
+	// X-Forwarded-For header coming from the client or
+	// an untrusted proxy.
+	//
+	// Hop-by-hop headers are removed from the request after
+	// Director returns, which can remove headers added by
+	// Director. Use a Rewrite function instead to ensure
+	// modifications to the request are preserved.
+	//
+	// Unparsable query parameters are removed from the outbound
+	// request if Request.Form is set after Director returns.
+	//
+	// At most one of Rewrite or Director may be set.
+	//
+	// Deprecated: Use Rewrite instead.
+	Director func(*http.Request)
 }
 
 // A BufferPool is an interface for getting and returning temporary
@@ -259,6 +311,10 @@ func joinURLPath(a, b *url.URL) (path, rawpath string) {
 //
 // NewSingleHostReverseProxy does not rewrite the Host header.
 //
+// For backwards compatibility reasons, NewSingleHostReverseProxy
+// returns a ReverseProxy using the deprecated Director function.
+// This proxy preserves X-Forwarded-* headers sent by the client.
+//
 // To customize the ReverseProxy behavior beyond what
 // NewSingleHostReverseProxy provides, use ReverseProxy directly
 // with a Rewrite function. The ProxyRequest SetURL method
From d5b950399de01a0e28eeb48d2c8474db4aad0e8a Mon Sep 17 00:00:00 2001
From: Tim Cooijmans
Date: Tue, 30 Sep 2025 21:53:11 +0000
Subject: [PATCH 041/152] cmd/cgo: fix unaligned arguments typedmemmove crash on iOS

typedmemmove and bulkBarrierPreWrite occasionally crash on unaligned arguments. Aligning the arguments fixes this.

Fixes #46893

Change-Id: I7beb9fdc31053fcb71bee6c6cb906dea31718c56
GitHub-Last-Rev: 46ae8b96889644aab60ea4284cf447a740354c6a
GitHub-Pull-Request: golang/go#74868
Reviewed-on: https://go-review.googlesource.com/c/go/+/692935
Reviewed-by: Cherry Mui
LUCI-TryBot-Result: Go LUCI
Reviewed-by: Keith Randall
Reviewed-by: Keith Randall
---
 src/cmd/cgo/internal/testout/out_test.go | 144 ++++++++++++++++++
 .../cgo/internal/testout/testdata/aligned.go | 63 ++++++++
 src/cmd/cgo/out.go | 13 +-
 3 files changed, 219 insertions(+), 1 deletion(-)
 create mode 100644 src/cmd/cgo/internal/testout/out_test.go
 create mode 100644 src/cmd/cgo/internal/testout/testdata/aligned.go

diff --git a/src/cmd/cgo/internal/testout/out_test.go b/src/cmd/cgo/internal/testout/out_test.go
new file mode 100644
index 00000000000000..81dfa365871372
--- /dev/null
+++ b/src/cmd/cgo/internal/testout/out_test.go
@@ -0,0 +1,144 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +package out_test + +import ( + "bufio" + "bytes" + "fmt" + "internal/testenv" + "internal/goarch" + "os" + "path/filepath" + "regexp" + "strconv" + "strings" + "testing" +) + +type methodAlign struct { + Method string + Align int +} + +var wantAligns = map[string]int{ + "ReturnEmpty": 1, + "ReturnOnlyUint8": 1, + "ReturnOnlyUint16": 2, + "ReturnOnlyUint32": 4, + "ReturnOnlyUint64": goarch.PtrSize, + "ReturnOnlyInt": goarch.PtrSize, + "ReturnOnlyPtr": goarch.PtrSize, + "ReturnByteSlice": goarch.PtrSize, + "ReturnString": goarch.PtrSize, + "InputAndReturnUint8": 1, + "MixedTypes": goarch.PtrSize, +} + +// TestAligned tests that the generated _cgo_export.c file has the wanted +// align attributes for struct types used as arguments or results of +// //exported functions. +func TestAligned(t *testing.T) { + testenv.MustHaveGoRun(t) + testenv.MustHaveCGO(t) + + testdata, err := filepath.Abs("testdata") + if err != nil { + t.Fatal(err) + } + + objDir := t.TempDir() + + cmd := testenv.Command(t, testenv.GoToolPath(t), "tool", "cgo", + "-objdir", objDir, + filepath.Join(testdata, "aligned.go")) + cmd.Stderr = new(bytes.Buffer) + + err = cmd.Run() + if err != nil { + t.Fatalf("%#q: %v\n%s", cmd, err, cmd.Stderr) + } + + haveAligns, err := parseAlign(filepath.Join(objDir, "_cgo_export.c")) + if err != nil { + t.Fatal(err) + } + + // Check that we have all the wanted methods + if len(haveAligns) != len(wantAligns) { + t.Fatalf("have %d methods with aligned, want %d", len(haveAligns), len(wantAligns)) + } + + for i := range haveAligns { + method := haveAligns[i].Method + haveAlign := haveAligns[i].Align + + wantAlign, ok := wantAligns[method] + if !ok { + t.Errorf("method %s: have aligned %d, want missing entry", method, haveAlign) + } else if haveAlign != wantAlign { + t.Errorf("method %s: have aligned %d, want %d", method, haveAlign, wantAlign) + } + } +} + +func parseAlign(filename string) ([]methodAlign, error) { + file, err := os.Open(filename) + if err != nil { + return nil, fmt.Errorf("failed to open file: %w", err) + } + defer file.Close() + + var results []methodAlign + scanner := bufio.NewScanner(file) + + // Regex to match function declarations like "struct MethodName_return MethodName(" + funcRegex := regexp.MustCompile(`^struct\s+(\w+)_return\s+(\w+)\(`) + // Regex to match simple function declarations like "GoSlice MethodName(" + simpleFuncRegex := regexp.MustCompile(`^Go\w+\s+(\w+)\(`) + // Regex to match void-returning exported functions like "void ReturnEmpty(" + voidFuncRegex := regexp.MustCompile(`^void\s+(\w+)\(`) + // Regex to match align attributes like "__attribute__((aligned(8)))" + alignRegex := regexp.MustCompile(`__attribute__\(\(aligned\((\d+)\)\)\)`) + + var currentMethod string + + for scanner.Scan() { + line := strings.TrimSpace(scanner.Text()) + + // Check if this line declares a function with struct return type + if matches := funcRegex.FindStringSubmatch(line); matches != nil { + currentMethod = matches[2] // Extract the method name + } else if matches := simpleFuncRegex.FindStringSubmatch(line); matches != nil { + // Check if this line declares a function with simple return type (like GoSlice) + currentMethod = matches[1] // Extract the method name + } else if matches := voidFuncRegex.FindStringSubmatch(line); matches != nil { + // Check if this line declares a void-returning function + currentMethod = matches[1] // Extract the method name + } + + // Check if this line contains align information + if alignMatches := alignRegex.FindStringSubmatch(line); alignMatches 
!= nil && currentMethod != "" { + alignStr := alignMatches[1] + align, err := strconv.Atoi(alignStr) + if err != nil { + // Skip this entry if we can't parse the align as integer + currentMethod = "" + continue + } + results = append(results, methodAlign{ + Method: currentMethod, + Align: align, + }) + currentMethod = "" // Reset for next method + } + } + + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("error reading file: %w", err) + } + + return results, nil +} diff --git a/src/cmd/cgo/internal/testout/testdata/aligned.go b/src/cmd/cgo/internal/testout/testdata/aligned.go new file mode 100644 index 00000000000000..cea6f2889a0cad --- /dev/null +++ b/src/cmd/cgo/internal/testout/testdata/aligned.go @@ -0,0 +1,63 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "C" + +//export ReturnEmpty +func ReturnEmpty() { + return +} + +//export ReturnOnlyUint8 +func ReturnOnlyUint8() (uint8, uint8, uint8) { + return 1, 2, 3 +} + +//export ReturnOnlyUint16 +func ReturnOnlyUint16() (uint16, uint16, uint16) { + return 1, 2, 3 +} + +//export ReturnOnlyUint32 +func ReturnOnlyUint32() (uint32, uint32, uint32) { + return 1, 2, 3 +} + +//export ReturnOnlyUint64 +func ReturnOnlyUint64() (uint64, uint64, uint64) { + return 1, 2, 3 +} + +//export ReturnOnlyInt +func ReturnOnlyInt() (int, int, int) { + return 1, 2, 3 +} + +//export ReturnOnlyPtr +func ReturnOnlyPtr() (*int, *int, *int) { + a, b, c := 1, 2, 3 + return &a, &b, &c +} + +//export ReturnString +func ReturnString() string { + return "hello" +} + +//export ReturnByteSlice +func ReturnByteSlice() []byte { + return []byte{1, 2, 3} +} + +//export InputAndReturnUint8 +func InputAndReturnUint8(a, b, c uint8) (uint8, uint8, uint8) { + return a, b, c +} + +//export MixedTypes +func MixedTypes(a uint8, b uint16, c uint32, d uint64, e int, f *int) (uint8, uint16, uint32, uint64, int, *int) { + return a, b, c, d, e, f +} diff --git a/src/cmd/cgo/out.go b/src/cmd/cgo/out.go index 622d35ac7b3bab..a2bcdf89c5ad44 100644 --- a/src/cmd/cgo/out.go +++ b/src/cmd/cgo/out.go @@ -949,6 +949,8 @@ func (p *Package) writeExports(fgo2, fm, fgcc, fgcch io.Writer) { fmt.Fprintf(gotype, "struct {\n") off := int64(0) npad := 0 + // the align is at least 1 (for char) + maxAlign := int64(1) argField := func(typ ast.Expr, namePat string, args ...interface{}) { name := fmt.Sprintf(namePat, args...) t := p.cgoType(typ) @@ -963,6 +965,11 @@ func (p *Package) writeExports(fgo2, fm, fgcc, fgcch io.Writer) { noSourceConf.Fprint(gotype, fset, typ) fmt.Fprintf(gotype, "\n") off += t.Size + // keep track of the maximum alignment among all fields + // so that we can align the struct correctly + if t.Align > maxAlign { + maxAlign = t.Align + } } if fn.Recv != nil { argField(fn.Recv.List[0].Type, "recv") @@ -1047,7 +1054,11 @@ func (p *Package) writeExports(fgo2, fm, fgcc, fgcch io.Writer) { // string.h for memset, and is also robust to C++ // types with constructors. Both GCC and LLVM optimize // this into just zeroing _cgo_a. - fmt.Fprintf(fgcc, "\ttypedef %s %v _cgo_argtype;\n", ctype.String(), p.packedAttribute()) + // + // The struct should be aligned to the maximum alignment + // of any of its fields. This to avoid alignment + // issues. 
+ fmt.Fprintf(fgcc, "\ttypedef %s %v __attribute__((aligned(%d))) _cgo_argtype;\n", ctype.String(), p.packedAttribute(), maxAlign) fmt.Fprintf(fgcc, "\tstatic _cgo_argtype _cgo_zero;\n") fmt.Fprintf(fgcc, "\t_cgo_argtype _cgo_a = _cgo_zero;\n") if gccResult != "void" && (len(fntype.Results.List) > 1 || len(fntype.Results.List[0].Names) > 1) { From adce7f196e6ac6d22e9bc851efea5f3ab650947c Mon Sep 17 00:00:00 2001 From: Cherry Mui Date: Thu, 2 Oct 2025 18:24:12 -0400 Subject: [PATCH 042/152] cmd/link: support .def file with MSVC clang toolchain lld-link supports .def file, but requires a "-def:" (or "/def:") flag. (MinGW linker, on the other hand, requires no flag.) Pass the flag when using MSVC-based toolchain. CL originally authored by Chressie Himpel. Change-Id: I8c327ab48d36b0bcbb1d127cff544ffdb06be38e Reviewed-on: https://go-review.googlesource.com/c/go/+/708716 LUCI-TryBot-Result: Go LUCI Reviewed-by: Chressie Himpel --- src/cmd/link/internal/ld/lib.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/cmd/link/internal/ld/lib.go b/src/cmd/link/internal/ld/lib.go index 79d3d37835e9ad..2c861129b52f9a 100644 --- a/src/cmd/link/internal/ld/lib.go +++ b/src/cmd/link/internal/ld/lib.go @@ -1787,9 +1787,13 @@ func (ctxt *Link) hostlink() { case ctxt.IsAIX(): fileName := xcoffCreateExportFile(ctxt) argv = append(argv, "-Wl,-bE:"+fileName) - case ctxt.IsWindows() && !slices.Contains(flagExtldflags, "-Wl,--export-all-symbols"): + case ctxt.IsWindows() && !slices.Contains(flagExtldflags, wlPrefix+"export-all-symbols"): fileName := peCreateExportFile(ctxt, filepath.Base(outopt)) - argv = append(argv, fileName) + prefix := "" + if isMSVC { + prefix = "-Wl,-def:" + } + argv = append(argv, prefix+fileName) } const unusedArguments = "-Qunused-arguments" From 630799c6c957994a7720b5d0b6bdd10f3acfc605 Mon Sep 17 00:00:00 2001 From: Daniel McCarney Date: Mon, 30 Jun 2025 12:45:36 -0400 Subject: [PATCH 043/152] crypto/tls: add flag to render HTML BoGo report Updates the BoGo test runner to add a `-bogo-html-report` flag. When provided, an HTML report is written to the flag argument path. The report shows the fail/pass/skip status of run tests and allows sorting/searching the output. 
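The flag is registered on the crypto/tls test binary, so a report can be generated with an invocation along these lines (illustrative; the output path is arbitrary):

```
go test crypto/tls -run TestBogoSuite -bogo-html-report=/tmp/bogo-report.html
```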
Change-Id: I8c704a51fbb03500f4134ebfaba06248baa3ca2f Reviewed-on: https://go-review.googlesource.com/c/go/+/684955 Auto-Submit: Daniel McCarney Reviewed-by: Roland Shoemaker Reviewed-by: Carlos Amedee TryBot-Bypass: Daniel McCarney Commit-Queue: Carlos Amedee --- src/crypto/tls/bogo_shim_test.go | 152 ++++++++++++++++++++++++++++++- src/crypto/tls/handshake_test.go | 1 + 2 files changed, 152 insertions(+), 1 deletion(-) diff --git a/src/crypto/tls/bogo_shim_test.go b/src/crypto/tls/bogo_shim_test.go index 7cab568db80953..f3f19b34e9e4cc 100644 --- a/src/crypto/tls/bogo_shim_test.go +++ b/src/crypto/tls/bogo_shim_test.go @@ -13,6 +13,7 @@ import ( "encoding/pem" "flag" "fmt" + "html/template" "internal/byteorder" "internal/testenv" "io" @@ -25,10 +26,13 @@ import ( "strconv" "strings" "testing" + "time" "golang.org/x/crypto/cryptobyte" ) +const boringsslModVer = "v0.0.0-20250620172916-f51d8b099832" + var ( port = flag.String("port", "", "") server = flag.Bool("server", false, "") @@ -557,7 +561,6 @@ func TestBogoSuite(t *testing.T) { if *bogoLocalDir != "" { bogoDir = *bogoLocalDir } else { - const boringsslModVer = "v0.0.0-20250620172916-f51d8b099832" bogoDir = cryptotest.FetchModule(t, "boringssl.googlesource.com/boringssl.git", boringsslModVer) } @@ -606,6 +609,12 @@ func TestBogoSuite(t *testing.T) { t.Fatalf("failed to parse results JSON: %s", err) } + if *bogoReport != "" { + if err := generateReport(results, *bogoReport); err != nil { + t.Fatalf("failed to generate report: %v", err) + } + } + // assertResults contains test results we want to make sure // are present in the output. They are only checked if -bogo-filter // was not passed. @@ -655,6 +664,23 @@ func TestBogoSuite(t *testing.T) { } } +func generateReport(results bogoResults, outPath string) error { + data := reportData{ + Results: results, + Timestamp: time.Unix(int64(results.SecondsSinceEpoch), 0).Format("2006-01-02 15:04:05"), + Revision: boringsslModVer, + } + + tmpl := template.Must(template.New("report").Parse(reportTemplate)) + file, err := os.Create(outPath) + if err != nil { + return err + } + defer file.Close() + + return tmpl.Execute(file, data) +} + // bogoResults is a copy of boringssl.googlesource.com/boringssl/testresults.Results type bogoResults struct { Version int `json:"version"` @@ -669,3 +695,127 @@ type bogoResults struct { Error string `json:"error,omitempty"` } `json:"tests"` } + +type reportData struct { + Results bogoResults + SkipReasons map[string]string + Timestamp string + Revision string +} + +const reportTemplate = ` + + + + BoGo Results Report + + + +

+  <h1>BoGo Results Report</h1>
+
+  <p>
+    Generated: {{.Timestamp}} | BoGo Revision: {{.Revision}}<br>
+    {{range $status, $count := .Results.NumFailuresByType}}
+    {{$status}}: {{$count}} |
+    {{end}}
+  </p>
+
+  <!-- search box, styling, and the table sort/filter script go here -->
+
+  <table>
+    <thead>
+      <tr>
+        <th>Test Name</th>
+        <th>Status</th>
+        <th>Actual</th>
+        <th>Expected</th>
+        <th>Error</th>
+      </tr>
+    </thead>
+    <tbody>
+      {{range $name, $test := .Results.Tests}}
+      <tr>
+        <td>{{$name}}</td>
+        <td>{{$test.Actual}}</td>
+        <td>{{$test.Actual}}</td>
+        <td>{{$test.Expected}}</td>
+        <td>{{$test.Error}}</td>
+      </tr>
+      {{end}}
+    </tbody>
+  </table>
+ + + + +` diff --git a/src/crypto/tls/handshake_test.go b/src/crypto/tls/handshake_test.go index ea8ac6fc837ae7..13f1bb1dd50a19 100644 --- a/src/crypto/tls/handshake_test.go +++ b/src/crypto/tls/handshake_test.go @@ -47,6 +47,7 @@ var ( bogoMode = flag.Bool("bogo-mode", false, "Enabled bogo shim mode, ignore everything else") bogoFilter = flag.String("bogo-filter", "", "BoGo test filter") bogoLocalDir = flag.String("bogo-local-dir", "", "Local BoGo to use, instead of fetching from source") + bogoReport = flag.String("bogo-html-report", "", "File path to render an HTML report with BoGo results") ) func runTestAndUpdateIfNeeded(t *testing.T, name string, run func(t *testing.T, update bool), wait bool) { From a7917eed70c63840b2c59748c52ef9ff11fecf38 Mon Sep 17 00:00:00 2001 From: Michael Matloob Date: Mon, 17 Mar 2025 11:45:52 -0400 Subject: [PATCH 044/152] internal/buildcfg: enable specializedmalloc experiment Cq-Include-Trybots: luci.golang.try:gotip-linux-amd64_c2s16-perf_vs_parent,gotip-linux-amd64_c3h88-perf_vs_parent,gotip-linux-arm64_c4ah72-perf_vs_parent,gotip-linux-arm64_c4as16-perf_vs_parent Change-Id: I6a6a6964c4c596bbf4f072b5a44a34c3ce4f6541 Reviewed-on: https://go-review.googlesource.com/c/go/+/696536 Reviewed-by: Michael Matloob LUCI-TryBot-Result: Go LUCI Reviewed-by: Michael Knyszek --- src/internal/buildcfg/exp.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/internal/buildcfg/exp.go b/src/internal/buildcfg/exp.go index 707776b374b0d4..d06913d9a7f25a 100644 --- a/src/internal/buildcfg/exp.go +++ b/src/internal/buildcfg/exp.go @@ -79,10 +79,11 @@ func ParseGOEXPERIMENT(goos, goarch, goexp string) (*ExperimentFlags, error) { dwarf5Supported := (goos != "darwin" && goos != "ios" && goos != "aix") baseline := goexperiment.Flags{ - RegabiWrappers: regabiSupported, - RegabiArgs: regabiSupported, - Dwarf5: dwarf5Supported, - RandomizedHeapBase64: true, + RegabiWrappers: regabiSupported, + RegabiArgs: regabiSupported, + Dwarf5: dwarf5Supported, + RandomizedHeapBase64: true, + SizeSpecializedMalloc: true, } // Start with the statically enabled set of experiments. From c54dc1418b6fbff4176aaaffcc9fab6f1ad631a6 Mon Sep 17 00:00:00 2001 From: matloob Date: Wed, 1 Oct 2025 12:06:14 -0400 Subject: [PATCH 045/152] runtime: support valgrind (but not asan) in specialized malloc functions We're adding this so that the compiler doesn't need to know about valgrind since it's just implemented using a build tag. 
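The `valgrindenabled` guard and the `valgrindMalloc` hook used throughout the diff below are selected by build tag, so in an ordinary build the guarded blocks compile away entirely. A minimal sketch of the stub side of that pattern; the file layout and the `const` form are assumptions for illustration, not the actual runtime sources:

```go
//go:build !valgrind

package runtime

import "unsafe"

// Without the valgrind build tag the guard is a compile-time false, so
// every `if valgrindenabled { valgrindMalloc(x, size) }` block in the
// generated malloc fast paths is dead code and is eliminated.
const valgrindenabled = false

// valgrindMalloc is a no-op stub here; the valgrind-tagged counterpart
// reports the allocation to valgrind.
func valgrindMalloc(p unsafe.Pointer, size uintptr) {}
```

Because the guard resolves at compile time, no compiler-inserted instrumentation is needed, which is why the compiler does not have to know about valgrind, unlike the other sanitizers.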
Change-Id: I6a6a696452b0379caceca2ae4e49195016f7a90d Reviewed-on: https://go-review.googlesource.com/c/go/+/708296 Reviewed-by: Michael Matloob LUCI-TryBot-Result: Go LUCI Auto-Submit: Michael Matloob Reviewed-by: Michael Knyszek --- src/runtime/malloc_generated.go | 324 ++++++++++++++++++++++++++++++++ src/runtime/malloc_stubs.go | 9 + 2 files changed, 333 insertions(+) diff --git a/src/runtime/malloc_generated.go b/src/runtime/malloc_generated.go index 600048c67557b1..2215dbaddb2e1c 100644 --- a/src/runtime/malloc_generated.go +++ b/src/runtime/malloc_generated.go @@ -150,6 +150,10 @@ func mallocgcSmallScanNoHeaderSC1(size uintptr, typ *_type, needzero bool) unsaf gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -304,6 +308,10 @@ func mallocgcSmallScanNoHeaderSC2(size uintptr, typ *_type, needzero bool) unsaf gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -458,6 +466,10 @@ func mallocgcSmallScanNoHeaderSC3(size uintptr, typ *_type, needzero bool) unsaf gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -612,6 +624,10 @@ func mallocgcSmallScanNoHeaderSC4(size uintptr, typ *_type, needzero bool) unsaf gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -766,6 +782,10 @@ func mallocgcSmallScanNoHeaderSC5(size uintptr, typ *_type, needzero bool) unsaf gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -920,6 +940,10 @@ func mallocgcSmallScanNoHeaderSC6(size uintptr, typ *_type, needzero bool) unsaf gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -1074,6 +1098,10 @@ func mallocgcSmallScanNoHeaderSC7(size uintptr, typ *_type, needzero bool) unsaf gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -1228,6 +1256,10 @@ func mallocgcSmallScanNoHeaderSC8(size uintptr, typ *_type, needzero bool) unsaf gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -1382,6 +1414,10 @@ func mallocgcSmallScanNoHeaderSC9(size uintptr, typ *_type, needzero bool) unsaf gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -1536,6 +1572,10 @@ func mallocgcSmallScanNoHeaderSC10(size uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if 
gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -1690,6 +1730,10 @@ func mallocgcSmallScanNoHeaderSC11(size uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -1844,6 +1888,10 @@ func mallocgcSmallScanNoHeaderSC12(size uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -1998,6 +2046,10 @@ func mallocgcSmallScanNoHeaderSC13(size uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -2152,6 +2204,10 @@ func mallocgcSmallScanNoHeaderSC14(size uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -2306,6 +2362,10 @@ func mallocgcSmallScanNoHeaderSC15(size uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -2460,6 +2520,10 @@ func mallocgcSmallScanNoHeaderSC16(size uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -2614,6 +2678,10 @@ func mallocgcSmallScanNoHeaderSC17(size uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -2768,6 +2836,10 @@ func mallocgcSmallScanNoHeaderSC18(size uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -2922,6 +2994,10 @@ func mallocgcSmallScanNoHeaderSC19(size uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -3076,6 +3152,10 @@ func mallocgcSmallScanNoHeaderSC20(size uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -3230,6 +3310,10 @@ func mallocgcSmallScanNoHeaderSC21(size uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= 
int64(elemsize - size) @@ -3384,6 +3468,10 @@ func mallocgcSmallScanNoHeaderSC22(size uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -3538,6 +3626,10 @@ func mallocgcSmallScanNoHeaderSC23(size uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -3692,6 +3784,10 @@ func mallocgcSmallScanNoHeaderSC24(size uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -3846,6 +3942,10 @@ func mallocgcSmallScanNoHeaderSC25(size uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -4000,6 +4100,10 @@ func mallocgcSmallScanNoHeaderSC26(size uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -4064,6 +4168,10 @@ func mallocTiny1(size uintptr, typ *_type, needzero bool) unsafe.Pointer { const elemsize = 0 { + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -4142,6 +4250,10 @@ func mallocTiny1(size uintptr, typ *_type, needzero bool) unsafe.Pointer { x = add(x, elemsize-constsize) } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -4206,6 +4318,10 @@ func mallocTiny2(size uintptr, typ *_type, needzero bool) unsafe.Pointer { const elemsize = 0 { + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -4284,6 +4400,10 @@ func mallocTiny2(size uintptr, typ *_type, needzero bool) unsafe.Pointer { x = add(x, elemsize-constsize) } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -4348,6 +4468,10 @@ func mallocTiny3(size uintptr, typ *_type, needzero bool) unsafe.Pointer { const elemsize = 0 { + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -4426,6 +4550,10 @@ func mallocTiny3(size uintptr, typ *_type, needzero bool) unsafe.Pointer { x = add(x, elemsize-constsize) } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -4490,6 +4618,10 @@ func mallocTiny4(size uintptr, typ 
*_type, needzero bool) unsafe.Pointer { const elemsize = 0 { + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -4568,6 +4700,10 @@ func mallocTiny4(size uintptr, typ *_type, needzero bool) unsafe.Pointer { x = add(x, elemsize-constsize) } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -4632,6 +4768,10 @@ func mallocTiny5(size uintptr, typ *_type, needzero bool) unsafe.Pointer { const elemsize = 0 { + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -4710,6 +4850,10 @@ func mallocTiny5(size uintptr, typ *_type, needzero bool) unsafe.Pointer { x = add(x, elemsize-constsize) } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -4774,6 +4918,10 @@ func mallocTiny6(size uintptr, typ *_type, needzero bool) unsafe.Pointer { const elemsize = 0 { + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -4852,6 +5000,10 @@ func mallocTiny6(size uintptr, typ *_type, needzero bool) unsafe.Pointer { x = add(x, elemsize-constsize) } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -4916,6 +5068,10 @@ func mallocTiny7(size uintptr, typ *_type, needzero bool) unsafe.Pointer { const elemsize = 0 { + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -4994,6 +5150,10 @@ func mallocTiny7(size uintptr, typ *_type, needzero bool) unsafe.Pointer { x = add(x, elemsize-constsize) } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -5058,6 +5218,10 @@ func mallocTiny8(size uintptr, typ *_type, needzero bool) unsafe.Pointer { const elemsize = 0 { + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -5136,6 +5300,10 @@ func mallocTiny8(size uintptr, typ *_type, needzero bool) unsafe.Pointer { x = add(x, elemsize-constsize) } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -5200,6 +5368,10 @@ func mallocTiny9(size uintptr, typ *_type, needzero bool) unsafe.Pointer { const elemsize = 0 { + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -5278,6 +5450,10 @@ func mallocTiny9(size uintptr, typ *_type, needzero bool) unsafe.Pointer { x = add(x, 
elemsize-constsize) } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -5342,6 +5518,10 @@ func mallocTiny10(size uintptr, typ *_type, needzero bool) unsafe.Pointer { const elemsize = 0 { + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -5420,6 +5600,10 @@ func mallocTiny10(size uintptr, typ *_type, needzero bool) unsafe.Pointer { x = add(x, elemsize-constsize) } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -5484,6 +5668,10 @@ func mallocTiny11(size uintptr, typ *_type, needzero bool) unsafe.Pointer { const elemsize = 0 { + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -5562,6 +5750,10 @@ func mallocTiny11(size uintptr, typ *_type, needzero bool) unsafe.Pointer { x = add(x, elemsize-constsize) } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -5626,6 +5818,10 @@ func mallocTiny12(size uintptr, typ *_type, needzero bool) unsafe.Pointer { const elemsize = 0 { + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -5704,6 +5900,10 @@ func mallocTiny12(size uintptr, typ *_type, needzero bool) unsafe.Pointer { x = add(x, elemsize-constsize) } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -5768,6 +5968,10 @@ func mallocTiny13(size uintptr, typ *_type, needzero bool) unsafe.Pointer { const elemsize = 0 { + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -5846,6 +6050,10 @@ func mallocTiny13(size uintptr, typ *_type, needzero bool) unsafe.Pointer { x = add(x, elemsize-constsize) } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -5910,6 +6118,10 @@ func mallocTiny14(size uintptr, typ *_type, needzero bool) unsafe.Pointer { const elemsize = 0 { + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -5988,6 +6200,10 @@ func mallocTiny14(size uintptr, typ *_type, needzero bool) unsafe.Pointer { x = add(x, elemsize-constsize) } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -6052,6 +6268,10 @@ func mallocTiny15(size uintptr, typ *_type, needzero bool) unsafe.Pointer { const elemsize = 0 { + if valgrindenabled { + 
valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -6130,6 +6350,10 @@ func mallocTiny15(size uintptr, typ *_type, needzero bool) unsafe.Pointer { x = add(x, elemsize-constsize) } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -6223,6 +6447,10 @@ func mallocgcSmallNoScanSC2(size uintptr, typ *_type, needzero bool) unsafe.Poin gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -6316,6 +6544,10 @@ func mallocgcSmallNoScanSC3(size uintptr, typ *_type, needzero bool) unsafe.Poin gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -6409,6 +6641,10 @@ func mallocgcSmallNoScanSC4(size uintptr, typ *_type, needzero bool) unsafe.Poin gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -6502,6 +6738,10 @@ func mallocgcSmallNoScanSC5(size uintptr, typ *_type, needzero bool) unsafe.Poin gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -6595,6 +6835,10 @@ func mallocgcSmallNoScanSC6(size uintptr, typ *_type, needzero bool) unsafe.Poin gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -6688,6 +6932,10 @@ func mallocgcSmallNoScanSC7(size uintptr, typ *_type, needzero bool) unsafe.Poin gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -6781,6 +7029,10 @@ func mallocgcSmallNoScanSC8(size uintptr, typ *_type, needzero bool) unsafe.Poin gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -6874,6 +7126,10 @@ func mallocgcSmallNoScanSC9(size uintptr, typ *_type, needzero bool) unsafe.Poin gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -6967,6 +7223,10 @@ func mallocgcSmallNoScanSC10(size uintptr, typ *_type, needzero bool) unsafe.Poi gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -7060,6 +7320,10 @@ func mallocgcSmallNoScanSC11(size uintptr, typ *_type, needzero bool) unsafe.Poi gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; 
assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -7153,6 +7417,10 @@ func mallocgcSmallNoScanSC12(size uintptr, typ *_type, needzero bool) unsafe.Poi gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -7246,6 +7514,10 @@ func mallocgcSmallNoScanSC13(size uintptr, typ *_type, needzero bool) unsafe.Poi gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -7339,6 +7611,10 @@ func mallocgcSmallNoScanSC14(size uintptr, typ *_type, needzero bool) unsafe.Poi gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -7432,6 +7708,10 @@ func mallocgcSmallNoScanSC15(size uintptr, typ *_type, needzero bool) unsafe.Poi gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -7525,6 +7805,10 @@ func mallocgcSmallNoScanSC16(size uintptr, typ *_type, needzero bool) unsafe.Poi gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -7618,6 +7902,10 @@ func mallocgcSmallNoScanSC17(size uintptr, typ *_type, needzero bool) unsafe.Poi gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -7711,6 +7999,10 @@ func mallocgcSmallNoScanSC18(size uintptr, typ *_type, needzero bool) unsafe.Poi gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -7804,6 +8096,10 @@ func mallocgcSmallNoScanSC19(size uintptr, typ *_type, needzero bool) unsafe.Poi gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -7897,6 +8193,10 @@ func mallocgcSmallNoScanSC20(size uintptr, typ *_type, needzero bool) unsafe.Poi gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -7990,6 +8290,10 @@ func mallocgcSmallNoScanSC21(size uintptr, typ *_type, needzero bool) unsafe.Poi gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -8083,6 +8387,10 @@ func mallocgcSmallNoScanSC22(size uintptr, typ *_type, needzero bool) unsafe.Poi gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -8176,6 +8484,10 @@ func mallocgcSmallNoScanSC23(size 
uintptr, typ *_type, needzero bool) unsafe.Poi gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -8269,6 +8581,10 @@ func mallocgcSmallNoScanSC24(size uintptr, typ *_type, needzero bool) unsafe.Poi gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -8362,6 +8678,10 @@ func mallocgcSmallNoScanSC25(size uintptr, typ *_type, needzero bool) unsafe.Poi gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -8455,6 +8775,10 @@ func mallocgcSmallNoScanSC26(size uintptr, typ *_type, needzero bool) unsafe.Poi gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) diff --git a/src/runtime/malloc_stubs.go b/src/runtime/malloc_stubs.go index 7fd144418938fb..224746f3d41124 100644 --- a/src/runtime/malloc_stubs.go +++ b/src/runtime/malloc_stubs.go @@ -50,6 +50,8 @@ func mallocPanic(size uintptr, typ *_type, needzero bool) unsafe.Pointer { panic("not defined for sizeclass") } +// WARNING: mallocStub does not do any work for sanitizers so callers need +// to steer out of this codepath early if sanitizers are enabled. func mallocStub(size uintptr, typ *_type, needzero bool) unsafe.Pointer { if doubleCheckMalloc { if gcphase == _GCmarktermination { @@ -77,6 +79,13 @@ func mallocStub(size uintptr, typ *_type, needzero bool) unsafe.Pointer { // Actually do the allocation. x, elemsize := inlinedMalloc(size, typ, needzero) + // Notify valgrind, if enabled. + // To allow the compiler to not know about valgrind, we do valgrind instrumentation + // unlike the other sanitizers. + if valgrindenabled { + valgrindMalloc(x, size) + } + // Adjust our GC assist debt to account for internal fragmentation. if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { From ebb72bef44a0e125c7f900a04af6538e3c39dfc6 Mon Sep 17 00:00:00 2001 From: Cherry Mui Date: Fri, 20 Jun 2025 12:02:18 -0400 Subject: [PATCH 046/152] cmd/compile: don't treat devel compiler as a released compiler The compiler has a logic to print different messages on internal compiler error depending on whether this is a released version of Go. It hides the panic stack trace if it is a released version. It does this by checking the version and see if it has a "go" prefix. This includes all the released versions. However, for a non- released build, if there is no explicit version set, cmd/dist now sets the toolchain version as go1.X-devel_XXX, which makes it be treated as a released compiler, and causes the stack trace to be hidden. Change the logic to not match a devel compiler as a released compiler. Cherry-picked from the dev.simd branch. This CL is not necessarily SIMD specific. Apply early to reduce risk. 
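Concretely, the updated condition in FatalfAt counts a version as released only if it starts with "go" and does not contain "devel". A small standalone illustration of which strings match (not compiler code):

```go
package main

import (
	"fmt"
	"strings"
)

// isReleased mirrors the new check: "go1.25.1" is a released toolchain,
// while a development build such as "go1.26-devel_abc123" is not.
func isReleased(version string) bool {
	return strings.HasPrefix(version, "go") && !strings.Contains(version, "devel")
}

func main() {
	for _, v := range []string{"go1.25.1", "go1.26-devel_abc123", "devel +abc123"} {
		fmt.Printf("%-24s released=%v\n", v, isReleased(v))
	}
}
```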
Change-Id: I5d3b2101527212f825b6e4000b36030c4f83870b Reviewed-on: https://go-review.googlesource.com/c/go/+/682975 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI Reviewed-on: https://go-review.googlesource.com/c/go/+/708855 Reviewed-by: Junyang Shao --- src/cmd/compile/internal/base/print.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cmd/compile/internal/base/print.go b/src/cmd/compile/internal/base/print.go index 119f06fbc03351..9e3348c1ecca89 100644 --- a/src/cmd/compile/internal/base/print.go +++ b/src/cmd/compile/internal/base/print.go @@ -220,7 +220,7 @@ func FatalfAt(pos src.XPos, format string, args ...interface{}) { fmt.Printf("\n") // If this is a released compiler version, ask for a bug report. - if Debug.Panic == 0 && strings.HasPrefix(buildcfg.Version, "go") { + if Debug.Panic == 0 && strings.HasPrefix(buildcfg.Version, "go") && !strings.Contains(buildcfg.Version, "devel") { fmt.Printf("\n") fmt.Printf("Please file a bug report including a short program that triggers the error.\n") fmt.Printf("https://go.dev/issue/new\n") From ab043953cbd6e3cd262548710f35f05924aa8f32 Mon Sep 17 00:00:00 2001 From: David Chase Date: Wed, 2 Jul 2025 18:00:12 -0400 Subject: [PATCH 047/152] cmd/compile: minor tweak for race detector This makes the front-end a little bit less temp-happy when instrumenting, which repairs the "is it a constant?" test in the simd intrinsic conversion which is otherwise broken by race detection. Also, this will perhaps be better code. Cherry-picked from the dev.simd branch. This CL is not necessarily SIMD specific. Apply early to reduce risk. Change-Id: I84b7a45b7bff62bb2c9f9662466b50858d288645 Reviewed-on: https://go-review.googlesource.com/c/go/+/685637 LUCI-TryBot-Result: Go LUCI Reviewed-by: Junyang Shao Reviewed-by: Cherry Mui Reviewed-on: https://go-review.googlesource.com/c/go/+/708856 --- src/cmd/compile/internal/walk/walk.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/cmd/compile/internal/walk/walk.go b/src/cmd/compile/internal/walk/walk.go index a7d8182a747ce7..25add3d8043905 100644 --- a/src/cmd/compile/internal/walk/walk.go +++ b/src/cmd/compile/internal/walk/walk.go @@ -275,6 +275,15 @@ func backingArrayPtrLen(n ir.Node) (ptr, length ir.Node) { // function calls, which could clobber function call arguments/results // currently on the stack. func mayCall(n ir.Node) bool { + // This is intended to avoid putting constants + // into temporaries with the race detector (or other + // instrumentation) which interferes with simple + // "this is a constant" tests in ssagen. + // Also, it will generally lead to better code. + if n.Op() == ir.OLITERAL { + return false + } + // When instrumenting, any expression might require function calls. if base.Flag.Cfg.Instrumenting { return true From 10e796884905d23ab2419cc158769e8fdc73de4e Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Tue, 12 Aug 2025 16:53:44 +0000 Subject: [PATCH 048/152] cmd/compile: accounts rematerialize ops's output reginfo This CL implements the check for rematerializeable value's output regspec at its remateralization site. It has some potential problems, please see the TODO in regalloc.go. Fixes #70451. Cherry-picked from the dev.simd branch. This CL is not necessarily SIMD specific. Apply early to reduce risk. 
Change-Id: Ib624b967031776851136554719e939e9bf116b7c Reviewed-on: https://go-review.googlesource.com/c/go/+/695315 Reviewed-by: David Chase TryBot-Bypass: David Chase Reviewed-on: https://go-review.googlesource.com/c/go/+/708857 Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/ssa/func.go | 1 + src/cmd/compile/internal/ssa/func_test.go | 5 ++++ src/cmd/compile/internal/ssa/regalloc.go | 23 +++++++++++++++++ src/cmd/compile/internal/ssa/regalloc_test.go | 25 +++++++++++++++++++ 4 files changed, 54 insertions(+) diff --git a/src/cmd/compile/internal/ssa/func.go b/src/cmd/compile/internal/ssa/func.go index 5736f0b8126484..fc8cb3f2fef0af 100644 --- a/src/cmd/compile/internal/ssa/func.go +++ b/src/cmd/compile/internal/ssa/func.go @@ -102,6 +102,7 @@ func (c *Config) NewFunc(fe Frontend, cache *Cache) *Func { NamedValues: make(map[LocalSlot][]*Value), CanonicalLocalSlots: make(map[LocalSlot]*LocalSlot), CanonicalLocalSplits: make(map[LocalSlotSplitKey]*LocalSlot), + OwnAux: &AuxCall{}, } } diff --git a/src/cmd/compile/internal/ssa/func_test.go b/src/cmd/compile/internal/ssa/func_test.go index a1e639e0486b62..4639d674e145fa 100644 --- a/src/cmd/compile/internal/ssa/func_test.go +++ b/src/cmd/compile/internal/ssa/func_test.go @@ -250,6 +250,11 @@ func Exit(arg string) ctrl { return ctrl{BlockExit, arg, []string{}} } +// Ret specifies a BlockRet. +func Ret(arg string) ctrl { + return ctrl{BlockRet, arg, []string{}} +} + // Eq specifies a BlockAMD64EQ. func Eq(cond, sub, alt string) ctrl { return ctrl{BlockAMD64EQ, cond, []string{sub, alt}} diff --git a/src/cmd/compile/internal/ssa/regalloc.go b/src/cmd/compile/internal/ssa/regalloc.go index 1ce85a8f63b76a..56f9e550b2d439 100644 --- a/src/cmd/compile/internal/ssa/regalloc.go +++ b/src/cmd/compile/internal/ssa/regalloc.go @@ -609,6 +609,29 @@ func (s *regAllocState) allocValToReg(v *Value, mask regMask, nospill bool, pos } else if v.rematerializeable() { // Rematerialize instead of loading from the spill location. c = v.copyIntoWithXPos(s.curBlock, pos) + // We need to consider its output mask and potentially issue a Copy + // if there are register mask conflicts. + // This currently happens for the SIMD package only between GP and FP + // register. Because Intel's vector extension can put integer value into + // FP, which is seen as a vector. Example instruction: VPSLL[BWDQ] + // Because GP and FP masks do not overlap, mask & outputMask == 0 + // detects this situation thoroughly. + sourceMask := s.regspec(c).outputs[0].regs + if mask&sourceMask == 0 && !onWasmStack { + s.setOrig(c, v) + s.assignReg(s.allocReg(sourceMask, v), v, c) + // v.Type for the new OpCopy is likely wrong and it might delay the problem + // until ssa to asm lowering, which might need the types to generate the right + // assembly for OpCopy. For Intel's GP to FP move, it happens to be that + // MOV instruction has such a variant so it happens to be right. + // But it's unclear for other architectures or situations, and the problem + // might be exposed when the assembler sees illegal instructions. + // Right now make we still pick v.Type, because at least its size should be correct + // for the rematerialization case the amd64 SIMD package exposed. + // TODO: We might need to figure out a way to find the correct type or make + // the asm lowering use reg info only for OpCopy. + c = s.curBlock.NewValue1(pos, OpCopy, v.Type, c) + } } else { // Load v from its spill location. 
spill := s.makeSpill(v, s.curBlock) diff --git a/src/cmd/compile/internal/ssa/regalloc_test.go b/src/cmd/compile/internal/ssa/regalloc_test.go index 0f69b852d12971..79f94da0114f93 100644 --- a/src/cmd/compile/internal/ssa/regalloc_test.go +++ b/src/cmd/compile/internal/ssa/regalloc_test.go @@ -6,6 +6,7 @@ package ssa import ( "cmd/compile/internal/types" + "cmd/internal/obj/x86" "fmt" "testing" ) @@ -279,3 +280,27 @@ func numOps(b *Block, op Op) int { } return n } + +func TestRematerializeableRegCompatible(t *testing.T) { + c := testConfig(t) + f := c.Fun("entry", + Bloc("entry", + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Valu("x", OpAMD64MOVLconst, c.config.Types.Int32, 1, nil), + Valu("a", OpAMD64POR, c.config.Types.Float32, 0, nil, "x", "x"), + Valu("res", OpMakeResult, types.NewResults([]*types.Type{c.config.Types.Float32, types.TypeMem}), 0, nil, "a", "mem"), + Ret("res"), + ), + ) + regalloc(f.f) + checkFunc(f.f) + moveFound := false + for _, v := range f.f.Blocks[0].Values { + if v.Op == OpCopy && x86.REG_X0 <= v.Reg() && v.Reg() <= x86.REG_X31 { + moveFound = true + } + } + if !moveFound { + t.Errorf("Expects an Copy to be issued, but got: %+v", f.f) + } +} From ec70d1902355f10e0ab4788334b80db11ab69785 Mon Sep 17 00:00:00 2001 From: David Chase Date: Wed, 20 Aug 2025 12:29:02 -0400 Subject: [PATCH 049/152] cmd/compile: rewrite to elide Slicemask from len==c>0 slicing This might have been something that prove could be educated into figuring out, but this also works, and it also helps prove downstream. Adjusted the prove test, because this change moved a message. Cherry-picked from the dev.simd branch. This CL is not necessarily SIMD specific. Apply early to reduce risk. Change-Id: I5eabe639eff5db9cd9766a6a8666fdb4973829cb Reviewed-on: https://go-review.googlesource.com/c/go/+/697715 Commit-Queue: David Chase Reviewed-by: Cherry Mui TryBot-Bypass: David Chase Reviewed-on: https://go-review.googlesource.com/c/go/+/708858 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase Reviewed-by: Junyang Shao --- .../compile/internal/ssa/_gen/generic.rules | 4 + .../compile/internal/ssa/rewritegeneric.go | 87 +++++++++++++++++++ test/prove.go | 4 +- 3 files changed, 93 insertions(+), 2 deletions(-) diff --git a/src/cmd/compile/internal/ssa/_gen/generic.rules b/src/cmd/compile/internal/ssa/_gen/generic.rules index b16aa473cd5961..6fdea7cc7a3cdc 100644 --- a/src/cmd/compile/internal/ssa/_gen/generic.rules +++ b/src/cmd/compile/internal/ssa/_gen/generic.rules @@ -989,6 +989,10 @@ (Const64 [0]) (Const64 [0])) +// Special rule to help constant slicing; len > 0 implies cap > 0 implies Slicemask is all 1 +(SliceMake (AddPtr x (And64 y (Slicemask _))) w:(Const64 [c]) z) && c > 0 => (SliceMake (AddPtr x y) w z) +(SliceMake (AddPtr x (And32 y (Slicemask _))) w:(Const32 [c]) z) && c > 0 => (SliceMake (AddPtr x y) w z) + // interface ops (ConstInterface) => (IMake diff --git a/src/cmd/compile/internal/ssa/rewritegeneric.go b/src/cmd/compile/internal/ssa/rewritegeneric.go index 5e0135be3a5be5..5720063f34b267 100644 --- a/src/cmd/compile/internal/ssa/rewritegeneric.go +++ b/src/cmd/compile/internal/ssa/rewritegeneric.go @@ -422,6 +422,8 @@ func rewriteValuegeneric(v *Value) bool { return rewriteValuegeneric_OpSliceCap(v) case OpSliceLen: return rewriteValuegeneric_OpSliceLen(v) + case OpSliceMake: + return rewriteValuegeneric_OpSliceMake(v) case OpSlicePtr: return rewriteValuegeneric_OpSlicePtr(v) case OpSlicemask: @@ -30514,6 +30516,91 @@ func rewriteValuegeneric_OpSliceLen(v *Value) bool { } return 
false } +func rewriteValuegeneric_OpSliceMake(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SliceMake (AddPtr x (And64 y (Slicemask _))) w:(Const64 [c]) z) + // cond: c > 0 + // result: (SliceMake (AddPtr x y) w z) + for { + if v_0.Op != OpAddPtr { + break + } + t := v_0.Type + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpAnd64 { + break + } + _ = v_0_1.Args[1] + v_0_1_0 := v_0_1.Args[0] + v_0_1_1 := v_0_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_1_0, v_0_1_1 = _i0+1, v_0_1_1, v_0_1_0 { + y := v_0_1_0 + if v_0_1_1.Op != OpSlicemask { + continue + } + w := v_1 + if w.Op != OpConst64 { + continue + } + c := auxIntToInt64(w.AuxInt) + z := v_2 + if !(c > 0) { + continue + } + v.reset(OpSliceMake) + v0 := b.NewValue0(v.Pos, OpAddPtr, t) + v0.AddArg2(x, y) + v.AddArg3(v0, w, z) + return true + } + break + } + // match: (SliceMake (AddPtr x (And32 y (Slicemask _))) w:(Const32 [c]) z) + // cond: c > 0 + // result: (SliceMake (AddPtr x y) w z) + for { + if v_0.Op != OpAddPtr { + break + } + t := v_0.Type + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpAnd32 { + break + } + _ = v_0_1.Args[1] + v_0_1_0 := v_0_1.Args[0] + v_0_1_1 := v_0_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_1_0, v_0_1_1 = _i0+1, v_0_1_1, v_0_1_0 { + y := v_0_1_0 + if v_0_1_1.Op != OpSlicemask { + continue + } + w := v_1 + if w.Op != OpConst32 { + continue + } + c := auxIntToInt32(w.AuxInt) + z := v_2 + if !(c > 0) { + continue + } + v.reset(OpSliceMake) + v0 := b.NewValue0(v.Pos, OpAddPtr, t) + v0.AddArg2(x, y) + v.AddArg3(v0, w, z) + return true + } + break + } + return false +} func rewriteValuegeneric_OpSlicePtr(v *Value) bool { v_0 := v.Args[0] // match: (SlicePtr (SliceMake (SlicePtr x) _ _)) diff --git a/test/prove.go b/test/prove.go index 70a27865cfd7c3..6d2bb0962be894 100644 --- a/test/prove.go +++ b/test/prove.go @@ -511,10 +511,10 @@ func f19() (e int64, err error) { func sm1(b []int, x int) { // Test constant argument to slicemask. - useSlice(b[2:8]) // ERROR "Proved slicemask not needed$" + useSlice(b[2:8]) // optimized away earlier by rewrite // Test non-constant argument with known limits. if cap(b) > 10 { - useSlice(b[2:]) + useSlice(b[2:]) // ERROR "Proved slicemask not needed$" } } From 1caa95acfa9d516eb3bc26292b5601bea25a4e79 Mon Sep 17 00:00:00 2001 From: David Chase Date: Wed, 3 Sep 2025 13:09:32 -0400 Subject: [PATCH 050/152] cmd/compile: enhance prove to deal with double-offset IsInBounds checks For chunked iterations (useful for, but not exclusive to, SIMD calculations) it is common to see the combination of ``` for ; i <= len(m)-4; i += 4 { ``` and ``` r0, r1, r2, r3 := m[i], m[i+1], m[i+2], m[i+3] `` Prove did not handle the case of len-offset1 vs index+offset2 checking, but this change fixes this. There may be other similar cases yet to handle -- this worked for the chunked loops for simd, as well as a handful in std. Cherry-picked from the dev.simd branch. This CL is not necessarily SIMD specific. Apply early to reduce risk. 
Change-Id: I3785df83028d517e5e5763206653b34b2befd3d0 Reviewed-on: https://go-review.googlesource.com/c/go/+/700696 Reviewed-by: Keith Randall Reviewed-by: Keith Randall LUCI-TryBot-Result: Go LUCI Reviewed-on: https://go-review.googlesource.com/c/go/+/708859 Reviewed-by: David Chase --- src/cmd/compile/internal/ssa/prove.go | 66 +++++++++++++++++++++++++++ test/prove.go | 12 ++--- 2 files changed, 72 insertions(+), 6 deletions(-) diff --git a/src/cmd/compile/internal/ssa/prove.go b/src/cmd/compile/internal/ssa/prove.go index 309229b4d753b7..7b860a6f9ea2b0 100644 --- a/src/cmd/compile/internal/ssa/prove.go +++ b/src/cmd/compile/internal/ssa/prove.go @@ -2174,6 +2174,65 @@ func unsignedSubUnderflows(a, b uint64) bool { return a < b } +// checkForChunkedIndexBounds looks for index expressions of the form +// A[i+delta] where delta < K and i <= len(A)-K. That is, this is a chunked +// iteration where the index is not directly compared to the length. +func checkForChunkedIndexBounds(ft *factsTable, b *Block, index, bound *Value) bool { + if bound.Op != OpSliceLen { + return false + } + lim := ft.limits[index.ID] + if lim.min < 0 { + return false + } + i, delta := isConstDelta(index) + if i == nil { + return false + } + if delta < 0 { + return false + } + // special case for blocked iteration over a slice. + // slicelen > i + delta && <==== if clauses above + // && index >= 0 <==== if clause above + // delta >= 0 && <==== if clause above + // slicelen-K >/>= x <==== checked below + // && K >=/> delta <==== checked below + // then v > w + // example: i <=/< len - 4/3 means i+{0,1,2,3} are legal indices + for o := ft.orderings[i.ID]; o != nil; o = o.next { + if o.d != signed { + continue + } + if ow := o.w; ow.Op == OpAdd64 { + var lenOffset *Value + if ow.Args[0] == bound { + lenOffset = ow.Args[1] + } else if ow.Args[1] == bound { + lenOffset = ow.Args[0] + } + if lenOffset == nil || lenOffset.Op != OpConst64 { + continue + } + if K := -lenOffset.AuxInt; K >= 0 { + or := o.r + if or == lt { + or = lt | eq + K++ + if K < 0 { + continue + } + } + + if delta < K && or == lt|eq { + return true + } + } + } + } + return false +} + func addLocalFacts(ft *factsTable, b *Block) { // Propagate constant ranges among values in this block. 
// We do this before the second loop so that we have the @@ -2285,6 +2344,13 @@ func addLocalFacts(ft *factsTable, b *Block) { if v.Args[0].Op == OpSliceMake { ft.update(b, v, v.Args[0].Args[2], signed, eq) } + case OpIsInBounds: + if checkForChunkedIndexBounds(ft, b, v.Args[0], v.Args[1]) { + if b.Func.pass.debug > 0 { + b.Func.Warnl(v.Pos, "Proved %s for blocked indexing", v.Op) + } + ft.booleanTrue(v) + } case OpPhi: addLocalFactsPhi(ft, v) } diff --git a/test/prove.go b/test/prove.go index 6d2bb0962be894..bcc023dfec62fd 100644 --- a/test/prove.go +++ b/test/prove.go @@ -773,8 +773,8 @@ func indexGT0(b []byte, n int) { func unrollUpExcl(a []int) int { var i, x int for i = 0; i < len(a)-1; i += 2 { // ERROR "Induction variable: limits \[0,\?\), increment 2$" - x += a[i] // ERROR "Proved IsInBounds$" - x += a[i+1] + x += a[i] // ERROR "Proved IsInBounds$" + x += a[i+1] // ERROR "Proved IsInBounds( for blocked indexing)?$" } if i == len(a)-1 { x += a[i] @@ -786,8 +786,8 @@ func unrollUpExcl(a []int) int { func unrollUpIncl(a []int) int { var i, x int for i = 0; i <= len(a)-2; i += 2 { // ERROR "Induction variable: limits \[0,\?\], increment 2$" - x += a[i] // ERROR "Proved IsInBounds$" - x += a[i+1] + x += a[i] // ERROR "Proved IsInBounds$" + x += a[i+1] // ERROR "Proved IsInBounds( for blocked indexing)?$" } if i == len(a)-1 { x += a[i] @@ -839,7 +839,7 @@ func unrollExclStepTooLarge(a []int) int { var i, x int for i = 0; i < len(a)-1; i += 3 { x += a[i] - x += a[i+1] + x += a[i+1] // ERROR "Proved IsInBounds( for blocked indexing)?$" } if i == len(a)-1 { x += a[i] @@ -852,7 +852,7 @@ func unrollInclStepTooLarge(a []int) int { var i, x int for i = 0; i <= len(a)-2; i += 3 { x += a[i] - x += a[i+1] + x += a[i+1] // ERROR "Proved IsInBounds( for blocked indexing)?$" } if i == len(a)-1 { x += a[i] From 18cd4a1fc7d5387ae91ffc23328e4fc81f93681d Mon Sep 17 00:00:00 2001 From: Cherry Mui Date: Mon, 15 Sep 2025 21:27:19 -0400 Subject: [PATCH 051/152] cmd/compile: use the right type for spill slot Currently, when shuffling registers, if we need to spill a register, we always create a spill slot of type int64. The type doesn't actually matter, as long as it is wide enough to hold the registers. This is no longer true with SIMD registers, which could be wider than a int64. Create the slot with the proper type instead. Cherry-picked from the dev.simd branch. This CL is not necessarily SIMD specific. Apply early to reduce risk. Test is SIMD specific so not included for now. Change-Id: I85c82e2532001bfdefe98c9446f2dd18583d49b4 Reviewed-on: https://go-review.googlesource.com/c/go/+/704055 TryBot-Bypass: Cherry Mui Reviewed-by: David Chase Reviewed-by: Junyang Shao Reviewed-on: https://go-review.googlesource.com/c/go/+/708860 LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/ssa/regalloc.go | 5 +---- src/cmd/compile/internal/ssa/value.go | 2 +- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/src/cmd/compile/internal/ssa/regalloc.go b/src/cmd/compile/internal/ssa/regalloc.go index 56f9e550b2d439..26c742d7fe78ff 100644 --- a/src/cmd/compile/internal/ssa/regalloc.go +++ b/src/cmd/compile/internal/ssa/regalloc.go @@ -2690,7 +2690,6 @@ func (e *edgeState) erase(loc Location) { // findRegFor finds a register we can use to make a temp copy of type typ. func (e *edgeState) findRegFor(typ *types.Type) Location { // Which registers are possibilities. - types := &e.s.f.Config.Types m := e.s.compatRegs(typ) // Pick a register. 
In priority order: @@ -2724,9 +2723,7 @@ func (e *edgeState) findRegFor(typ *types.Type) Location { if !c.rematerializeable() { x := e.p.NewValue1(c.Pos, OpStoreReg, c.Type, c) // Allocate a temp location to spill a register to. - // The type of the slot is immaterial - it will not be live across - // any safepoint. Just use a type big enough to hold any register. - t := LocalSlot{N: e.s.f.NewLocal(c.Pos, types.Int64), Type: types.Int64} + t := LocalSlot{N: e.s.f.NewLocal(c.Pos, c.Type), Type: c.Type} // TODO: reuse these slots. They'll need to be erased first. e.set(t, vid, x, false, c.Pos) if e.s.f.pass.debug > regDebug { diff --git a/src/cmd/compile/internal/ssa/value.go b/src/cmd/compile/internal/ssa/value.go index 55ab23ce9a0bcd..51a70c7fd4fdcd 100644 --- a/src/cmd/compile/internal/ssa/value.go +++ b/src/cmd/compile/internal/ssa/value.go @@ -600,7 +600,7 @@ func (v *Value) removeable() bool { func AutoVar(v *Value) (*ir.Name, int64) { if loc, ok := v.Block.Func.RegAlloc[v.ID].(LocalSlot); ok { if v.Type.Size() > loc.Type.Size() { - v.Fatalf("spill/restore type %s doesn't fit in slot type %s", v.Type, loc.Type) + v.Fatalf("v%d: spill/restore type %v doesn't fit in slot type %v", v.ID, v.Type, loc.Type) } return loc.N, loc.Off } From ad3db2562edf23cb4fb9a909ea11d57b65e304fb Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Tue, 16 Sep 2025 03:27:41 +0000 Subject: [PATCH 052/152] cmd/compile: handle rematerialized op for incompatible reg constraint This CL fixes an issue raised by contributor dominikh@. Cherry-picked from the dev.simd branch. This CL is not necessarily SIMD specific. Apply early to reduce risk. Test is SIMD specific so not included for now. Change-Id: I941b330a6ba6f6c120c69951ddd24933f2f0b3ec Reviewed-on: https://go-review.googlesource.com/c/go/+/704056 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase Reviewed-on: https://go-review.googlesource.com/c/go/+/708861 --- src/cmd/compile/internal/ssa/regalloc.go | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/src/cmd/compile/internal/ssa/regalloc.go b/src/cmd/compile/internal/ssa/regalloc.go index 26c742d7fe78ff..88861dfa14c840 100644 --- a/src/cmd/compile/internal/ssa/regalloc.go +++ b/src/cmd/compile/internal/ssa/regalloc.go @@ -2561,7 +2561,26 @@ func (e *edgeState) processDest(loc Location, vid ID, splice **Value, pos src.XP e.s.f.Fatalf("can't find source for %s->%s: %s\n", e.p, e.b, v.LongString()) } if dstReg { - x = v.copyInto(e.p) + // Handle incompatible registers. + // For #70451. + if e.s.regspec(v).outputs[0].regs®Mask(1< Date: Wed, 17 Sep 2025 14:25:16 -0400 Subject: [PATCH 053/152] cmd/compile: enhance the chunked indexing case to include reslicing this helps SIMD, but also helps plain old Go Cherry-picked from the dev.simd branch. This CL is not necessarily SIMD specific. Apply early to reduce risk. 
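As a rough illustration of the reslicing form now covered (hypothetical code, not from this CL):

```
// With i <= len(m)-4 known from the loop condition, the reslice
// m[i:i+4] is provably in bounds (i+4 <= len(m) <= cap(m)), so its
// IsSliceInBounds check can be dropped.
func chunks(m []byte, f func([]byte)) {
	for i := 0; i <= len(m)-4; i += 4 {
		f(m[i : i+4])
	}
}
```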
Change-Id: Idcdacd54b6776f5c32b497bc94485052611cfa8d Reviewed-on: https://go-review.googlesource.com/c/go/+/704756 Reviewed-by: Keith Randall Reviewed-by: Keith Randall LUCI-TryBot-Result: Go LUCI Reviewed-on: https://go-review.googlesource.com/c/go/+/708862 Reviewed-by: David Chase --- src/cmd/compile/internal/ssa/prove.go | 34 ++++++++++++++++++++------- 1 file changed, 26 insertions(+), 8 deletions(-) diff --git a/src/cmd/compile/internal/ssa/prove.go b/src/cmd/compile/internal/ssa/prove.go index 7b860a6f9ea2b0..b1d49812c7d353 100644 --- a/src/cmd/compile/internal/ssa/prove.go +++ b/src/cmd/compile/internal/ssa/prove.go @@ -2177,10 +2177,18 @@ func unsignedSubUnderflows(a, b uint64) bool { // checkForChunkedIndexBounds looks for index expressions of the form // A[i+delta] where delta < K and i <= len(A)-K. That is, this is a chunked // iteration where the index is not directly compared to the length. -func checkForChunkedIndexBounds(ft *factsTable, b *Block, index, bound *Value) bool { - if bound.Op != OpSliceLen { +// if isReslice, then delta can be equal to K. +func checkForChunkedIndexBounds(ft *factsTable, b *Block, index, bound *Value, isReslice bool) bool { + if bound.Op != OpSliceLen && bound.Op != OpSliceCap { return false } + + // this is a slice bounds check against len or capacity, + // and refers back to a prior check against length, which + // will also work for the cap since that is not smaller + // than the length. + + slice := bound.Args[0] lim := ft.limits[index.ID] if lim.min < 0 { return false @@ -2206,9 +2214,9 @@ func checkForChunkedIndexBounds(ft *factsTable, b *Block, index, bound *Value) b } if ow := o.w; ow.Op == OpAdd64 { var lenOffset *Value - if ow.Args[0] == bound { + if bound := ow.Args[0]; bound.Op == OpSliceLen && bound.Args[0] == slice { lenOffset = ow.Args[1] - } else if ow.Args[1] == bound { + } else if bound := ow.Args[1]; bound.Op == OpSliceLen && bound.Args[0] == slice { lenOffset = ow.Args[0] } if lenOffset == nil || lenOffset.Op != OpConst64 { @@ -2216,12 +2224,15 @@ func checkForChunkedIndexBounds(ft *factsTable, b *Block, index, bound *Value) b } if K := -lenOffset.AuxInt; K >= 0 { or := o.r + if isReslice { + K++ + } if or == lt { or = lt | eq K++ - if K < 0 { - continue - } + } + if K < 0 { // We hate thinking about overflow + continue } if delta < K && or == lt|eq { @@ -2345,12 +2356,19 @@ func addLocalFacts(ft *factsTable, b *Block) { ft.update(b, v, v.Args[0].Args[2], signed, eq) } case OpIsInBounds: - if checkForChunkedIndexBounds(ft, b, v.Args[0], v.Args[1]) { + if checkForChunkedIndexBounds(ft, b, v.Args[0], v.Args[1], false) { if b.Func.pass.debug > 0 { b.Func.Warnl(v.Pos, "Proved %s for blocked indexing", v.Op) } ft.booleanTrue(v) } + case OpIsSliceInBounds: + if checkForChunkedIndexBounds(ft, b, v.Args[0], v.Args[1], true) { + if b.Func.pass.debug > 0 { + b.Func.Warnl(v.Pos, "Proved %s for blocked reslicing", v.Op) + } + ft.booleanTrue(v) + } case OpPhi: addLocalFactsPhi(ft, v) } From d91148c7a8b2d774ddea5c66c170d24937195df5 Mon Sep 17 00:00:00 2001 From: David Chase Date: Wed, 17 Sep 2025 17:19:15 -0400 Subject: [PATCH 054/152] cmd/compile: enhance prove to infer bounds in slice len/cap calculations the example comes up in chunked reslicing, e.g. A[i:] where i has a relationship with len(A)-K. Cherry-picked from the dev.simd branch. This CL is not necessarily SIMD specific. Apply early to reduce risk. 
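For illustration only, with made-up names, the sort of code that benefits:

```
// The loop condition gives i <= len(a)-4, so prove can infer
// len(a[i:]) = len(a)-i >= 4 and drop the bounds checks on the
// constant indices into the tail slice.
func tails(a []int32) int32 {
	var s int32
	for i := 0; i <= len(a)-4; i += 4 {
		t := a[i:]
		s += t[0] + t[1] + t[2] + t[3]
	}
	return s
}
```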
Change-Id: Ib97dede6cfc7bbbd27b4f384988f741760686604 Reviewed-on: https://go-review.googlesource.com/c/go/+/704875 Reviewed-by: Keith Randall LUCI-TryBot-Result: Go LUCI Reviewed-by: Keith Randall Reviewed-on: https://go-review.googlesource.com/c/go/+/708863 Reviewed-by: David Chase --- src/cmd/compile/internal/ssa/prove.go | 65 ++++++++++++++++++++++++++- 1 file changed, 64 insertions(+), 1 deletion(-) diff --git a/src/cmd/compile/internal/ssa/prove.go b/src/cmd/compile/internal/ssa/prove.go index b1d49812c7d353..5ed5be47443d77 100644 --- a/src/cmd/compile/internal/ssa/prove.go +++ b/src/cmd/compile/internal/ssa/prove.go @@ -1766,7 +1766,8 @@ func (ft *factsTable) flowLimit(v *Value) bool { b := ft.limits[v.Args[1].ID] sub := ft.newLimit(v, a.sub(b, uint(v.Type.Size())*8)) mod := ft.detectSignedMod(v) - return sub || mod + inferred := ft.detectSliceLenRelation(v) + return sub || mod || inferred case OpNeg64, OpNeg32, OpNeg16, OpNeg8: a := ft.limits[v.Args[0].ID] bitsize := uint(v.Type.Size()) * 8 @@ -1947,6 +1948,68 @@ func (ft *factsTable) detectSignedMod(v *Value) bool { // TODO: non-powers-of-2 return false } + +// detectSliceLenRelation matches the pattern where +// 1. v := slicelen - index, OR v := slicecap - index +// AND +// 2. index <= slicelen - K +// THEN +// +// slicecap - index >= slicelen - index >= K +// +// Note that "index" is not useed for indexing in this pattern, but +// in the motivating example (chunked slice iteration) it is. +func (ft *factsTable) detectSliceLenRelation(v *Value) (inferred bool) { + if v.Op != OpSub64 { + return false + } + + if !(v.Args[0].Op == OpSliceLen || v.Args[0].Op == OpSliceCap) { + return false + } + + slice := v.Args[0].Args[0] + index := v.Args[1] + + for o := ft.orderings[index.ID]; o != nil; o = o.next { + if o.d != signed { + continue + } + or := o.r + if or != lt && or != lt|eq { + continue + } + ow := o.w + if ow.Op != OpAdd64 && ow.Op != OpSub64 { + continue + } + var lenOffset *Value + if bound := ow.Args[0]; bound.Op == OpSliceLen && bound.Args[0] == slice { + lenOffset = ow.Args[1] + } else if bound := ow.Args[1]; bound.Op == OpSliceLen && bound.Args[0] == slice { + lenOffset = ow.Args[0] + } + if lenOffset == nil || lenOffset.Op != OpConst64 { + continue + } + K := lenOffset.AuxInt + if ow.Op == OpAdd64 { + K = -K + } + if K < 0 { + continue + } + if or == lt { + K++ + } + if K < 0 { // We hate thinking about overflow + continue + } + inferred = inferred || ft.signedMin(v, K) + } + return inferred +} + func (ft *factsTable) detectSignedModByPowerOfTwo(v *Value) bool { // We're looking for: // From 003b5ce1bc15cf265e74ba1ec4eb7cf801e49986 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Fri, 19 Sep 2025 18:38:25 +0000 Subject: [PATCH 055/152] cmd/compile: fix SIMD const rematerialization condition This CL fixes a condition for the previous fix CL 704056. Cherry-picked from the dev.simd branch. This CL is not necessarily SIMD specific. Apply early to reduce risk. Test is SIMD specific so not included for now. 
Change-Id: I1f1f8c6f72870403cb3dff14755c43385dc0c933 Reviewed-on: https://go-review.googlesource.com/c/go/+/705499 LUCI-TryBot-Result: Go LUCI Reviewed-by: Cherry Mui Reviewed-on: https://go-review.googlesource.com/c/go/+/708864 Reviewed-by: David Chase --- src/cmd/compile/internal/ssa/regalloc.go | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/src/cmd/compile/internal/ssa/regalloc.go b/src/cmd/compile/internal/ssa/regalloc.go index 88861dfa14c840..e959b8ed7df2eb 100644 --- a/src/cmd/compile/internal/ssa/regalloc.go +++ b/src/cmd/compile/internal/ssa/regalloc.go @@ -2561,22 +2561,25 @@ func (e *edgeState) processDest(loc Location, vid ID, splice **Value, pos src.XP e.s.f.Fatalf("can't find source for %s->%s: %s\n", e.p, e.b, v.LongString()) } if dstReg { - // Handle incompatible registers. + // We want to rematerialize v into a register that is incompatible with v's op's register mask. + // Instead of setting the wrong register for the rematerialized v, we should find the right register + // for it and emit an additional copy to move to the desired register. // For #70451. - if e.s.regspec(v).outputs[0].regs®Mask(1< Date: Mon, 22 Sep 2025 10:57:29 -0400 Subject: [PATCH 056/152] cmd/compile: remove stores to unread parameters Currently, we remove stores to local variables that are not read. We don't do that for arguments. But arguments and locals are essentially the same. Arguments are passed by value, and are not expected to be read in the caller's frame. So we can remove the writes to them as well. One exception is the cgo_unsafe_arg directive, which makes all the arguments effectively address-taken. cgo_unsafe_arg implies ABI0, so we just skip ABI0 functions' arguments. Cherry-picked from the dev.simd branch. This CL is not necessarily SIMD specific. Apply early to reduce risk. Change-Id: I8999fc50da6a87f22c1ec23e9a0c15483b6f7df8 Reviewed-on: https://go-review.googlesource.com/c/go/+/705815 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase Reviewed-by: Junyang Shao Reviewed-on: https://go-review.googlesource.com/c/go/+/708865 --- src/cmd/compile/internal/ssa/deadstore.go | 22 +++++++++++++++---- src/runtime/testdata/testprog/badtraceback.go | 2 ++ test/codegen/stack.go | 6 +++++ 3 files changed, 26 insertions(+), 4 deletions(-) diff --git a/src/cmd/compile/internal/ssa/deadstore.go b/src/cmd/compile/internal/ssa/deadstore.go index 9e67e8339992c7..d0adff788c0a4f 100644 --- a/src/cmd/compile/internal/ssa/deadstore.go +++ b/src/cmd/compile/internal/ssa/deadstore.go @@ -7,6 +7,7 @@ package ssa import ( "cmd/compile/internal/ir" "cmd/compile/internal/types" + "cmd/internal/obj" ) // dse does dead-store elimination on the Function. @@ -213,7 +214,7 @@ func elimDeadAutosGeneric(f *Func) { case OpAddr, OpLocalAddr: // Propagate the address if it points to an auto. n, ok := v.Aux.(*ir.Name) - if !ok || n.Class != ir.PAUTO { + if !ok || (n.Class != ir.PAUTO && !isABIInternalParam(f, n)) { return } if addr[v] == nil { @@ -224,7 +225,7 @@ func elimDeadAutosGeneric(f *Func) { case OpVarDef: // v should be eliminated if we eliminate the auto. n, ok := v.Aux.(*ir.Name) - if !ok || n.Class != ir.PAUTO { + if !ok || (n.Class != ir.PAUTO && !isABIInternalParam(f, n)) { return } if elim[v] == nil { @@ -240,7 +241,7 @@ func elimDeadAutosGeneric(f *Func) { // may not be used by the inline code, but will be used by // panic processing). 
n, ok := v.Aux.(*ir.Name) - if !ok || n.Class != ir.PAUTO { + if !ok || (n.Class != ir.PAUTO && !isABIInternalParam(f, n)) { return } if !used.Has(n) { @@ -373,7 +374,7 @@ func elimUnreadAutos(f *Func) { if !ok { continue } - if n.Class != ir.PAUTO { + if n.Class != ir.PAUTO && !isABIInternalParam(f, n) { continue } @@ -413,3 +414,16 @@ func elimUnreadAutos(f *Func) { store.Op = OpCopy } } + +// isABIInternalParam returns whether n is a parameter of an ABIInternal +// function. For dead store elimination, we can treat parameters the same +// way as autos. Storing to a parameter can be removed if it is not read +// or address-taken. +// +// We check ABI here because for a cgo_unsafe_arg function (which is ABI0), +// all the args are effectively address-taken, but not necessarily have +// an Addr or LocalAddr op. We could probably just check for cgo_unsafe_arg, +// but ABIInternal is mostly what matters. +func isABIInternalParam(f *Func, n *ir.Name) bool { + return n.Class == ir.PPARAM && f.ABISelf.Which() == obj.ABIInternal +} diff --git a/src/runtime/testdata/testprog/badtraceback.go b/src/runtime/testdata/testprog/badtraceback.go index 09aa2b877ecf5a..455118a54371d7 100644 --- a/src/runtime/testdata/testprog/badtraceback.go +++ b/src/runtime/testdata/testprog/badtraceback.go @@ -44,6 +44,8 @@ func badLR2(arg int) { lrPtr := (*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&arg)) - lrOff)) *lrPtr = 0xbad + runtime.KeepAlive(lrPtr) // prevent dead store elimination + // Print a backtrace. This should include diagnostics for the // bad return PC and a hex dump. panic("backtrace") diff --git a/test/codegen/stack.go b/test/codegen/stack.go index 4e45d68f3816bd..59284ae88862a3 100644 --- a/test/codegen/stack.go +++ b/test/codegen/stack.go @@ -168,3 +168,9 @@ func getp1() *[4]int { func getp2() *[4]int { return nil } + +// Store to an argument without read can be removed. +func storeArg(a [2]int) { + // amd64:-`MOVQ\t\$123,.*\.a\+\d+\(SP\)` + a[1] = 123 +} From 1bca4c1673f5d90822086f34aed6de4a9bea2d93 Mon Sep 17 00:00:00 2001 From: David Chase Date: Wed, 17 Sep 2025 17:21:37 -0400 Subject: [PATCH 057/152] cmd/compile: improve slicemask removal this will be subsumed by pending changes in local slice representation, however this was easy and works well. Cherry-picked from the dev.simd branch. This CL is not necessarily SIMD specific. Apply early to reduce risk. Change-Id: I5b6eb10d257f04f906be7a8a6f2b6833992a39e8 Reviewed-on: https://go-review.googlesource.com/c/go/+/704876 Reviewed-by: Keith Randall LUCI-TryBot-Result: Go LUCI Reviewed-by: Keith Randall Reviewed-on: https://go-review.googlesource.com/c/go/+/708866 Reviewed-by: David Chase --- src/cmd/compile/internal/ssa/prove.go | 30 +++++-- test/loopbce.go | 118 +++++++++++++------------- 2 files changed, 81 insertions(+), 67 deletions(-) diff --git a/src/cmd/compile/internal/ssa/prove.go b/src/cmd/compile/internal/ssa/prove.go index 5ed5be47443d77..b4f91fd4fd12dc 100644 --- a/src/cmd/compile/internal/ssa/prove.go +++ b/src/cmd/compile/internal/ssa/prove.go @@ -2529,24 +2529,38 @@ func simplifyBlock(sdom SparseTree, ft *factsTable, b *Block) { switch v.Op { case OpSlicemask: // Replace OpSlicemask operations in b with constants where possible. - x, delta := isConstDelta(v.Args[0]) - if x == nil { + cap := v.Args[0] + x, delta := isConstDelta(cap) + if x != nil { + // slicemask(x + y) + // if x is larger than -y (y is negative), then slicemask is -1. 
+ lim := ft.limits[x.ID] + if lim.umin > uint64(-delta) { + if cap.Op == OpAdd64 { + v.reset(OpConst64) + } else { + v.reset(OpConst32) + } + if b.Func.pass.debug > 0 { + b.Func.Warnl(v.Pos, "Proved slicemask not needed") + } + v.AuxInt = -1 + } break } - // slicemask(x + y) - // if x is larger than -y (y is negative), then slicemask is -1. - lim := ft.limits[x.ID] - if lim.umin > uint64(-delta) { - if v.Args[0].Op == OpAdd64 { + lim := ft.limits[cap.ID] + if lim.umin > 0 { + if cap.Type.Size() == 8 { v.reset(OpConst64) } else { v.reset(OpConst32) } if b.Func.pass.debug > 0 { - b.Func.Warnl(v.Pos, "Proved slicemask not needed") + b.Func.Warnl(v.Pos, "Proved slicemask not needed (by limit)") } v.AuxInt = -1 } + case OpCtz8, OpCtz16, OpCtz32, OpCtz64: // On some architectures, notably amd64, we can generate much better // code for CtzNN if we know that the argument is non-zero. diff --git a/test/loopbce.go b/test/loopbce.go index 8bc44ece9455a0..8a58d942361221 100644 --- a/test/loopbce.go +++ b/test/loopbce.go @@ -9,7 +9,7 @@ import "math" func f0a(a []int) int { x := 0 for i := range a { // ERROR "Induction variable: limits \[0,\?\), increment 1$" - x += a[i] // ERROR "(\([0-9]+\) )?Proved IsInBounds$" + x += a[i] // ERROR "Proved IsInBounds$" } return x } @@ -17,7 +17,7 @@ func f0a(a []int) int { func f0b(a []int) int { x := 0 for i := range a { // ERROR "Induction variable: limits \[0,\?\), increment 1$" - b := a[i:] // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" + b := a[i:] // ERROR "Proved IsSliceInBounds$" x += b[0] } return x @@ -26,8 +26,8 @@ func f0b(a []int) int { func f0c(a []int) int { x := 0 for i := range a { // ERROR "Induction variable: limits \[0,\?\), increment 1$" - b := a[:i+1] // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" - x += b[0] // ERROR "(\([0-9]+\) )?Proved IsInBounds$" + b := a[:i+1] // ERROR "Proved IsSliceInBounds$" + x += b[0] // ERROR "Proved IsInBounds$" } return x } @@ -43,7 +43,7 @@ func f1(a []int) int { func f2(a []int) int { x := 0 for i := 1; i < len(a); i++ { // ERROR "Induction variable: limits \[1,\?\), increment 1$" - x += a[i] // ERROR "(\([0-9]+\) )?Proved IsInBounds$" + x += a[i] // ERROR "Proved IsInBounds$" } return x } @@ -51,7 +51,7 @@ func f2(a []int) int { func f4(a [10]int) int { x := 0 for i := 0; i < len(a); i += 2 { // ERROR "Induction variable: limits \[0,8\], increment 2$" - x += a[i] // ERROR "(\([0-9]+\) )?Proved IsInBounds$" + x += a[i] // ERROR "Proved IsInBounds$" } return x } @@ -91,7 +91,7 @@ func f5_int8(a [10]int) int { //go:noinline func f6(a []int) { for i := range a { // ERROR "Induction variable: limits \[0,\?\), increment 1$" - b := a[0:i] // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" + b := a[0:i] // ERROR "Proved IsSliceInBounds$" f6(b) } } @@ -99,7 +99,7 @@ func f6(a []int) { func g0a(a string) int { x := 0 for i := 0; i < len(a); i++ { // ERROR "Induction variable: limits \[0,\?\), increment 1$" - x += int(a[i]) // ERROR "(\([0-9]+\) )?Proved IsInBounds$" + x += int(a[i]) // ERROR "Proved IsInBounds$" } return x } @@ -107,7 +107,7 @@ func g0a(a string) int { func g0b(a string) int { x := 0 for i := 0; len(a) > i; i++ { // ERROR "Induction variable: limits \[0,\?\), increment 1$" - x += int(a[i]) // ERROR "(\([0-9]+\) )?Proved IsInBounds$" + x += int(a[i]) // ERROR "Proved IsInBounds$" } return x } @@ -115,7 +115,7 @@ func g0b(a string) int { func g0c(a string) int { x := 0 for i := len(a); i > 0; i-- { // ERROR "Induction variable: limits \(0,\?\], increment 1$" - x += int(a[i-1]) // ERROR "(\([0-9]+\) 
)?Proved IsInBounds$" + x += int(a[i-1]) // ERROR "Proved IsInBounds$" } return x } @@ -123,7 +123,7 @@ func g0c(a string) int { func g0d(a string) int { x := 0 for i := len(a); 0 < i; i-- { // ERROR "Induction variable: limits \(0,\?\], increment 1$" - x += int(a[i-1]) // ERROR "(\([0-9]+\) )?Proved IsInBounds$" + x += int(a[i-1]) // ERROR "Proved IsInBounds$" } return x } @@ -131,7 +131,7 @@ func g0d(a string) int { func g0e(a string) int { x := 0 for i := len(a) - 1; i >= 0; i-- { // ERROR "Induction variable: limits \[0,\?\], increment 1$" - x += int(a[i]) // ERROR "(\([0-9]+\) )?Proved IsInBounds$" + x += int(a[i]) // ERROR "Proved IsInBounds$" } return x } @@ -139,7 +139,7 @@ func g0e(a string) int { func g0f(a string) int { x := 0 for i := len(a) - 1; 0 <= i; i-- { // ERROR "Induction variable: limits \[0,\?\], increment 1$" - x += int(a[i]) // ERROR "(\([0-9]+\) )?Proved IsInBounds$" + x += int(a[i]) // ERROR "Proved IsInBounds$" } return x } @@ -148,7 +148,7 @@ func g1() int { a := "evenlength" x := 0 for i := 0; i < len(a); i += 2 { // ERROR "Induction variable: limits \[0,8\], increment 2$" - x += int(a[i]) // ERROR "(\([0-9]+\) )?Proved IsInBounds$" + x += int(a[i]) // ERROR "Proved IsInBounds$" } return x } @@ -158,7 +158,7 @@ func g2() int { x := 0 for i := 0; i < len(a); i += 2 { // ERROR "Induction variable: limits \[0,8\], increment 2$" j := i - if a[i] == 'e' { // ERROR "(\([0-9]+\) )?Proved IsInBounds$" + if a[i] == 'e' { // ERROR "Proved IsInBounds$" j = j + 1 } x += int(a[j]) @@ -169,29 +169,29 @@ func g2() int { func g3a() { a := "this string has length 25" for i := 0; i < len(a); i += 5 { // ERROR "Induction variable: limits \[0,20\], increment 5$" - useString(a[i:]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" - useString(a[:i+3]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" - useString(a[:i+5]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" + useString(a[i:]) // ERROR "Proved IsSliceInBounds$" "Proved slicemask not needed \(by limit\)$" + useString(a[:i+3]) // ERROR "Proved IsSliceInBounds$" + useString(a[:i+5]) // ERROR "Proved IsSliceInBounds$" useString(a[:i+6]) } } func g3b(a string) { for i := 0; i < len(a); i++ { // ERROR "Induction variable: limits \[0,\?\), increment 1$" - useString(a[i+1:]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" + useString(a[i+1:]) // ERROR "Proved IsSliceInBounds$" } } func g3c(a string) { for i := 0; i < len(a); i++ { // ERROR "Induction variable: limits \[0,\?\), increment 1$" - useString(a[:i+1]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" + useString(a[:i+1]) // ERROR "Proved IsSliceInBounds$" } } func h1(a []byte) { c := a[:128] for i := range c { // ERROR "Induction variable: limits \[0,128\), increment 1$" - c[i] = byte(i) // ERROR "(\([0-9]+\) )?Proved IsInBounds$" + c[i] = byte(i) // ERROR "Proved IsInBounds$" } } @@ -208,11 +208,11 @@ func k0(a [100]int) [100]int { continue } a[i-11] = i - a[i-10] = i // ERROR "(\([0-9]+\) )?Proved IsInBounds$" - a[i-5] = i // ERROR "(\([0-9]+\) )?Proved IsInBounds$" - a[i] = i // ERROR "(\([0-9]+\) )?Proved IsInBounds$" - a[i+5] = i // ERROR "(\([0-9]+\) )?Proved IsInBounds$" - a[i+10] = i // ERROR "(\([0-9]+\) )?Proved IsInBounds$" + a[i-10] = i // ERROR "Proved IsInBounds$" + a[i-5] = i // ERROR "Proved IsInBounds$" + a[i] = i // ERROR "Proved IsInBounds$" + a[i+5] = i // ERROR "Proved IsInBounds$" + a[i+10] = i // ERROR "Proved IsInBounds$" a[i+11] = i } return a @@ -225,12 +225,12 @@ func k1(a [100]int) [100]int { continue } useSlice(a[:i-11]) - useSlice(a[:i-10]) 
// ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" - useSlice(a[:i-5]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" - useSlice(a[:i]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" - useSlice(a[:i+5]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" - useSlice(a[:i+10]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" - useSlice(a[:i+11]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" + useSlice(a[:i-10]) // ERROR "Proved IsSliceInBounds$" + useSlice(a[:i-5]) // ERROR "Proved IsSliceInBounds$" + useSlice(a[:i]) // ERROR "Proved IsSliceInBounds$" + useSlice(a[:i+5]) // ERROR "Proved IsSliceInBounds$" + useSlice(a[:i+10]) // ERROR "Proved IsSliceInBounds$" + useSlice(a[:i+11]) // ERROR "Proved IsSliceInBounds$" useSlice(a[:i+12]) } @@ -243,13 +243,13 @@ func k2(a [100]int) [100]int { // This is a trick to prohibit sccp to optimize out the following out of bound check continue } - useSlice(a[i-11:]) - useSlice(a[i-10:]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" - useSlice(a[i-5:]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" - useSlice(a[i:]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" - useSlice(a[i+5:]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" - useSlice(a[i+10:]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" - useSlice(a[i+11:]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" + useSlice(a[i-11:]) // ERROR "Proved slicemask not needed \(by limit\)$" + useSlice(a[i-10:]) // ERROR "Proved IsSliceInBounds$" "Proved slicemask not needed \(by limit\)$" + useSlice(a[i-5:]) // ERROR "Proved IsSliceInBounds$" "Proved slicemask not needed \(by limit\)$" + useSlice(a[i:]) // ERROR "Proved IsSliceInBounds$" "Proved slicemask not needed \(by limit\)$" + useSlice(a[i+5:]) // ERROR "Proved IsSliceInBounds$" "Proved slicemask not needed \(by limit\)$" + useSlice(a[i+10:]) // ERROR "Proved IsSliceInBounds$" "Proved slicemask not needed \(by limit\)$" + useSlice(a[i+11:]) // ERROR "Proved IsSliceInBounds$" useSlice(a[i+12:]) } return a @@ -262,7 +262,7 @@ func k3(a [100]int) [100]int { continue } a[i+9] = i - a[i+10] = i // ERROR "(\([0-9]+\) )?Proved IsInBounds$" + a[i+10] = i // ERROR "Proved IsInBounds$" a[i+11] = i } return a @@ -275,7 +275,7 @@ func k3neg(a [100]int) [100]int { continue } a[i+9] = i - a[i+10] = i // ERROR "(\([0-9]+\) )?Proved IsInBounds$" + a[i+10] = i // ERROR "Proved IsInBounds$" a[i+11] = i } return a @@ -288,7 +288,7 @@ func k3neg2(a [100]int) [100]int { continue } a[i+9] = i - a[i+10] = i // ERROR "(\([0-9]+\) )?Proved IsInBounds$" + a[i+10] = i // ERROR "Proved IsInBounds$" a[i+11] = i } return a @@ -299,7 +299,7 @@ func k4(a [100]int) [100]int { // and it isn't worth adding that special case to prove. 
min := (-1)<<63 + 1 for i := min; i < min+50; i++ { // ERROR "Induction variable: limits \[-9223372036854775807,-9223372036854775757\), increment 1$" - a[i-min] = i // ERROR "(\([0-9]+\) )?Proved IsInBounds$" + a[i-min] = i // ERROR "Proved IsInBounds$" } return a } @@ -307,8 +307,8 @@ func k4(a [100]int) [100]int { func k5(a [100]int) [100]int { max := (1 << 63) - 1 for i := max - 50; i < max; i++ { // ERROR "Induction variable: limits \[9223372036854775757,9223372036854775807\), increment 1$" - a[i-max+50] = i // ERROR "(\([0-9]+\) )?Proved IsInBounds$" - a[i-(max-70)] = i // ERROR "(\([0-9]+\) )?Proved IsInBounds$" + a[i-max+50] = i // ERROR "Proved IsInBounds$" + a[i-(max-70)] = i // ERROR "Proved IsInBounds$" } return a } @@ -374,22 +374,22 @@ func d4() { } func d5() { - for i := int64(math.MinInt64 + 9); i > math.MinInt64+2; i -= 4 { // ERROR "Induction variable: limits \[-9223372036854775803,-9223372036854775799\], increment 4" + for i := int64(math.MinInt64 + 9); i > math.MinInt64+2; i -= 4 { // ERROR "Induction variable: limits \[-9223372036854775803,-9223372036854775799\], increment 4$" useString("foo") } - for i := int64(math.MinInt64 + 8); i > math.MinInt64+2; i -= 4 { // ERROR "Induction variable: limits \[-9223372036854775804,-9223372036854775800\], increment 4" + for i := int64(math.MinInt64 + 8); i > math.MinInt64+2; i -= 4 { // ERROR "Induction variable: limits \[-9223372036854775804,-9223372036854775800\], increment 4$" useString("foo") } for i := int64(math.MinInt64 + 7); i > math.MinInt64+2; i -= 4 { useString("foo") } - for i := int64(math.MinInt64 + 6); i > math.MinInt64+2; i -= 4 { // ERROR "Induction variable: limits \[-9223372036854775802,-9223372036854775802\], increment 4" + for i := int64(math.MinInt64 + 6); i > math.MinInt64+2; i -= 4 { // ERROR "Induction variable: limits \[-9223372036854775802,-9223372036854775802\], increment 4$" useString("foo") } - for i := int64(math.MinInt64 + 9); i >= math.MinInt64+2; i -= 4 { // ERROR "Induction variable: limits \[-9223372036854775803,-9223372036854775799\], increment 4" + for i := int64(math.MinInt64 + 9); i >= math.MinInt64+2; i -= 4 { // ERROR "Induction variable: limits \[-9223372036854775803,-9223372036854775799\], increment 4$" useString("foo") } - for i := int64(math.MinInt64 + 8); i >= math.MinInt64+2; i -= 4 { // ERROR "Induction variable: limits \[-9223372036854775804,-9223372036854775800\], increment 4" + for i := int64(math.MinInt64 + 8); i >= math.MinInt64+2; i -= 4 { // ERROR "Induction variable: limits \[-9223372036854775804,-9223372036854775800\], increment 4$" useString("foo") } for i := int64(math.MinInt64 + 7); i >= math.MinInt64+2; i -= 4 { @@ -410,23 +410,23 @@ func bce1() { panic("invalid test: modulos should differ") } - for i := b; i < a; i += z { // ERROR "Induction variable: limits \[-1547,9223372036854772720\], increment 1337" + for i := b; i < a; i += z { // ERROR "Induction variable: limits \[-1547,9223372036854772720\], increment 1337$" useString("foobar") } } func nobce2(a string) { for i := int64(0); i < int64(len(a)); i++ { // ERROR "Induction variable: limits \[0,\?\), increment 1$" - useString(a[i:]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" + useString(a[i:]) // ERROR "Proved IsSliceInBounds$" } for i := int64(0); i < int64(len(a))-31337; i++ { // ERROR "Induction variable: limits \[0,\?\), increment 1$" - useString(a[i:]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" + useString(a[i:]) // ERROR "Proved IsSliceInBounds$" } - for i := int64(0); i < 
int64(len(a))+int64(-1<<63); i++ { // ERROR "Induction variable: limits \[0,\?\), increment 1$" "Disproved Less64" + for i := int64(0); i < int64(len(a))+int64(-1<<63); i++ { // ERROR "Disproved Less64$" "Induction variable: limits \[0,\?\), increment 1$" useString(a[i:]) } j := int64(len(a)) - 123 - for i := int64(0); i < j+123+int64(-1<<63); i++ { // ERROR "Induction variable: limits \[0,\?\), increment 1$" "Disproved Less64" + for i := int64(0); i < j+123+int64(-1<<63); i++ { // ERROR "Disproved Less64$" "Induction variable: limits \[0,\?\), increment 1$" useString(a[i:]) } for i := int64(0); i < j+122+int64(-1<<63); i++ { // ERROR "Induction variable: limits \[0,\?\), increment 1$" @@ -455,16 +455,16 @@ func issue26116a(a []int) { func stride1(x *[7]int) int { s := 0 - for i := 0; i <= 8; i += 3 { // ERROR "Induction variable: limits \[0,6\], increment 3" - s += x[i] // ERROR "Proved IsInBounds" + for i := 0; i <= 8; i += 3 { // ERROR "Induction variable: limits \[0,6\], increment 3$" + s += x[i] // ERROR "Proved IsInBounds$" } return s } func stride2(x *[7]int) int { s := 0 - for i := 0; i < 9; i += 3 { // ERROR "Induction variable: limits \[0,6\], increment 3" - s += x[i] // ERROR "Proved IsInBounds" + for i := 0; i < 9; i += 3 { // ERROR "Induction variable: limits \[0,6\], increment 3$" + s += x[i] // ERROR "Proved IsInBounds$" } return s } From ee5369b003b17b34aa6417cf8c9b702f1cd76da1 Mon Sep 17 00:00:00 2001 From: qmuntal Date: Fri, 3 Oct 2025 11:18:47 +0200 Subject: [PATCH 058/152] cmd/link: add LIBRARY statement only with -buildmode=cshared When creating a .def file for Windows linking, add a LIBRARY statement only when building a DLL with -buildmode=cshared. That statement is documented to instruct the linker to create a DLL, overriding any other flag that might indicate building an executable. Fixes #75734 Change-Id: I0231435df70b71a493a39deb639f6328a8e354f6 Reviewed-on: https://go-review.googlesource.com/c/go/+/708815 Reviewed-by: Carlos Amedee Reviewed-by: Dominic Della Valle Reviewed-by: Cherry Mui LUCI-TryBot-Result: Go LUCI --- src/cmd/link/internal/ld/pe.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/cmd/link/internal/ld/pe.go b/src/cmd/link/internal/ld/pe.go index 0f0650e5e149e3..e0186f46b035d9 100644 --- a/src/cmd/link/internal/ld/pe.go +++ b/src/cmd/link/internal/ld/pe.go @@ -1758,7 +1758,9 @@ func peCreateExportFile(ctxt *Link, libName string) (fname string) { fname = filepath.Join(*flagTmpdir, "export_file.def") var buf bytes.Buffer - fmt.Fprintf(&buf, "LIBRARY %s\n", libName) + if ctxt.BuildMode == BuildModeCShared { + fmt.Fprintf(&buf, "LIBRARY %s\n", libName) + } buf.WriteString("EXPORTS\n") ldr := ctxt.loader From 2a71af11fc7e45903be9ab84e59c668bb051f528 Mon Sep 17 00:00:00 2001 From: Mateusz Poliwczak Date: Wed, 24 Sep 2025 16:40:55 +0200 Subject: [PATCH 059/152] net/url: improve URL docs The Raw fields are confusing and easy to use by mistake. Adds more context in comments to these fields. 
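To make the Raw-field pitfall concrete, a small illustrative program (hypothetical URL; expected output noted in comments):

```
package main

import (
	"fmt"
	"net/url"
)

func main() {
	u, err := url.Parse("https://example.com/a%2Fb/c?")
	if err != nil {
		panic(err)
	}
	fmt.Println(u.Path)          // "/a/b/c": decoded form, %2F is indistinguishable from /
	fmt.Println(u.EscapedPath()) // "/a%2Fb/c": original encoding, backed by RawPath
	fmt.Println(u.ForceQuery)    // true: String keeps the bare '?'
	fmt.Println(u.String())      // "https://example.com/a%2Fb/c?"
}
```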
Also the current docs (and the names of these fields) of these boolean fields are not obvious that parser might produce them, so clarify that Change-Id: I6a6a69644834c3ccbf657147f771930b6875f721 Reviewed-on: https://go-review.googlesource.com/c/go/+/706515 Reviewed-by: Florian Lehner Reviewed-by: Sean Liao Reviewed-by: Damien Neil Reviewed-by: Carlos Amedee Auto-Submit: Damien Neil LUCI-TryBot-Result: Go LUCI --- src/net/url/url.go | 50 ++++++++++++++++++++++++++++++---------------- 1 file changed, 33 insertions(+), 17 deletions(-) diff --git a/src/net/url/url.go b/src/net/url/url.go index 2a57659460373d..015c5b2751974a 100644 --- a/src/net/url/url.go +++ b/src/net/url/url.go @@ -364,25 +364,41 @@ func escape(s string, mode encoding) string { // A consequence is that it is impossible to tell which slashes in the Path were // slashes in the raw URL and which were %2f. This distinction is rarely important, // but when it is, the code should use the [URL.EscapedPath] method, which preserves -// the original encoding of Path. +// the original encoding of Path. The Fragment field is also stored in decoded form, +// use [URL.EscapedFragment] to retrieve the original encoding. // -// The RawPath field is an optional field which is only set when the default -// encoding of Path is different from the escaped path. See the EscapedPath method -// for more details. -// -// URL's String method uses the EscapedPath method to obtain the path. +// The [URL.String] method uses the [URL.EscapedPath] method to obtain the path. type URL struct { - Scheme string - Opaque string // encoded opaque data - User *Userinfo // username and password information - Host string // host or host:port (see Hostname and Port methods) - Path string // path (relative paths may omit leading slash) - RawPath string // encoded path hint (see EscapedPath method) - OmitHost bool // do not emit empty host (authority) - ForceQuery bool // append a query ('?') even if RawQuery is empty - RawQuery string // encoded query values, without '?' - Fragment string // fragment for references, without '#' - RawFragment string // encoded fragment hint (see EscapedFragment method) + Scheme string + Opaque string // encoded opaque data + User *Userinfo // username and password information + Host string // "host" or "host:port" (see Hostname and Port methods) + Path string // path (relative paths may omit leading slash) + Fragment string // fragment for references (without '#') + + // RawQuery contains the encoded query values, without the initial '?'. + // Use URL.Query to decode the query. + RawQuery string + + // RawPath is an optional field containing an encoded path hint. + // See the EscapedPath method for more details. + // + // In general, code should call EscapedPath instead of reading RawPath. + RawPath string + + // RawFragment is an optional field containing an encoded fragment hint. + // See the EscapedFragment method for more details. + // + // In general, code should call EscapedFragment instead of reading RawFragment. + RawFragment string + + // ForceQuery indicates whether the original URL contained a query ('?') character. + // When set, the String method will include a trailing '?', even when RawQuery is empty. + ForceQuery bool + + // OmitHost indicates the URL has an empty host (authority). + // When set, the String method will not include the host when it is empty. 
+ OmitHost bool } // User returns a [Userinfo] containing the provided username From 3a05e7b0325eb71fede880f67db63d192f2fa0e1 Mon Sep 17 00:00:00 2001 From: Oliver Eikemeier Date: Sun, 5 Oct 2025 11:57:40 +0000 Subject: [PATCH 060/152] spec: close tag Close an "a" tag. While we are here, fix some escapes. Change-Id: I16040eff0d4beeef6230aec8fcf4315f0efd13a4 GitHub-Last-Rev: 3ba7b9f7478f54338bd3ca7ac55cc2ad1ffcb3a4 GitHub-Pull-Request: golang/go#75760 Reviewed-on: https://go-review.googlesource.com/c/go/+/708517 Reviewed-by: Sean Liao LUCI-TryBot-Result: Go LUCI Reviewed-by: Cherry Mui Reviewed-by: Emmanuel Odeke Reviewed-by: Robert Findley Auto-Submit: Sean Liao --- doc/go_spec.html | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/doc/go_spec.html b/doc/go_spec.html index 92afe1cee0baef..d6c7962a961b88 100644 --- a/doc/go_spec.html +++ b/doc/go_spec.html @@ -3173,7 +3173,7 @@

Composite literals

 Unless the LiteralType is a type parameter,
-its underlying type
+its underlying type
 must be a struct, array, slice, or map type
 (the syntax enforces this constraint except when the type is given
 as a TypeName).
@@ -4873,7 +4873,7 @@

Operator precedence

 x <= f() // x <= f()
 ^a >> b // (^a) >> b
 f() || g() // f() || g()
-x == y+1 && <-chanInt > 0 // (x == (y+1)) && ((<-chanInt) > 0)
+x == y+1 && <-chanInt > 0 // (x == (y+1)) && ((<-chanInt) > 0)
@@ -6635,7 +6635,7 @@

For statements with for clause

 var prints []func()
-for i := 0; i < 5; i++ {
+for i := 0; i < 5; i++ {
 	prints = append(prints, func() { println(i) })
 	i++
 }
@@ -6772,7 +6772,7 @@ 

For statements with range clause

variable, which must be of integer type. Otherwise, if the iteration variable is declared by the "range" clause or is absent, the type of the iteration values is the default type for n. -If n <= 0, the loop does not run any iterations. +If n <= 0, the loop does not run any iterations.
  • @@ -7799,7 +7799,7 @@

    Min and max

    -min(x, y)    == if x <= y then x else y
    +min(x, y)    == if x <= y then x else y
     min(x, y, z) == min(min(x, y), z)
     
    From e74b224b7c7b7511fe37686d81a6e33d40fdeb17 Mon Sep 17 00:00:00 2001 From: Daniel McCarney Date: Fri, 11 Jul 2025 10:42:22 -0400 Subject: [PATCH 061/152] crypto/tls: streamline BoGo testing w/ -bogo-local-dir If -bogo-local-dir is provided but doesn't exist, populate it with a git checkout of the BoringSSL repo at the correct SHA. Without any -bogo-local-dir argument the BoGo TLS handshake test will fetch the BoringSSL source at a specific SHA as a Go module in a r/o module directory. When debugging, or extending BoGo coverage, it's preferable to have a mutable local copy of BoGo that the test will use. The pre-existing -bogo-local-dir flag offered a way to use a checkout of BoGo but it relied on the user fetching the correct repo & revision manually ahead of time. This commit extends the test to automatically invoke `git` to clone the repo into the provided local dir at the correct SHA based on the boringsslModVer const if the local dir doesn't exist. This leaves the user ready to make changes in local BoGo dir to aid debugging, or to upstream as CRs to BoringSSL, and prevents using an incorrect SHA by mistake. Updates #72006 Change-Id: I0451a3d35203878cdf02a7587e138c3cd60d15a9 Reviewed-on: https://go-review.googlesource.com/c/go/+/687475 Reviewed-by: Roland Shoemaker Reviewed-by: Carlos Amedee TryBot-Bypass: Daniel McCarney --- src/crypto/tls/bogo_shim_test.go | 46 ++++++++++++++++++++++++++++++++ src/crypto/tls/handshake_test.go | 5 ++-- 2 files changed, 49 insertions(+), 2 deletions(-) diff --git a/src/crypto/tls/bogo_shim_test.go b/src/crypto/tls/bogo_shim_test.go index f3f19b34e9e4cc..8f171d925959c8 100644 --- a/src/crypto/tls/bogo_shim_test.go +++ b/src/crypto/tls/bogo_shim_test.go @@ -11,6 +11,7 @@ import ( "encoding/base64" "encoding/json" "encoding/pem" + "errors" "flag" "fmt" "html/template" @@ -541,6 +542,7 @@ func orderlyShutdown(tlsConn *Conn) { } func TestBogoSuite(t *testing.T) { + testenv.MustHaveGoBuild(t) if testing.Short() { t.Skip("skipping in short mode") } @@ -559,6 +561,7 @@ func TestBogoSuite(t *testing.T) { var bogoDir string if *bogoLocalDir != "" { + ensureLocalBogo(t, *bogoLocalDir) bogoDir = *bogoLocalDir } else { bogoDir = cryptotest.FetchModule(t, "boringssl.googlesource.com/boringssl.git", boringsslModVer) @@ -664,6 +667,49 @@ func TestBogoSuite(t *testing.T) { } } +// ensureLocalBogo fetches BoringSSL to localBogoDir at the correct revision +// (from boringsslModVer) if localBogoDir doesn't already exist. +// +// If localBogoDir does exist, ensureLocalBogo fails the test if it isn't +// a directory. 
+func ensureLocalBogo(t *testing.T, localBogoDir string) { + t.Helper() + + if stat, err := os.Stat(localBogoDir); err == nil { + if !stat.IsDir() { + t.Fatalf("local bogo dir (%q) exists but is not a directory", localBogoDir) + } + + t.Logf("using local bogo checkout from %q", localBogoDir) + return + } else if !errors.Is(err, os.ErrNotExist) { + t.Fatalf("failed to stat local bogo dir (%q): %v", localBogoDir, err) + } + + testenv.MustHaveExecPath(t, "git") + + idx := strings.LastIndex(boringsslModVer, "-") + if idx == -1 || idx == len(boringsslModVer)-1 { + t.Fatalf("invalid boringsslModVer format: %q", boringsslModVer) + } + commitSHA := boringsslModVer[idx+1:] + + t.Logf("cloning boringssl@%s to %q", commitSHA, localBogoDir) + cloneCmd := testenv.Command(t, "git", "clone", "--no-checkout", "https://boringssl.googlesource.com/boringssl", localBogoDir) + if err := cloneCmd.Run(); err != nil { + t.Fatalf("git clone failed: %v", err) + } + + checkoutCmd := testenv.Command(t, "git", "checkout", commitSHA) + checkoutCmd.Dir = localBogoDir + if err := checkoutCmd.Run(); err != nil { + t.Fatalf("git checkout failed: %v", err) + } + + t.Logf("using fresh local bogo checkout from %q", localBogoDir) + return +} + func generateReport(results bogoResults, outPath string) error { data := reportData{ Results: results, diff --git a/src/crypto/tls/handshake_test.go b/src/crypto/tls/handshake_test.go index 13f1bb1dd50a19..3e2c5663087828 100644 --- a/src/crypto/tls/handshake_test.go +++ b/src/crypto/tls/handshake_test.go @@ -46,8 +46,9 @@ var ( keyFile = flag.String("keylog", "", "destination file for KeyLogWriter") bogoMode = flag.Bool("bogo-mode", false, "Enabled bogo shim mode, ignore everything else") bogoFilter = flag.String("bogo-filter", "", "BoGo test filter") - bogoLocalDir = flag.String("bogo-local-dir", "", "Local BoGo to use, instead of fetching from source") - bogoReport = flag.String("bogo-html-report", "", "File path to render an HTML report with BoGo results") + bogoLocalDir = flag.String("bogo-local-dir", "", + "If not-present, checkout BoGo into this dir, or otherwise use it as a pre-existing checkout") + bogoReport = flag.String("bogo-html-report", "", "File path to render an HTML report with BoGo results") ) func runTestAndUpdateIfNeeded(t *testing.T, name string, run func(t *testing.T, update bool), wait bool) { From ac2ec82172799b88c057bb9ded6fe24e7909e860 Mon Sep 17 00:00:00 2001 From: Michael Anthony Knyszek Date: Fri, 3 Oct 2025 16:23:10 +0000 Subject: [PATCH 062/152] runtime: bump thread count slack for TestReadMetricsSched This test is *still* flaky, but it appears to be just mayMoreStackPreempt and the thread count *occasionally* exceeds the original (and arbitrary) thread count slack by exactly 1. Bump the thread count slack by one. We can investigate further and bump it again if it continues to be a problem. Fixes #75664. 
Change-Id: I29c922bba6d2cc99a8c3bf5e04cc512d0694f7fa Reviewed-on: https://go-review.googlesource.com/c/go/+/708868 Reviewed-by: Michael Pratt Auto-Submit: Michael Knyszek LUCI-TryBot-Result: Go LUCI --- src/runtime/testdata/testprog/schedmetrics.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/runtime/testdata/testprog/schedmetrics.go b/src/runtime/testdata/testprog/schedmetrics.go index 6d3f68a848e00a..bc0906330f1a4f 100644 --- a/src/runtime/testdata/testprog/schedmetrics.go +++ b/src/runtime/testdata/testprog/schedmetrics.go @@ -84,7 +84,12 @@ func SchedMetrics() { // threadsSlack is the maximum number of threads left over // from the runtime (sysmon, the template thread, etc.) - const threadsSlack = 4 + // Certain build modes may also cause the creation of additional + // threads through frequent scheduling, like mayMoreStackPreempt. + // A slack of 5 is arbitrary but appears to be enough to cover + // the leftovers plus any inflation from scheduling-heavy build + // modes. + const threadsSlack = 5 // Make sure GC isn't running, since GC workers interfere with // expected counts. From c2fb15164bdb9d44a302771be613fbef5faa4a8e Mon Sep 17 00:00:00 2001 From: Sean Liao Date: Sun, 5 Oct 2025 23:17:40 +0100 Subject: [PATCH 063/152] testing/synctest: remove Run Run (experimental) is replaced by Test. Fixes #74012 Change-Id: I1721e1edfbcb4f1fe2159dc0430a13685b2d08c4 Reviewed-on: https://go-review.googlesource.com/c/go/+/709355 Reviewed-by: Emmanuel Odeke Reviewed-by: Damien Neil Auto-Submit: Damien Neil LUCI-TryBot-Result: Go LUCI Reviewed-by: Alan Donovan --- src/testing/synctest/run.go | 16 ---------------- 1 file changed, 16 deletions(-) delete mode 100644 src/testing/synctest/run.go diff --git a/src/testing/synctest/run.go b/src/testing/synctest/run.go deleted file mode 100644 index 2e668ab8634f58..00000000000000 --- a/src/testing/synctest/run.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2025 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build goexperiment.synctest - -package synctest - -import "internal/synctest" - -// Run is deprecated. -// -// Deprecated: Use Test instead. Run will be removed in Go 1.26. -func Run(f func()) { - synctest.Run(f) -} From 24416458c21a48d83f34d3c2242d892e002bcd6c Mon Sep 17 00:00:00 2001 From: Ian Alexander Date: Wed, 20 Aug 2025 19:12:20 -0400 Subject: [PATCH 064/152] cmd/go: export type State Export the type `State` and add global variable `LoaderState` in preparation for refactoring usage of other global variables in the modload package. This commit is part of the overall effort to eliminate global modloader state. 
[git-generate] cd src/cmd/go/internal/modload rf 'mv state State' rf 'add State func NewState() *State { return &State{} }' rf 'add init.go:/NewState/+0 var LoaderState = NewState()' Change-Id: I0ec6199ba3e05927bec12f11a60383d1b51b111a Reviewed-on: https://go-review.googlesource.com/c/go/+/698055 Reviewed-by: Michael Matloob Reviewed-by: Michael Matloob LUCI-TryBot-Result: Go LUCI --- src/cmd/go/internal/modload/init.go | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/src/cmd/go/internal/modload/init.go b/src/cmd/go/internal/modload/init.go index 498ff7433ea6fe..5192fe0fc8cc58 100644 --- a/src/cmd/go/internal/modload/init.go +++ b/src/cmd/go/internal/modload/init.go @@ -96,7 +96,7 @@ func EnterWorkspace(ctx context.Context) (exit func(), err error) { } // Reset the state to a clean state. - oldstate := setState(state{}) + oldstate := setState(State{}) ForceUseModules = true // Load in workspace mode. @@ -401,11 +401,11 @@ func WorkFilePath() string { // Reset clears all the initialized, cached state about the use of modules, // so that we can start over. func Reset() { - setState(state{}) + setState(State{}) } -func setState(s state) state { - oldState := state{ +func setState(s State) State { + oldState := State{ initialized: initialized, forceUseModules: ForceUseModules, rootMode: RootMode, @@ -429,7 +429,7 @@ func setState(s state) state { return oldState } -type state struct { +type State struct { initialized bool forceUseModules bool rootMode Root @@ -441,6 +441,10 @@ type state struct { modfetchState modfetch.State } +func NewState() *State { return &State{} } + +var LoaderState = NewState() + // Init determines whether module mode is enabled, locates the root of the // current module (if any), sets environment variables for Git subprocesses, and // configures the cfg, codehost, load, modfetch, and search packages for use From f3312124c2370c2f64a7f9ad29732ec30209647a Mon Sep 17 00:00:00 2001 From: Michael Pratt Date: Mon, 6 Oct 2025 14:38:47 -0400 Subject: [PATCH 065/152] runtime: remove batching from spanSPMC free Added in CL 700496, freeSomeSpanSPMCs attempts to bound tail latency by processing at most 64 entries at a time, as well as returning early if it notices a preemption request. Both of those are attempts to reduce tail latency, as we cannot preempt the function while it holds the lock. This scheme is based on a similar scheme in freeSomeWbufs. freeSomeWbufs has a key difference: all workbufs in its list are unconditionally freed. So freeSomeWbufs will always make forward progress in each call (unless it is constantly preempted). In contrast, freeSomeSpanSPMCs only frees "dead" entries. If the list contains >64 live entries, a call may make no progress, and the caller will simply keep calling in a loop forever, until the GC ends at which point it returns success early. The infinite loop likely restarts at the next GC cycle. The queues are used on each P, so it is easy to have 64 permanently live queues if GOMAXPROCS >= 64. If GOMAXPROCS < 64, it is possible to transiently have more queues, but spanQueue.drain increases queue size in an attempt to reach a steady state of one queue per P. We must drop work.spanSPMCs.lock to allow preemption, but dropping the lock allows mutation of the linked list, meaning we cannot simply continue iteration after retaking lock. 
Since there is no straightforward resolution to this and we expect this to generally only be around 1 entry per P, simply remove the batching and process the entire list without preemption. We may want to revisit this in the future for very high GOMAXPROCS or if application regularly otherwise create very long lists. Fixes #75771. Change-Id: I6a6a636cd3be443aacde5a678c460aa7066b4c4a Reviewed-on: https://go-review.googlesource.com/c/go/+/709575 Reviewed-by: Michael Knyszek LUCI-TryBot-Result: Go LUCI --- src/runtime/mgc.go | 3 +-- src/runtime/mgcmark_greenteagc.go | 23 ++++++++++------------- src/runtime/mgcmark_nogreenteagc.go | 4 ++-- src/runtime/mgcsweep.go | 5 +---- 4 files changed, 14 insertions(+), 21 deletions(-) diff --git a/src/runtime/mgc.go b/src/runtime/mgc.go index b13ec845fc401f..b4b4485629b59d 100644 --- a/src/runtime/mgc.go +++ b/src/runtime/mgc.go @@ -2046,8 +2046,7 @@ func gcSweep(mode gcMode) bool { prepareFreeWorkbufs() for freeSomeWbufs(false) { } - for freeSomeSpanSPMCs(false) { - } + freeDeadSpanSPMCs() // All "free" events for this mark/sweep cycle have // now happened, so we can make this profile cycle // available immediately. diff --git a/src/runtime/mgcmark_greenteagc.go b/src/runtime/mgcmark_greenteagc.go index 3975e1e76b501c..7f8d60349ffb67 100644 --- a/src/runtime/mgcmark_greenteagc.go +++ b/src/runtime/mgcmark_greenteagc.go @@ -722,13 +722,8 @@ func (r *spanSPMC) slot(i uint32) *objptr { return (*objptr)(unsafe.Add(unsafe.Pointer(r.ring), idx*unsafe.Sizeof(objptr(0)))) } -// freeSomeSpanSPMCs frees some spanSPMCs back to the OS and returns -// true if it should be called again to free more. -func freeSomeSpanSPMCs(preemptible bool) bool { - // TODO(mknyszek): This is arbitrary, but some kind of limit is necessary - // to help bound delays to cooperatively preempt ourselves. - const batchSize = 64 - +// freeDeadSpanSPMCs frees dead spanSPMCs back to the OS. +func freeDeadSpanSPMCs() { // According to the SPMC memory management invariants, we can only free // spanSPMCs outside of the mark phase. We ensure we do this in two ways. // @@ -740,18 +735,21 @@ func freeSomeSpanSPMCs(preemptible bool) bool { // // This way, we ensure that we don't start freeing if we're in the wrong // phase, and the phase can't change on us while we're freeing. + // + // TODO(go.dev/issue/75771): Due to the grow semantics in + // spanQueue.drain, we expect a steady-state of around one spanSPMC per + // P, with some spikes higher when Ps have more than one. For high + // GOMAXPROCS, or if this list otherwise gets long, it would be nice to + // have a way to batch work that allows preemption during processing. lock(&work.spanSPMCs.lock) if gcphase != _GCoff || work.spanSPMCs.all == nil { unlock(&work.spanSPMCs.lock) - return false + return } rp := &work.spanSPMCs.all - gp := getg() - more := true - for i := 0; i < batchSize && !(preemptible && gp.preempt); i++ { + for { r := *rp if r == nil { - more = false break } if r.dead.Load() { @@ -766,7 +764,6 @@ func freeSomeSpanSPMCs(preemptible bool) bool { } } unlock(&work.spanSPMCs.lock) - return more } // tryStealSpan attempts to steal a span from another P's local queue. 
diff --git a/src/runtime/mgcmark_nogreenteagc.go b/src/runtime/mgcmark_nogreenteagc.go index 9838887f7be008..883c3451abec6b 100644 --- a/src/runtime/mgcmark_nogreenteagc.go +++ b/src/runtime/mgcmark_nogreenteagc.go @@ -67,8 +67,8 @@ type spanSPMC struct { _ sys.NotInHeap } -func freeSomeSpanSPMCs(preemptible bool) bool { - return false +func freeDeadSpanSPMCs() { + return } type objptr uintptr diff --git a/src/runtime/mgcsweep.go b/src/runtime/mgcsweep.go index 364cdb58ccb0bc..c3d6afb90a54fe 100644 --- a/src/runtime/mgcsweep.go +++ b/src/runtime/mgcsweep.go @@ -307,10 +307,7 @@ func bgsweep(c chan int) { // N.B. freeSomeWbufs is already batched internally. goschedIfBusy() } - for freeSomeSpanSPMCs(true) { - // N.B. freeSomeSpanSPMCs is already batched internally. - goschedIfBusy() - } + freeDeadSpanSPMCs() lock(&sweep.lock) if !isSweepDone() { // This can happen if a GC runs between From 719dfcf8a8478d70360bf3c34c0e920be7b32994 Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Sat, 17 May 2025 15:05:56 -0700 Subject: [PATCH 066/152] cmd/compile: redo arm64 LR/FP save and restore Instead of storing LR (the return address) at 0(SP) and the FP (parent's frame pointer) at -8(SP), store them at framesize-8(SP) and framesize-16(SP), respectively. We push and pop data onto the stack such that we're never accessing anything below SP. The prolog/epilog lengths are unchanged (3 insns for a typical prolog, 2 for a typical epilog). We use 8 bytes more per frame. Typical prologue: STP.W (FP, LR), -16(SP) MOVD SP, FP SUB $C, SP Typical epilogue: ADD $C, SP LDP.P 16(SP), (FP, LR) RET The previous word where we stored LR, at 0(SP), is now unused. We could repurpose that slot for storing a local variable. The new prolog and epilog instructions are recognized by libunwind, so pc-sampling tools like perf should now be accurate. (TODO: except maybe after the first RET instruction? Have to look into that.) 
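As a concrete illustration, assuming a hypothetical SUB constant of $16 (a 32-byte frame): after the prologue above, the saved FP sits at 16(SP) and the saved LR at 24(SP), i.e. framesize-16 and framesize-8, while 0(SP) holds the now-unused word.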
Update #73753 (fixes, for arm64) Update #57302 (Quim thinks this will help on that issue) Change-Id: I4800036a9a9a08aaaf35d9f99de79a36cf37ebb8 Reviewed-on: https://go-review.googlesource.com/c/go/+/674615 Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI Reviewed-by: Keith Randall --- src/cmd/compile/abi-internal.md | 12 +- src/cmd/compile/internal/arm64/ggen.go | 10 +- src/cmd/compile/internal/arm64/ssa.go | 2 +- src/cmd/compile/internal/ssagen/pgen.go | 6 + src/cmd/compile/internal/ssagen/ssa.go | 3 +- src/cmd/internal/obj/arm64/asm7.go | 12 +- src/cmd/internal/obj/arm64/obj7.go | 326 ++++++------------ src/cmd/link/internal/amd64/obj.go | 19 +- src/cmd/link/internal/arm64/obj.go | 23 +- src/cmd/link/internal/ld/dwarf.go | 7 +- src/cmd/link/internal/ld/lib.go | 4 + src/cmd/link/internal/ld/stackcheck.go | 5 - src/cmd/link/internal/x86/obj.go | 15 +- src/runtime/asm_arm64.s | 81 ++++- src/runtime/mkpreempt.go | 20 +- src/runtime/panic.go | 8 +- src/runtime/preempt_arm64.s | 15 +- src/runtime/race_arm64.s | 17 +- src/runtime/signal_arm64.go | 16 +- src/runtime/stack.go | 20 +- src/runtime/testdata/testprog/badtraceback.go | 5 + src/runtime/traceback.go | 30 +- test/nosplit.go | 8 +- 23 files changed, 303 insertions(+), 361 deletions(-) diff --git a/src/cmd/compile/abi-internal.md b/src/cmd/compile/abi-internal.md index eae230dc070d86..490e1affb74de9 100644 --- a/src/cmd/compile/abi-internal.md +++ b/src/cmd/compile/abi-internal.md @@ -576,19 +576,19 @@ A function's stack frame, after the frame is created, is laid out as follows: +------------------------------+ + | return PC | + | frame pointer on entry | ← R29 points to | ... locals ... | | ... outgoing arguments ... | - | return PC | ← RSP points to - | frame pointer on entry | + | unused word | ← RSP points to +------------------------------+ ↓ lower addresses The "return PC" is loaded to the link register, R30, as part of the arm64 `CALL` operation. -On entry, a function subtracts from RSP to open its stack frame, and -saves the values of R30 and R29 at the bottom of the frame. -Specifically, R30 is saved at 0(RSP) and R29 is saved at -8(RSP), -after RSP is updated. +On entry, a function pushes R30 (the return address) and R29 +(the caller's frame pointer) onto the bottom of the stack. It then +subtracts a constant from RSP to open its stack frame. A leaf function that does not require any stack space may omit the saved R30 and R29. diff --git a/src/cmd/compile/internal/arm64/ggen.go b/src/cmd/compile/internal/arm64/ggen.go index 14027467002a3e..6ba56b992eeda6 100644 --- a/src/cmd/compile/internal/arm64/ggen.go +++ b/src/cmd/compile/internal/arm64/ggen.go @@ -11,10 +11,12 @@ import ( ) func padframe(frame int64) int64 { - // arm64 requires that the frame size (not counting saved FP&LR) - // be 16 bytes aligned. If not, pad it. - if frame%16 != 0 { - frame += 16 - (frame % 16) + // arm64 requires frame sizes here that are 8 mod 16. + // With the additional (unused) slot at the bottom of the frame, + // that makes an aligned 16 byte frame. + // Adding a save region for LR+FP does not change the alignment. 
+ if frame != 0 { + frame += (-(frame + 8)) & 15 } return frame } diff --git a/src/cmd/compile/internal/arm64/ssa.go b/src/cmd/compile/internal/arm64/ssa.go index 7bc0e536e941e6..9f79a740c6ca81 100644 --- a/src/cmd/compile/internal/arm64/ssa.go +++ b/src/cmd/compile/internal/arm64/ssa.go @@ -221,7 +221,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { for i := 0; i < len(args); i++ { a := args[i] - // Offset by size of the saved LR slot. + // Offset by size of the unused slot before start of args. addr := ssagen.SpillSlotAddr(a, arm64.REGSP, base.Ctxt.Arch.FixedFrameSize) // Look for double-register operations if we can. if i < len(args)-1 { diff --git a/src/cmd/compile/internal/ssagen/pgen.go b/src/cmd/compile/internal/ssagen/pgen.go index 0a2010363f8d04..f0776172b9264a 100644 --- a/src/cmd/compile/internal/ssagen/pgen.go +++ b/src/cmd/compile/internal/ssagen/pgen.go @@ -393,10 +393,16 @@ func StackOffset(slot ssa.LocalSlot) int32 { case ir.PAUTO: off = n.FrameOffset() if base.Ctxt.Arch.FixedFrameSize == 0 { + // x86 return address off -= int64(types.PtrSize) } if buildcfg.FramePointerEnabled { + // frame pointer off -= int64(types.PtrSize) + if buildcfg.GOARCH == "arm64" { + // arm64 return address also + off -= int64(types.PtrSize) + } } } return int32(off + slot.Off) diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go index 1e2159579dfbf2..107447f04cc4f6 100644 --- a/src/cmd/compile/internal/ssagen/ssa.go +++ b/src/cmd/compile/internal/ssagen/ssa.go @@ -7150,6 +7150,7 @@ func defframe(s *State, e *ssafn, f *ssa.Func) { // Insert code to zero ambiguously live variables so that the // garbage collector only sees initialized values when it // looks for pointers. + // Note: lo/hi are offsets from varp and will be negative. var lo, hi int64 // Opaque state for backend to use. Current backends use it to @@ -7157,7 +7158,7 @@ func defframe(s *State, e *ssafn, f *ssa.Func) { var state uint32 // Iterate through declarations. Autos are sorted in decreasing - // frame offset order. + // frame offset order (least negative to most negative). for _, n := range e.curfn.Dcl { if !n.Needzero() { continue diff --git a/src/cmd/internal/obj/arm64/asm7.go b/src/cmd/internal/obj/arm64/asm7.go index 743d09a319087d..281d705a3eb614 100644 --- a/src/cmd/internal/obj/arm64/asm7.go +++ b/src/cmd/internal/obj/arm64/asm7.go @@ -51,7 +51,6 @@ type ctxt7 struct { blitrl *obj.Prog elitrl *obj.Prog autosize int32 - extrasize int32 instoffset int64 pc int64 pool struct { @@ -1122,8 +1121,7 @@ func span7(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { ctxt.Diag("arm64 ops not initialized, call arm64.buildop first") } - c := ctxt7{ctxt: ctxt, newprog: newprog, cursym: cursym, autosize: int32(p.To.Offset & 0xffffffff), extrasize: int32(p.To.Offset >> 32)} - p.To.Offset &= 0xffffffff // extrasize is no longer needed + c := ctxt7{ctxt: ctxt, newprog: newprog, cursym: cursym, autosize: int32(p.To.Offset)} // Process literal pool and allocate initial program counter for each Prog, before // generating branch veneers. @@ -2119,8 +2117,8 @@ func (c *ctxt7) aclass(a *obj.Addr) int { // a.Offset is still relative to pseudo-SP. 
a.Reg = obj.REG_NONE } - // The frame top 8 or 16 bytes are for FP - c.instoffset = int64(c.autosize) + a.Offset - int64(c.extrasize) + // The frame top 16 bytes are for LR/FP + c.instoffset = int64(c.autosize) + a.Offset - extrasize return autoclass(c.instoffset) case obj.NAME_PARAM: @@ -2180,8 +2178,8 @@ func (c *ctxt7) aclass(a *obj.Addr) int { // a.Offset is still relative to pseudo-SP. a.Reg = obj.REG_NONE } - // The frame top 8 or 16 bytes are for FP - c.instoffset = int64(c.autosize) + a.Offset - int64(c.extrasize) + // The frame top 16 bytes are for LR/FP + c.instoffset = int64(c.autosize) + a.Offset - extrasize case obj.NAME_PARAM: if a.Reg == REGSP { diff --git a/src/cmd/internal/obj/arm64/obj7.go b/src/cmd/internal/obj/arm64/obj7.go index 2583e46354292f..a697426145185b 100644 --- a/src/cmd/internal/obj/arm64/obj7.go +++ b/src/cmd/internal/obj/arm64/obj7.go @@ -36,7 +36,6 @@ import ( "cmd/internal/src" "cmd/internal/sys" "internal/abi" - "internal/buildcfg" "log" "math" ) @@ -472,6 +471,8 @@ func (c *ctxt7) rewriteToUseGot(p *obj.Prog) { obj.Nopout(p) } +const extrasize = 16 // space needed in the frame for LR+FP + func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { if cursym.Func().Text == nil || cursym.Func().Text.Link == nil { return @@ -521,33 +522,26 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { c.autosize = int32(textstksiz) if p.Mark&LEAF != 0 && c.autosize == 0 { - // A leaf function with no locals has no frame. + // A leaf function with no locals needs no frame. p.From.Sym.Set(obj.AttrNoFrame, true) } if !p.From.Sym.NoFrame() { // If there is a stack frame at all, it includes - // space to save the LR. + // space for the (now unused) word at [SP:SP+8]. c.autosize += 8 } + // Round up to a multiple of 16. + c.autosize += (-c.autosize) & 15 + if c.autosize != 0 { - extrasize := int32(0) - if c.autosize%16 == 8 { - // Allocate extra 8 bytes on the frame top to save FP - extrasize = 8 - } else if c.autosize&(16-1) == 0 { - // Allocate extra 16 bytes to save FP for the old frame whose size is 8 mod 16 - extrasize = 16 - } else { - c.ctxt.Diag("%v: unaligned frame size %d - must be 16 aligned", p, c.autosize-8) - } + // Allocate an extra 16 bytes at the top of the frame + // to save LR+FP. c.autosize += extrasize c.cursym.Func().Locals += extrasize - // low 32 bits for autosize - // high 32 bits for extrasize - p.To.Offset = int64(c.autosize) | int64(extrasize)<<32 + p.To.Offset = int64(c.autosize) } else { // NOFRAME p.To.Offset = 0 @@ -580,120 +574,72 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { var prologueEnd *obj.Prog aoffset := c.autosize - if aoffset > 0xf0 { - // MOVD.W offset variant range is -0x100 to 0xf8, SP should be 16-byte aligned. - // so the maximum aoffset value is 0xf0. - aoffset = 0xf0 + if aoffset < 16 { + log.Fatalf("aoffset too small %d", aoffset) } - // Frame is non-empty. Make sure to save link register, even if - // it is a leaf function, so that traceback works. q = p - if c.autosize > aoffset { - // Frame size is too large for a MOVD.W instruction. Store the frame pointer - // register and link register before decrementing SP, so if a signal comes - // during the execution of the function prologue, the traceback code will - // not see a half-updated stack frame. 
- - // SUB $autosize, RSP, R20 - q1 = obj.Appendp(q, c.newprog) - q1.Pos = p.Pos - q1.As = ASUB - q1.From.Type = obj.TYPE_CONST - q1.From.Offset = int64(c.autosize) - q1.Reg = REGSP - q1.To.Type = obj.TYPE_REG - q1.To.Reg = REG_R20 - - prologueEnd = q1 - - // STP (R29, R30), -8(R20) - q1 = obj.Appendp(q1, c.newprog) - q1.Pos = p.Pos - q1.As = ASTP - q1.From.Type = obj.TYPE_REGREG - q1.From.Reg = REGFP - q1.From.Offset = REGLINK - q1.To.Type = obj.TYPE_MEM - q1.To.Reg = REG_R20 - q1.To.Offset = -8 - - // This is not async preemptible, as if we open a frame - // at the current SP, it will clobber the saved LR. - q1 = c.ctxt.StartUnsafePoint(q1, c.newprog) - - // MOVD R20, RSP - q1 = obj.Appendp(q1, c.newprog) - q1.Pos = p.Pos - q1.As = AMOVD - q1.From.Type = obj.TYPE_REG - q1.From.Reg = REG_R20 - q1.To.Type = obj.TYPE_REG - q1.To.Reg = REGSP - q1.Spadj = c.autosize - - q1 = c.ctxt.EndUnsafePoint(q1, c.newprog, -1) - - if buildcfg.GOOS == "ios" { - // iOS does not support SA_ONSTACK. We will run the signal handler - // on the G stack. If we write below SP, it may be clobbered by - // the signal handler. So we save FP and LR after decrementing SP. - // STP (R29, R30), -8(RSP) + + // Store return address and frame pointer at the top of the stack frame. + // STP.W (R29, R30), -16(SP) + q1 = obj.Appendp(q, c.newprog) + q1.Pos = p.Pos + q1.As = ASTP + q1.From.Type = obj.TYPE_REGREG + q1.From.Reg = REGFP + q1.From.Offset = REGLINK + q1.To.Type = obj.TYPE_MEM + q1.To.Reg = REG_RSP + q1.To.Offset = -16 + q1.Scond = C_XPRE + + prologueEnd = q1 + + // Update frame pointer + q1 = obj.Appendp(q1, c.newprog) + q1.Pos = p.Pos + q1.As = AMOVD + q1.From.Type = obj.TYPE_REG + q1.From.Reg = REGSP + q1.To.Type = obj.TYPE_REG + q1.To.Reg = REGFP + + // Allocate additional frame space. + adj := aoffset - 16 + if adj > 0 { + // SUB $autosize-16, RSP + if adj < 1<<12 { + q1 = obj.Appendp(q1, c.newprog) + q1.Pos = p.Pos + q1.As = ASUB + q1.From.Type = obj.TYPE_CONST + q1.From.Offset = int64(adj) + q1.To.Type = obj.TYPE_REG + q1.To.Reg = REGSP + } else { + // Constant too big for atomic subtract. + // Materialize in tmp register first. + q1 = obj.Appendp(q1, c.newprog) + q1.Pos = p.Pos + q1.As = AMOVD + q1.From.Type = obj.TYPE_CONST + q1.From.Offset = int64(adj) + q1.To.Type = obj.TYPE_REG + q1.To.Reg = REGTMP + q1 = obj.Appendp(q1, c.newprog) q1.Pos = p.Pos - q1.As = ASTP - q1.From.Type = obj.TYPE_REGREG - q1.From.Reg = REGFP - q1.From.Offset = REGLINK - q1.To.Type = obj.TYPE_MEM + q1.As = ASUB + q1.From.Type = obj.TYPE_REG + q1.From.Reg = REGTMP + q1.To.Type = obj.TYPE_REG q1.To.Reg = REGSP - q1.To.Offset = -8 } - } else { - // small frame, update SP and save LR in a single MOVD.W instruction. - // So if a signal comes during the execution of the function prologue, - // the traceback code will not see a half-updated stack frame. - // Also, on Linux, in a cgo binary we may get a SIGSETXID signal - // early on before the signal stack is set, as glibc doesn't allow - // us to block SIGSETXID. So it is important that we don't write below - // the SP until the signal stack is set. - // Luckily, all the functions from thread entry to setting the signal - // stack have small frames. - q1 = obj.Appendp(q, c.newprog) - q1.As = AMOVD - q1.Pos = p.Pos - q1.From.Type = obj.TYPE_REG - q1.From.Reg = REGLINK - q1.To.Type = obj.TYPE_MEM - q1.Scond = C_XPRE - q1.To.Offset = int64(-aoffset) - q1.To.Reg = REGSP - q1.Spadj = aoffset - - prologueEnd = q1 - - // Frame pointer. 
- q1 = obj.Appendp(q1, c.newprog) - q1.Pos = p.Pos - q1.As = AMOVD - q1.From.Type = obj.TYPE_REG - q1.From.Reg = REGFP - q1.To.Type = obj.TYPE_MEM - q1.To.Reg = REGSP - q1.To.Offset = -8 + q1.Spadj = adj } prologueEnd.Pos = prologueEnd.Pos.WithXlogue(src.PosPrologueEnd) - q1 = obj.Appendp(q1, c.newprog) - q1.Pos = p.Pos - q1.As = ASUB - q1.From.Type = obj.TYPE_CONST - q1.From.Offset = 8 - q1.Reg = REGSP - q1.To.Type = obj.TYPE_REG - q1.To.Reg = REGFP - case obj.ARET: nocache(p) if p.From.Type == obj.TYPE_CONST { @@ -707,105 +653,56 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { } p.To = obj.Addr{} aoffset := c.autosize - if c.cursym.Func().Text.Mark&LEAF != 0 { - if aoffset != 0 { - // Restore frame pointer. - // ADD $framesize-8, RSP, R29 - p.As = AADD - p.From.Type = obj.TYPE_CONST - p.From.Offset = int64(c.autosize) - 8 - p.Reg = REGSP - p.To.Type = obj.TYPE_REG - p.To.Reg = REGFP - - // Pop stack frame. - // ADD $framesize, RSP, RSP - p = obj.Appendp(p, c.newprog) - p.As = AADD - p.From.Type = obj.TYPE_CONST - p.From.Offset = int64(c.autosize) - p.To.Type = obj.TYPE_REG - p.To.Reg = REGSP - p.Spadj = -c.autosize + if aoffset > 0 { + if aoffset < 16 { + log.Fatalf("aoffset too small %d", aoffset) + } + adj := aoffset - 16 + if adj > 0 { + if adj < 1<<12 { + // ADD $adj, RSP, RSP + p.As = AADD + p.From.Type = obj.TYPE_CONST + p.From.Offset = int64(adj) + p.To.Type = obj.TYPE_REG + p.To.Reg = REGSP + } else { + // Put frame size in a separate register and + // add it in with a single instruction, + // so we never have a partial frame during + // the epilog. See issue 73259. + + // MOVD $adj, REGTMP + p.As = AMOVD + p.From.Type = obj.TYPE_CONST + p.From.Offset = int64(adj) + p.To.Type = obj.TYPE_REG + p.To.Reg = REGTMP + // ADD REGTMP, RSP, RSP + p = obj.Appendp(p, c.newprog) + p.As = AADD + p.From.Type = obj.TYPE_REG + p.From.Reg = REGTMP + p.To.Type = obj.TYPE_REG + p.To.Reg = REGSP + } + p.Spadj = -adj } - } else if aoffset <= 0xF0 { - // small frame, restore LR and update SP in a single MOVD.P instruction. - // There is no correctness issue to use a single LDP for LR and FP, - // but the instructions are not pattern matched with the prologue's - // MOVD.W and MOVD, which may cause performance issue in - // store-forwarding. - - // MOVD -8(RSP), R29 - p.As = AMOVD - p.From.Type = obj.TYPE_MEM - p.From.Reg = REGSP - p.From.Offset = -8 - p.To.Type = obj.TYPE_REG - p.To.Reg = REGFP - p = obj.Appendp(p, c.newprog) - // MOVD.P offset(RSP), R30 - p.As = AMOVD - p.From.Type = obj.TYPE_MEM - p.Scond = C_XPOST - p.From.Offset = int64(aoffset) - p.From.Reg = REGSP - p.To.Type = obj.TYPE_REG - p.To.Reg = REGLINK - p.Spadj = -aoffset - } else { - // LDP -8(RSP), (R29, R30) + // Pop LR+FP. + // LDP.P 16(RSP), (R29, R30) + if p.As != obj.ARET { + p = obj.Appendp(p, c.newprog) + } p.As = ALDP p.From.Type = obj.TYPE_MEM - p.From.Offset = -8 p.From.Reg = REGSP + p.From.Offset = 16 + p.Scond = C_XPOST p.To.Type = obj.TYPE_REGREG p.To.Reg = REGFP p.To.Offset = REGLINK - - if aoffset < 1<<12 { - // ADD $aoffset, RSP, RSP - q = newprog() - q.As = AADD - q.From.Type = obj.TYPE_CONST - q.From.Offset = int64(aoffset) - q.To.Type = obj.TYPE_REG - q.To.Reg = REGSP - q.Spadj = -aoffset - q.Pos = p.Pos - q.Link = p.Link - p.Link = q - p = q - } else { - // Put frame size in a separate register and - // add it in with a single instruction, - // so we never have a partial frame during - // the epilog. See issue 73259. 
- - // MOVD $aoffset, REGTMP - q = newprog() - q.As = AMOVD - q.From.Type = obj.TYPE_CONST - q.From.Offset = int64(aoffset) - q.To.Type = obj.TYPE_REG - q.To.Reg = REGTMP - q.Pos = p.Pos - q.Link = p.Link - p.Link = q - p = q - // ADD REGTMP, RSP, RSP - q = newprog() - q.As = AADD - q.From.Type = obj.TYPE_REG - q.From.Reg = REGTMP - q.To.Type = obj.TYPE_REG - q.To.Reg = REGSP - q.Spadj = -aoffset - q.Pos = p.Pos - q.Link = p.Link - p.Link = q - p = q - } + p.Spadj = -16 } // If enabled, this code emits 'MOV PC, R27' before every 'MOV LR, PC', @@ -868,10 +765,11 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { p.From.Type = obj.TYPE_REG p.From.Reg = REGLINK } else { - /* MOVD (RSP), Rd */ + /* MOVD framesize-8(RSP), Rd */ p.As = AMOVD p.From.Type = obj.TYPE_MEM p.From.Reg = REGSP + p.From.Offset = int64(c.autosize - 8) } } if p.To.Type == obj.TYPE_REG && p.To.Reg == REGSP && p.Spadj == 0 { @@ -906,6 +804,12 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { p.From.Reg = int16(REG_LSL + r + (shift&7)<<5) p.From.Offset = 0 } + if p.To.Type == obj.TYPE_MEM && p.To.Reg == REG_RSP && (p.Scond == C_XPRE || p.Scond == C_XPOST) { + p.Spadj += int32(-p.To.Offset) + } + if p.From.Type == obj.TYPE_MEM && p.From.Reg == REG_RSP && (p.Scond == C_XPRE || p.Scond == C_XPOST) { + p.Spadj += int32(-p.From.Offset) + } } } diff --git a/src/cmd/link/internal/amd64/obj.go b/src/cmd/link/internal/amd64/obj.go index 3a6141b9091eb6..761496549f93aa 100644 --- a/src/cmd/link/internal/amd64/obj.go +++ b/src/cmd/link/internal/amd64/obj.go @@ -51,15 +51,16 @@ func Init() (*sys.Arch, ld.Arch) { Plan9Magic: uint32(4*26*26 + 7), Plan9_64Bit: true, - Adddynrel: adddynrel, - Archinit: archinit, - Archreloc: archreloc, - Archrelocvariant: archrelocvariant, - Gentext: gentext, - Machoreloc1: machoreloc1, - MachorelocSize: 8, - PEreloc1: pereloc1, - TLSIEtoLE: tlsIEtoLE, + Adddynrel: adddynrel, + Archinit: archinit, + Archreloc: archreloc, + Archrelocvariant: archrelocvariant, + Gentext: gentext, + Machoreloc1: machoreloc1, + MachorelocSize: 8, + PEreloc1: pereloc1, + TLSIEtoLE: tlsIEtoLE, + ReturnAddressAtTopOfFrame: true, ELF: ld.ELFArch{ Linuxdynld: "/lib64/ld-linux-x86-64.so.2", diff --git a/src/cmd/link/internal/arm64/obj.go b/src/cmd/link/internal/arm64/obj.go index 3d358155badbca..e1e4ade81835c6 100644 --- a/src/cmd/link/internal/arm64/obj.go +++ b/src/cmd/link/internal/arm64/obj.go @@ -47,17 +47,18 @@ func Init() (*sys.Arch, ld.Arch) { Dwarfreglr: dwarfRegLR, TrampLimit: 0x7c00000, // 26-bit signed offset * 4, leave room for PLT etc. 
- Adddynrel: adddynrel, - Archinit: archinit, - Archreloc: archreloc, - Archrelocvariant: archrelocvariant, - Extreloc: extreloc, - Gentext: gentext, - GenSymsLate: gensymlate, - Machoreloc1: machoreloc1, - MachorelocSize: 8, - PEreloc1: pereloc1, - Trampoline: trampoline, + Adddynrel: adddynrel, + Archinit: archinit, + Archreloc: archreloc, + Archrelocvariant: archrelocvariant, + Extreloc: extreloc, + Gentext: gentext, + GenSymsLate: gensymlate, + Machoreloc1: machoreloc1, + MachorelocSize: 8, + PEreloc1: pereloc1, + Trampoline: trampoline, + ReturnAddressAtTopOfFrame: true, ELF: ld.ELFArch{ Androiddynld: "/system/bin/linker64", diff --git a/src/cmd/link/internal/ld/dwarf.go b/src/cmd/link/internal/ld/dwarf.go index 0003938ef2e036..c4d12a5488df3c 100644 --- a/src/cmd/link/internal/ld/dwarf.go +++ b/src/cmd/link/internal/ld/dwarf.go @@ -1544,9 +1544,14 @@ func (d *dwctxt) writeframes(fs loader.Sym) dwarfSecInfo { if pcsp.Value > 0 { // The return address is preserved at (CFA-frame_size) // after a stack frame has been allocated. + off := -spdelta + if thearch.ReturnAddressAtTopOfFrame { + // Except arm64, which has it at the top of frame. + off = -int64(d.arch.PtrSize) + } deltaBuf = append(deltaBuf, dwarf.DW_CFA_offset_extended_sf) deltaBuf = dwarf.AppendUleb128(deltaBuf, uint64(thearch.Dwarfreglr)) - deltaBuf = dwarf.AppendSleb128(deltaBuf, -spdelta/dataAlignmentFactor) + deltaBuf = dwarf.AppendSleb128(deltaBuf, off/dataAlignmentFactor) } else { // The return address is restored into the link register // when a stack frame has been de-allocated. diff --git a/src/cmd/link/internal/ld/lib.go b/src/cmd/link/internal/ld/lib.go index 2c861129b52f9a..5f5ebfc1d9855c 100644 --- a/src/cmd/link/internal/ld/lib.go +++ b/src/cmd/link/internal/ld/lib.go @@ -263,6 +263,10 @@ type Arch struct { // optional override for assignAddress AssignAddress func(ldr *loader.Loader, sect *sym.Section, n int, s loader.Sym, va uint64, isTramp bool) (*sym.Section, int, uint64) + // Reports whether the return address is stored at the top (highest address) + // of the stack frame. + ReturnAddressAtTopOfFrame bool + // ELF specific information. ELF ELFArch } diff --git a/src/cmd/link/internal/ld/stackcheck.go b/src/cmd/link/internal/ld/stackcheck.go index 98e7edaeb1477b..14cd3a22384f88 100644 --- a/src/cmd/link/internal/ld/stackcheck.go +++ b/src/cmd/link/internal/ld/stackcheck.go @@ -9,7 +9,6 @@ import ( "cmd/internal/objabi" "cmd/link/internal/loader" "fmt" - "internal/buildcfg" "sort" "strings" ) @@ -62,10 +61,6 @@ func (ctxt *Link) doStackCheck() { // that there are at least StackLimit bytes available below SP // when morestack returns. limit := objabi.StackNosplit(*flagRace) - sc.callSize - if buildcfg.GOARCH == "arm64" { - // Need an extra 8 bytes below SP to save FP. - limit -= 8 - } // Compute stack heights without any back-tracking information. 
// This will almost certainly succeed and we can simply diff --git a/src/cmd/link/internal/x86/obj.go b/src/cmd/link/internal/x86/obj.go index 4336f01ea3d536..a4885fde8fd06f 100644 --- a/src/cmd/link/internal/x86/obj.go +++ b/src/cmd/link/internal/x86/obj.go @@ -50,13 +50,14 @@ func Init() (*sys.Arch, ld.Arch) { Plan9Magic: uint32(4*11*11 + 7), - Adddynrel: adddynrel, - Archinit: archinit, - Archreloc: archreloc, - Archrelocvariant: archrelocvariant, - Gentext: gentext, - Machoreloc1: machoreloc1, - PEreloc1: pereloc1, + Adddynrel: adddynrel, + Archinit: archinit, + Archreloc: archreloc, + Archrelocvariant: archrelocvariant, + Gentext: gentext, + Machoreloc1: machoreloc1, + PEreloc1: pereloc1, + ReturnAddressAtTopOfFrame: true, ELF: ld.ELFArch{ Linuxdynld: "/lib/ld-linux.so.2", diff --git a/src/runtime/asm_arm64.s b/src/runtime/asm_arm64.s index a0e82ec830f74b..aa49a27a75d1e3 100644 --- a/src/runtime/asm_arm64.s +++ b/src/runtime/asm_arm64.s @@ -50,9 +50,7 @@ TEXT _rt0_arm64_lib(SB),NOSPLIT,$184 CBZ R4, nocgo MOVD $_rt0_arm64_lib_go(SB), R0 MOVD $0, R1 - SUB $16, RSP // reserve 16 bytes for sp-8 where fp may be saved. BL (R4) - ADD $16, RSP B restore nocgo: @@ -371,7 +369,6 @@ switch: BL runtime·save_g(SB) MOVD (g_sched+gobuf_sp)(g), R0 MOVD R0, RSP - MOVD (g_sched+gobuf_bp)(g), R29 MOVD $0, (g_sched+gobuf_sp)(g) MOVD $0, (g_sched+gobuf_bp)(g) RET @@ -381,8 +378,8 @@ noswitch: // Using a tail call here cleans up tracebacks since we won't stop // at an intermediate systemstack. MOVD 0(R26), R3 // code pointer - MOVD.P 16(RSP), R30 // restore LR - SUB $8, RSP, R29 // restore FP + ADD $16, RSP + LDP.P 16(RSP), (R29,R30) // restore FP, LR B (R3) // func switchToCrashStack0(fn func()) @@ -1051,7 +1048,7 @@ again: // Smashes R0. TEXT gosave_systemstack_switch<>(SB),NOSPLIT|NOFRAME,$0 MOVD $runtime·systemstack_switch(SB), R0 - ADD $8, R0 // get past prologue + ADD $12, R0 // get past prologue MOVD R0, (g_sched+gobuf_pc)(g) MOVD RSP, R0 MOVD R0, (g_sched+gobuf_sp)(g) @@ -1069,9 +1066,7 @@ TEXT gosave_systemstack_switch<>(SB),NOSPLIT|NOFRAME,$0 TEXT ·asmcgocall_no_g(SB),NOSPLIT,$0-16 MOVD fn+0(FP), R1 MOVD arg+8(FP), R0 - SUB $16, RSP // skip over saved frame pointer below RSP BL (R1) - ADD $16, RSP // skip over saved frame pointer below RSP RET // func asmcgocall(fn, arg unsafe.Pointer) int32 @@ -1236,9 +1231,9 @@ havem: BL runtime·save_g(SB) MOVD (g_sched+gobuf_sp)(g), R4 // prepare stack as R4 MOVD (g_sched+gobuf_pc)(g), R5 - MOVD R5, -48(R4) + MOVD R5, -8(R4) MOVD (g_sched+gobuf_bp)(g), R5 - MOVD R5, -56(R4) + MOVD R5, -16(R4) // Gather our arguments into registers. MOVD fn+0(FP), R1 MOVD frame+8(FP), R2 @@ -1252,7 +1247,7 @@ havem: CALL (R0) // indirect call to bypass nosplit check. We're on a different stack now. // Restore g->sched (== m->curg->sched) from saved values. - MOVD 0(RSP), R5 + MOVD 40(RSP), R5 MOVD R5, (g_sched+gobuf_pc)(g) MOVD RSP, R4 ADD $48, R4, R4 @@ -1490,10 +1485,57 @@ GLOBL debugCallFrameTooLarge<>(SB), RODATA, $20 // Size duplicated below // // This is ABIInternal because Go code injects its PC directly into new // goroutine stacks. +// +// State before debugger starts doing anything: +// | current | +// | stack | +// +-------------+ <- SP = origSP +// stopped executing at PC = origPC +// some values are in LR (origLR) and FP (origFP) +// +// After debugger has done steps 1-6 above: +// | current | +// | stack | +// +-------------+ <- origSP +// | ----- | (used to be a slot to store frame pointer on entry to origPC's frame.) 
+// +-------------+ +// | origLR | +// +-------------+ <- SP +// | ----- | +// +-------------+ +// | argsize | +// +-------------+ +// LR = origPC, PC = debugCallV2 +// +// debugCallV2 then modifies the stack up to the "good" label: +// | current | +// | stack | +// +-------------+ <- origSP +// | ----- | (used to be a slot to store frame pointer on entry to origPC's frame.) +// +-------------+ +// | origLR | +// +-------------+ <- where debugger left SP +// | origPC | +// +-------------+ +// | origFP | +// +-------------+ <- FP = SP + 256 +// | saved | +// | registers | +// | (224 bytes) | +// +-------------+ <- SP + 32 +// | space for | +// | outargs | +// +-------------+ <- SP + 8 +// | argsize | +// +-------------+ <- SP + TEXT runtime·debugCallV2(SB),NOSPLIT|NOFRAME,$0-0 - STP (R29, R30), -280(RSP) - SUB $272, RSP, RSP - SUB $8, RSP, R29 + MOVD R30, -8(RSP) // save origPC + MOVD -16(RSP), R30 // save argsize in R30 temporarily + MOVD.W R29, -16(RSP) // push origFP + MOVD RSP, R29 // frame pointer chain now set up + SUB $256, RSP, RSP // allocate frame + MOVD R30, (RSP) // Save argsize on the stack // Save all registers that may contain pointers so they can be // conservatively scanned. // @@ -1515,7 +1557,8 @@ TEXT runtime·debugCallV2(SB),NOSPLIT|NOFRAME,$0-0 STP (R0, R1), (4*8)(RSP) // Perform a safe-point check. - MOVD R30, 8(RSP) // Caller's PC + MOVD 264(RSP), R0 // origPC + MOVD R0, 8(RSP) CALL runtime·debugCallCheck(SB) MOVD 16(RSP), R0 CBZ R0, good @@ -1559,7 +1602,7 @@ good: CALL runtime·debugCallWrap(SB); \ JMP restore - MOVD 256(RSP), R0 // the argument frame size + MOVD (RSP), R0 // the argument frame size DEBUG_CALL_DISPATCH(debugCall32<>, 32) DEBUG_CALL_DISPATCH(debugCall64<>, 64) DEBUG_CALL_DISPATCH(debugCall128<>, 128) @@ -1607,9 +1650,9 @@ restore: LDP (6*8)(RSP), (R2, R3) LDP (4*8)(RSP), (R0, R1) - LDP -8(RSP), (R29, R27) - ADD $288, RSP, RSP // Add 16 more bytes, see saveSigContext - MOVD -16(RSP), R30 // restore old lr + MOVD 272(RSP), R30 // restore old lr (saved by (*sigctxt).pushCall) + LDP 256(RSP), (R29, R27) // restore old fp, set up resumption address + ADD $288, RSP, RSP // Pop frame, LR+FP, and block pushed by (*sigctxt).pushCall JMP (R27) // runtime.debugCallCheck assumes that functions defined with the diff --git a/src/runtime/mkpreempt.go b/src/runtime/mkpreempt.go index 769c4ffc5c9eeb..9064cae039f00d 100644 --- a/src/runtime/mkpreempt.go +++ b/src/runtime/mkpreempt.go @@ -488,26 +488,18 @@ func genARM64(g *gen) { l.stack += 8 // SP needs 16-byte alignment } - // allocate frame, save PC of interrupted instruction (in LR) - p("MOVD R30, %d(RSP)", -l.stack) + // allocate frame, save PC (in R30), FP (in R29) of interrupted instruction + p("STP.W (R29, R30), -16(RSP)") + p("MOVD RSP, R29") // set up new frame pointer p("SUB $%d, RSP", l.stack) - p("MOVD R29, -8(RSP)") // save frame pointer (only used on Linux) - p("SUB $8, RSP, R29") // set up new frame pointer - // On iOS, save the LR again after decrementing SP. We run the - // signal handler on the G stack (as it doesn't support sigaltstack), - // so any writes below SP may be clobbered. 
- p("#ifdef GOOS_ios") - p("MOVD R30, (RSP)") - p("#endif") l.save(g) p("CALL ·asyncPreempt2(SB)") l.restore(g) - p("MOVD %d(RSP), R30", l.stack) // sigctxt.pushCall has pushed LR (at interrupt) on stack, restore it - p("MOVD -8(RSP), R29") // restore frame pointer - p("MOVD (RSP), R27") // load PC to REGTMP - p("ADD $%d, RSP", l.stack+16) // pop frame (including the space pushed by sigctxt.pushCall) + p("MOVD %d(RSP), R30", l.stack+16) // sigctxt.pushCall has pushed LR (at interrupt) on stack, restore it + p("LDP %d(RSP), (R29, R27)", l.stack) // Restore frame pointer. Load PC into regtmp. + p("ADD $%d, RSP", l.stack+32) // pop frame (including the space pushed by sigctxt.pushCall) p("RET (R27)") } diff --git a/src/runtime/panic.go b/src/runtime/panic.go index 8c91c9435abd18..04b3afe1682765 100644 --- a/src/runtime/panic.go +++ b/src/runtime/panic.go @@ -1379,10 +1379,10 @@ func recovery(gp *g) { // the caller gp.sched.bp = fp - 2*goarch.PtrSize case goarch.IsArm64 != 0: - // on arm64, the architectural bp points one word higher - // than the sp. fp is totally useless to us here, because it - // only gets us to the caller's fp. - gp.sched.bp = sp - goarch.PtrSize + // on arm64, the first two words of the frame are caller's PC + // (the saved LR register) and the caller's BP. + // Coincidentally, the same as amd64. + gp.sched.bp = fp - 2*goarch.PtrSize } gogo(&gp.sched) } diff --git a/src/runtime/preempt_arm64.s b/src/runtime/preempt_arm64.s index 31ec9d940f76d4..f4248cac257550 100644 --- a/src/runtime/preempt_arm64.s +++ b/src/runtime/preempt_arm64.s @@ -4,13 +4,9 @@ #include "textflag.h" TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0 - MOVD R30, -496(RSP) + STP.W (R29, R30), -16(RSP) + MOVD RSP, R29 SUB $496, RSP - MOVD R29, -8(RSP) - SUB $8, RSP, R29 - #ifdef GOOS_ios - MOVD R30, (RSP) - #endif STP (R0, R1), 8(RSP) STP (R2, R3), 24(RSP) STP (R4, R5), 40(RSP) @@ -78,8 +74,7 @@ TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0 LDP 40(RSP), (R4, R5) LDP 24(RSP), (R2, R3) LDP 8(RSP), (R0, R1) - MOVD 496(RSP), R30 - MOVD -8(RSP), R29 - MOVD (RSP), R27 - ADD $512, RSP + MOVD 512(RSP), R30 + LDP 496(RSP), (R29, R27) + ADD $528, RSP RET (R27) diff --git a/src/runtime/race_arm64.s b/src/runtime/race_arm64.s index 5df650105bb4d5..feaa328d4c0d8a 100644 --- a/src/runtime/race_arm64.s +++ b/src/runtime/race_arm64.s @@ -397,7 +397,7 @@ TEXT racecallatomic<>(SB), NOSPLIT, $0 // R3 = addr of incoming arg list // Trigger SIGSEGV early. - MOVD 40(RSP), R3 // 1st arg is addr. after two times BL, get it at 40(RSP) + MOVD 72(RSP), R3 // 1st arg is addr. after two small frames (32 bytes each), get it at 72(RSP) MOVB (R3), R13 // segv here if addr is bad // Check that addr is within [arenastart, arenaend) or within [racedatastart, racedataend). MOVD runtime·racearenastart(SB), R10 @@ -417,10 +417,11 @@ racecallatomic_ok: // Addr is within the good range, call the atomic function. load_g MOVD g_racectx(g), R0 // goroutine context - MOVD 16(RSP), R1 // caller pc + MOVD 56(RSP), R1 // caller pc MOVD R9, R2 // pc - ADD $40, RSP, R3 - JMP racecall<>(SB) // does not return + ADD $72, RSP, R3 + BL racecall<>(SB) + RET racecallatomic_ignore: // Addr is outside the good range. // Call __tsan_go_ignore_sync_begin to ignore synchronization during the atomic op. 
@@ -435,9 +436,9 @@ racecallatomic_ignore: // racecall will call LLVM race code which might clobber R28 (g) load_g MOVD g_racectx(g), R0 // goroutine context - MOVD 16(RSP), R1 // caller pc + MOVD 56(RSP), R1 // caller pc MOVD R9, R2 // pc - ADD $40, RSP, R3 // arguments + ADD $72, RSP, R3 // arguments BL racecall<>(SB) // Call __tsan_go_ignore_sync_end. MOVD $__tsan_go_ignore_sync_end(SB), R9 @@ -476,10 +477,6 @@ TEXT racecall<>(SB), NOSPLIT|NOFRAME, $0-0 MOVD (g_sched+gobuf_sp)(R11), R12 MOVD R12, RSP call: - // Decrement SP past where the frame pointer is saved in the Go arm64 - // ABI (one word below the stack pointer) so the race detector library - // code doesn't clobber it - SUB $16, RSP BL R9 MOVD R19, RSP JMP (R20) diff --git a/src/runtime/signal_arm64.go b/src/runtime/signal_arm64.go index af7d29f9de1d31..61dad507219caf 100644 --- a/src/runtime/signal_arm64.go +++ b/src/runtime/signal_arm64.go @@ -8,7 +8,6 @@ package runtime import ( "internal/abi" - "internal/goarch" "internal/runtime/sys" "unsafe" ) @@ -63,18 +62,11 @@ func (c *sigctxt) preparePanic(sig uint32, gp *g) { // We arrange lr, and pc to pretend the panicking // function calls sigpanic directly. // Always save LR to stack so that panics in leaf - // functions are correctly handled. This smashes - // the stack frame but we're not going back there - // anyway. + // functions are correctly handled. + // This extra space is known to gentraceback. sp := c.sp() - sys.StackAlign // needs only sizeof uint64, but must align the stack c.set_sp(sp) *(*uint64)(unsafe.Pointer(uintptr(sp))) = c.lr() - // Make sure a valid frame pointer is saved on the stack so that the - // frame pointer checks in adjustframe are happy, if they're enabled. - // Frame pointer unwinding won't visit the sigpanic frame, since - // sigpanic will save the same frame pointer before calling into a panic - // function. - *(*uint64)(unsafe.Pointer(uintptr(sp - goarch.PtrSize))) = c.r29() pc := gp.sigpc @@ -96,10 +88,6 @@ func (c *sigctxt) pushCall(targetPC, resumePC uintptr) { sp := c.sp() - 16 // SP needs 16-byte alignment c.set_sp(sp) *(*uint64)(unsafe.Pointer(uintptr(sp))) = c.lr() - // Make sure a valid frame pointer is saved on the stack so that the - // frame pointer checks in adjustframe are happy, if they're enabled. - // This is not actually used for unwinding. - *(*uint64)(unsafe.Pointer(uintptr(sp - goarch.PtrSize))) = c.r29() // Set up PC and LR to pretend the function being signaled // calls targetPC at resumePC. c.set_lr(uint64(resumePC)) diff --git a/src/runtime/stack.go b/src/runtime/stack.go index 55e97e77afa957..5eaceec6da14d5 100644 --- a/src/runtime/stack.go +++ b/src/runtime/stack.go @@ -579,23 +579,27 @@ var ptrnames = []string{ // | args to callee | // +------------------+ <- frame->sp // -// (arm) +// (arm64) // +------------------+ // | args from caller | // +------------------+ <- frame->argp -// | caller's retaddr | +// | | +// +------------------+ <- frame->fp (aka caller's sp) +// | return address | // +------------------+ -// | caller's FP (*) | (*) on ARM64, if framepointer_enabled && varp > sp +// | caller's FP | (frame pointer always enabled: TODO) // +------------------+ <- frame->varp // | locals | // +------------------+ // | args to callee | // +------------------+ -// | return address | +// | | // +------------------+ <- frame->sp // // varp > sp means that the function has a frame; // varp == sp means frameless function. +// +// Alignment padding, if needed, will be between "locals" and "args to callee". 
type adjustinfo struct { old stack @@ -709,7 +713,8 @@ func adjustframe(frame *stkframe, adjinfo *adjustinfo) { } // Adjust saved frame pointer if there is one. - if (goarch.ArchFamily == goarch.AMD64 || goarch.ArchFamily == goarch.ARM64) && frame.argp-frame.varp == 2*goarch.PtrSize { + if goarch.ArchFamily == goarch.AMD64 && frame.argp-frame.varp == 2*goarch.PtrSize || + goarch.ArchFamily == goarch.ARM64 && frame.argp-frame.varp == 3*goarch.PtrSize { if stackDebug >= 3 { print(" saved bp\n") } @@ -723,10 +728,7 @@ func adjustframe(frame *stkframe, adjinfo *adjustinfo) { throw("bad frame pointer") } } - // On AMD64, this is the caller's frame pointer saved in the current - // frame. - // On ARM64, this is the frame pointer of the caller's caller saved - // by the caller in its frame (one word below its SP). + // This is the caller's frame pointer saved in the current frame. adjustpointer(adjinfo, unsafe.Pointer(frame.varp)) } diff --git a/src/runtime/testdata/testprog/badtraceback.go b/src/runtime/testdata/testprog/badtraceback.go index 455118a54371d7..36575f765db9a5 100644 --- a/src/runtime/testdata/testprog/badtraceback.go +++ b/src/runtime/testdata/testprog/badtraceback.go @@ -41,6 +41,11 @@ func badLR2(arg int) { if runtime.GOARCH == "ppc64" || runtime.GOARCH == "ppc64le" { lrOff = 32 // FIXED_FRAME or sys.MinFrameSize } + if runtime.GOARCH == "arm64" { + // skip 8 bytes at bottom of parent frame, then point + // to the 8 bytes of the saved PC at the top of the frame. + lrOff = 16 + } lrPtr := (*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&arg)) - lrOff)) *lrPtr = 0xbad diff --git a/src/runtime/traceback.go b/src/runtime/traceback.go index 8882c306edb736..1c3e679a02bdaf 100644 --- a/src/runtime/traceback.go +++ b/src/runtime/traceback.go @@ -175,6 +175,11 @@ func (u *unwinder) initAt(pc0, sp0, lr0 uintptr, gp *g, flags unwindFlags) { // Start in the caller's frame. if frame.pc == 0 { if usesLR { + // TODO: this isn't right on arm64. But also, this should + // ~never happen. Calling a nil function will panic + // when loading the PC out of the closure, not when + // branching to that PC. (Closures should always have + // valid PCs in their first word.) frame.pc = *(*uintptr)(unsafe.Pointer(frame.sp)) frame.lr = 0 } else { @@ -369,7 +374,11 @@ func (u *unwinder) resolveInternal(innermost, isSyscall bool) { var lrPtr uintptr if usesLR { if innermost && frame.sp < frame.fp || frame.lr == 0 { - lrPtr = frame.sp + if GOARCH == "arm64" { + lrPtr = frame.fp - goarch.PtrSize + } else { + lrPtr = frame.sp + } frame.lr = *(*uintptr)(unsafe.Pointer(lrPtr)) } } else { @@ -385,24 +394,17 @@ func (u *unwinder) resolveInternal(innermost, isSyscall bool) { // On x86, call instruction pushes return PC before entering new function. frame.varp -= goarch.PtrSize } + if GOARCH == "arm64" && frame.varp > frame.sp { + frame.varp -= goarch.PtrSize // LR have been saved, skip over it. + } // For architectures with frame pointers, if there's // a frame, then there's a saved frame pointer here. // // NOTE: This code is not as general as it looks. - // On x86, the ABI is to save the frame pointer word at the + // On x86 and arm64, the ABI is to save the frame pointer word at the // top of the stack frame, so we have to back down over it. - // On arm64, the frame pointer should be at the bottom of - // the stack (with R29 (aka FP) = RSP), in which case we would - // not want to do the subtraction here. 
But we started out without - // any frame pointer, and when we wanted to add it, we didn't - // want to break all the assembly doing direct writes to 8(RSP) - // to set the first parameter to a called function. - // So we decided to write the FP link *below* the stack pointer - // (with R29 = RSP - 8 in Go functions). - // This is technically ABI-compatible but not standard. - // And it happens to end up mimicking the x86 layout. - // Other architectures may make different decisions. + // No other architectures are framepointer-enabled at the moment. if frame.varp > frame.sp && framepointer_enabled { frame.varp -= goarch.PtrSize } @@ -562,7 +564,7 @@ func (u *unwinder) finishInternal() { gp := u.g.ptr() if u.flags&(unwindPrintErrors|unwindSilentErrors) == 0 && u.frame.sp != gp.stktopsp { print("runtime: g", gp.goid, ": frame.sp=", hex(u.frame.sp), " top=", hex(gp.stktopsp), "\n") - print("\tstack=[", hex(gp.stack.lo), "-", hex(gp.stack.hi), "\n") + print("\tstack=[", hex(gp.stack.lo), "-", hex(gp.stack.hi), "]\n") throw("traceback did not unwind completely") } } diff --git a/test/nosplit.go b/test/nosplit.go index 4b4c93b1d067c5..1f943fa18c3f58 100644 --- a/test/nosplit.go +++ b/test/nosplit.go @@ -142,7 +142,7 @@ start 136 # (CallSize is 32 on ppc64, 8 on amd64 for frame pointer.) start 96 nosplit start 100 nosplit; REJECT ppc64 ppc64le -start 104 nosplit; REJECT ppc64 ppc64le arm64 +start 104 nosplit; REJECT ppc64 ppc64le start 108 nosplit; REJECT ppc64 ppc64le start 112 nosplit; REJECT ppc64 ppc64le arm64 start 116 nosplit; REJECT ppc64 ppc64le @@ -160,7 +160,7 @@ start 136 nosplit; REJECT # Because AMD64 uses frame pointer, it has 8 fewer bytes. start 96 nosplit call f; f 0 nosplit start 100 nosplit call f; f 0 nosplit; REJECT ppc64 ppc64le -start 104 nosplit call f; f 0 nosplit; REJECT ppc64 ppc64le arm64 +start 104 nosplit call f; f 0 nosplit; REJECT ppc64 ppc64le start 108 nosplit call f; f 0 nosplit; REJECT ppc64 ppc64le start 112 nosplit call f; f 0 nosplit; REJECT ppc64 ppc64le amd64 arm64 start 116 nosplit call f; f 0 nosplit; REJECT ppc64 ppc64le amd64 @@ -176,7 +176,7 @@ start 136 nosplit call f; f 0 nosplit; REJECT # Architectures differ in the same way as before. start 96 nosplit call f; f 0 call f start 100 nosplit call f; f 0 call f; REJECT ppc64 ppc64le -start 104 nosplit call f; f 0 call f; REJECT ppc64 ppc64le amd64 arm64 +start 104 nosplit call f; f 0 call f; REJECT ppc64 ppc64le amd64 start 108 nosplit call f; f 0 call f; REJECT ppc64 ppc64le amd64 start 112 nosplit call f; f 0 call f; REJECT ppc64 ppc64le amd64 arm64 start 116 nosplit call f; f 0 call f; REJECT ppc64 ppc64le amd64 @@ -189,7 +189,7 @@ start 136 nosplit call f; f 0 call f; REJECT # Indirect calls are assumed to be splitting functions. start 96 nosplit callind start 100 nosplit callind; REJECT ppc64 ppc64le -start 104 nosplit callind; REJECT ppc64 ppc64le amd64 arm64 +start 104 nosplit callind; REJECT ppc64 ppc64le amd64 start 108 nosplit callind; REJECT ppc64 ppc64le amd64 start 112 nosplit callind; REJECT ppc64 ppc64le amd64 arm64 start 116 nosplit callind; REJECT ppc64 ppc64le amd64 From 4fca79833fcdd0dc19bb0feba8715a0def3d07be Mon Sep 17 00:00:00 2001 From: tony Date: Mon, 29 Sep 2025 14:25:57 +0000 Subject: [PATCH 067/152] runtime: delete redundant code in the page allocator The page allocator's scavenge index has sysGrow called on it twice, once in pageAlloc.grow, and once in pageAlloc.sysGrow on 64-bit platforms. Calling it twice is OK since sysGrow is idempotent, but it's also wasteful. 
This change removes the call in pageAlloc.sysGrow. Change-Id: I5b955b6e2beed5c2b8305ab82b76718ea305792c Reviewed-on: https://go-review.googlesource.com/c/go/+/707735 LUCI-TryBot-Result: Go LUCI Reviewed-by: Michael Knyszek Reviewed-by: Carlos Amedee --- src/runtime/mpagealloc_64bit.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/runtime/mpagealloc_64bit.go b/src/runtime/mpagealloc_64bit.go index eb425f070445dd..2e3643004bc858 100644 --- a/src/runtime/mpagealloc_64bit.go +++ b/src/runtime/mpagealloc_64bit.go @@ -180,9 +180,6 @@ func (p *pageAlloc) sysGrow(base, limit uintptr) { sysUsed(unsafe.Pointer(need.base.addr()), need.size(), need.size()) p.summaryMappedReady += need.size() } - - // Update the scavenge index. - p.summaryMappedReady += p.scav.index.sysGrow(base, limit, p.sysStat) } // sysGrow increases the index's backing store in response to a heap growth. From 1d62e92567a858b18f4e7e0c24e071c039dd3edf Mon Sep 17 00:00:00 2001 From: Cherry Mui Date: Sat, 4 Oct 2025 11:32:33 -0400 Subject: [PATCH 068/152] test/codegen: make sure assignment results are used. Some tests make assignments to an argument without reading it. With CL 708865, they are treated as dead stores and are removed. Make sure the results are used. Fixes #75745. Fixes #75746. Change-Id: I05580beb1006505ec1550e5fa245b54dcefd10b9 Reviewed-on: https://go-review.googlesource.com/c/go/+/708916 LUCI-TryBot-Result: Go LUCI Reviewed-by: Keith Randall Reviewed-by: Keith Randall --- test/codegen/constants.go | 6 ++++-- test/codegen/mathbits.go | 3 ++- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/test/codegen/constants.go b/test/codegen/constants.go index 3ce17d0ad3a65b..0935a9e53a9d87 100644 --- a/test/codegen/constants.go +++ b/test/codegen/constants.go @@ -7,7 +7,7 @@ package codegen // A uint16 or sint16 constant shifted left. -func shifted16BitConstants(out [64]uint64) { +func shifted16BitConstants() (out [64]uint64) { // ppc64x: "MOVD\t[$]8193,", "SLD\t[$]27," out[0] = 0x0000010008000000 // ppc64x: "MOVD\t[$]-32767", "SLD\t[$]26," @@ -16,10 +16,11 @@ func shifted16BitConstants(out [64]uint64) { out[2] = 0xFFFF000000000000 // ppc64x: "MOVD\t[$]65535", "SLD\t[$]44," out[3] = 0x0FFFF00000000000 + return } // A contiguous set of 1 bits, potentially wrapping. -func contiguousMaskConstants(out [64]uint64) { +func contiguousMaskConstants() (out [64]uint64) { // ppc64x: "MOVD\t[$]-1", "RLDC\tR[0-9]+, [$]44, [$]63," out[0] = 0xFFFFF00000000001 // ppc64x: "MOVD\t[$]-1", "RLDC\tR[0-9]+, [$]43, [$]63," @@ -30,4 +31,5 @@ func contiguousMaskConstants(out [64]uint64) { // ppc64x/power9: "MOVD\t[$]-1", "RLDC\tR[0-9]+, [$]33, [$]63," // ppc64x/power10: "MOVD\t[$]-8589934591," out[3] = 0xFFFFFFFE00000001 + return } diff --git a/test/codegen/mathbits.go b/test/codegen/mathbits.go index ba5387d2c32121..f8fa374c0af483 100644 --- a/test/codegen/mathbits.go +++ b/test/codegen/mathbits.go @@ -731,7 +731,7 @@ func Add64MPanicOnOverflowGT(a, b [2]uint64) [2]uint64 { // // This is what happened on PPC64 when compiling // crypto/internal/edwards25519/field.feMulGeneric. 
-func Add64MultipleChains(a, b, c, d [2]uint64) { +func Add64MultipleChains(a, b, c, d [2]uint64) [2]uint64 { var cx, d1, d2 uint64 a1, a2 := a[0], a[1] b1, b2 := b[0], b[1] @@ -748,6 +748,7 @@ func Add64MultipleChains(a, b, c, d [2]uint64) { d2, _ = bits.Add64(c2, d2, cx) d[0] = d1 d[1] = d2 + return d } // --------------- // From 7bfeb43509acbe75ce8e1a14c60bffe597e46813 Mon Sep 17 00:00:00 2001 From: Ian Alexander Date: Wed, 20 Aug 2025 19:16:36 -0400 Subject: [PATCH 069/152] cmd/go: refactor usage of `initialized` This commit refactors usage of the global variable `initialized` to the global LoaderState field of the same name. This commit is part of the overall effort to eliminate global modloader state. [git-generate] cd src/cmd/go/internal/modload rf 'ex { initialized -> LoaderState.initialized }' rf 'rm initialized' Change-Id: I97e35bab00f4c22661670b01b69425fc25efe6df Reviewed-on: https://go-review.googlesource.com/c/go/+/698056 LUCI-TryBot-Result: Go LUCI Reviewed-by: Michael Matloob Reviewed-by: Michael Matloob --- src/cmd/go/internal/modload/init.go | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/src/cmd/go/internal/modload/init.go b/src/cmd/go/internal/modload/init.go index 5192fe0fc8cc58..d7d532ec942665 100644 --- a/src/cmd/go/internal/modload/init.go +++ b/src/cmd/go/internal/modload/init.go @@ -60,7 +60,6 @@ var ( // Variables set in Init. var ( - initialized bool // These are primarily used to initialize the MainModules, and should be // eventually superseded by them but are still used in cases where the module @@ -406,7 +405,7 @@ func Reset() { func setState(s State) State { oldState := State{ - initialized: initialized, + initialized: LoaderState.initialized, forceUseModules: ForceUseModules, rootMode: RootMode, modRoots: modRoots, @@ -414,7 +413,7 @@ func setState(s State) State { mainModules: MainModules, requirements: requirements, } - initialized = s.initialized + LoaderState.initialized = s.initialized ForceUseModules = s.forceUseModules RootMode = s.rootMode modRoots = s.modRoots @@ -450,10 +449,10 @@ var LoaderState = NewState() // configures the cfg, codehost, load, modfetch, and search packages for use // with modules. func Init() { - if initialized { + if LoaderState.initialized { return } - initialized = true + LoaderState.initialized = true fips140.Init() @@ -573,7 +572,7 @@ func WillBeEnabled() bool { // Already enabled. return true } - if initialized { + if LoaderState.initialized { // Initialized, not enabled. return false } @@ -640,7 +639,7 @@ func VendorDir() string { } func inWorkspaceMode() bool { - if !initialized { + if !LoaderState.initialized { panic("inWorkspaceMode called before modload.Init called") } if !Enabled() { @@ -1253,7 +1252,7 @@ func fixVersion(ctx context.Context, fixed *bool) modfile.VersionFixer { // This function affects the default cfg.BuildMod when outside of a module, // so it can only be called prior to Init. func AllowMissingModuleImports() { - if initialized { + if LoaderState.initialized { panic("AllowMissingModuleImports after Init") } allowMissingModuleImports = true From 7fbf54bfebf9243550177bc6871d80e58bedf1a6 Mon Sep 17 00:00:00 2001 From: Michael Anthony Knyszek Date: Sun, 9 Mar 2025 17:19:48 +0000 Subject: [PATCH 070/152] internal/buildcfg: enable greenteagc experiment by default Slightly bump the value in Test/wasmmemsize.go. We use 1 additional page compared to before, and we were already sitting *right* on the edge. For #73581. 
Change-Id: I485df16c3cf59803a8a1fc852b3e90666981ab09 Reviewed-on: https://go-review.googlesource.com/c/go/+/656195 LUCI-TryBot-Result: Go LUCI Auto-Submit: Michael Knyszek Reviewed-by: Cherry Mui --- src/internal/buildcfg/exp.go | 6 ++++++ test/wasmmemsize.dir/main.go | 12 +++++++----- 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/src/internal/buildcfg/exp.go b/src/internal/buildcfg/exp.go index d06913d9a7f25a..fb6b5859e31a1e 100644 --- a/src/internal/buildcfg/exp.go +++ b/src/internal/buildcfg/exp.go @@ -78,12 +78,18 @@ func ParseGOEXPERIMENT(goos, goarch, goexp string) (*ExperimentFlags, error) { // things like .debug_addr (needed for DWARF 5). dwarf5Supported := (goos != "darwin" && goos != "ios" && goos != "aix") + // The compiler crashes while compiling some of the Green Tea code. + // The Green Tea code is pretty normal, so this is likely a compiler + // bug in the loong64 port. + greenTeaGCSupported := goarch != "loong64" + baseline := goexperiment.Flags{ RegabiWrappers: regabiSupported, RegabiArgs: regabiSupported, Dwarf5: dwarf5Supported, RandomizedHeapBase64: true, SizeSpecializedMalloc: true, + GreenTeaGC: greenTeaGCSupported, } // Start with the statically enabled set of experiments. diff --git a/test/wasmmemsize.dir/main.go b/test/wasmmemsize.dir/main.go index e3aa5b5e92154c..c51e6b3b0476c9 100644 --- a/test/wasmmemsize.dir/main.go +++ b/test/wasmmemsize.dir/main.go @@ -9,17 +9,19 @@ import ( "io" ) -// Expect less than 3 MB of memory usage for a small wasm program. -// This reflects the current allocator. If the allocator changes, -// update this value. -const want = 3 << 20 +// Wasm page size. +const pageSize = 64 * 1024 + +// Expect less than 3 MB + 1 page of memory usage for a small wasm +// program. This reflects the current allocator. If the allocator +// changes, update this value. +const want = 3<<20 + pageSize var w = io.Discard func main() { fmt.Fprintln(w, "hello world") - const pageSize = 64 * 1024 sz := uintptr(currentMemory()) * pageSize if sz > want { fmt.Printf("FAIL: unexpected memory size %d, want <= %d\n", sz, want) From c1e6e49d5d3f3fb927f1bfd1b453d8e7c906c6ac Mon Sep 17 00:00:00 2001 From: thepudds Date: Fri, 3 Oct 2025 10:59:54 -0400 Subject: [PATCH 071/152] fmt: reduce Errorf("x") allocations to match errors.New("x") MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For unformatted strings, it comes up periodically that there are more allocations using fmt.Errorf("x") compared to errors.New("x"). People cite it as a reason to switch code using fmt.Errorf to use errors.New instead. Three examples from the last few weeks essentially made this suggestion: #75235, CL 708496, and CL 708618. Prior to that, it is periodically suggested as a vet check (e.g., proposals #17173 and #52696) or in various CLs to change the standard library (e.g., CL 403938 and CL 588776). On the other hand, I believe the position of the core Go team is that it is usually not worthwhile to make such a change. For example, in #52696, Russ wrote: Thanks for raising the issue, but please don't do this. Using fmt.Errorf("foo") is completely fine, especially in a program where all the errors are constructed with fmt.Errorf. Having to mentally switch between two functions based on the argument is unnecessary noise. This CL attempts to mostly take performance out of the discussion. 
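The numbers below come from a benchmark of roughly the following shape (a sketch only; the exact code is at the playground link further down, and the package and benchmark names here are illustrative):

    package errorf_test

    import (
        "errors"
        "fmt"
        "testing"
    )

    var sink error

    func BenchmarkErrorf(b *testing.B) {
        b.Run("no-args/local", func(b *testing.B) {
            for i := 0; i < b.N; i++ {
                _ = fmt.Errorf("foo") // result does not escape
            }
        })
        b.Run("no-args/sink", func(b *testing.B) {
            for i := 0; i < b.N; i++ {
                sink = fmt.Errorf("foo") // result escapes via the package-level sink
            }
        })
    }

    func BenchmarkErrorsNew(b *testing.B) {
        for i := 0; i < b.N; i++ {
            sink = errors.New("foo") // the baseline being matched
        }
    }
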
We drop from 2 allocations to 0 allocations for a non-escaping error, and drop from 2 allocations to 1 allocation for an escaping error: _ = fmt.Errorf("foo") // non-escaping error sink = fmt.Errorf("foo") // escaping error This now matches the allocations for errors.New("foo") in both cases. The CPU cost difference is greatly reduced, though there is still a small ~4ns difference measurable in these microbenchmarks. Previously, it was ~64ns vs. ~21ns for fmt.Errorf("x") vs. errors.New("x") for escaping errors, whereas with this CL it is now ~25ns vs. ~21ns. When fmt.Errorf("foo") executes with this CL, there are essentially three optimizations now, in rough order of usefulness: (1) we always avoid an allocation inside the doPrintf machinery; (2) if the error does not otherwise escape, we can stack allocate the errors.errorString struct by virtue of mid-stack inlining of fmt.Errorf and the resulting inlining of errors.New, which also can be more effective via PGO; (3) stringslite.IndexByte is a tiny bit faster than going through the for loops looking for '%' inside doPrintf. See https://blog.filippo.io/efficient-go-apis-with-the-inliner/ for background on avoiding heap allocations via mid-stack inlining. The common case here is likely that the string format argument is a constant when there are no other arguments. However, one concern could be that by not allocating a copy, we could now keep a string argument alive longer with this change, which could be a pessimization if for example that string argument is a slice of a much bigger string: s := bigString[m:n] longLivedErr := fmt.Errorf(s) Aside from that being perhaps unusual code, vet will complain about s there as a "non-constant format string in call to fmt.Errorf", so that particular example seems unlikely to occur frequently in practice. The main benchmark results are below. "old" is prior to this CL, "new" is with this CL. The non-escaping case is "local", the escaping case is "sink". In practice, I suspect errors escape the majority of the time. Benchmark code at https://go.dev/play/p/rlRSO1ehx8O goos: linux goarch: amd64 pkg: fmt cpu: AMD EPYC 7B13 │ old-7bd6fac4.txt │ new-dcd2a72f0.txt │ │ sec/op │ sec/op vs base │ Errorf/no-args/local-16 63.76n ± 1% 4.874n ± 0% -92.36% (n=120) Errorf/no-args/sink-16 64.25n ± 1% 25.81n ± 0% -59.83% (n=120) Errorf/int-arg/local-16 90.86n ± 1% 90.97n ± 1% ~ (p=0.713 n=120) Errorf/int-arg/sink-16 91.81n ± 1% 91.10n ± 1% -0.76% (p=0.036 n=120) geomean 76.46n 31.95n -58.20% │ old-7bd6fac4.txt │ new-dcd2a72f0.txt │ │ B/op │ B/op vs base │ Errorf/no-args/local-16 19.00 ± 0% 0.00 ± 0% -100.00% (n=120) Errorf/no-args/sink-16 19.00 ± 0% 16.00 ± 0% -15.79% (n=120) Errorf/int-arg/local-16 24.00 ± 0% 24.00 ± 0% ~ (p=1.000 n=120) ¹ Errorf/int-arg/sink-16 24.00 ± 0% 24.00 ± 0% ~ (p=1.000 n=120) ¹ geomean 21.35 ? ² ³ ¹ all samples are equal │ old-7bd6fac4.txt │ new-dcd2a72f0.txt │ │ allocs/op │ allocs/op vs base │ Errorf/no-args/local-16 2.000 ± 0% 0.000 ± 0% -100.00% (n=120) Errorf/no-args/sink-16 2.000 ± 0% 1.000 ± 0% -50.00% (n=120) Errorf/int-arg/local-16 2.000 ± 0% 2.000 ± 0% ~ (p=1.000 n=120) ¹ Errorf/int-arg/sink-16 2.000 ± 0% 2.000 ± 0% ~ (p=1.000 n=120) ¹ geomean 2.000 ? 
² ³ ¹ all samples are equal Change-Id: Ib27c52933bec5c2236624c577fbb1741052e792f Reviewed-on: https://go-review.googlesource.com/c/go/+/708836 Reviewed-by: Damien Neil LUCI-TryBot-Result: Go LUCI Commit-Queue: t hepudds Reviewed-by: Alan Donovan Reviewed-by: Emmanuel Odeke --- src/fmt/errors.go | 18 +++++++++++++++++- src/fmt/errors_test.go | 9 +++++++++ src/fmt/fmt_test.go | 5 +++++ 3 files changed, 31 insertions(+), 1 deletion(-) diff --git a/src/fmt/errors.go b/src/fmt/errors.go index 1ac83404bc7c55..a0ce7ada346dbc 100644 --- a/src/fmt/errors.go +++ b/src/fmt/errors.go @@ -6,6 +6,7 @@ package fmt import ( "errors" + "internal/stringslite" "slices" ) @@ -19,7 +20,22 @@ import ( // order they appear in the arguments. // It is invalid to supply the %w verb with an operand that does not implement // the error interface. The %w verb is otherwise a synonym for %v. -func Errorf(format string, a ...any) error { +func Errorf(format string, a ...any) (err error) { + // This function has been split in a somewhat unnatural way + // so that both it and the errors.New call can be inlined. + if err = errorf(format, a...); err != nil { + return err + } + // No formatting was needed. We can avoid some allocations and other work. + // See https://go.dev/cl/708836 for details. + return errors.New(format) +} + +// errorf formats and returns an error value, or nil if no formatting is required. +func errorf(format string, a ...any) error { + if len(a) == 0 && stringslite.IndexByte(format, '%') == -1 { + return nil + } p := newPrinter() p.wrapErrs = true p.doPrintf(format, a) diff --git a/src/fmt/errors_test.go b/src/fmt/errors_test.go index 4eb55faffe7a18..52bf42d0a623a6 100644 --- a/src/fmt/errors_test.go +++ b/src/fmt/errors_test.go @@ -54,6 +54,15 @@ func TestErrorf(t *testing.T) { }, { err: noVetErrorf("%w is not an error", "not-an-error"), wantText: "%!w(string=not-an-error) is not an error", + }, { + err: fmt.Errorf("no verbs"), + wantText: "no verbs", + }, { + err: noVetErrorf("no verbs with extra arg", "extra"), + wantText: "no verbs with extra arg%!(EXTRA string=extra)", + }, { + err: noVetErrorf("too many verbs: %w %v"), + wantText: "too many verbs: %!w(MISSING) %!v(MISSING)", }, { err: noVetErrorf("wrapped two errors: %w %w", errString("1"), errString("2")), wantText: "wrapped two errors: 1 2", diff --git a/src/fmt/fmt_test.go b/src/fmt/fmt_test.go index 86e458ae6481fb..c07da5683c2463 100644 --- a/src/fmt/fmt_test.go +++ b/src/fmt/fmt_test.go @@ -1480,6 +1480,7 @@ func BenchmarkFprintIntNoAlloc(b *testing.B) { var mallocBuf bytes.Buffer var mallocPointer *int // A pointer so we know the interface value won't allocate. +var sink any var mallocTest = []struct { count int @@ -1510,6 +1511,10 @@ var mallocTest = []struct { mallocBuf.Reset() Fprintf(&mallocBuf, "%x %x %x", mallocPointer, mallocPointer, mallocPointer) }}, + {0, `Errorf("hello")`, func() { _ = Errorf("hello") }}, + {2, `Errorf("hello: %x")`, func() { _ = Errorf("hello: %x", mallocPointer) }}, + {1, `sink = Errorf("hello")`, func() { sink = Errorf("hello") }}, + {2, `sink = Errorf("hello: %x")`, func() { sink = Errorf("hello: %x", mallocPointer) }}, } var _ bytes.Buffer From 4c0fd3a2b45675a581ef6fa273a221d7131b5647 Mon Sep 17 00:00:00 2001 From: Mateusz Poliwczak Date: Mon, 6 Oct 2025 20:54:27 +0200 Subject: [PATCH 072/152] internal/goexperiment: remove the synctest GOEXPERIMENT synctest package is enabled by default and the synctest goexperiment does nothing after CL 709355. 
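For context, tests can now use the package unconditionally. A minimal sketch, assuming the Go 1.25 testing/synctest API (synctest.Test and the bubble's fake clock):

    package example_test

    import (
        "testing"
        "testing/synctest"
        "time"
    )

    func TestFakeClock(t *testing.T) {
        synctest.Test(t, func(t *testing.T) {
            start := time.Now()
            time.Sleep(5 * time.Second) // fake clock: returns without real waiting
            if got := time.Since(start); got != 5*time.Second {
                t.Fatalf("elapsed %v, want 5s", got)
            }
        })
    }
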
Change-Id: Ia96b070d5f3779ae7c38a9044f754e716a6a6964 Reviewed-on: https://go-review.googlesource.com/c/go/+/709555 Reviewed-by: Michael Pratt Reviewed-by: Damien Neil LUCI-TryBot-Result: Go LUCI --- src/internal/goexperiment/exp_synctest_off.go | 8 -------- src/internal/goexperiment/exp_synctest_on.go | 8 -------- src/internal/goexperiment/flags.go | 3 --- 3 files changed, 19 deletions(-) delete mode 100644 src/internal/goexperiment/exp_synctest_off.go delete mode 100644 src/internal/goexperiment/exp_synctest_on.go diff --git a/src/internal/goexperiment/exp_synctest_off.go b/src/internal/goexperiment/exp_synctest_off.go deleted file mode 100644 index fade13f89ca79c..00000000000000 --- a/src/internal/goexperiment/exp_synctest_off.go +++ /dev/null @@ -1,8 +0,0 @@ -// Code generated by mkconsts.go. DO NOT EDIT. - -//go:build !goexperiment.synctest - -package goexperiment - -const Synctest = false -const SynctestInt = 0 diff --git a/src/internal/goexperiment/exp_synctest_on.go b/src/internal/goexperiment/exp_synctest_on.go deleted file mode 100644 index 9c44be7276138b..00000000000000 --- a/src/internal/goexperiment/exp_synctest_on.go +++ /dev/null @@ -1,8 +0,0 @@ -// Code generated by mkconsts.go. DO NOT EDIT. - -//go:build goexperiment.synctest - -package goexperiment - -const Synctest = true -const SynctestInt = 1 diff --git a/src/internal/goexperiment/flags.go b/src/internal/goexperiment/flags.go index 232a17135d2cc5..07aa1d0aeed15d 100644 --- a/src/internal/goexperiment/flags.go +++ b/src/internal/goexperiment/flags.go @@ -100,9 +100,6 @@ type Flags struct { // inlining phase within the Go compiler. NewInliner bool - // Synctest enables the testing/synctest package. - Synctest bool - // Dwarf5 enables DWARF version 5 debug info generation. Dwarf5 bool From 64699542031b994ec4fdb6de887a94b69a372f9b Mon Sep 17 00:00:00 2001 From: Michael Pratt Date: Mon, 6 Oct 2025 16:38:29 -0400 Subject: [PATCH 073/152] runtime: assert p.destroy runs with GC not running This is already guaranteed by stopTheWorldGC prior to procresize. Thus the cleanup code here is dead, which is a bit confusing. Replace it with a throw for clarity. Change-Id: I6a6a636c8ca1487b720c4fab41b2b86c13d1d9e0 Reviewed-on: https://go-review.googlesource.com/c/go/+/709655 LUCI-TryBot-Result: Go LUCI Reviewed-by: Michael Knyszek --- src/runtime/proc.go | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/runtime/proc.go b/src/runtime/proc.go index e5686705293e8a..d36895b046c8c6 100644 --- a/src/runtime/proc.go +++ b/src/runtime/proc.go @@ -5818,11 +5818,13 @@ func (pp *p) destroy() { // Move all timers to the local P. getg().m.p.ptr().timers.take(&pp.timers) - // Flush p's write barrier buffer. - if gcphase != _GCoff { - wbBufFlush1(pp) - pp.gcw.dispose() + // No need to flush p's write barrier buffer or span queue, as Ps + // cannot be destroyed during the mark phase. + if phase := gcphase; phase != _GCoff { + println("runtime: p id", pp.id, "destroyed during GC phase", phase) + throw("P destroyed while GC is running") } + clear(pp.sudogbuf[:]) pp.sudogcache = pp.sudogbuf[:0] pp.pinnerCache = nil From c938051dd0b80a5c60572d6807270d06ca685d2e Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Tue, 7 Oct 2025 07:58:50 -0700 Subject: [PATCH 074/152] Revert "cmd/compile: redo arm64 LR/FP save and restore" This reverts commit 719dfcf8a8478d70360bf3c34c0e920be7b32994. Reason for revert: Causing crashes. 
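The runtime/proc.go change a few patches above (asserting in p.destroy that the
GC is not running) follows a broader cleanup pattern: when a caller-side
guarantee makes defensive cleanup unreachable, replace the dead code with an
assertion so the invariant is documented and any future violation fails loudly.
A minimal sketch of that pattern in ordinary Go, with hypothetical names
(worker, destroy) that are not taken from the runtime:

    package pool

    import "fmt"

    type worker struct {
        id      int
        pending int // queued work items not yet flushed
    }

    // destroy assumes the scheduler has already drained this worker,
    // much as p.destroy is only reached with the GC not running.
    // Instead of keeping a flush here that can never execute, assert
    // the invariant.
    func (w *worker) destroy() {
        if n := w.pending; n != 0 {
            panic(fmt.Sprintf("pool: worker %d destroyed with %d pending items", w.id, n))
        }
        // ... release remaining per-worker resources ...
    }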
Change-Id: I0b8526dd03d82fa074ce4f97f1789eeac702b3eb Reviewed-on: https://go-review.googlesource.com/c/go/+/709755 Reviewed-by: Keith Randall Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI Auto-Submit: Keith Randall Reviewed-by: Cherry Mui --- src/cmd/compile/abi-internal.md | 12 +- src/cmd/compile/internal/arm64/ggen.go | 10 +- src/cmd/compile/internal/arm64/ssa.go | 2 +- src/cmd/compile/internal/ssagen/pgen.go | 6 - src/cmd/compile/internal/ssagen/ssa.go | 3 +- src/cmd/internal/obj/arm64/asm7.go | 12 +- src/cmd/internal/obj/arm64/obj7.go | 326 ++++++++++++------ src/cmd/link/internal/amd64/obj.go | 19 +- src/cmd/link/internal/arm64/obj.go | 23 +- src/cmd/link/internal/ld/dwarf.go | 7 +- src/cmd/link/internal/ld/lib.go | 4 - src/cmd/link/internal/ld/stackcheck.go | 5 + src/cmd/link/internal/x86/obj.go | 15 +- src/runtime/asm_arm64.s | 81 +---- src/runtime/mkpreempt.go | 20 +- src/runtime/panic.go | 8 +- src/runtime/preempt_arm64.s | 15 +- src/runtime/race_arm64.s | 17 +- src/runtime/signal_arm64.go | 16 +- src/runtime/stack.go | 20 +- src/runtime/testdata/testprog/badtraceback.go | 5 - src/runtime/traceback.go | 30 +- test/nosplit.go | 8 +- 23 files changed, 361 insertions(+), 303 deletions(-) diff --git a/src/cmd/compile/abi-internal.md b/src/cmd/compile/abi-internal.md index 490e1affb74de9..eae230dc070d86 100644 --- a/src/cmd/compile/abi-internal.md +++ b/src/cmd/compile/abi-internal.md @@ -576,19 +576,19 @@ A function's stack frame, after the frame is created, is laid out as follows: +------------------------------+ - | return PC | - | frame pointer on entry | ← R29 points to | ... locals ... | | ... outgoing arguments ... | - | unused word | ← RSP points to + | return PC | ← RSP points to + | frame pointer on entry | +------------------------------+ ↓ lower addresses The "return PC" is loaded to the link register, R30, as part of the arm64 `CALL` operation. -On entry, a function pushes R30 (the return address) and R29 -(the caller's frame pointer) onto the bottom of the stack. It then -subtracts a constant from RSP to open its stack frame. +On entry, a function subtracts from RSP to open its stack frame, and +saves the values of R30 and R29 at the bottom of the frame. +Specifically, R30 is saved at 0(RSP) and R29 is saved at -8(RSP), +after RSP is updated. A leaf function that does not require any stack space may omit the saved R30 and R29. diff --git a/src/cmd/compile/internal/arm64/ggen.go b/src/cmd/compile/internal/arm64/ggen.go index 6ba56b992eeda6..14027467002a3e 100644 --- a/src/cmd/compile/internal/arm64/ggen.go +++ b/src/cmd/compile/internal/arm64/ggen.go @@ -11,12 +11,10 @@ import ( ) func padframe(frame int64) int64 { - // arm64 requires frame sizes here that are 8 mod 16. - // With the additional (unused) slot at the bottom of the frame, - // that makes an aligned 16 byte frame. - // Adding a save region for LR+FP does not change the alignment. - if frame != 0 { - frame += (-(frame + 8)) & 15 + // arm64 requires that the frame size (not counting saved FP&LR) + // be 16 bytes aligned. If not, pad it. + if frame%16 != 0 { + frame += 16 - (frame % 16) } return frame } diff --git a/src/cmd/compile/internal/arm64/ssa.go b/src/cmd/compile/internal/arm64/ssa.go index 9f79a740c6ca81..7bc0e536e941e6 100644 --- a/src/cmd/compile/internal/arm64/ssa.go +++ b/src/cmd/compile/internal/arm64/ssa.go @@ -221,7 +221,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { for i := 0; i < len(args); i++ { a := args[i] - // Offset by size of the unused slot before start of args. 
+ // Offset by size of the saved LR slot. addr := ssagen.SpillSlotAddr(a, arm64.REGSP, base.Ctxt.Arch.FixedFrameSize) // Look for double-register operations if we can. if i < len(args)-1 { diff --git a/src/cmd/compile/internal/ssagen/pgen.go b/src/cmd/compile/internal/ssagen/pgen.go index f0776172b9264a..0a2010363f8d04 100644 --- a/src/cmd/compile/internal/ssagen/pgen.go +++ b/src/cmd/compile/internal/ssagen/pgen.go @@ -393,16 +393,10 @@ func StackOffset(slot ssa.LocalSlot) int32 { case ir.PAUTO: off = n.FrameOffset() if base.Ctxt.Arch.FixedFrameSize == 0 { - // x86 return address off -= int64(types.PtrSize) } if buildcfg.FramePointerEnabled { - // frame pointer off -= int64(types.PtrSize) - if buildcfg.GOARCH == "arm64" { - // arm64 return address also - off -= int64(types.PtrSize) - } } } return int32(off + slot.Off) diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go index 107447f04cc4f6..1e2159579dfbf2 100644 --- a/src/cmd/compile/internal/ssagen/ssa.go +++ b/src/cmd/compile/internal/ssagen/ssa.go @@ -7150,7 +7150,6 @@ func defframe(s *State, e *ssafn, f *ssa.Func) { // Insert code to zero ambiguously live variables so that the // garbage collector only sees initialized values when it // looks for pointers. - // Note: lo/hi are offsets from varp and will be negative. var lo, hi int64 // Opaque state for backend to use. Current backends use it to @@ -7158,7 +7157,7 @@ func defframe(s *State, e *ssafn, f *ssa.Func) { var state uint32 // Iterate through declarations. Autos are sorted in decreasing - // frame offset order (least negative to most negative). + // frame offset order. for _, n := range e.curfn.Dcl { if !n.Needzero() { continue diff --git a/src/cmd/internal/obj/arm64/asm7.go b/src/cmd/internal/obj/arm64/asm7.go index 281d705a3eb614..743d09a319087d 100644 --- a/src/cmd/internal/obj/arm64/asm7.go +++ b/src/cmd/internal/obj/arm64/asm7.go @@ -51,6 +51,7 @@ type ctxt7 struct { blitrl *obj.Prog elitrl *obj.Prog autosize int32 + extrasize int32 instoffset int64 pc int64 pool struct { @@ -1121,7 +1122,8 @@ func span7(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { ctxt.Diag("arm64 ops not initialized, call arm64.buildop first") } - c := ctxt7{ctxt: ctxt, newprog: newprog, cursym: cursym, autosize: int32(p.To.Offset)} + c := ctxt7{ctxt: ctxt, newprog: newprog, cursym: cursym, autosize: int32(p.To.Offset & 0xffffffff), extrasize: int32(p.To.Offset >> 32)} + p.To.Offset &= 0xffffffff // extrasize is no longer needed // Process literal pool and allocate initial program counter for each Prog, before // generating branch veneers. @@ -2117,8 +2119,8 @@ func (c *ctxt7) aclass(a *obj.Addr) int { // a.Offset is still relative to pseudo-SP. a.Reg = obj.REG_NONE } - // The frame top 16 bytes are for LR/FP - c.instoffset = int64(c.autosize) + a.Offset - extrasize + // The frame top 8 or 16 bytes are for FP + c.instoffset = int64(c.autosize) + a.Offset - int64(c.extrasize) return autoclass(c.instoffset) case obj.NAME_PARAM: @@ -2178,8 +2180,8 @@ func (c *ctxt7) aclass(a *obj.Addr) int { // a.Offset is still relative to pseudo-SP. 
a.Reg = obj.REG_NONE } - // The frame top 16 bytes are for LR/FP - c.instoffset = int64(c.autosize) + a.Offset - extrasize + // The frame top 8 or 16 bytes are for FP + c.instoffset = int64(c.autosize) + a.Offset - int64(c.extrasize) case obj.NAME_PARAM: if a.Reg == REGSP { diff --git a/src/cmd/internal/obj/arm64/obj7.go b/src/cmd/internal/obj/arm64/obj7.go index a697426145185b..2583e46354292f 100644 --- a/src/cmd/internal/obj/arm64/obj7.go +++ b/src/cmd/internal/obj/arm64/obj7.go @@ -36,6 +36,7 @@ import ( "cmd/internal/src" "cmd/internal/sys" "internal/abi" + "internal/buildcfg" "log" "math" ) @@ -471,8 +472,6 @@ func (c *ctxt7) rewriteToUseGot(p *obj.Prog) { obj.Nopout(p) } -const extrasize = 16 // space needed in the frame for LR+FP - func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { if cursym.Func().Text == nil || cursym.Func().Text.Link == nil { return @@ -522,26 +521,33 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { c.autosize = int32(textstksiz) if p.Mark&LEAF != 0 && c.autosize == 0 { - // A leaf function with no locals needs no frame. + // A leaf function with no locals has no frame. p.From.Sym.Set(obj.AttrNoFrame, true) } if !p.From.Sym.NoFrame() { // If there is a stack frame at all, it includes - // space for the (now unused) word at [SP:SP+8]. + // space to save the LR. c.autosize += 8 } - // Round up to a multiple of 16. - c.autosize += (-c.autosize) & 15 - if c.autosize != 0 { - // Allocate an extra 16 bytes at the top of the frame - // to save LR+FP. + extrasize := int32(0) + if c.autosize%16 == 8 { + // Allocate extra 8 bytes on the frame top to save FP + extrasize = 8 + } else if c.autosize&(16-1) == 0 { + // Allocate extra 16 bytes to save FP for the old frame whose size is 8 mod 16 + extrasize = 16 + } else { + c.ctxt.Diag("%v: unaligned frame size %d - must be 16 aligned", p, c.autosize-8) + } c.autosize += extrasize c.cursym.Func().Locals += extrasize - p.To.Offset = int64(c.autosize) + // low 32 bits for autosize + // high 32 bits for extrasize + p.To.Offset = int64(c.autosize) | int64(extrasize)<<32 } else { // NOFRAME p.To.Offset = 0 @@ -574,72 +580,120 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { var prologueEnd *obj.Prog aoffset := c.autosize - if aoffset < 16 { - log.Fatalf("aoffset too small %d", aoffset) + if aoffset > 0xf0 { + // MOVD.W offset variant range is -0x100 to 0xf8, SP should be 16-byte aligned. + // so the maximum aoffset value is 0xf0. + aoffset = 0xf0 } + // Frame is non-empty. Make sure to save link register, even if + // it is a leaf function, so that traceback works. q = p - - // Store return address and frame pointer at the top of the stack frame. - // STP.W (R29, R30), -16(SP) - q1 = obj.Appendp(q, c.newprog) - q1.Pos = p.Pos - q1.As = ASTP - q1.From.Type = obj.TYPE_REGREG - q1.From.Reg = REGFP - q1.From.Offset = REGLINK - q1.To.Type = obj.TYPE_MEM - q1.To.Reg = REG_RSP - q1.To.Offset = -16 - q1.Scond = C_XPRE - - prologueEnd = q1 - - // Update frame pointer - q1 = obj.Appendp(q1, c.newprog) - q1.Pos = p.Pos - q1.As = AMOVD - q1.From.Type = obj.TYPE_REG - q1.From.Reg = REGSP - q1.To.Type = obj.TYPE_REG - q1.To.Reg = REGFP - - // Allocate additional frame space. 
- adj := aoffset - 16 - if adj > 0 { - // SUB $autosize-16, RSP - if adj < 1<<12 { - q1 = obj.Appendp(q1, c.newprog) - q1.Pos = p.Pos - q1.As = ASUB - q1.From.Type = obj.TYPE_CONST - q1.From.Offset = int64(adj) - q1.To.Type = obj.TYPE_REG - q1.To.Reg = REGSP - } else { - // Constant too big for atomic subtract. - // Materialize in tmp register first. - q1 = obj.Appendp(q1, c.newprog) - q1.Pos = p.Pos - q1.As = AMOVD - q1.From.Type = obj.TYPE_CONST - q1.From.Offset = int64(adj) - q1.To.Type = obj.TYPE_REG - q1.To.Reg = REGTMP - + if c.autosize > aoffset { + // Frame size is too large for a MOVD.W instruction. Store the frame pointer + // register and link register before decrementing SP, so if a signal comes + // during the execution of the function prologue, the traceback code will + // not see a half-updated stack frame. + + // SUB $autosize, RSP, R20 + q1 = obj.Appendp(q, c.newprog) + q1.Pos = p.Pos + q1.As = ASUB + q1.From.Type = obj.TYPE_CONST + q1.From.Offset = int64(c.autosize) + q1.Reg = REGSP + q1.To.Type = obj.TYPE_REG + q1.To.Reg = REG_R20 + + prologueEnd = q1 + + // STP (R29, R30), -8(R20) + q1 = obj.Appendp(q1, c.newprog) + q1.Pos = p.Pos + q1.As = ASTP + q1.From.Type = obj.TYPE_REGREG + q1.From.Reg = REGFP + q1.From.Offset = REGLINK + q1.To.Type = obj.TYPE_MEM + q1.To.Reg = REG_R20 + q1.To.Offset = -8 + + // This is not async preemptible, as if we open a frame + // at the current SP, it will clobber the saved LR. + q1 = c.ctxt.StartUnsafePoint(q1, c.newprog) + + // MOVD R20, RSP + q1 = obj.Appendp(q1, c.newprog) + q1.Pos = p.Pos + q1.As = AMOVD + q1.From.Type = obj.TYPE_REG + q1.From.Reg = REG_R20 + q1.To.Type = obj.TYPE_REG + q1.To.Reg = REGSP + q1.Spadj = c.autosize + + q1 = c.ctxt.EndUnsafePoint(q1, c.newprog, -1) + + if buildcfg.GOOS == "ios" { + // iOS does not support SA_ONSTACK. We will run the signal handler + // on the G stack. If we write below SP, it may be clobbered by + // the signal handler. So we save FP and LR after decrementing SP. + // STP (R29, R30), -8(RSP) q1 = obj.Appendp(q1, c.newprog) q1.Pos = p.Pos - q1.As = ASUB - q1.From.Type = obj.TYPE_REG - q1.From.Reg = REGTMP - q1.To.Type = obj.TYPE_REG + q1.As = ASTP + q1.From.Type = obj.TYPE_REGREG + q1.From.Reg = REGFP + q1.From.Offset = REGLINK + q1.To.Type = obj.TYPE_MEM q1.To.Reg = REGSP + q1.To.Offset = -8 } - q1.Spadj = adj + } else { + // small frame, update SP and save LR in a single MOVD.W instruction. + // So if a signal comes during the execution of the function prologue, + // the traceback code will not see a half-updated stack frame. + // Also, on Linux, in a cgo binary we may get a SIGSETXID signal + // early on before the signal stack is set, as glibc doesn't allow + // us to block SIGSETXID. So it is important that we don't write below + // the SP until the signal stack is set. + // Luckily, all the functions from thread entry to setting the signal + // stack have small frames. + q1 = obj.Appendp(q, c.newprog) + q1.As = AMOVD + q1.Pos = p.Pos + q1.From.Type = obj.TYPE_REG + q1.From.Reg = REGLINK + q1.To.Type = obj.TYPE_MEM + q1.Scond = C_XPRE + q1.To.Offset = int64(-aoffset) + q1.To.Reg = REGSP + q1.Spadj = aoffset + + prologueEnd = q1 + + // Frame pointer. 
+ q1 = obj.Appendp(q1, c.newprog) + q1.Pos = p.Pos + q1.As = AMOVD + q1.From.Type = obj.TYPE_REG + q1.From.Reg = REGFP + q1.To.Type = obj.TYPE_MEM + q1.To.Reg = REGSP + q1.To.Offset = -8 } prologueEnd.Pos = prologueEnd.Pos.WithXlogue(src.PosPrologueEnd) + q1 = obj.Appendp(q1, c.newprog) + q1.Pos = p.Pos + q1.As = ASUB + q1.From.Type = obj.TYPE_CONST + q1.From.Offset = 8 + q1.Reg = REGSP + q1.To.Type = obj.TYPE_REG + q1.To.Reg = REGFP + case obj.ARET: nocache(p) if p.From.Type == obj.TYPE_CONST { @@ -653,56 +707,105 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { } p.To = obj.Addr{} aoffset := c.autosize - if aoffset > 0 { - if aoffset < 16 { - log.Fatalf("aoffset too small %d", aoffset) - } - adj := aoffset - 16 - if adj > 0 { - if adj < 1<<12 { - // ADD $adj, RSP, RSP - p.As = AADD - p.From.Type = obj.TYPE_CONST - p.From.Offset = int64(adj) - p.To.Type = obj.TYPE_REG - p.To.Reg = REGSP - } else { - // Put frame size in a separate register and - // add it in with a single instruction, - // so we never have a partial frame during - // the epilog. See issue 73259. - - // MOVD $adj, REGTMP - p.As = AMOVD - p.From.Type = obj.TYPE_CONST - p.From.Offset = int64(adj) - p.To.Type = obj.TYPE_REG - p.To.Reg = REGTMP - // ADD REGTMP, RSP, RSP - p = obj.Appendp(p, c.newprog) - p.As = AADD - p.From.Type = obj.TYPE_REG - p.From.Reg = REGTMP - p.To.Type = obj.TYPE_REG - p.To.Reg = REGSP - } - p.Spadj = -adj - } - - // Pop LR+FP. - // LDP.P 16(RSP), (R29, R30) - if p.As != obj.ARET { + if c.cursym.Func().Text.Mark&LEAF != 0 { + if aoffset != 0 { + // Restore frame pointer. + // ADD $framesize-8, RSP, R29 + p.As = AADD + p.From.Type = obj.TYPE_CONST + p.From.Offset = int64(c.autosize) - 8 + p.Reg = REGSP + p.To.Type = obj.TYPE_REG + p.To.Reg = REGFP + + // Pop stack frame. + // ADD $framesize, RSP, RSP p = obj.Appendp(p, c.newprog) + p.As = AADD + p.From.Type = obj.TYPE_CONST + p.From.Offset = int64(c.autosize) + p.To.Type = obj.TYPE_REG + p.To.Reg = REGSP + p.Spadj = -c.autosize } - p.As = ALDP + } else if aoffset <= 0xF0 { + // small frame, restore LR and update SP in a single MOVD.P instruction. + // There is no correctness issue to use a single LDP for LR and FP, + // but the instructions are not pattern matched with the prologue's + // MOVD.W and MOVD, which may cause performance issue in + // store-forwarding. + + // MOVD -8(RSP), R29 + p.As = AMOVD p.From.Type = obj.TYPE_MEM p.From.Reg = REGSP - p.From.Offset = 16 + p.From.Offset = -8 + p.To.Type = obj.TYPE_REG + p.To.Reg = REGFP + p = obj.Appendp(p, c.newprog) + + // MOVD.P offset(RSP), R30 + p.As = AMOVD + p.From.Type = obj.TYPE_MEM p.Scond = C_XPOST + p.From.Offset = int64(aoffset) + p.From.Reg = REGSP + p.To.Type = obj.TYPE_REG + p.To.Reg = REGLINK + p.Spadj = -aoffset + } else { + // LDP -8(RSP), (R29, R30) + p.As = ALDP + p.From.Type = obj.TYPE_MEM + p.From.Offset = -8 + p.From.Reg = REGSP p.To.Type = obj.TYPE_REGREG p.To.Reg = REGFP p.To.Offset = REGLINK - p.Spadj = -16 + + if aoffset < 1<<12 { + // ADD $aoffset, RSP, RSP + q = newprog() + q.As = AADD + q.From.Type = obj.TYPE_CONST + q.From.Offset = int64(aoffset) + q.To.Type = obj.TYPE_REG + q.To.Reg = REGSP + q.Spadj = -aoffset + q.Pos = p.Pos + q.Link = p.Link + p.Link = q + p = q + } else { + // Put frame size in a separate register and + // add it in with a single instruction, + // so we never have a partial frame during + // the epilog. See issue 73259. 
+ + // MOVD $aoffset, REGTMP + q = newprog() + q.As = AMOVD + q.From.Type = obj.TYPE_CONST + q.From.Offset = int64(aoffset) + q.To.Type = obj.TYPE_REG + q.To.Reg = REGTMP + q.Pos = p.Pos + q.Link = p.Link + p.Link = q + p = q + // ADD REGTMP, RSP, RSP + q = newprog() + q.As = AADD + q.From.Type = obj.TYPE_REG + q.From.Reg = REGTMP + q.To.Type = obj.TYPE_REG + q.To.Reg = REGSP + q.Spadj = -aoffset + q.Pos = p.Pos + q.Link = p.Link + p.Link = q + p = q + } } // If enabled, this code emits 'MOV PC, R27' before every 'MOV LR, PC', @@ -765,11 +868,10 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { p.From.Type = obj.TYPE_REG p.From.Reg = REGLINK } else { - /* MOVD framesize-8(RSP), Rd */ + /* MOVD (RSP), Rd */ p.As = AMOVD p.From.Type = obj.TYPE_MEM p.From.Reg = REGSP - p.From.Offset = int64(c.autosize - 8) } } if p.To.Type == obj.TYPE_REG && p.To.Reg == REGSP && p.Spadj == 0 { @@ -804,12 +906,6 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { p.From.Reg = int16(REG_LSL + r + (shift&7)<<5) p.From.Offset = 0 } - if p.To.Type == obj.TYPE_MEM && p.To.Reg == REG_RSP && (p.Scond == C_XPRE || p.Scond == C_XPOST) { - p.Spadj += int32(-p.To.Offset) - } - if p.From.Type == obj.TYPE_MEM && p.From.Reg == REG_RSP && (p.Scond == C_XPRE || p.Scond == C_XPOST) { - p.Spadj += int32(-p.From.Offset) - } } } diff --git a/src/cmd/link/internal/amd64/obj.go b/src/cmd/link/internal/amd64/obj.go index 761496549f93aa..3a6141b9091eb6 100644 --- a/src/cmd/link/internal/amd64/obj.go +++ b/src/cmd/link/internal/amd64/obj.go @@ -51,16 +51,15 @@ func Init() (*sys.Arch, ld.Arch) { Plan9Magic: uint32(4*26*26 + 7), Plan9_64Bit: true, - Adddynrel: adddynrel, - Archinit: archinit, - Archreloc: archreloc, - Archrelocvariant: archrelocvariant, - Gentext: gentext, - Machoreloc1: machoreloc1, - MachorelocSize: 8, - PEreloc1: pereloc1, - TLSIEtoLE: tlsIEtoLE, - ReturnAddressAtTopOfFrame: true, + Adddynrel: adddynrel, + Archinit: archinit, + Archreloc: archreloc, + Archrelocvariant: archrelocvariant, + Gentext: gentext, + Machoreloc1: machoreloc1, + MachorelocSize: 8, + PEreloc1: pereloc1, + TLSIEtoLE: tlsIEtoLE, ELF: ld.ELFArch{ Linuxdynld: "/lib64/ld-linux-x86-64.so.2", diff --git a/src/cmd/link/internal/arm64/obj.go b/src/cmd/link/internal/arm64/obj.go index e1e4ade81835c6..3d358155badbca 100644 --- a/src/cmd/link/internal/arm64/obj.go +++ b/src/cmd/link/internal/arm64/obj.go @@ -47,18 +47,17 @@ func Init() (*sys.Arch, ld.Arch) { Dwarfreglr: dwarfRegLR, TrampLimit: 0x7c00000, // 26-bit signed offset * 4, leave room for PLT etc. 
- Adddynrel: adddynrel, - Archinit: archinit, - Archreloc: archreloc, - Archrelocvariant: archrelocvariant, - Extreloc: extreloc, - Gentext: gentext, - GenSymsLate: gensymlate, - Machoreloc1: machoreloc1, - MachorelocSize: 8, - PEreloc1: pereloc1, - Trampoline: trampoline, - ReturnAddressAtTopOfFrame: true, + Adddynrel: adddynrel, + Archinit: archinit, + Archreloc: archreloc, + Archrelocvariant: archrelocvariant, + Extreloc: extreloc, + Gentext: gentext, + GenSymsLate: gensymlate, + Machoreloc1: machoreloc1, + MachorelocSize: 8, + PEreloc1: pereloc1, + Trampoline: trampoline, ELF: ld.ELFArch{ Androiddynld: "/system/bin/linker64", diff --git a/src/cmd/link/internal/ld/dwarf.go b/src/cmd/link/internal/ld/dwarf.go index c4d12a5488df3c..0003938ef2e036 100644 --- a/src/cmd/link/internal/ld/dwarf.go +++ b/src/cmd/link/internal/ld/dwarf.go @@ -1544,14 +1544,9 @@ func (d *dwctxt) writeframes(fs loader.Sym) dwarfSecInfo { if pcsp.Value > 0 { // The return address is preserved at (CFA-frame_size) // after a stack frame has been allocated. - off := -spdelta - if thearch.ReturnAddressAtTopOfFrame { - // Except arm64, which has it at the top of frame. - off = -int64(d.arch.PtrSize) - } deltaBuf = append(deltaBuf, dwarf.DW_CFA_offset_extended_sf) deltaBuf = dwarf.AppendUleb128(deltaBuf, uint64(thearch.Dwarfreglr)) - deltaBuf = dwarf.AppendSleb128(deltaBuf, off/dataAlignmentFactor) + deltaBuf = dwarf.AppendSleb128(deltaBuf, -spdelta/dataAlignmentFactor) } else { // The return address is restored into the link register // when a stack frame has been de-allocated. diff --git a/src/cmd/link/internal/ld/lib.go b/src/cmd/link/internal/ld/lib.go index 5f5ebfc1d9855c..2c861129b52f9a 100644 --- a/src/cmd/link/internal/ld/lib.go +++ b/src/cmd/link/internal/ld/lib.go @@ -263,10 +263,6 @@ type Arch struct { // optional override for assignAddress AssignAddress func(ldr *loader.Loader, sect *sym.Section, n int, s loader.Sym, va uint64, isTramp bool) (*sym.Section, int, uint64) - // Reports whether the return address is stored at the top (highest address) - // of the stack frame. - ReturnAddressAtTopOfFrame bool - // ELF specific information. ELF ELFArch } diff --git a/src/cmd/link/internal/ld/stackcheck.go b/src/cmd/link/internal/ld/stackcheck.go index 14cd3a22384f88..98e7edaeb1477b 100644 --- a/src/cmd/link/internal/ld/stackcheck.go +++ b/src/cmd/link/internal/ld/stackcheck.go @@ -9,6 +9,7 @@ import ( "cmd/internal/objabi" "cmd/link/internal/loader" "fmt" + "internal/buildcfg" "sort" "strings" ) @@ -61,6 +62,10 @@ func (ctxt *Link) doStackCheck() { // that there are at least StackLimit bytes available below SP // when morestack returns. limit := objabi.StackNosplit(*flagRace) - sc.callSize + if buildcfg.GOARCH == "arm64" { + // Need an extra 8 bytes below SP to save FP. + limit -= 8 + } // Compute stack heights without any back-tracking information. 
// This will almost certainly succeed and we can simply diff --git a/src/cmd/link/internal/x86/obj.go b/src/cmd/link/internal/x86/obj.go index a4885fde8fd06f..4336f01ea3d536 100644 --- a/src/cmd/link/internal/x86/obj.go +++ b/src/cmd/link/internal/x86/obj.go @@ -50,14 +50,13 @@ func Init() (*sys.Arch, ld.Arch) { Plan9Magic: uint32(4*11*11 + 7), - Adddynrel: adddynrel, - Archinit: archinit, - Archreloc: archreloc, - Archrelocvariant: archrelocvariant, - Gentext: gentext, - Machoreloc1: machoreloc1, - PEreloc1: pereloc1, - ReturnAddressAtTopOfFrame: true, + Adddynrel: adddynrel, + Archinit: archinit, + Archreloc: archreloc, + Archrelocvariant: archrelocvariant, + Gentext: gentext, + Machoreloc1: machoreloc1, + PEreloc1: pereloc1, ELF: ld.ELFArch{ Linuxdynld: "/lib/ld-linux.so.2", diff --git a/src/runtime/asm_arm64.s b/src/runtime/asm_arm64.s index aa49a27a75d1e3..a0e82ec830f74b 100644 --- a/src/runtime/asm_arm64.s +++ b/src/runtime/asm_arm64.s @@ -50,7 +50,9 @@ TEXT _rt0_arm64_lib(SB),NOSPLIT,$184 CBZ R4, nocgo MOVD $_rt0_arm64_lib_go(SB), R0 MOVD $0, R1 + SUB $16, RSP // reserve 16 bytes for sp-8 where fp may be saved. BL (R4) + ADD $16, RSP B restore nocgo: @@ -369,6 +371,7 @@ switch: BL runtime·save_g(SB) MOVD (g_sched+gobuf_sp)(g), R0 MOVD R0, RSP + MOVD (g_sched+gobuf_bp)(g), R29 MOVD $0, (g_sched+gobuf_sp)(g) MOVD $0, (g_sched+gobuf_bp)(g) RET @@ -378,8 +381,8 @@ noswitch: // Using a tail call here cleans up tracebacks since we won't stop // at an intermediate systemstack. MOVD 0(R26), R3 // code pointer - ADD $16, RSP - LDP.P 16(RSP), (R29,R30) // restore FP, LR + MOVD.P 16(RSP), R30 // restore LR + SUB $8, RSP, R29 // restore FP B (R3) // func switchToCrashStack0(fn func()) @@ -1048,7 +1051,7 @@ again: // Smashes R0. TEXT gosave_systemstack_switch<>(SB),NOSPLIT|NOFRAME,$0 MOVD $runtime·systemstack_switch(SB), R0 - ADD $12, R0 // get past prologue + ADD $8, R0 // get past prologue MOVD R0, (g_sched+gobuf_pc)(g) MOVD RSP, R0 MOVD R0, (g_sched+gobuf_sp)(g) @@ -1066,7 +1069,9 @@ TEXT gosave_systemstack_switch<>(SB),NOSPLIT|NOFRAME,$0 TEXT ·asmcgocall_no_g(SB),NOSPLIT,$0-16 MOVD fn+0(FP), R1 MOVD arg+8(FP), R0 + SUB $16, RSP // skip over saved frame pointer below RSP BL (R1) + ADD $16, RSP // skip over saved frame pointer below RSP RET // func asmcgocall(fn, arg unsafe.Pointer) int32 @@ -1231,9 +1236,9 @@ havem: BL runtime·save_g(SB) MOVD (g_sched+gobuf_sp)(g), R4 // prepare stack as R4 MOVD (g_sched+gobuf_pc)(g), R5 - MOVD R5, -8(R4) + MOVD R5, -48(R4) MOVD (g_sched+gobuf_bp)(g), R5 - MOVD R5, -16(R4) + MOVD R5, -56(R4) // Gather our arguments into registers. MOVD fn+0(FP), R1 MOVD frame+8(FP), R2 @@ -1247,7 +1252,7 @@ havem: CALL (R0) // indirect call to bypass nosplit check. We're on a different stack now. // Restore g->sched (== m->curg->sched) from saved values. - MOVD 40(RSP), R5 + MOVD 0(RSP), R5 MOVD R5, (g_sched+gobuf_pc)(g) MOVD RSP, R4 ADD $48, R4, R4 @@ -1485,57 +1490,10 @@ GLOBL debugCallFrameTooLarge<>(SB), RODATA, $20 // Size duplicated below // // This is ABIInternal because Go code injects its PC directly into new // goroutine stacks. -// -// State before debugger starts doing anything: -// | current | -// | stack | -// +-------------+ <- SP = origSP -// stopped executing at PC = origPC -// some values are in LR (origLR) and FP (origFP) -// -// After debugger has done steps 1-6 above: -// | current | -// | stack | -// +-------------+ <- origSP -// | ----- | (used to be a slot to store frame pointer on entry to origPC's frame.) 
-// +-------------+ -// | origLR | -// +-------------+ <- SP -// | ----- | -// +-------------+ -// | argsize | -// +-------------+ -// LR = origPC, PC = debugCallV2 -// -// debugCallV2 then modifies the stack up to the "good" label: -// | current | -// | stack | -// +-------------+ <- origSP -// | ----- | (used to be a slot to store frame pointer on entry to origPC's frame.) -// +-------------+ -// | origLR | -// +-------------+ <- where debugger left SP -// | origPC | -// +-------------+ -// | origFP | -// +-------------+ <- FP = SP + 256 -// | saved | -// | registers | -// | (224 bytes) | -// +-------------+ <- SP + 32 -// | space for | -// | outargs | -// +-------------+ <- SP + 8 -// | argsize | -// +-------------+ <- SP - TEXT runtime·debugCallV2(SB),NOSPLIT|NOFRAME,$0-0 - MOVD R30, -8(RSP) // save origPC - MOVD -16(RSP), R30 // save argsize in R30 temporarily - MOVD.W R29, -16(RSP) // push origFP - MOVD RSP, R29 // frame pointer chain now set up - SUB $256, RSP, RSP // allocate frame - MOVD R30, (RSP) // Save argsize on the stack + STP (R29, R30), -280(RSP) + SUB $272, RSP, RSP + SUB $8, RSP, R29 // Save all registers that may contain pointers so they can be // conservatively scanned. // @@ -1557,8 +1515,7 @@ TEXT runtime·debugCallV2(SB),NOSPLIT|NOFRAME,$0-0 STP (R0, R1), (4*8)(RSP) // Perform a safe-point check. - MOVD 264(RSP), R0 // origPC - MOVD R0, 8(RSP) + MOVD R30, 8(RSP) // Caller's PC CALL runtime·debugCallCheck(SB) MOVD 16(RSP), R0 CBZ R0, good @@ -1602,7 +1559,7 @@ good: CALL runtime·debugCallWrap(SB); \ JMP restore - MOVD (RSP), R0 // the argument frame size + MOVD 256(RSP), R0 // the argument frame size DEBUG_CALL_DISPATCH(debugCall32<>, 32) DEBUG_CALL_DISPATCH(debugCall64<>, 64) DEBUG_CALL_DISPATCH(debugCall128<>, 128) @@ -1650,9 +1607,9 @@ restore: LDP (6*8)(RSP), (R2, R3) LDP (4*8)(RSP), (R0, R1) - MOVD 272(RSP), R30 // restore old lr (saved by (*sigctxt).pushCall) - LDP 256(RSP), (R29, R27) // restore old fp, set up resumption address - ADD $288, RSP, RSP // Pop frame, LR+FP, and block pushed by (*sigctxt).pushCall + LDP -8(RSP), (R29, R27) + ADD $288, RSP, RSP // Add 16 more bytes, see saveSigContext + MOVD -16(RSP), R30 // restore old lr JMP (R27) // runtime.debugCallCheck assumes that functions defined with the diff --git a/src/runtime/mkpreempt.go b/src/runtime/mkpreempt.go index 9064cae039f00d..769c4ffc5c9eeb 100644 --- a/src/runtime/mkpreempt.go +++ b/src/runtime/mkpreempt.go @@ -488,18 +488,26 @@ func genARM64(g *gen) { l.stack += 8 // SP needs 16-byte alignment } - // allocate frame, save PC (in R30), FP (in R29) of interrupted instruction - p("STP.W (R29, R30), -16(RSP)") - p("MOVD RSP, R29") // set up new frame pointer + // allocate frame, save PC of interrupted instruction (in LR) + p("MOVD R30, %d(RSP)", -l.stack) p("SUB $%d, RSP", l.stack) + p("MOVD R29, -8(RSP)") // save frame pointer (only used on Linux) + p("SUB $8, RSP, R29") // set up new frame pointer + // On iOS, save the LR again after decrementing SP. We run the + // signal handler on the G stack (as it doesn't support sigaltstack), + // so any writes below SP may be clobbered. + p("#ifdef GOOS_ios") + p("MOVD R30, (RSP)") + p("#endif") l.save(g) p("CALL ·asyncPreempt2(SB)") l.restore(g) - p("MOVD %d(RSP), R30", l.stack+16) // sigctxt.pushCall has pushed LR (at interrupt) on stack, restore it - p("LDP %d(RSP), (R29, R27)", l.stack) // Restore frame pointer. Load PC into regtmp. 
- p("ADD $%d, RSP", l.stack+32) // pop frame (including the space pushed by sigctxt.pushCall) + p("MOVD %d(RSP), R30", l.stack) // sigctxt.pushCall has pushed LR (at interrupt) on stack, restore it + p("MOVD -8(RSP), R29") // restore frame pointer + p("MOVD (RSP), R27") // load PC to REGTMP + p("ADD $%d, RSP", l.stack+16) // pop frame (including the space pushed by sigctxt.pushCall) p("RET (R27)") } diff --git a/src/runtime/panic.go b/src/runtime/panic.go index 04b3afe1682765..8c91c9435abd18 100644 --- a/src/runtime/panic.go +++ b/src/runtime/panic.go @@ -1379,10 +1379,10 @@ func recovery(gp *g) { // the caller gp.sched.bp = fp - 2*goarch.PtrSize case goarch.IsArm64 != 0: - // on arm64, the first two words of the frame are caller's PC - // (the saved LR register) and the caller's BP. - // Coincidentally, the same as amd64. - gp.sched.bp = fp - 2*goarch.PtrSize + // on arm64, the architectural bp points one word higher + // than the sp. fp is totally useless to us here, because it + // only gets us to the caller's fp. + gp.sched.bp = sp - goarch.PtrSize } gogo(&gp.sched) } diff --git a/src/runtime/preempt_arm64.s b/src/runtime/preempt_arm64.s index f4248cac257550..31ec9d940f76d4 100644 --- a/src/runtime/preempt_arm64.s +++ b/src/runtime/preempt_arm64.s @@ -4,9 +4,13 @@ #include "textflag.h" TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0 - STP.W (R29, R30), -16(RSP) - MOVD RSP, R29 + MOVD R30, -496(RSP) SUB $496, RSP + MOVD R29, -8(RSP) + SUB $8, RSP, R29 + #ifdef GOOS_ios + MOVD R30, (RSP) + #endif STP (R0, R1), 8(RSP) STP (R2, R3), 24(RSP) STP (R4, R5), 40(RSP) @@ -74,7 +78,8 @@ TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0 LDP 40(RSP), (R4, R5) LDP 24(RSP), (R2, R3) LDP 8(RSP), (R0, R1) - MOVD 512(RSP), R30 - LDP 496(RSP), (R29, R27) - ADD $528, RSP + MOVD 496(RSP), R30 + MOVD -8(RSP), R29 + MOVD (RSP), R27 + ADD $512, RSP RET (R27) diff --git a/src/runtime/race_arm64.s b/src/runtime/race_arm64.s index feaa328d4c0d8a..5df650105bb4d5 100644 --- a/src/runtime/race_arm64.s +++ b/src/runtime/race_arm64.s @@ -397,7 +397,7 @@ TEXT racecallatomic<>(SB), NOSPLIT, $0 // R3 = addr of incoming arg list // Trigger SIGSEGV early. - MOVD 72(RSP), R3 // 1st arg is addr. after two small frames (32 bytes each), get it at 72(RSP) + MOVD 40(RSP), R3 // 1st arg is addr. after two times BL, get it at 40(RSP) MOVB (R3), R13 // segv here if addr is bad // Check that addr is within [arenastart, arenaend) or within [racedatastart, racedataend). MOVD runtime·racearenastart(SB), R10 @@ -417,11 +417,10 @@ racecallatomic_ok: // Addr is within the good range, call the atomic function. load_g MOVD g_racectx(g), R0 // goroutine context - MOVD 56(RSP), R1 // caller pc + MOVD 16(RSP), R1 // caller pc MOVD R9, R2 // pc - ADD $72, RSP, R3 - BL racecall<>(SB) - RET + ADD $40, RSP, R3 + JMP racecall<>(SB) // does not return racecallatomic_ignore: // Addr is outside the good range. // Call __tsan_go_ignore_sync_begin to ignore synchronization during the atomic op. @@ -436,9 +435,9 @@ racecallatomic_ignore: // racecall will call LLVM race code which might clobber R28 (g) load_g MOVD g_racectx(g), R0 // goroutine context - MOVD 56(RSP), R1 // caller pc + MOVD 16(RSP), R1 // caller pc MOVD R9, R2 // pc - ADD $72, RSP, R3 // arguments + ADD $40, RSP, R3 // arguments BL racecall<>(SB) // Call __tsan_go_ignore_sync_end. 
MOVD $__tsan_go_ignore_sync_end(SB), R9 @@ -477,6 +476,10 @@ TEXT racecall<>(SB), NOSPLIT|NOFRAME, $0-0 MOVD (g_sched+gobuf_sp)(R11), R12 MOVD R12, RSP call: + // Decrement SP past where the frame pointer is saved in the Go arm64 + // ABI (one word below the stack pointer) so the race detector library + // code doesn't clobber it + SUB $16, RSP BL R9 MOVD R19, RSP JMP (R20) diff --git a/src/runtime/signal_arm64.go b/src/runtime/signal_arm64.go index 61dad507219caf..af7d29f9de1d31 100644 --- a/src/runtime/signal_arm64.go +++ b/src/runtime/signal_arm64.go @@ -8,6 +8,7 @@ package runtime import ( "internal/abi" + "internal/goarch" "internal/runtime/sys" "unsafe" ) @@ -62,11 +63,18 @@ func (c *sigctxt) preparePanic(sig uint32, gp *g) { // We arrange lr, and pc to pretend the panicking // function calls sigpanic directly. // Always save LR to stack so that panics in leaf - // functions are correctly handled. - // This extra space is known to gentraceback. + // functions are correctly handled. This smashes + // the stack frame but we're not going back there + // anyway. sp := c.sp() - sys.StackAlign // needs only sizeof uint64, but must align the stack c.set_sp(sp) *(*uint64)(unsafe.Pointer(uintptr(sp))) = c.lr() + // Make sure a valid frame pointer is saved on the stack so that the + // frame pointer checks in adjustframe are happy, if they're enabled. + // Frame pointer unwinding won't visit the sigpanic frame, since + // sigpanic will save the same frame pointer before calling into a panic + // function. + *(*uint64)(unsafe.Pointer(uintptr(sp - goarch.PtrSize))) = c.r29() pc := gp.sigpc @@ -88,6 +96,10 @@ func (c *sigctxt) pushCall(targetPC, resumePC uintptr) { sp := c.sp() - 16 // SP needs 16-byte alignment c.set_sp(sp) *(*uint64)(unsafe.Pointer(uintptr(sp))) = c.lr() + // Make sure a valid frame pointer is saved on the stack so that the + // frame pointer checks in adjustframe are happy, if they're enabled. + // This is not actually used for unwinding. + *(*uint64)(unsafe.Pointer(uintptr(sp - goarch.PtrSize))) = c.r29() // Set up PC and LR to pretend the function being signaled // calls targetPC at resumePC. c.set_lr(uint64(resumePC)) diff --git a/src/runtime/stack.go b/src/runtime/stack.go index 5eaceec6da14d5..55e97e77afa957 100644 --- a/src/runtime/stack.go +++ b/src/runtime/stack.go @@ -579,27 +579,23 @@ var ptrnames = []string{ // | args to callee | // +------------------+ <- frame->sp // -// (arm64) +// (arm) // +------------------+ // | args from caller | // +------------------+ <- frame->argp -// | | -// +------------------+ <- frame->fp (aka caller's sp) -// | return address | +// | caller's retaddr | // +------------------+ -// | caller's FP | (frame pointer always enabled: TODO) +// | caller's FP (*) | (*) on ARM64, if framepointer_enabled && varp > sp // +------------------+ <- frame->varp // | locals | // +------------------+ // | args to callee | // +------------------+ -// | | +// | return address | // +------------------+ <- frame->sp // // varp > sp means that the function has a frame; // varp == sp means frameless function. -// -// Alignment padding, if needed, will be between "locals" and "args to callee". type adjustinfo struct { old stack @@ -713,8 +709,7 @@ func adjustframe(frame *stkframe, adjinfo *adjustinfo) { } // Adjust saved frame pointer if there is one. 
- if goarch.ArchFamily == goarch.AMD64 && frame.argp-frame.varp == 2*goarch.PtrSize || - goarch.ArchFamily == goarch.ARM64 && frame.argp-frame.varp == 3*goarch.PtrSize { + if (goarch.ArchFamily == goarch.AMD64 || goarch.ArchFamily == goarch.ARM64) && frame.argp-frame.varp == 2*goarch.PtrSize { if stackDebug >= 3 { print(" saved bp\n") } @@ -728,7 +723,10 @@ func adjustframe(frame *stkframe, adjinfo *adjustinfo) { throw("bad frame pointer") } } - // This is the caller's frame pointer saved in the current frame. + // On AMD64, this is the caller's frame pointer saved in the current + // frame. + // On ARM64, this is the frame pointer of the caller's caller saved + // by the caller in its frame (one word below its SP). adjustpointer(adjinfo, unsafe.Pointer(frame.varp)) } diff --git a/src/runtime/testdata/testprog/badtraceback.go b/src/runtime/testdata/testprog/badtraceback.go index 36575f765db9a5..455118a54371d7 100644 --- a/src/runtime/testdata/testprog/badtraceback.go +++ b/src/runtime/testdata/testprog/badtraceback.go @@ -41,11 +41,6 @@ func badLR2(arg int) { if runtime.GOARCH == "ppc64" || runtime.GOARCH == "ppc64le" { lrOff = 32 // FIXED_FRAME or sys.MinFrameSize } - if runtime.GOARCH == "arm64" { - // skip 8 bytes at bottom of parent frame, then point - // to the 8 bytes of the saved PC at the top of the frame. - lrOff = 16 - } lrPtr := (*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&arg)) - lrOff)) *lrPtr = 0xbad diff --git a/src/runtime/traceback.go b/src/runtime/traceback.go index 1c3e679a02bdaf..8882c306edb736 100644 --- a/src/runtime/traceback.go +++ b/src/runtime/traceback.go @@ -175,11 +175,6 @@ func (u *unwinder) initAt(pc0, sp0, lr0 uintptr, gp *g, flags unwindFlags) { // Start in the caller's frame. if frame.pc == 0 { if usesLR { - // TODO: this isn't right on arm64. But also, this should - // ~never happen. Calling a nil function will panic - // when loading the PC out of the closure, not when - // branching to that PC. (Closures should always have - // valid PCs in their first word.) frame.pc = *(*uintptr)(unsafe.Pointer(frame.sp)) frame.lr = 0 } else { @@ -374,11 +369,7 @@ func (u *unwinder) resolveInternal(innermost, isSyscall bool) { var lrPtr uintptr if usesLR { if innermost && frame.sp < frame.fp || frame.lr == 0 { - if GOARCH == "arm64" { - lrPtr = frame.fp - goarch.PtrSize - } else { - lrPtr = frame.sp - } + lrPtr = frame.sp frame.lr = *(*uintptr)(unsafe.Pointer(lrPtr)) } } else { @@ -394,17 +385,24 @@ func (u *unwinder) resolveInternal(innermost, isSyscall bool) { // On x86, call instruction pushes return PC before entering new function. frame.varp -= goarch.PtrSize } - if GOARCH == "arm64" && frame.varp > frame.sp { - frame.varp -= goarch.PtrSize // LR have been saved, skip over it. - } // For architectures with frame pointers, if there's // a frame, then there's a saved frame pointer here. // // NOTE: This code is not as general as it looks. - // On x86 and arm64, the ABI is to save the frame pointer word at the + // On x86, the ABI is to save the frame pointer word at the // top of the stack frame, so we have to back down over it. - // No other architectures are framepointer-enabled at the moment. + // On arm64, the frame pointer should be at the bottom of + // the stack (with R29 (aka FP) = RSP), in which case we would + // not want to do the subtraction here. 
But we started out without + // any frame pointer, and when we wanted to add it, we didn't + // want to break all the assembly doing direct writes to 8(RSP) + // to set the first parameter to a called function. + // So we decided to write the FP link *below* the stack pointer + // (with R29 = RSP - 8 in Go functions). + // This is technically ABI-compatible but not standard. + // And it happens to end up mimicking the x86 layout. + // Other architectures may make different decisions. if frame.varp > frame.sp && framepointer_enabled { frame.varp -= goarch.PtrSize } @@ -564,7 +562,7 @@ func (u *unwinder) finishInternal() { gp := u.g.ptr() if u.flags&(unwindPrintErrors|unwindSilentErrors) == 0 && u.frame.sp != gp.stktopsp { print("runtime: g", gp.goid, ": frame.sp=", hex(u.frame.sp), " top=", hex(gp.stktopsp), "\n") - print("\tstack=[", hex(gp.stack.lo), "-", hex(gp.stack.hi), "]\n") + print("\tstack=[", hex(gp.stack.lo), "-", hex(gp.stack.hi), "\n") throw("traceback did not unwind completely") } } diff --git a/test/nosplit.go b/test/nosplit.go index 1f943fa18c3f58..4b4c93b1d067c5 100644 --- a/test/nosplit.go +++ b/test/nosplit.go @@ -142,7 +142,7 @@ start 136 # (CallSize is 32 on ppc64, 8 on amd64 for frame pointer.) start 96 nosplit start 100 nosplit; REJECT ppc64 ppc64le -start 104 nosplit; REJECT ppc64 ppc64le +start 104 nosplit; REJECT ppc64 ppc64le arm64 start 108 nosplit; REJECT ppc64 ppc64le start 112 nosplit; REJECT ppc64 ppc64le arm64 start 116 nosplit; REJECT ppc64 ppc64le @@ -160,7 +160,7 @@ start 136 nosplit; REJECT # Because AMD64 uses frame pointer, it has 8 fewer bytes. start 96 nosplit call f; f 0 nosplit start 100 nosplit call f; f 0 nosplit; REJECT ppc64 ppc64le -start 104 nosplit call f; f 0 nosplit; REJECT ppc64 ppc64le +start 104 nosplit call f; f 0 nosplit; REJECT ppc64 ppc64le arm64 start 108 nosplit call f; f 0 nosplit; REJECT ppc64 ppc64le start 112 nosplit call f; f 0 nosplit; REJECT ppc64 ppc64le amd64 arm64 start 116 nosplit call f; f 0 nosplit; REJECT ppc64 ppc64le amd64 @@ -176,7 +176,7 @@ start 136 nosplit call f; f 0 nosplit; REJECT # Architectures differ in the same way as before. start 96 nosplit call f; f 0 call f start 100 nosplit call f; f 0 call f; REJECT ppc64 ppc64le -start 104 nosplit call f; f 0 call f; REJECT ppc64 ppc64le amd64 +start 104 nosplit call f; f 0 call f; REJECT ppc64 ppc64le amd64 arm64 start 108 nosplit call f; f 0 call f; REJECT ppc64 ppc64le amd64 start 112 nosplit call f; f 0 call f; REJECT ppc64 ppc64le amd64 arm64 start 116 nosplit call f; f 0 call f; REJECT ppc64 ppc64le amd64 @@ -189,7 +189,7 @@ start 136 nosplit call f; f 0 call f; REJECT # Indirect calls are assumed to be splitting functions. start 96 nosplit callind start 100 nosplit callind; REJECT ppc64 ppc64le -start 104 nosplit callind; REJECT ppc64 ppc64le amd64 +start 104 nosplit callind; REJECT ppc64 ppc64le amd64 arm64 start 108 nosplit callind; REJECT ppc64 ppc64le amd64 start 112 nosplit callind; REJECT ppc64 ppc64le amd64 arm64 start 116 nosplit callind; REJECT ppc64 ppc64le amd64 From f86ddb54b5b8e4cb30b8fe2f9f3a2c0c172e7c37 Mon Sep 17 00:00:00 2001 From: Ian Alexander Date: Wed, 20 Aug 2025 19:21:56 -0400 Subject: [PATCH 075/152] cmd/go: refactor usage of `ForceUseModules` This commit refactors usage of the global variable `ForceUseModules` to the global LoaderState field of the same name. This commit is part of the overall effort to eliminate global modloader state. 
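Both this change and the RootMode change in the next patch apply the same
mechanical refactoring: a package-level variable becomes a field on a shared
State value, and call sites are rewritten (via rf) to read it through that
value, so the loader configuration can be saved, swapped, and restored as a
unit. A condensed sketch of the before/after shape, using simplified stand-ins
rather than the real modload declarations:

    package modstate

    // Root mirrors modload.Root: whether a module root is required.
    type Root int

    // Before the refactor these were independent package-level globals:
    //
    //     var ForceUseModules bool
    //     var RootMode Root
    //
    // After it, the knobs live on one state object.
    type State struct {
        // ForceUseModules may be set to force modules to be enabled when
        // GO111MODULE=auto or to report an error when GO111MODULE=off.
        ForceUseModules bool

        // RootMode determines whether a module root is needed.
        RootMode Root
    }

    // LoaderState is the single shared instance read throughout the package.
    var LoaderState = &State{}

    // setState swaps in a new configuration and returns the previous one
    // so callers (such as a workspace-mode entry point) can restore it.
    func setState(s State) State {
        old := *LoaderState
        *LoaderState = s
        return old
    }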
[git-generate] cd src/cmd/go/internal/modload rf 'mv State.forceUseModules State.ForceUseModules' rf 'ex { ForceUseModules -> LoaderState.ForceUseModules }' for dir in load modcmd modget run toolchain work workcmd ; do cd ../${dir} rf 'ex { import "cmd/go/internal/modload"; modload.ForceUseModules -> modload.LoaderState.ForceUseModules }' done cd ../modload rf 'add State.initialized \ // ForceUseModules may be set to force modules to be enabled when\ // GO111MODULE=auto or to report an error when GO111MODULE=off.' rf 'rm ForceUseModules' Change-Id: Ibdecfd273ff672516c9eb86279e5dfc6cdecb2ea Reviewed-on: https://go-review.googlesource.com/c/go/+/698057 Reviewed-by: Michael Matloob Reviewed-by: Michael Matloob LUCI-TryBot-Result: Go LUCI --- src/cmd/go/internal/load/pkg.go | 2 +- src/cmd/go/internal/modcmd/download.go | 2 +- src/cmd/go/internal/modcmd/graph.go | 2 +- src/cmd/go/internal/modcmd/init.go | 2 +- src/cmd/go/internal/modcmd/tidy.go | 2 +- src/cmd/go/internal/modcmd/vendor.go | 2 +- src/cmd/go/internal/modcmd/verify.go | 2 +- src/cmd/go/internal/modcmd/why.go | 2 +- src/cmd/go/internal/modget/get.go | 2 +- src/cmd/go/internal/modload/init.go | 21 ++++++++++----------- src/cmd/go/internal/run/run.go | 2 +- src/cmd/go/internal/toolchain/select.go | 4 ++-- src/cmd/go/internal/work/build.go | 2 +- src/cmd/go/internal/workcmd/init.go | 2 +- src/cmd/go/internal/workcmd/sync.go | 2 +- src/cmd/go/internal/workcmd/use.go | 2 +- 16 files changed, 26 insertions(+), 27 deletions(-) diff --git a/src/cmd/go/internal/load/pkg.go b/src/cmd/go/internal/load/pkg.go index 1f791546f90088..48b2e70d74b759 100644 --- a/src/cmd/go/internal/load/pkg.go +++ b/src/cmd/go/internal/load/pkg.go @@ -3348,7 +3348,7 @@ func GoFilesPackage(ctx context.Context, opts PackageOpts, gofiles []string) *Pa // would cause it to be interpreted differently if it were the main module // (replace, exclude). func PackagesAndErrorsOutsideModule(ctx context.Context, opts PackageOpts, args []string) ([]*Package, error) { - if !modload.ForceUseModules { + if !modload.LoaderState.ForceUseModules { panic("modload.ForceUseModules must be true") } if modload.RootMode != modload.NoRoot { diff --git a/src/cmd/go/internal/modcmd/download.go b/src/cmd/go/internal/modcmd/download.go index 2f4feae8f254b2..6d12d689f0ff51 100644 --- a/src/cmd/go/internal/modcmd/download.go +++ b/src/cmd/go/internal/modcmd/download.go @@ -112,7 +112,7 @@ func runDownload(ctx context.Context, cmd *base.Command, args []string) { modload.InitWorkfile() // Check whether modules are enabled and whether we're in a module. 
- modload.ForceUseModules = true + modload.LoaderState.ForceUseModules = true modload.ExplicitWriteGoMod = true haveExplicitArgs := len(args) > 0 diff --git a/src/cmd/go/internal/modcmd/graph.go b/src/cmd/go/internal/modcmd/graph.go index 172c1dda5ce8fb..4abae33129ab6b 100644 --- a/src/cmd/go/internal/modcmd/graph.go +++ b/src/cmd/go/internal/modcmd/graph.go @@ -57,7 +57,7 @@ func runGraph(ctx context.Context, cmd *base.Command, args []string) { if len(args) > 0 { base.Fatalf("go: 'go mod graph' accepts no arguments") } - modload.ForceUseModules = true + modload.LoaderState.ForceUseModules = true modload.RootMode = modload.NeedRoot goVersion := graphGo.String() diff --git a/src/cmd/go/internal/modcmd/init.go b/src/cmd/go/internal/modcmd/init.go index 356a0569913edf..618c673bf86f24 100644 --- a/src/cmd/go/internal/modcmd/init.go +++ b/src/cmd/go/internal/modcmd/init.go @@ -43,6 +43,6 @@ func runInit(ctx context.Context, cmd *base.Command, args []string) { modPath = args[0] } - modload.ForceUseModules = true + modload.LoaderState.ForceUseModules = true modload.CreateModFile(ctx, modPath) // does all the hard work } diff --git a/src/cmd/go/internal/modcmd/tidy.go b/src/cmd/go/internal/modcmd/tidy.go index 2efa33a7c343dd..dde70c6d74181e 100644 --- a/src/cmd/go/internal/modcmd/tidy.go +++ b/src/cmd/go/internal/modcmd/tidy.go @@ -119,7 +119,7 @@ func runTidy(ctx context.Context, cmd *base.Command, args []string) { // those packages. In order to make 'go test' reproducible for the packages // that are in 'all' but outside of the main module, we must explicitly // request that their test dependencies be included. - modload.ForceUseModules = true + modload.LoaderState.ForceUseModules = true modload.RootMode = modload.NeedRoot goVersion := tidyGo.String() diff --git a/src/cmd/go/internal/modcmd/vendor.go b/src/cmd/go/internal/modcmd/vendor.go index e1a9081a95ff80..bd3d8d602e2382 100644 --- a/src/cmd/go/internal/modcmd/vendor.go +++ b/src/cmd/go/internal/modcmd/vendor.go @@ -77,7 +77,7 @@ func RunVendor(ctx context.Context, vendorE bool, vendorO string, args []string) if len(args) != 0 { base.Fatalf("go: 'go mod vendor' accepts no arguments") } - modload.ForceUseModules = true + modload.LoaderState.ForceUseModules = true modload.RootMode = modload.NeedRoot loadOpts := modload.PackageOpts{ diff --git a/src/cmd/go/internal/modcmd/verify.go b/src/cmd/go/internal/modcmd/verify.go index d07f730c5d0dcf..ecd25d3a40e610 100644 --- a/src/cmd/go/internal/modcmd/verify.go +++ b/src/cmd/go/internal/modcmd/verify.go @@ -50,7 +50,7 @@ func runVerify(ctx context.Context, cmd *base.Command, args []string) { // NOTE(rsc): Could take a module pattern. base.Fatalf("go: verify takes no arguments") } - modload.ForceUseModules = true + modload.LoaderState.ForceUseModules = true modload.RootMode = modload.NeedRoot // Only verify up to GOMAXPROCS zips at once. 
diff --git a/src/cmd/go/internal/modcmd/why.go b/src/cmd/go/internal/modcmd/why.go index 198672d8064113..6c4bf8abab17ee 100644 --- a/src/cmd/go/internal/modcmd/why.go +++ b/src/cmd/go/internal/modcmd/why.go @@ -64,7 +64,7 @@ func init() { func runWhy(ctx context.Context, cmd *base.Command, args []string) { modload.InitWorkfile() - modload.ForceUseModules = true + modload.LoaderState.ForceUseModules = true modload.RootMode = modload.NeedRoot modload.ExplicitWriteGoMod = true // don't write go.mod in ListModules diff --git a/src/cmd/go/internal/modget/get.go b/src/cmd/go/internal/modget/get.go index 25dbf3972fd465..167f515be98a7d 100644 --- a/src/cmd/go/internal/modget/get.go +++ b/src/cmd/go/internal/modget/get.go @@ -298,7 +298,7 @@ func runGet(ctx context.Context, cmd *base.Command, args []string) { base.Fatalf("go: -insecure flag is no longer supported; use GOINSECURE instead") } - modload.ForceUseModules = true + modload.LoaderState.ForceUseModules = true // Do not allow any updating of go.mod until we've applied // all the requested changes and checked that the result matches diff --git a/src/cmd/go/internal/modload/init.go b/src/cmd/go/internal/modload/init.go index d7d532ec942665..264c02ef6db100 100644 --- a/src/cmd/go/internal/modload/init.go +++ b/src/cmd/go/internal/modload/init.go @@ -41,10 +41,6 @@ var ( // RootMode determines whether a module root is needed. RootMode Root - // ForceUseModules may be set to force modules to be enabled when - // GO111MODULE=auto or to report an error when GO111MODULE=off. - ForceUseModules bool - allowMissingModuleImports bool // ExplicitWriteGoMod prevents LoadPackages, ListModules, and other functions @@ -96,7 +92,7 @@ func EnterWorkspace(ctx context.Context) (exit func(), err error) { // Reset the state to a clean state. oldstate := setState(State{}) - ForceUseModules = true + LoaderState.ForceUseModules = true // Load in workspace mode. InitWorkfile() @@ -406,7 +402,7 @@ func Reset() { func setState(s State) State { oldState := State{ initialized: LoaderState.initialized, - forceUseModules: ForceUseModules, + ForceUseModules: LoaderState.ForceUseModules, rootMode: RootMode, modRoots: modRoots, modulesEnabled: cfg.ModulesEnabled, @@ -414,7 +410,7 @@ func setState(s State) State { requirements: requirements, } LoaderState.initialized = s.initialized - ForceUseModules = s.forceUseModules + LoaderState.ForceUseModules = s.ForceUseModules RootMode = s.rootMode modRoots = s.modRoots cfg.ModulesEnabled = s.modulesEnabled @@ -429,8 +425,11 @@ func setState(s State) State { } type State struct { - initialized bool - forceUseModules bool + initialized bool + + // ForceUseModules may be set to force modules to be enabled when + // GO111MODULE=auto or to report an error when GO111MODULE=off. 
+ ForceUseModules bool rootMode Root modRoots []string modulesEnabled bool @@ -465,11 +464,11 @@ func Init() { default: base.Fatalf("go: unknown environment setting GO111MODULE=%s", env) case "auto": - mustUseModules = ForceUseModules + mustUseModules = LoaderState.ForceUseModules case "on", "": mustUseModules = true case "off": - if ForceUseModules { + if LoaderState.ForceUseModules { base.Fatalf("go: modules disabled by GO111MODULE=off; see 'go help modules'") } mustUseModules = false diff --git a/src/cmd/go/internal/run/run.go b/src/cmd/go/internal/run/run.go index b81b1a007bd79d..05ea5eaa15eea4 100644 --- a/src/cmd/go/internal/run/run.go +++ b/src/cmd/go/internal/run/run.go @@ -76,7 +76,7 @@ func runRun(ctx context.Context, cmd *base.Command, args []string) { // This must be done before modload.Init, but we need to call work.BuildInit // before loading packages, since it affects package locations, e.g., // for -race and -msan. - modload.ForceUseModules = true + modload.LoaderState.ForceUseModules = true modload.RootMode = modload.NoRoot modload.AllowMissingModuleImports() modload.Init() diff --git a/src/cmd/go/internal/toolchain/select.go b/src/cmd/go/internal/toolchain/select.go index e8712613366e8c..8f55076c6282c2 100644 --- a/src/cmd/go/internal/toolchain/select.go +++ b/src/cmd/go/internal/toolchain/select.go @@ -353,7 +353,7 @@ func Exec(gotoolchain string) { // Set up modules without an explicit go.mod, to download distribution. modload.Reset() - modload.ForceUseModules = true + modload.LoaderState.ForceUseModules = true modload.RootMode = modload.NoRoot modload.Init() @@ -692,7 +692,7 @@ func maybeSwitchForGoInstallVersion(minVers string) { // command lines if we add new flags in the future. // Set up modules without an explicit go.mod, to download go.mod. - modload.ForceUseModules = true + modload.LoaderState.ForceUseModules = true modload.RootMode = modload.NoRoot modload.Init() defer modload.Reset() diff --git a/src/cmd/go/internal/work/build.go b/src/cmd/go/internal/work/build.go index 6741b39f051cd6..21bbab1bf47463 100644 --- a/src/cmd/go/internal/work/build.go +++ b/src/cmd/go/internal/work/build.go @@ -859,7 +859,7 @@ func InstallPackages(ctx context.Context, patterns []string, pkgs []*load.Packag // // See golang.org/issue/40276 for details and rationale. 
func installOutsideModule(ctx context.Context, args []string) { - modload.ForceUseModules = true + modload.LoaderState.ForceUseModules = true modload.RootMode = modload.NoRoot modload.AllowMissingModuleImports() modload.Init() diff --git a/src/cmd/go/internal/workcmd/init.go b/src/cmd/go/internal/workcmd/init.go index 02240b8189fab5..52185391c115b3 100644 --- a/src/cmd/go/internal/workcmd/init.go +++ b/src/cmd/go/internal/workcmd/init.go @@ -46,7 +46,7 @@ func init() { func runInit(ctx context.Context, cmd *base.Command, args []string) { modload.InitWorkfile() - modload.ForceUseModules = true + modload.LoaderState.ForceUseModules = true gowork := modload.WorkFilePath() if gowork == "" { diff --git a/src/cmd/go/internal/workcmd/sync.go b/src/cmd/go/internal/workcmd/sync.go index 719cf76c9bf12d..800dd15dd6f6d1 100644 --- a/src/cmd/go/internal/workcmd/sync.go +++ b/src/cmd/go/internal/workcmd/sync.go @@ -48,7 +48,7 @@ func init() { } func runSync(ctx context.Context, cmd *base.Command, args []string) { - modload.ForceUseModules = true + modload.LoaderState.ForceUseModules = true modload.InitWorkfile() if modload.WorkFilePath() == "" { base.Fatalf("go: no go.work file found\n\t(run 'go work init' first or specify path using GOWORK environment variable)") diff --git a/src/cmd/go/internal/workcmd/use.go b/src/cmd/go/internal/workcmd/use.go index afbe99d3a480db..2842163517892c 100644 --- a/src/cmd/go/internal/workcmd/use.go +++ b/src/cmd/go/internal/workcmd/use.go @@ -61,7 +61,7 @@ func init() { } func runUse(ctx context.Context, cmd *base.Command, args []string) { - modload.ForceUseModules = true + modload.LoaderState.ForceUseModules = true modload.InitWorkfile() gowork := modload.WorkFilePath() if gowork == "" { From 2e52060084ff170097347457525f0debde91aea9 Mon Sep 17 00:00:00 2001 From: Ian Alexander Date: Wed, 20 Aug 2025 19:34:40 -0400 Subject: [PATCH 076/152] cmd/go: refactor usage of `RootMode` This commit refactors usage of the global variable `RootMode` to the global LoaderState variable of the same name. This commit is part of the overall effort to eliminate global modloader state. [git-generate] cd src/cmd/go/internal/modload rf 'mv State.rootMode State.RootMode' for dir in load modcmd run tool toolchain work ; do cd ../${dir} rf 'ex { import "cmd/go/internal/modload"; modload.RootMode -> modload.LoaderState.RootMode }' done cd ../modload rf 'ex { RootMode -> LoaderState.RootMode }' rf 'add State.ForceUseModules \ // RootMode determines whether a module root is needed.' 
rf 'rm RootMode' Change-Id: Ib5e513ee570dfc3b01cc974fe32944e5e391fd82 Reviewed-on: https://go-review.googlesource.com/c/go/+/698058 LUCI-TryBot-Result: Go LUCI Reviewed-by: Michael Matloob Reviewed-by: Michael Matloob --- src/cmd/go/internal/load/godebug.go | 2 +- src/cmd/go/internal/load/pkg.go | 2 +- src/cmd/go/internal/modcmd/graph.go | 2 +- src/cmd/go/internal/modcmd/tidy.go | 2 +- src/cmd/go/internal/modcmd/vendor.go | 2 +- src/cmd/go/internal/modcmd/verify.go | 2 +- src/cmd/go/internal/modcmd/why.go | 2 +- src/cmd/go/internal/modload/import_test.go | 6 ++-- src/cmd/go/internal/modload/init.go | 33 +++++++++++----------- src/cmd/go/internal/run/run.go | 2 +- src/cmd/go/internal/tool/tool.go | 2 +- src/cmd/go/internal/toolchain/select.go | 4 +-- src/cmd/go/internal/work/build.go | 2 +- 13 files changed, 31 insertions(+), 32 deletions(-) diff --git a/src/cmd/go/internal/load/godebug.go b/src/cmd/go/internal/load/godebug.go index 8ea8ffab1aea1f..ff184384567afe 100644 --- a/src/cmd/go/internal/load/godebug.go +++ b/src/cmd/go/internal/load/godebug.go @@ -50,7 +50,7 @@ func defaultGODEBUG(p *Package, directives, testDirectives, xtestDirectives []bu return "" } goVersion := modload.MainModules.GoVersion() - if modload.RootMode == modload.NoRoot && p.Module != nil { + if modload.LoaderState.RootMode == modload.NoRoot && p.Module != nil { // This is go install pkg@version or go run pkg@version. // Use the Go version from the package. // If there isn't one, then assume Go 1.20, diff --git a/src/cmd/go/internal/load/pkg.go b/src/cmd/go/internal/load/pkg.go index 48b2e70d74b759..27a1fbfff836fa 100644 --- a/src/cmd/go/internal/load/pkg.go +++ b/src/cmd/go/internal/load/pkg.go @@ -3351,7 +3351,7 @@ func PackagesAndErrorsOutsideModule(ctx context.Context, opts PackageOpts, args if !modload.LoaderState.ForceUseModules { panic("modload.ForceUseModules must be true") } - if modload.RootMode != modload.NoRoot { + if modload.LoaderState.RootMode != modload.NoRoot { panic("modload.RootMode must be NoRoot") } diff --git a/src/cmd/go/internal/modcmd/graph.go b/src/cmd/go/internal/modcmd/graph.go index 4abae33129ab6b..5f47260e188292 100644 --- a/src/cmd/go/internal/modcmd/graph.go +++ b/src/cmd/go/internal/modcmd/graph.go @@ -58,7 +58,7 @@ func runGraph(ctx context.Context, cmd *base.Command, args []string) { base.Fatalf("go: 'go mod graph' accepts no arguments") } modload.LoaderState.ForceUseModules = true - modload.RootMode = modload.NeedRoot + modload.LoaderState.RootMode = modload.NeedRoot goVersion := graphGo.String() if goVersion != "" && gover.Compare(gover.Local(), goVersion) < 0 { diff --git a/src/cmd/go/internal/modcmd/tidy.go b/src/cmd/go/internal/modcmd/tidy.go index dde70c6d74181e..0314dcef250d81 100644 --- a/src/cmd/go/internal/modcmd/tidy.go +++ b/src/cmd/go/internal/modcmd/tidy.go @@ -120,7 +120,7 @@ func runTidy(ctx context.Context, cmd *base.Command, args []string) { // that are in 'all' but outside of the main module, we must explicitly // request that their test dependencies be included. 
modload.LoaderState.ForceUseModules = true - modload.RootMode = modload.NeedRoot + modload.LoaderState.RootMode = modload.NeedRoot goVersion := tidyGo.String() if goVersion != "" && gover.Compare(gover.Local(), goVersion) < 0 { diff --git a/src/cmd/go/internal/modcmd/vendor.go b/src/cmd/go/internal/modcmd/vendor.go index bd3d8d602e2382..dfea571c0e0317 100644 --- a/src/cmd/go/internal/modcmd/vendor.go +++ b/src/cmd/go/internal/modcmd/vendor.go @@ -78,7 +78,7 @@ func RunVendor(ctx context.Context, vendorE bool, vendorO string, args []string) base.Fatalf("go: 'go mod vendor' accepts no arguments") } modload.LoaderState.ForceUseModules = true - modload.RootMode = modload.NeedRoot + modload.LoaderState.RootMode = modload.NeedRoot loadOpts := modload.PackageOpts{ Tags: imports.AnyTags(), diff --git a/src/cmd/go/internal/modcmd/verify.go b/src/cmd/go/internal/modcmd/verify.go index ecd25d3a40e610..157c920c067321 100644 --- a/src/cmd/go/internal/modcmd/verify.go +++ b/src/cmd/go/internal/modcmd/verify.go @@ -51,7 +51,7 @@ func runVerify(ctx context.Context, cmd *base.Command, args []string) { base.Fatalf("go: verify takes no arguments") } modload.LoaderState.ForceUseModules = true - modload.RootMode = modload.NeedRoot + modload.LoaderState.RootMode = modload.NeedRoot // Only verify up to GOMAXPROCS zips at once. type token struct{} diff --git a/src/cmd/go/internal/modcmd/why.go b/src/cmd/go/internal/modcmd/why.go index 6c4bf8abab17ee..62a5387ed8841c 100644 --- a/src/cmd/go/internal/modcmd/why.go +++ b/src/cmd/go/internal/modcmd/why.go @@ -65,7 +65,7 @@ func init() { func runWhy(ctx context.Context, cmd *base.Command, args []string) { modload.InitWorkfile() modload.LoaderState.ForceUseModules = true - modload.RootMode = modload.NeedRoot + modload.LoaderState.RootMode = modload.NeedRoot modload.ExplicitWriteGoMod = true // don't write go.mod in ListModules loadOpts := modload.PackageOpts{ diff --git a/src/cmd/go/internal/modload/import_test.go b/src/cmd/go/internal/modload/import_test.go index eb4f5d64d3a3c7..f6b8bb90992735 100644 --- a/src/cmd/go/internal/modload/import_test.go +++ b/src/cmd/go/internal/modload/import_test.go @@ -60,13 +60,13 @@ func TestQueryImport(t *testing.T) { testenv.MustHaveExecPath(t, "git") oldAllowMissingModuleImports := allowMissingModuleImports - oldRootMode := RootMode + oldRootMode := LoaderState.RootMode defer func() { allowMissingModuleImports = oldAllowMissingModuleImports - RootMode = oldRootMode + LoaderState.RootMode = oldRootMode }() allowMissingModuleImports = true - RootMode = NoRoot + LoaderState.RootMode = NoRoot ctx := context.Background() rs := LoadModFile(ctx) diff --git a/src/cmd/go/internal/modload/init.go b/src/cmd/go/internal/modload/init.go index 264c02ef6db100..58b16a6599dbe3 100644 --- a/src/cmd/go/internal/modload/init.go +++ b/src/cmd/go/internal/modload/init.go @@ -38,9 +38,6 @@ import ( // // TODO(#40775): See if these can be plumbed as explicit parameters. var ( - // RootMode determines whether a module root is needed. - RootMode Root - allowMissingModuleImports bool // ExplicitWriteGoMod prevents LoadPackages, ListModules, and other functions @@ -370,7 +367,7 @@ func InitWorkfile() { // It is exported mainly for Go toolchain switching, which must process // the go.work very early at startup. 
func FindGoWork(wd string) string { - if RootMode == NoRoot { + if LoaderState.RootMode == NoRoot { return "" } @@ -403,7 +400,7 @@ func setState(s State) State { oldState := State{ initialized: LoaderState.initialized, ForceUseModules: LoaderState.ForceUseModules, - rootMode: RootMode, + RootMode: LoaderState.RootMode, modRoots: modRoots, modulesEnabled: cfg.ModulesEnabled, mainModules: MainModules, @@ -411,7 +408,7 @@ func setState(s State) State { } LoaderState.initialized = s.initialized LoaderState.ForceUseModules = s.ForceUseModules - RootMode = s.rootMode + LoaderState.RootMode = s.RootMode modRoots = s.modRoots cfg.ModulesEnabled = s.modulesEnabled MainModules = s.mainModules @@ -430,13 +427,15 @@ type State struct { // ForceUseModules may be set to force modules to be enabled when // GO111MODULE=auto or to report an error when GO111MODULE=off. ForceUseModules bool - rootMode Root - modRoots []string - modulesEnabled bool - mainModules *MainModuleSet - requirements *Requirements - workFilePath string - modfetchState modfetch.State + + // RootMode determines whether a module root is needed. + RootMode Root + modRoots []string + modulesEnabled bool + mainModules *MainModuleSet + requirements *Requirements + workFilePath string + modfetchState modfetch.State } func NewState() *State { return &State{} } @@ -495,7 +494,7 @@ func Init() { if modRoots != nil { // modRoot set before Init was called ("go mod init" does this). // No need to search for go.mod. - } else if RootMode == NoRoot { + } else if LoaderState.RootMode == NoRoot { if cfg.ModFile != "" && !base.InGOFLAGS("-modfile") { base.Fatalf("go: -modfile cannot be used with commands that ignore the current module") } @@ -510,7 +509,7 @@ func Init() { if cfg.ModFile != "" { base.Fatalf("go: cannot find main module, but -modfile was set.\n\t-modfile cannot be used to set the module root directory.") } - if RootMode == NeedRoot { + if LoaderState.RootMode == NeedRoot { base.Fatal(ErrNoModRoot) } if !mustUseModules { @@ -525,7 +524,7 @@ func Init() { // It's a bit of a peculiar thing to disallow but quite mysterious // when it happens. See golang.org/issue/26708. fmt.Fprintf(os.Stderr, "go: warning: ignoring go.mod in system temp root %v\n", os.TempDir()) - if RootMode == NeedRoot { + if LoaderState.RootMode == NeedRoot { base.Fatal(ErrNoModRoot) } if !mustUseModules { @@ -547,7 +546,7 @@ func Init() { gopath = list[0] if _, err := fsys.Stat(filepath.Join(gopath, "go.mod")); err == nil { fmt.Fprintf(os.Stderr, "go: warning: ignoring go.mod in $GOPATH %v\n", gopath) - if RootMode == NeedRoot { + if LoaderState.RootMode == NeedRoot { base.Fatal(ErrNoModRoot) } if !mustUseModules { diff --git a/src/cmd/go/internal/run/run.go b/src/cmd/go/internal/run/run.go index 05ea5eaa15eea4..d922dcdd66a551 100644 --- a/src/cmd/go/internal/run/run.go +++ b/src/cmd/go/internal/run/run.go @@ -77,7 +77,7 @@ func runRun(ctx context.Context, cmd *base.Command, args []string) { // before loading packages, since it affects package locations, e.g., // for -race and -msan. 
modload.LoaderState.ForceUseModules = true - modload.RootMode = modload.NoRoot + modload.LoaderState.RootMode = modload.NoRoot modload.AllowMissingModuleImports() modload.Init() } else { diff --git a/src/cmd/go/internal/tool/tool.go b/src/cmd/go/internal/tool/tool.go index 120ef5339bede0..ef25d17b54d892 100644 --- a/src/cmd/go/internal/tool/tool.go +++ b/src/cmd/go/internal/tool/tool.go @@ -308,7 +308,7 @@ func buildAndRunBuiltinTool(ctx context.Context, toolName, tool string, args []s // Ignore go.mod and go.work: we don't need them, and we want to be able // to run the tool even if there's an issue with the module or workspace the // user happens to be in. - modload.RootMode = modload.NoRoot + modload.LoaderState.RootMode = modload.NoRoot runFunc := func(b *work.Builder, ctx context.Context, a *work.Action) error { cmdline := str.StringList(builtTool(a), a.Args) diff --git a/src/cmd/go/internal/toolchain/select.go b/src/cmd/go/internal/toolchain/select.go index 8f55076c6282c2..4f46b19c12167b 100644 --- a/src/cmd/go/internal/toolchain/select.go +++ b/src/cmd/go/internal/toolchain/select.go @@ -354,7 +354,7 @@ func Exec(gotoolchain string) { // Set up modules without an explicit go.mod, to download distribution. modload.Reset() modload.LoaderState.ForceUseModules = true - modload.RootMode = modload.NoRoot + modload.LoaderState.RootMode = modload.NoRoot modload.Init() // Download and unpack toolchain module into module cache. @@ -693,7 +693,7 @@ func maybeSwitchForGoInstallVersion(minVers string) { // Set up modules without an explicit go.mod, to download go.mod. modload.LoaderState.ForceUseModules = true - modload.RootMode = modload.NoRoot + modload.LoaderState.RootMode = modload.NoRoot modload.Init() defer modload.Reset() diff --git a/src/cmd/go/internal/work/build.go b/src/cmd/go/internal/work/build.go index 21bbab1bf47463..adc98f93138007 100644 --- a/src/cmd/go/internal/work/build.go +++ b/src/cmd/go/internal/work/build.go @@ -860,7 +860,7 @@ func InstallPackages(ctx context.Context, patterns []string, pkgs []*load.Packag // See golang.org/issue/40276 for details and rationale. func installOutsideModule(ctx context.Context, args []string) { modload.LoaderState.ForceUseModules = true - modload.RootMode = modload.NoRoot + modload.LoaderState.RootMode = modload.NoRoot modload.AllowMissingModuleImports() modload.Init() BuildInit() From 11d5484190f80823c9b6312fd40f6491e864111b Mon Sep 17 00:00:00 2001 From: Michael Anthony Knyszek Date: Tue, 7 Oct 2025 16:10:19 +0000 Subject: [PATCH 077/152] runtime: fix self-deadlock on sbrk platforms The sbrk mem.go implementation doesn't enforce being called on the systemstack, but it can call back into itself if there's a stack growth. Because the sbrk implementation requires acquiring memlock, it can self-deadlock. For the most part the mem.go API is called on the system stack, but there are cases where we call sysAlloc on the regular Go stack. This is fine in general, except on sbrk platforms because of the aforementioned deadlock. This change, rather than adding a new invariant to mem.go, switches to the systemstack in the mem.go API implementation for sbrk platforms. 
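To make the shape of the fix concrete, here is a small user-space sketch of the wrapping pattern the change applies (illustrative only: onFixedStack stands in for runtime.systemstack, memlock is modeled as an ordinary sync.Mutex, and memAlloc is reduced to a placeholder; none of this is the actual runtime code):

    package main

    import (
        "fmt"
        "sync"
    )

    var memlock sync.Mutex

    // onFixedStack models runtime.systemstack: it runs fn in a context where
    // the event that re-enters the allocator (a stack growth) cannot happen.
    // Here that is trivially true; in the runtime, systemstack provides it.
    func onFixedStack(fn func()) { fn() }

    // sysAlloc models the fixed sysAllocOS: the entire memlock-protected
    // region runs inside onFixedStack, so nothing within it can call back
    // into the allocator while the lock is held.
    func sysAlloc(n int) (p int) {
        onFixedStack(func() {
            memlock.Lock()
            defer memlock.Unlock()
            p = n // stand-in for memAlloc(n)
        })
        return p
    }

    func main() {
        fmt.Println(sysAlloc(42))
    }

Before the change, the equivalent of sysAlloc took memlock directly on the regular goroutine stack, so a stack growth inside the critical section could re-enter the allocator and block on the already-held lock.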
Change-Id: Ie0f0ea80a8d7578cdeabc8252107e64a5e633856 Reviewed-on: https://go-review.googlesource.com/c/go/+/709775 Reviewed-by: Cherry Mui LUCI-TryBot-Result: Go LUCI --- src/runtime/mem_sbrk.go | 158 ++++++++++++++++++++++++++-------------- 1 file changed, 103 insertions(+), 55 deletions(-) diff --git a/src/runtime/mem_sbrk.go b/src/runtime/mem_sbrk.go index 05f0fdb5d74ed6..5284bbd0009865 100644 --- a/src/runtime/mem_sbrk.go +++ b/src/runtime/mem_sbrk.go @@ -48,6 +48,16 @@ type memHdrPtr uintptr func (p memHdrPtr) ptr() *memHdr { return (*memHdr)(unsafe.Pointer(p)) } func (p *memHdrPtr) set(x *memHdr) { *p = memHdrPtr(unsafe.Pointer(x)) } +// memAlloc allocates n bytes from the brk reservation, or if it's full, +// the system. +// +// memlock must be held. +// +// memAlloc must be called on the system stack, otherwise a stack growth +// could cause us to call back into it. Since memlock is held, that could +// lead to a self-deadlock. +// +//go:systemstack func memAlloc(n uintptr) unsafe.Pointer { if p := memAllocNoGrow(n); p != nil { return p @@ -55,6 +65,15 @@ func memAlloc(n uintptr) unsafe.Pointer { return sbrk(n) } +// memAllocNoGrow attempts to allocate n bytes from the existing brk. +// +// memlock must be held. +// +// memAlloc must be called on the system stack, otherwise a stack growth +// could cause us to call back into it. Since memlock is held, that could +// lead to a self-deadlock. +// +//go:systemstack func memAllocNoGrow(n uintptr) unsafe.Pointer { n = memRound(n) var prevp *memHdr @@ -78,6 +97,15 @@ func memAllocNoGrow(n uintptr) unsafe.Pointer { return nil } +// memFree makes [ap, ap+n) available for reallocation by memAlloc. +// +// memlock must be held. +// +// memAlloc must be called on the system stack, otherwise a stack growth +// could cause us to call back into it. Since memlock is held, that could +// lead to a self-deadlock. +// +//go:systemstack func memFree(ap unsafe.Pointer, n uintptr) { n = memRound(n) memclrNoHeapPointers(ap, n) @@ -122,6 +150,15 @@ func memFree(ap unsafe.Pointer, n uintptr) { } } +// memCheck checks invariants around free list management. +// +// memlock must be held. +// +// memAlloc must be called on the system stack, otherwise a stack growth +// could cause us to call back into it. Since memlock is held, that could +// lead to a self-deadlock. +// +//go:systemstack func memCheck() { if !memDebug { return @@ -158,26 +195,31 @@ func initBloc() { } func sysAllocOS(n uintptr, _ string) unsafe.Pointer { - lock(&memlock) - p := memAlloc(n) - memCheck() - unlock(&memlock) - return p + var p uintptr + systemstack(func() { + lock(&memlock) + p = uintptr(memAlloc(n)) + memCheck() + unlock(&memlock) + }) + return unsafe.Pointer(p) } func sysFreeOS(v unsafe.Pointer, n uintptr) { - lock(&memlock) - if uintptr(v)+n == bloc { - // Address range being freed is at the end of memory, - // so record a new lower value for end of memory. - // Can't actually shrink address space because segment is shared. - memclrNoHeapPointers(v, n) - bloc -= n - } else { - memFree(v, n) - memCheck() - } - unlock(&memlock) + systemstack(func() { + lock(&memlock) + if uintptr(v)+n == bloc { + // Address range being freed is at the end of memory, + // so record a new lower value for end of memory. + // Can't actually shrink address space because segment is shared. 
+ memclrNoHeapPointers(v, n) + bloc -= n + } else { + memFree(v, n) + memCheck() + } + unlock(&memlock) + }) } func sysUnusedOS(v unsafe.Pointer, n uintptr) { @@ -202,49 +244,55 @@ func sysFaultOS(v unsafe.Pointer, n uintptr) { } func sysReserveOS(v unsafe.Pointer, n uintptr, _ string) unsafe.Pointer { - lock(&memlock) - var p unsafe.Pointer - if uintptr(v) == bloc { - // Address hint is the current end of memory, - // so try to extend the address space. - p = sbrk(n) - } - if p == nil && v == nil { - p = memAlloc(n) - memCheck() - } - unlock(&memlock) - return p + var p uintptr + systemstack(func() { + lock(&memlock) + if uintptr(v) == bloc { + // Address hint is the current end of memory, + // so try to extend the address space. + p = uintptr(sbrk(n)) + } + if p == 0 && v == nil { + p = uintptr(memAlloc(n)) + memCheck() + } + unlock(&memlock) + }) + return unsafe.Pointer(p) } func sysReserveAlignedSbrk(size, align uintptr) (unsafe.Pointer, uintptr) { - lock(&memlock) - if p := memAllocNoGrow(size + align); p != nil { - // We can satisfy the reservation from the free list. - // Trim off the unaligned parts. - pAligned := alignUp(uintptr(p), align) - if startLen := pAligned - uintptr(p); startLen > 0 { - memFree(p, startLen) + var p uintptr + systemstack(func() { + lock(&memlock) + if base := memAllocNoGrow(size + align); base != nil { + // We can satisfy the reservation from the free list. + // Trim off the unaligned parts. + start := alignUp(uintptr(base), align) + if startLen := start - uintptr(base); startLen > 0 { + memFree(base, startLen) + } + end := start + size + if endLen := (uintptr(base) + size + align) - end; endLen > 0 { + memFree(unsafe.Pointer(end), endLen) + } + memCheck() + unlock(&memlock) + p = start + return } - end := pAligned + size - if endLen := (uintptr(p) + size + align) - end; endLen > 0 { - memFree(unsafe.Pointer(end), endLen) + + // Round up bloc to align, then allocate size. + p = alignUp(bloc, align) + r := sbrk(p + size - bloc) + if r == nil { + p, size = 0, 0 + } else if l := p - uintptr(r); l > 0 { + // Free the area we skipped over for alignment. + memFree(r, l) + memCheck() } - memCheck() unlock(&memlock) - return unsafe.Pointer(pAligned), size - } - - // Round up bloc to align, then allocate size. - p := alignUp(bloc, align) - r := sbrk(p + size - bloc) - if r == nil { - p, size = 0, 0 - } else if l := p - uintptr(r); l > 0 { - // Free the area we skipped over for alignment. - memFree(r, l) - memCheck() - } - unlock(&memlock) + }) return unsafe.Pointer(p), size } From 6f7926589d03180863aa05cbb55a9d9c63e76b99 Mon Sep 17 00:00:00 2001 From: Ian Alexander Date: Wed, 20 Aug 2025 19:44:11 -0400 Subject: [PATCH 078/152] cmd/go: refactor usage of `modRoots` This commit refactors usage of the global variable `modRoots` to the global LoaderState field of the same name. This commit is part of the overall effort to eliminate global modloader state. [git-generate] cd src/cmd/go/internal/modload rf 'ex { modRoots -> LoaderState.modRoots }' rf 'add State.RootMode \ // These are primarily used to initialize the MainModules, and should\ // be eventually superseded by them but are still used in cases where\ // the module roots are required but MainModules has not been\ // initialized yet. 
Set to the modRoots of the main modules.\ // modRoots != nil implies len(modRoots) > 0' rf 'rm modRoots' Change-Id: Ie9e1f3d468cfceee25efefaf945b10492318b079 Reviewed-on: https://go-review.googlesource.com/c/go/+/698059 Reviewed-by: Michael Matloob Reviewed-by: Michael Matloob LUCI-TryBot-Result: Go LUCI --- src/cmd/go/internal/modload/init.go | 61 ++++++++++++++-------------- src/cmd/go/internal/modload/load.go | 2 +- src/cmd/go/internal/modload/query.go | 2 +- 3 files changed, 32 insertions(+), 33 deletions(-) diff --git a/src/cmd/go/internal/modload/init.go b/src/cmd/go/internal/modload/init.go index 58b16a6599dbe3..dc0d78499b9cd0 100644 --- a/src/cmd/go/internal/modload/init.go +++ b/src/cmd/go/internal/modload/init.go @@ -53,14 +53,7 @@ var ( // Variables set in Init. var ( - - // These are primarily used to initialize the MainModules, and should be - // eventually superseded by them but are still used in cases where the module - // roots are required but MainModules hasn't been initialized yet. Set to - // the modRoots of the main modules. - // modRoots != nil implies len(modRoots) > 0 - modRoots []string - gopath string + gopath string ) // EnterModule resets MainModules and requirements to refer to just this one module. @@ -70,7 +63,7 @@ func EnterModule(ctx context.Context, enterModroot string) { workFilePath = "" // Force module mode modfetch.Reset() - modRoots = []string{enterModroot} + LoaderState.modRoots = []string{enterModroot} LoadModFile(ctx) } @@ -401,7 +394,7 @@ func setState(s State) State { initialized: LoaderState.initialized, ForceUseModules: LoaderState.ForceUseModules, RootMode: LoaderState.RootMode, - modRoots: modRoots, + modRoots: LoaderState.modRoots, modulesEnabled: cfg.ModulesEnabled, mainModules: MainModules, requirements: requirements, @@ -409,7 +402,7 @@ func setState(s State) State { LoaderState.initialized = s.initialized LoaderState.ForceUseModules = s.ForceUseModules LoaderState.RootMode = s.RootMode - modRoots = s.modRoots + LoaderState.modRoots = s.modRoots cfg.ModulesEnabled = s.modulesEnabled MainModules = s.mainModules requirements = s.requirements @@ -429,7 +422,13 @@ type State struct { ForceUseModules bool // RootMode determines whether a module root is needed. - RootMode Root + RootMode Root + + // These are primarily used to initialize the MainModules, and should + // be eventually superseded by them but are still used in cases where + // the module roots are required but MainModules has not been + // initialized yet. Set to the modRoots of the main modules. + // modRoots != nil implies len(modRoots) > 0 modRoots []string modulesEnabled bool mainModules *MainModuleSet @@ -491,14 +490,14 @@ func Init() { if os.Getenv("GCM_INTERACTIVE") == "" { os.Setenv("GCM_INTERACTIVE", "never") } - if modRoots != nil { + if LoaderState.modRoots != nil { // modRoot set before Init was called ("go mod init" does this). // No need to search for go.mod. } else if LoaderState.RootMode == NoRoot { if cfg.ModFile != "" && !base.InGOFLAGS("-modfile") { base.Fatalf("go: -modfile cannot be used with commands that ignore the current module") } - modRoots = nil + LoaderState.modRoots = nil } else if workFilePath != "" { // We're in workspace mode, which implies module mode. 
if cfg.ModFile != "" { @@ -531,7 +530,7 @@ func Init() { return } } else { - modRoots = []string{modRoot} + LoaderState.modRoots = []string{modRoot} } } if cfg.ModFile != "" && !strings.HasSuffix(cfg.ModFile, ".mod") { @@ -566,7 +565,7 @@ func Init() { // be called until the command is installed and flags are parsed. Instead of // calling Init and Enabled, the main package can call this function. func WillBeEnabled() bool { - if modRoots != nil || cfg.ModulesEnabled { + if LoaderState.modRoots != nil || cfg.ModulesEnabled { // Already enabled. return true } @@ -619,7 +618,7 @@ func FindGoMod(wd string) string { // (usually through MustModRoot). func Enabled() bool { Init() - return modRoots != nil || cfg.ModulesEnabled + return LoaderState.modRoots != nil || cfg.ModulesEnabled } func VendorDir() string { @@ -651,7 +650,7 @@ func inWorkspaceMode() bool { // does not require a main module. func HasModRoot() bool { Init() - return modRoots != nil + return LoaderState.modRoots != nil } // MustHaveModRoot checks that a main module or main modules are present, @@ -880,16 +879,16 @@ func loadModFile(ctx context.Context, opts *PackageOpts) (*Requirements, error) var workFile *modfile.WorkFile if inWorkspaceMode() { var err error - workFile, modRoots, err = LoadWorkFile(workFilePath) + workFile, LoaderState.modRoots, err = LoadWorkFile(workFilePath) if err != nil { return nil, err } - for _, modRoot := range modRoots { + for _, modRoot := range LoaderState.modRoots { sumFile := strings.TrimSuffix(modFilePath(modRoot), ".mod") + ".sum" modfetch.WorkspaceGoSumFiles = append(modfetch.WorkspaceGoSumFiles, sumFile) } modfetch.GoSumFile = workFilePath + ".sum" - } else if len(modRoots) == 0 { + } else if len(LoaderState.modRoots) == 0 { // We're in module mode, but not inside a module. // // Commands like 'go build', 'go run', 'go list' have no go.mod file to @@ -908,9 +907,9 @@ func loadModFile(ctx context.Context, opts *PackageOpts) (*Requirements, error) // // See golang.org/issue/32027. } else { - modfetch.GoSumFile = strings.TrimSuffix(modFilePath(modRoots[0]), ".mod") + ".sum" + modfetch.GoSumFile = strings.TrimSuffix(modFilePath(LoaderState.modRoots[0]), ".mod") + ".sum" } - if len(modRoots) == 0 { + if len(LoaderState.modRoots) == 0 { // TODO(#49228): Instead of creating a fake module with an empty modroot, // make MainModules.Len() == 0 mean that we're in module mode but not inside // any module. @@ -956,7 +955,7 @@ func loadModFile(ctx context.Context, opts *PackageOpts) (*Requirements, error) var mainModules []module.Version var indices []*modFileIndex var errs []error - for _, modroot := range modRoots { + for _, modroot := range LoaderState.modRoots { gomod := modFilePath(modroot) var fixed bool data, f, err := ReadModFile(gomod, fixVersion(ctx, &fixed)) @@ -1017,7 +1016,7 @@ func loadModFile(ctx context.Context, opts *PackageOpts) (*Requirements, error) return nil, errors.Join(errs...) } - MainModules = makeMainModules(mainModules, modRoots, modFiles, indices, workFile) + MainModules = makeMainModules(mainModules, LoaderState.modRoots, modFiles, indices, workFile) setDefaultBuildMod() // possibly enable automatic vendoring rs := requirementsFromModFiles(ctx, workFile, modFiles, opts) @@ -1120,7 +1119,7 @@ func CheckReservedModulePath(path string) error { // packages at multiple versions from the same module). 
func CreateModFile(ctx context.Context, modPath string) { modRoot := base.Cwd() - modRoots = []string{modRoot} + LoaderState.modRoots = []string{modRoot} Init() modFilePath := modFilePath(modRoot) if _, err := fsys.Stat(modFilePath); err == nil { @@ -1509,7 +1508,7 @@ func setDefaultBuildMod() { cfg.BuildMod = "readonly" return } - if modRoots == nil { + if LoaderState.modRoots == nil { if allowMissingModuleImports { cfg.BuildMod = "mod" } else { @@ -1518,7 +1517,7 @@ func setDefaultBuildMod() { return } - if len(modRoots) >= 1 { + if len(LoaderState.modRoots) >= 1 { var goVersion string var versionSource string if inWorkspaceMode() { @@ -1537,10 +1536,10 @@ func setDefaultBuildMod() { if workFilePath != "" { vendorDir = filepath.Join(filepath.Dir(workFilePath), "vendor") } else { - if len(modRoots) != 1 { - panic(fmt.Errorf("outside workspace mode, but have %v modRoots", modRoots)) + if len(LoaderState.modRoots) != 1 { + panic(fmt.Errorf("outside workspace mode, but have %v modRoots", LoaderState.modRoots)) } - vendorDir = filepath.Join(modRoots[0], "vendor") + vendorDir = filepath.Join(LoaderState.modRoots[0], "vendor") } if fi, err := fsys.Stat(vendorDir); err == nil && fi.IsDir() { if goVersion != "" { diff --git a/src/cmd/go/internal/modload/load.go b/src/cmd/go/internal/modload/load.go index 8b2be3b300e9e1..7fba712f952e2a 100644 --- a/src/cmd/go/internal/modload/load.go +++ b/src/cmd/go/internal/modload/load.go @@ -271,7 +271,7 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma case m.IsLocal(): // Evaluate list of file system directories on first iteration. if m.Dirs == nil { - matchModRoots := modRoots + matchModRoots := LoaderState.modRoots if opts.MainModule != (module.Version{}) { matchModRoots = []string{MainModules.ModRoot(opts.MainModule)} } diff --git a/src/cmd/go/internal/modload/query.go b/src/cmd/go/internal/modload/query.go index c4cf55442ba69b..65934b0d69e517 100644 --- a/src/cmd/go/internal/modload/query.go +++ b/src/cmd/go/internal/modload/query.go @@ -716,7 +716,7 @@ func QueryPattern(ctx context.Context, pattern, query string, current func(strin var mainModuleMatches []module.Version for _, mainModule := range MainModules.Versions() { - m := match(mainModule, modRoots, true) + m := match(mainModule, LoaderState.modRoots, true) if len(m.Pkgs) > 0 { if query != "upgrade" && query != "patch" { return nil, nil, &QueryMatchesPackagesInMainModuleError{ From 6e4007e8cffbb870e6b606307ab7308236ecefb9 Mon Sep 17 00:00:00 2001 From: Neal Patel Date: Thu, 11 Sep 2025 16:27:04 -0400 Subject: [PATCH 079/152] crypto/x509: mitigate DoS vector when intermediate certificate contains DSA public key An attacker could craft an intermediate X.509 certificate containing a DSA public key and can crash a remote host with an unauthenticated call to any endpoint that verifies the certificate chain. Thank you to Jakub Ciolek for reporting this issue. 
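The crash comes from a bare interface type assertion during chain building: public keys were compared through an Equal method that RSA, ECDSA and Ed25519 keys implement but DSA keys do not, so the assertion panics on a DSA SubjectPublicKeyInfo. The fix below compares the raw, canonically encoded SPKI bytes instead. A minimal, self-contained sketch of both behaviors (the pubKeyEqual interface and dsaLikeKey type are simplified stand-ins, and the DER bytes are placeholders rather than real keys):

    package main

    import (
        "bytes"
        "fmt"
    )

    // pubKeyEqual mirrors the interface the old comparison asserted to.
    type pubKeyEqual interface{ Equal(x any) bool }

    // dsaLikeKey stands in for a parsed DSA public key, which has no Equal method.
    type dsaLikeKey struct{}

    func main() {
        var pub any = dsaLikeKey{}

        // Old approach: a bare type assertion panics for key types without Equal.
        func() {
            defer func() { fmt.Println("recovered:", recover()) }()
            _ = pub.(pubKeyEqual).Equal(pub)
        }()

        // New approach: compare the raw SPKI encodings, which is well defined
        // for every key type the parser accepts.
        spkiA := []byte{0x30, 0x0d} // placeholder DER bytes
        spkiB := []byte{0x30, 0x0d}
        fmt.Println("same key:", bytes.Equal(spkiA, spkiB))
    }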
Fixes CVE-2025-58188 Fixes #75675 Change-Id: I2ecbb87b9b8268dbc55c8795891e596ab60f0088 Reviewed-on: https://go-internal-review.googlesource.com/c/go/+/2780 Reviewed-by: Damien Neil Reviewed-by: Roland Shoemaker Reviewed-on: https://go-review.googlesource.com/c/go/+/709853 Reviewed-by: Carlos Amedee Auto-Submit: Michael Pratt LUCI-TryBot-Result: Go LUCI --- src/crypto/x509/verify.go | 5 +- src/crypto/x509/verify_test.go | 127 +++++++++++++++++++++++++++++++++ 2 files changed, 131 insertions(+), 1 deletion(-) diff --git a/src/crypto/x509/verify.go b/src/crypto/x509/verify.go index 7cc0fb2e3e0385..755c1db96c1edb 100644 --- a/src/crypto/x509/verify.go +++ b/src/crypto/x509/verify.go @@ -927,7 +927,10 @@ func alreadyInChain(candidate *Certificate, chain []*Certificate) bool { if !bytes.Equal(candidate.RawSubject, cert.RawSubject) { continue } - if !candidate.PublicKey.(pubKeyEqual).Equal(cert.PublicKey) { + // We enforce the canonical encoding of SPKI (by only allowing the + // correct AI paremeter encodings in parseCertificate), so it's safe to + // directly compare the raw bytes. + if !bytes.Equal(candidate.RawSubjectPublicKeyInfo, cert.RawSubjectPublicKeyInfo) { continue } var certSAN *pkix.Extension diff --git a/src/crypto/x509/verify_test.go b/src/crypto/x509/verify_test.go index 7991f49946d587..5595f99ea5e43a 100644 --- a/src/crypto/x509/verify_test.go +++ b/src/crypto/x509/verify_test.go @@ -6,6 +6,7 @@ package x509 import ( "crypto" + "crypto/dsa" "crypto/ecdsa" "crypto/elliptic" "crypto/rand" @@ -3048,3 +3049,129 @@ func TestInvalidPolicyWithAnyKeyUsage(t *testing.T) { t.Fatalf("unexpected error, got %q, want %q", err, expectedErr) } } + +func TestCertificateChainSignedByECDSA(t *testing.T) { + caKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + t.Fatal(err) + } + root := &Certificate{ + SerialNumber: big.NewInt(1), + Subject: pkix.Name{CommonName: "X"}, + NotBefore: time.Now().Add(-time.Hour), + NotAfter: time.Now().Add(365 * 24 * time.Hour), + IsCA: true, + KeyUsage: KeyUsageCertSign | KeyUsageCRLSign, + BasicConstraintsValid: true, + } + caDER, err := CreateCertificate(rand.Reader, root, root, &caKey.PublicKey, caKey) + if err != nil { + t.Fatal(err) + } + root, err = ParseCertificate(caDER) + if err != nil { + t.Fatal(err) + } + + leafKey, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + leaf := &Certificate{ + SerialNumber: big.NewInt(42), + Subject: pkix.Name{CommonName: "leaf"}, + NotBefore: time.Now().Add(-10 * time.Minute), + NotAfter: time.Now().Add(24 * time.Hour), + KeyUsage: KeyUsageDigitalSignature, + ExtKeyUsage: []ExtKeyUsage{ExtKeyUsageServerAuth}, + BasicConstraintsValid: true, + } + leafDER, err := CreateCertificate(rand.Reader, leaf, root, &leafKey.PublicKey, caKey) + if err != nil { + t.Fatal(err) + } + leaf, err = ParseCertificate(leafDER) + if err != nil { + t.Fatal(err) + } + + inter, err := ParseCertificate(dsaSelfSignedCNX(t)) + if err != nil { + t.Fatal(err) + } + + inters := NewCertPool() + inters.AddCert(root) + inters.AddCert(inter) + + wantErr := "certificate signed by unknown authority" + _, err = leaf.Verify(VerifyOptions{Intermediates: inters, Roots: NewCertPool()}) + if !strings.Contains(err.Error(), wantErr) { + t.Errorf("got %v, want %q", err, wantErr) + } +} + +// dsaSelfSignedCNX produces DER-encoded +// certificate with the properties: +// +// Subject=Issuer=CN=X +// DSA SPKI +// Matching inner/outer signature OIDs +// Dummy ECDSA signature +func dsaSelfSignedCNX(t *testing.T) []byte { + t.Helper() + var params 
dsa.Parameters + if err := dsa.GenerateParameters(¶ms, rand.Reader, dsa.L1024N160); err != nil { + t.Fatal(err) + } + + var dsaPriv dsa.PrivateKey + dsaPriv.Parameters = params + if err := dsa.GenerateKey(&dsaPriv, rand.Reader); err != nil { + t.Fatal(err) + } + dsaPub := &dsaPriv.PublicKey + + type dsaParams struct{ P, Q, G *big.Int } + paramDER, err := asn1.Marshal(dsaParams{dsaPub.P, dsaPub.Q, dsaPub.G}) + if err != nil { + t.Fatal(err) + } + yDER, err := asn1.Marshal(dsaPub.Y) + if err != nil { + t.Fatal(err) + } + + spki := publicKeyInfo{ + Algorithm: pkix.AlgorithmIdentifier{ + Algorithm: oidPublicKeyDSA, + Parameters: asn1.RawValue{FullBytes: paramDER}, + }, + PublicKey: asn1.BitString{Bytes: yDER, BitLength: 8 * len(yDER)}, + } + + rdn := pkix.Name{CommonName: "X"}.ToRDNSequence() + b, err := asn1.Marshal(rdn) + if err != nil { + t.Fatal(err) + } + rawName := asn1.RawValue{FullBytes: b} + + algoIdent := pkix.AlgorithmIdentifier{Algorithm: oidSignatureDSAWithSHA256} + tbs := tbsCertificate{ + Version: 0, + SerialNumber: big.NewInt(1002), + SignatureAlgorithm: algoIdent, + Issuer: rawName, + Validity: validity{NotBefore: time.Now().Add(-time.Hour), NotAfter: time.Now().Add(24 * time.Hour)}, + Subject: rawName, + PublicKey: spki, + } + c := certificate{ + TBSCertificate: tbs, + SignatureAlgorithm: algoIdent, + SignatureValue: asn1.BitString{Bytes: []byte{0}, BitLength: 8}, + } + dsaDER, err := asn1.Marshal(c) + if err != nil { + t.Fatal(err) + } + return dsaDER +} From 3fc4c79fdbb17b9b29ea9f8c29dd780df075d4c4 Mon Sep 17 00:00:00 2001 From: Neal Patel Date: Mon, 15 Sep 2025 16:31:22 -0400 Subject: [PATCH 080/152] crypto/x509: improve domain name verification Don't use domainToReverseLabels to check if domain names are valid, since it is not particularly performant, and can contribute to DoS vectors. Instead just iterate over the name and enforce the properties we care about. This also enforces that DNS names, both in SANs and name constraints, are valid. We previously allowed invalid SANs, because some intermediates had these weird names (see #23995), but there are currently no trusted intermediates that have this property, and since we target the web PKI, supporting this particular case is not a high priority. Thank you to Jakub Ciolek for reporting this issue. Fixes CVE-2025-58187 Fixes #75681 Change-Id: I6ebce847dcbe5fc63ef2f9a74f53f11c4c56d3d1 Reviewed-on: https://go-internal-review.googlesource.com/c/go/+/2820 Reviewed-by: Damien Neil Reviewed-by: Roland Shoemaker Reviewed-on: https://go-review.googlesource.com/c/go/+/709854 Auto-Submit: Michael Pratt Reviewed-by: Carlos Amedee LUCI-TryBot-Result: Go LUCI --- src/crypto/x509/name_constraints_test.go | 75 ++--------------------- src/crypto/x509/parser.go | 77 ++++++++++++++---------- src/crypto/x509/parser_test.go | 43 +++++++++++++ src/crypto/x509/verify.go | 1 + 4 files changed, 96 insertions(+), 100 deletions(-) diff --git a/src/crypto/x509/name_constraints_test.go b/src/crypto/x509/name_constraints_test.go index a5851845164d10..831fcbc8d2eb82 100644 --- a/src/crypto/x509/name_constraints_test.go +++ b/src/crypto/x509/name_constraints_test.go @@ -1456,63 +1456,7 @@ var nameConstraintsTests = []nameConstraintsTest{ expectedError: "incompatible key usage", }, - // An invalid DNS SAN should be detected only at validation time so - // that we can process CA certificates in the wild that have invalid SANs. 
- // See https://github.com/golang/go/issues/23995 - - // #77: an invalid DNS or mail SAN will not be detected if name constraint - // checking is not triggered. - { - roots: make([]constraintsSpec, 1), - intermediates: [][]constraintsSpec{ - { - {}, - }, - }, - leaf: leafSpec{ - sans: []string{"dns:this is invalid", "email:this @ is invalid"}, - }, - }, - - // #78: an invalid DNS SAN will be detected if any name constraint checking - // is triggered. - { - roots: []constraintsSpec{ - { - bad: []string{"uri:"}, - }, - }, - intermediates: [][]constraintsSpec{ - { - {}, - }, - }, - leaf: leafSpec{ - sans: []string{"dns:this is invalid"}, - }, - expectedError: "cannot parse dnsName", - }, - - // #79: an invalid email SAN will be detected if any name constraint - // checking is triggered. - { - roots: []constraintsSpec{ - { - bad: []string{"uri:"}, - }, - }, - intermediates: [][]constraintsSpec{ - { - {}, - }, - }, - leaf: leafSpec{ - sans: []string{"email:this @ is invalid"}, - }, - expectedError: "cannot parse rfc822Name", - }, - - // #80: if several EKUs are requested, satisfying any of them is sufficient. + // #77: if several EKUs are requested, satisfying any of them is sufficient. { roots: make([]constraintsSpec, 1), intermediates: [][]constraintsSpec{ @@ -1527,7 +1471,7 @@ var nameConstraintsTests = []nameConstraintsTest{ requestedEKUs: []ExtKeyUsage{ExtKeyUsageClientAuth, ExtKeyUsageEmailProtection}, }, - // #81: EKUs that are not asserted in VerifyOpts are not required to be + // #78: EKUs that are not asserted in VerifyOpts are not required to be // nested. { roots: make([]constraintsSpec, 1), @@ -1546,7 +1490,7 @@ var nameConstraintsTests = []nameConstraintsTest{ }, }, - // #82: a certificate without SANs and CN is accepted in a constrained chain. + // #79: a certificate without SANs and CN is accepted in a constrained chain. { roots: []constraintsSpec{ { @@ -1563,7 +1507,7 @@ var nameConstraintsTests = []nameConstraintsTest{ }, }, - // #83: a certificate without SANs and with a CN that does not parse as a + // #80: a certificate without SANs and with a CN that does not parse as a // hostname is accepted in a constrained chain. { roots: []constraintsSpec{ @@ -1582,7 +1526,7 @@ var nameConstraintsTests = []nameConstraintsTest{ }, }, - // #84: a certificate with SANs and CN is accepted in a constrained chain. + // #81: a certificate with SANs and CN is accepted in a constrained chain. { roots: []constraintsSpec{ { @@ -1600,14 +1544,7 @@ var nameConstraintsTests = []nameConstraintsTest{ }, }, - // #85: .example.com is an invalid DNS name, it should not match the - // constraint example.com. 
- { - roots: []constraintsSpec{{ok: []string{"dns:example.com"}}}, - leaf: leafSpec{sans: []string{"dns:.example.com"}}, - expectedError: "cannot parse dnsName \".example.com\"", - }, - // #86: URIs with IPv6 addresses with zones and ports are rejected + // #82: URIs with IPv6 addresses with zones and ports are rejected { roots: []constraintsSpec{ { diff --git a/src/crypto/x509/parser.go b/src/crypto/x509/parser.go index 4abcc1b7b590e2..9d6bfd6e95f949 100644 --- a/src/crypto/x509/parser.go +++ b/src/crypto/x509/parser.go @@ -413,10 +413,14 @@ func parseSANExtension(der cryptobyte.String) (dnsNames, emailAddresses []string if err := isIA5String(email); err != nil { return errors.New("x509: SAN rfc822Name is malformed") } + parsed, ok := parseRFC2821Mailbox(email) + if !ok || (ok && !domainNameValid(parsed.domain, false)) { + return errors.New("x509: SAN rfc822Name is malformed") + } emailAddresses = append(emailAddresses, email) case nameTypeDNS: name := string(data) - if err := isIA5String(name); err != nil { + if err := isIA5String(name); err != nil || (err == nil && !domainNameValid(name, false)) { return errors.New("x509: SAN dNSName is malformed") } dnsNames = append(dnsNames, string(name)) @@ -426,14 +430,9 @@ func parseSANExtension(der cryptobyte.String) (dnsNames, emailAddresses []string return errors.New("x509: SAN uniformResourceIdentifier is malformed") } uri, err := url.Parse(uriStr) - if err != nil { + if err != nil || (err == nil && uri.Host != "" && !domainNameValid(uri.Host, false)) { return fmt.Errorf("x509: cannot parse URI %q: %s", uriStr, err) } - if len(uri.Host) > 0 { - if _, ok := domainToReverseLabels(uri.Host); !ok { - return fmt.Errorf("x509: cannot parse URI %q: invalid domain", uriStr) - } - } uris = append(uris, uri) case nameTypeIP: switch len(data) { @@ -598,15 +597,7 @@ func parseNameConstraintsExtension(out *Certificate, e pkix.Extension) (unhandle return nil, nil, nil, nil, errors.New("x509: invalid constraint value: " + err.Error()) } - trimmedDomain := domain - if len(trimmedDomain) > 0 && trimmedDomain[0] == '.' { - // constraints can have a leading - // period to exclude the domain - // itself, but that's not valid in a - // normal domain name. - trimmedDomain = trimmedDomain[1:] - } - if _, ok := domainToReverseLabels(trimmedDomain); !ok { + if !domainNameValid(domain, true) { return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse dnsName constraint %q", domain) } dnsNames = append(dnsNames, domain) @@ -647,12 +638,7 @@ func parseNameConstraintsExtension(out *Certificate, e pkix.Extension) (unhandle return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse rfc822Name constraint %q", constraint) } } else { - // Otherwise it's a domain name. - domain := constraint - if len(domain) > 0 && domain[0] == '.' { - domain = domain[1:] - } - if _, ok := domainToReverseLabels(domain); !ok { + if !domainNameValid(constraint, true) { return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse rfc822Name constraint %q", constraint) } } @@ -668,15 +654,7 @@ func parseNameConstraintsExtension(out *Certificate, e pkix.Extension) (unhandle return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse URI constraint %q: cannot be IP address", domain) } - trimmedDomain := domain - if len(trimmedDomain) > 0 && trimmedDomain[0] == '.' { - // constraints can have a leading - // period to exclude the domain itself, - // but that's not valid in a normal - // domain name. 
- trimmedDomain = trimmedDomain[1:] - } - if _, ok := domainToReverseLabels(trimmedDomain); !ok { + if !domainNameValid(domain, true) { return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse URI constraint %q", domain) } uriDomains = append(uriDomains, domain) @@ -1317,3 +1295,40 @@ func ParseRevocationList(der []byte) (*RevocationList, error) { return rl, nil } + +// domainNameValid does minimal domain name validity checking. In particular it +// enforces the following properties: +// - names cannot have the trailing period +// - names can only have a leading period if constraint is true +// - names must be <= 253 characters +// - names cannot have empty labels +// - names cannot labels that are longer than 63 characters +// +// Note that this does not enforce the LDH requirements for domain names. +func domainNameValid(s string, constraint bool) bool { + if len(s) == 0 && constraint { + return true + } + if len(s) == 0 || (!constraint && s[0] == '.') || s[len(s)-1] == '.' || len(s) > 253 { + return false + } + lastDot := -1 + if constraint && s[0] == '.' { + s = s[1:] + } + + for i := 0; i <= len(s); i++ { + if i == len(s) || s[i] == '.' { + labelLen := i + if lastDot >= 0 { + labelLen -= lastDot + 1 + } + if labelLen == 0 || labelLen > 63 { + return false + } + lastDot = i + } + } + + return true +} diff --git a/src/crypto/x509/parser_test.go b/src/crypto/x509/parser_test.go index 3b9d9aed826b01..1b553e362e48a0 100644 --- a/src/crypto/x509/parser_test.go +++ b/src/crypto/x509/parser_test.go @@ -8,6 +8,7 @@ import ( "encoding/asn1" "encoding/pem" "os" + "strings" "testing" cryptobyte_asn1 "golang.org/x/crypto/cryptobyte/asn1" @@ -251,3 +252,45 @@ d5l1tRhScKu2NBgm74nYmJxJYgvuTA38wGhRrGU= } } } + +func TestDomainNameValid(t *testing.T) { + for _, tc := range []struct { + name string + dnsName string + constraint bool + valid bool + }{ + {"empty name, name", "", false, false}, + {"empty name, constraint", "", true, true}, + {"empty label, name", "a..a", false, false}, + {"empty label, constraint", "a..a", true, false}, + {"period, name", ".", false, false}, + {"period, constraint", ".", true, false}, // TODO(roland): not entirely clear if this is a valid constraint (require at least one label?) + {"valid, name", "a.b.c", false, true}, + {"valid, constraint", "a.b.c", true, true}, + {"leading period, name", ".a.b.c", false, false}, + {"leading period, constraint", ".a.b.c", true, true}, + {"trailing period, name", "a.", false, false}, + {"trailing period, constraint", "a.", true, false}, + {"bare label, name", "a", false, true}, + {"bare label, constraint", "a", true, true}, + {"254 char label, name", strings.Repeat("a.a", 84) + "aaa", false, false}, + {"254 char label, constraint", strings.Repeat("a.a", 84) + "aaa", true, false}, + {"253 char label, name", strings.Repeat("a.a", 84) + "aa", false, false}, + {"253 char label, constraint", strings.Repeat("a.a", 84) + "aa", true, false}, + {"64 char single label, name", strings.Repeat("a", 64), false, false}, + {"64 char single label, constraint", strings.Repeat("a", 64), true, false}, + {"63 char single label, name", strings.Repeat("a", 63), false, true}, + {"63 char single label, constraint", strings.Repeat("a", 63), true, true}, + {"64 char label, name", "a." + strings.Repeat("a", 64), false, false}, + {"64 char label, constraint", "a." + strings.Repeat("a", 64), true, false}, + {"63 char label, name", "a." + strings.Repeat("a", 63), false, true}, + {"63 char label, constraint", "a." 
+ strings.Repeat("a", 63), true, true}, + } { + t.Run(tc.name, func(t *testing.T) { + if tc.valid != domainNameValid(tc.dnsName, tc.constraint) { + t.Errorf("domainNameValid(%q, %t) = %v; want %v", tc.dnsName, tc.constraint, !tc.valid, tc.valid) + } + }) + } +} diff --git a/src/crypto/x509/verify.go b/src/crypto/x509/verify.go index 755c1db96c1edb..058153fbe73461 100644 --- a/src/crypto/x509/verify.go +++ b/src/crypto/x509/verify.go @@ -391,6 +391,7 @@ func parseRFC2821Mailbox(in string) (mailbox rfc2821Mailbox, ok bool) { // domainToReverseLabels converts a textual domain name like foo.example.com to // the list of labels in reverse order, e.g. ["com", "example", "foo"]. func domainToReverseLabels(domain string) (reverseLabels []string, ok bool) { + reverseLabels = make([]string, 0, strings.Count(domain, ".")+1) for len(domain) > 0 { if i := strings.LastIndexByte(domain, '.'); i == -1 { reverseLabels = append(reverseLabels, domain) From 9b9d02c5a015910ce57024788de2ff254c6cfca6 Mon Sep 17 00:00:00 2001 From: Nicholas Husin Date: Tue, 30 Sep 2025 14:02:38 -0400 Subject: [PATCH 081/152] net/http: add httpcookiemaxnum GODEBUG option to limit number of cookies parsed When handling HTTP headers, net/http does not currently limit the number of cookies that can be parsed. The only limitation that exists is for the size of the entire HTTP header, which is controlled by MaxHeaderBytes (defaults to 1 MB). Unfortunately, this allows a malicious actor to send HTTP headers which contain a massive amount of small cookies, such that as much cookies as possible can be fitted within the MaxHeaderBytes limitation. Internally, this causes us to allocate a massive number of Cookie struct. For example, a 1 MB HTTP header with cookies that repeats "a=;" will cause an allocation of ~66 MB in the heap. This can serve as a way for malicious actors to induce memory exhaustion. To fix this, we will now limit the number of cookies we are willing to parse to 3000 by default. This behavior can be changed by setting a new GODEBUG option: GODEBUG=httpcookiemaxnum. httpcookiemaxnum can be set to allow a higher or lower cookie limit. Setting it to 0 will also allow an infinite number of cookies to be parsed. Thanks to jub0bs for reporting this issue. For #75672 Fixes CVE-2025-58186 Change-Id: Ied58b3bc8acf5d11c880f881f36ecbf1d5d52622 Reviewed-on: https://go-internal-review.googlesource.com/c/go/+/2720 Reviewed-by: Roland Shoemaker Reviewed-by: Damien Neil Reviewed-on: https://go-review.googlesource.com/c/go/+/709855 Reviewed-by: Carlos Amedee LUCI-TryBot-Result: Go LUCI Auto-Submit: Michael Pratt --- doc/godebug.md | 10 ++ src/internal/godebugs/table.go | 1 + src/net/http/cookie.go | 59 +++++++++- src/net/http/cookie_test.go | 206 ++++++++++++++++++++++----------- src/runtime/metrics/doc.go | 5 + 5 files changed, 206 insertions(+), 75 deletions(-) diff --git a/doc/godebug.md b/doc/godebug.md index aaa0f9dd55e570..c12ce5311d90d1 100644 --- a/doc/godebug.md +++ b/doc/godebug.md @@ -153,6 +153,16 @@ for example, see the [runtime documentation](/pkg/runtime#hdr-Environment_Variables) and the [go command documentation](/cmd/go#hdr-Build_and_test_caching). +### Go 1.26 + +Go 1.26 added a new `httpcookiemaxnum` setting that controls the maximum number +of cookies that net/http will accept when parsing HTTP headers. If the number of +cookie in a header exceeds the number set in `httpcookiemaxnum`, cookie parsing +will fail early. The default value is `httpcookiemaxnum=3000`. 
Setting +`httpcookiemaxnum=0` will allow the cookie parsing to accept an indefinite +number of cookies. To avoid denial of service attacks, this setting and default +was backported to Go 1.25.2 and Go 1.24.8. + ### Go 1.25 Go 1.25 added a new `decoratemappings` setting that controls whether the Go diff --git a/src/internal/godebugs/table.go b/src/internal/godebugs/table.go index 2d008825459bb2..852305e8553aab 100644 --- a/src/internal/godebugs/table.go +++ b/src/internal/godebugs/table.go @@ -42,6 +42,7 @@ var All = []Info{ {Name: "http2client", Package: "net/http"}, {Name: "http2debug", Package: "net/http", Opaque: true}, {Name: "http2server", Package: "net/http"}, + {Name: "httpcookiemaxnum", Package: "net/http", Changed: 24, Old: "0"}, {Name: "httplaxcontentlength", Package: "net/http", Changed: 22, Old: "1"}, {Name: "httpmuxgo121", Package: "net/http", Changed: 22, Old: "1"}, {Name: "httpservecontentkeepheaders", Package: "net/http", Changed: 23, Old: "1"}, diff --git a/src/net/http/cookie.go b/src/net/http/cookie.go index efe6cc3e77e5b5..f74bc1043c509e 100644 --- a/src/net/http/cookie.go +++ b/src/net/http/cookie.go @@ -7,6 +7,7 @@ package http import ( "errors" "fmt" + "internal/godebug" "log" "net" "net/http/internal/ascii" @@ -16,6 +17,8 @@ import ( "time" ) +var httpcookiemaxnum = godebug.New("httpcookiemaxnum") + // A Cookie represents an HTTP cookie as sent in the Set-Cookie header of an // HTTP response or the Cookie header of an HTTP request. // @@ -58,16 +61,37 @@ const ( ) var ( - errBlankCookie = errors.New("http: blank cookie") - errEqualNotFoundInCookie = errors.New("http: '=' not found in cookie") - errInvalidCookieName = errors.New("http: invalid cookie name") - errInvalidCookieValue = errors.New("http: invalid cookie value") + errBlankCookie = errors.New("http: blank cookie") + errEqualNotFoundInCookie = errors.New("http: '=' not found in cookie") + errInvalidCookieName = errors.New("http: invalid cookie name") + errInvalidCookieValue = errors.New("http: invalid cookie value") + errCookieNumLimitExceeded = errors.New("http: number of cookies exceeded limit") ) +const defaultCookieMaxNum = 3000 + +func cookieNumWithinMax(cookieNum int) bool { + withinDefaultMax := cookieNum <= defaultCookieMaxNum + if httpcookiemaxnum.Value() == "" { + return withinDefaultMax + } + if customMax, err := strconv.Atoi(httpcookiemaxnum.Value()); err == nil { + withinCustomMax := customMax == 0 || cookieNum <= customMax + if withinDefaultMax != withinCustomMax { + httpcookiemaxnum.IncNonDefault() + } + return withinCustomMax + } + return withinDefaultMax +} + // ParseCookie parses a Cookie header value and returns all the cookies // which were set in it. Since the same cookie name can appear multiple times // the returned Values can contain more than one value for a given key. func ParseCookie(line string) ([]*Cookie, error) { + if !cookieNumWithinMax(strings.Count(line, ";") + 1) { + return nil, errCookieNumLimitExceeded + } parts := strings.Split(textproto.TrimString(line), ";") if len(parts) == 1 && parts[0] == "" { return nil, errBlankCookie @@ -197,11 +221,21 @@ func ParseSetCookie(line string) (*Cookie, error) { // readSetCookies parses all "Set-Cookie" values from // the header h and returns the successfully parsed Cookies. +// +// If the amount of cookies exceeds CookieNumLimit, and httpcookielimitnum +// GODEBUG option is not explicitly turned off, this function will silently +// fail and return an empty slice. 
func readSetCookies(h Header) []*Cookie { cookieCount := len(h["Set-Cookie"]) if cookieCount == 0 { return []*Cookie{} } + // Cookie limit was unfortunately introduced at a later point in time. + // As such, we can only fail by returning an empty slice rather than + // explicit error. + if !cookieNumWithinMax(cookieCount) { + return []*Cookie{} + } cookies := make([]*Cookie, 0, cookieCount) for _, line := range h["Set-Cookie"] { if cookie, err := ParseSetCookie(line); err == nil { @@ -329,13 +363,28 @@ func (c *Cookie) Valid() error { // readCookies parses all "Cookie" values from the header h and // returns the successfully parsed Cookies. // -// if filter isn't empty, only cookies of that name are returned. +// If filter isn't empty, only cookies of that name are returned. +// +// If the amount of cookies exceeds CookieNumLimit, and httpcookielimitnum +// GODEBUG option is not explicitly turned off, this function will silently +// fail and return an empty slice. func readCookies(h Header, filter string) []*Cookie { lines := h["Cookie"] if len(lines) == 0 { return []*Cookie{} } + // Cookie limit was unfortunately introduced at a later point in time. + // As such, we can only fail by returning an empty slice rather than + // explicit error. + cookieCount := 0 + for _, line := range lines { + cookieCount += strings.Count(line, ";") + 1 + } + if !cookieNumWithinMax(cookieCount) { + return []*Cookie{} + } + cookies := make([]*Cookie, 0, len(lines)+strings.Count(lines[0], ";")) for _, line := range lines { line = textproto.TrimString(line) diff --git a/src/net/http/cookie_test.go b/src/net/http/cookie_test.go index 8db4957b2cc37d..f452b4ec76830f 100644 --- a/src/net/http/cookie_test.go +++ b/src/net/http/cookie_test.go @@ -11,6 +11,7 @@ import ( "log" "os" "reflect" + "slices" "strings" "testing" "time" @@ -255,16 +256,17 @@ func TestAddCookie(t *testing.T) { } var readSetCookiesTests = []struct { - Header Header - Cookies []*Cookie + header Header + cookies []*Cookie + godebug string }{ { - Header{"Set-Cookie": {"Cookie-1=v$1"}}, - []*Cookie{{Name: "Cookie-1", Value: "v$1", Raw: "Cookie-1=v$1"}}, + header: Header{"Set-Cookie": {"Cookie-1=v$1"}}, + cookies: []*Cookie{{Name: "Cookie-1", Value: "v$1", Raw: "Cookie-1=v$1"}}, }, { - Header{"Set-Cookie": {"NID=99=YsDT5i3E-CXax-; expires=Wed, 23-Nov-2011 01:05:03 GMT; path=/; domain=.google.ch; HttpOnly"}}, - []*Cookie{{ + header: Header{"Set-Cookie": {"NID=99=YsDT5i3E-CXax-; expires=Wed, 23-Nov-2011 01:05:03 GMT; path=/; domain=.google.ch; HttpOnly"}}, + cookies: []*Cookie{{ Name: "NID", Value: "99=YsDT5i3E-CXax-", Path: "/", @@ -276,8 +278,8 @@ var readSetCookiesTests = []struct { }}, }, { - Header{"Set-Cookie": {".ASPXAUTH=7E3AA; expires=Wed, 07-Mar-2012 14:25:06 GMT; path=/; HttpOnly"}}, - []*Cookie{{ + header: Header{"Set-Cookie": {".ASPXAUTH=7E3AA; expires=Wed, 07-Mar-2012 14:25:06 GMT; path=/; HttpOnly"}}, + cookies: []*Cookie{{ Name: ".ASPXAUTH", Value: "7E3AA", Path: "/", @@ -288,8 +290,8 @@ var readSetCookiesTests = []struct { }}, }, { - Header{"Set-Cookie": {"ASP.NET_SessionId=foo; path=/; HttpOnly"}}, - []*Cookie{{ + header: Header{"Set-Cookie": {"ASP.NET_SessionId=foo; path=/; HttpOnly"}}, + cookies: []*Cookie{{ Name: "ASP.NET_SessionId", Value: "foo", Path: "/", @@ -298,8 +300,8 @@ var readSetCookiesTests = []struct { }}, }, { - Header{"Set-Cookie": {"samesitedefault=foo; SameSite"}}, - []*Cookie{{ + header: Header{"Set-Cookie": {"samesitedefault=foo; SameSite"}}, + cookies: []*Cookie{{ Name: "samesitedefault", Value: "foo", SameSite: 
SameSiteDefaultMode, @@ -307,8 +309,8 @@ var readSetCookiesTests = []struct { }}, }, { - Header{"Set-Cookie": {"samesiteinvalidisdefault=foo; SameSite=invalid"}}, - []*Cookie{{ + header: Header{"Set-Cookie": {"samesiteinvalidisdefault=foo; SameSite=invalid"}}, + cookies: []*Cookie{{ Name: "samesiteinvalidisdefault", Value: "foo", SameSite: SameSiteDefaultMode, @@ -316,8 +318,8 @@ var readSetCookiesTests = []struct { }}, }, { - Header{"Set-Cookie": {"samesitelax=foo; SameSite=Lax"}}, - []*Cookie{{ + header: Header{"Set-Cookie": {"samesitelax=foo; SameSite=Lax"}}, + cookies: []*Cookie{{ Name: "samesitelax", Value: "foo", SameSite: SameSiteLaxMode, @@ -325,8 +327,8 @@ var readSetCookiesTests = []struct { }}, }, { - Header{"Set-Cookie": {"samesitestrict=foo; SameSite=Strict"}}, - []*Cookie{{ + header: Header{"Set-Cookie": {"samesitestrict=foo; SameSite=Strict"}}, + cookies: []*Cookie{{ Name: "samesitestrict", Value: "foo", SameSite: SameSiteStrictMode, @@ -334,8 +336,8 @@ var readSetCookiesTests = []struct { }}, }, { - Header{"Set-Cookie": {"samesitenone=foo; SameSite=None"}}, - []*Cookie{{ + header: Header{"Set-Cookie": {"samesitenone=foo; SameSite=None"}}, + cookies: []*Cookie{{ Name: "samesitenone", Value: "foo", SameSite: SameSiteNoneMode, @@ -345,47 +347,66 @@ var readSetCookiesTests = []struct { // Make sure we can properly read back the Set-Cookie headers we create // for values containing spaces or commas: { - Header{"Set-Cookie": {`special-1=a z`}}, - []*Cookie{{Name: "special-1", Value: "a z", Raw: `special-1=a z`}}, + header: Header{"Set-Cookie": {`special-1=a z`}}, + cookies: []*Cookie{{Name: "special-1", Value: "a z", Raw: `special-1=a z`}}, }, { - Header{"Set-Cookie": {`special-2=" z"`}}, - []*Cookie{{Name: "special-2", Value: " z", Quoted: true, Raw: `special-2=" z"`}}, + header: Header{"Set-Cookie": {`special-2=" z"`}}, + cookies: []*Cookie{{Name: "special-2", Value: " z", Quoted: true, Raw: `special-2=" z"`}}, }, { - Header{"Set-Cookie": {`special-3="a "`}}, - []*Cookie{{Name: "special-3", Value: "a ", Quoted: true, Raw: `special-3="a "`}}, + header: Header{"Set-Cookie": {`special-3="a "`}}, + cookies: []*Cookie{{Name: "special-3", Value: "a ", Quoted: true, Raw: `special-3="a "`}}, }, { - Header{"Set-Cookie": {`special-4=" "`}}, - []*Cookie{{Name: "special-4", Value: " ", Quoted: true, Raw: `special-4=" "`}}, + header: Header{"Set-Cookie": {`special-4=" "`}}, + cookies: []*Cookie{{Name: "special-4", Value: " ", Quoted: true, Raw: `special-4=" "`}}, }, { - Header{"Set-Cookie": {`special-5=a,z`}}, - []*Cookie{{Name: "special-5", Value: "a,z", Raw: `special-5=a,z`}}, + header: Header{"Set-Cookie": {`special-5=a,z`}}, + cookies: []*Cookie{{Name: "special-5", Value: "a,z", Raw: `special-5=a,z`}}, }, { - Header{"Set-Cookie": {`special-6=",z"`}}, - []*Cookie{{Name: "special-6", Value: ",z", Quoted: true, Raw: `special-6=",z"`}}, + header: Header{"Set-Cookie": {`special-6=",z"`}}, + cookies: []*Cookie{{Name: "special-6", Value: ",z", Quoted: true, Raw: `special-6=",z"`}}, }, { - Header{"Set-Cookie": {`special-7=a,`}}, - []*Cookie{{Name: "special-7", Value: "a,", Raw: `special-7=a,`}}, + header: Header{"Set-Cookie": {`special-7=a,`}}, + cookies: []*Cookie{{Name: "special-7", Value: "a,", Raw: `special-7=a,`}}, }, { - Header{"Set-Cookie": {`special-8=","`}}, - []*Cookie{{Name: "special-8", Value: ",", Quoted: true, Raw: `special-8=","`}}, + header: Header{"Set-Cookie": {`special-8=","`}}, + cookies: []*Cookie{{Name: "special-8", Value: ",", Quoted: true, Raw: `special-8=","`}}, }, // 
Make sure we can properly read back the Set-Cookie headers // for names containing spaces: { - Header{"Set-Cookie": {`special-9 =","`}}, - []*Cookie{{Name: "special-9", Value: ",", Quoted: true, Raw: `special-9 =","`}}, + header: Header{"Set-Cookie": {`special-9 =","`}}, + cookies: []*Cookie{{Name: "special-9", Value: ",", Quoted: true, Raw: `special-9 =","`}}, }, // Quoted values (issue #46443) { - Header{"Set-Cookie": {`cookie="quoted"`}}, - []*Cookie{{Name: "cookie", Value: "quoted", Quoted: true, Raw: `cookie="quoted"`}}, + header: Header{"Set-Cookie": {`cookie="quoted"`}}, + cookies: []*Cookie{{Name: "cookie", Value: "quoted", Quoted: true, Raw: `cookie="quoted"`}}, + }, + { + header: Header{"Set-Cookie": slices.Repeat([]string{"a="}, defaultCookieMaxNum+1)}, + cookies: []*Cookie{}, + }, + { + header: Header{"Set-Cookie": slices.Repeat([]string{"a="}, 10)}, + cookies: []*Cookie{}, + godebug: "httpcookiemaxnum=5", + }, + { + header: Header{"Set-Cookie": strings.Split(strings.Repeat(";a=", defaultCookieMaxNum+1)[1:], ";")}, + cookies: slices.Repeat([]*Cookie{{Name: "a", Value: "", Quoted: false, Raw: "a="}}, defaultCookieMaxNum+1), + godebug: "httpcookiemaxnum=0", + }, + { + header: Header{"Set-Cookie": strings.Split(strings.Repeat(";a=", defaultCookieMaxNum+1)[1:], ";")}, + cookies: slices.Repeat([]*Cookie{{Name: "a", Value: "", Quoted: false, Raw: "a="}}, defaultCookieMaxNum+1), + godebug: fmt.Sprintf("httpcookiemaxnum=%v", defaultCookieMaxNum+1), }, // TODO(bradfitz): users have reported seeing this in the @@ -405,79 +426,103 @@ func toJSON(v any) string { func TestReadSetCookies(t *testing.T) { for i, tt := range readSetCookiesTests { + t.Setenv("GODEBUG", tt.godebug) for n := 0; n < 2; n++ { // to verify readSetCookies doesn't mutate its input - c := readSetCookies(tt.Header) - if !reflect.DeepEqual(c, tt.Cookies) { - t.Errorf("#%d readSetCookies: have\n%s\nwant\n%s\n", i, toJSON(c), toJSON(tt.Cookies)) + c := readSetCookies(tt.header) + if !reflect.DeepEqual(c, tt.cookies) { + t.Errorf("#%d readSetCookies: have\n%s\nwant\n%s\n", i, toJSON(c), toJSON(tt.cookies)) } } } } var readCookiesTests = []struct { - Header Header - Filter string - Cookies []*Cookie + header Header + filter string + cookies []*Cookie + godebug string }{ { - Header{"Cookie": {"Cookie-1=v$1", "c2=v2"}}, - "", - []*Cookie{ + header: Header{"Cookie": {"Cookie-1=v$1", "c2=v2"}}, + filter: "", + cookies: []*Cookie{ {Name: "Cookie-1", Value: "v$1"}, {Name: "c2", Value: "v2"}, }, }, { - Header{"Cookie": {"Cookie-1=v$1", "c2=v2"}}, - "c2", - []*Cookie{ + header: Header{"Cookie": {"Cookie-1=v$1", "c2=v2"}}, + filter: "c2", + cookies: []*Cookie{ {Name: "c2", Value: "v2"}, }, }, { - Header{"Cookie": {"Cookie-1=v$1; c2=v2"}}, - "", - []*Cookie{ + header: Header{"Cookie": {"Cookie-1=v$1; c2=v2"}}, + filter: "", + cookies: []*Cookie{ {Name: "Cookie-1", Value: "v$1"}, {Name: "c2", Value: "v2"}, }, }, { - Header{"Cookie": {"Cookie-1=v$1; c2=v2"}}, - "c2", - []*Cookie{ + header: Header{"Cookie": {"Cookie-1=v$1; c2=v2"}}, + filter: "c2", + cookies: []*Cookie{ {Name: "c2", Value: "v2"}, }, }, { - Header{"Cookie": {`Cookie-1="v$1"; c2="v2"`}}, - "", - []*Cookie{ + header: Header{"Cookie": {`Cookie-1="v$1"; c2="v2"`}}, + filter: "", + cookies: []*Cookie{ {Name: "Cookie-1", Value: "v$1", Quoted: true}, {Name: "c2", Value: "v2", Quoted: true}, }, }, { - Header{"Cookie": {`Cookie-1="v$1"; c2=v2;`}}, - "", - []*Cookie{ + header: Header{"Cookie": {`Cookie-1="v$1"; c2=v2;`}}, + filter: "", + cookies: []*Cookie{ {Name: "Cookie-1", Value: 
"v$1", Quoted: true}, {Name: "c2", Value: "v2"}, }, }, { - Header{"Cookie": {``}}, - "", - []*Cookie{}, + header: Header{"Cookie": {``}}, + filter: "", + cookies: []*Cookie{}, + }, + // GODEBUG=httpcookiemaxnum should work regardless if all cookies are sent + // via one "Cookie" field, or multiple fields. + { + header: Header{"Cookie": {strings.Repeat(";a=", defaultCookieMaxNum+1)[1:]}}, + cookies: []*Cookie{}, + }, + { + header: Header{"Cookie": slices.Repeat([]string{"a="}, 10)}, + cookies: []*Cookie{}, + godebug: "httpcookiemaxnum=5", + }, + { + header: Header{"Cookie": {strings.Repeat(";a=", defaultCookieMaxNum+1)[1:]}}, + cookies: slices.Repeat([]*Cookie{{Name: "a", Value: "", Quoted: false}}, defaultCookieMaxNum+1), + godebug: "httpcookiemaxnum=0", + }, + { + header: Header{"Cookie": slices.Repeat([]string{"a="}, defaultCookieMaxNum+1)}, + cookies: slices.Repeat([]*Cookie{{Name: "a", Value: "", Quoted: false}}, defaultCookieMaxNum+1), + godebug: fmt.Sprintf("httpcookiemaxnum=%v", defaultCookieMaxNum+1), }, } func TestReadCookies(t *testing.T) { for i, tt := range readCookiesTests { + t.Setenv("GODEBUG", tt.godebug) for n := 0; n < 2; n++ { // to verify readCookies doesn't mutate its input - c := readCookies(tt.Header, tt.Filter) - if !reflect.DeepEqual(c, tt.Cookies) { - t.Errorf("#%d readCookies:\nhave: %s\nwant: %s\n", i, toJSON(c), toJSON(tt.Cookies)) + c := readCookies(tt.header, tt.filter) + if !reflect.DeepEqual(c, tt.cookies) { + t.Errorf("#%d readCookies:\nhave: %s\nwant: %s\n", i, toJSON(c), toJSON(tt.cookies)) } } } @@ -690,6 +735,7 @@ func TestParseCookie(t *testing.T) { line string cookies []*Cookie err error + godebug string }{ { line: "Cookie-1=v$1", @@ -723,8 +769,28 @@ func TestParseCookie(t *testing.T) { line: "k1=\\", err: errInvalidCookieValue, }, + { + line: strings.Repeat(";a=", defaultCookieMaxNum+1)[1:], + err: errCookieNumLimitExceeded, + }, + { + line: strings.Repeat(";a=", 10)[1:], + err: errCookieNumLimitExceeded, + godebug: "httpcookiemaxnum=5", + }, + { + line: strings.Repeat(";a=", defaultCookieMaxNum+1)[1:], + cookies: slices.Repeat([]*Cookie{{Name: "a", Value: "", Quoted: false}}, defaultCookieMaxNum+1), + godebug: "httpcookiemaxnum=0", + }, + { + line: strings.Repeat(";a=", defaultCookieMaxNum+1)[1:], + cookies: slices.Repeat([]*Cookie{{Name: "a", Value: "", Quoted: false}}, defaultCookieMaxNum+1), + godebug: fmt.Sprintf("httpcookiemaxnum=%v", defaultCookieMaxNum+1), + }, } for i, tt := range tests { + t.Setenv("GODEBUG", tt.godebug) gotCookies, gotErr := ParseCookie(tt.line) if !errors.Is(gotErr, tt.err) { t.Errorf("#%d ParseCookie got error %v, want error %v", i, gotErr, tt.err) diff --git a/src/runtime/metrics/doc.go b/src/runtime/metrics/doc.go index e40ce25ff9d1bc..05646132ce4e69 100644 --- a/src/runtime/metrics/doc.go +++ b/src/runtime/metrics/doc.go @@ -309,6 +309,11 @@ Below is the full list of supported metrics, ordered lexicographically. The number of non-default behaviors executed by the net/http package due to a non-default GODEBUG=http2server=... setting. + /godebug/non-default-behavior/httpcookiemaxnum:events + The number of non-default behaviors executed by the net/http + package due to a non-default GODEBUG=httpcookiemaxnum=... + setting. + /godebug/non-default-behavior/httplaxcontentlength:events The number of non-default behaviors executed by the net/http package due to a non-default GODEBUG=httplaxcontentlength=... 
From 8709a41d5ef7321f486a1857f189c3fee20e8edd Mon Sep 17 00:00:00 2001 From: Nicholas Husin Date: Wed, 3 Sep 2025 09:30:56 -0400 Subject: [PATCH 082/152] encoding/asn1: prevent memory exhaustion when parsing using internal/saferio Within parseSequenceOf, reflect.MakeSlice is being used to pre-allocate a slice that is needed in order to fully validate the given DER payload. The size of the slice allocated is also multiple times larger than the input DER: - When using asn1.Unmarshal directly, the allocated slice is ~28x larger. - When passing in DER using x509.ParseCertificateRequest, the allocated slice is ~48x larger. - When passing in DER using ocsp.ParseResponse, the allocated slice is ~137x larger. As a result, a malicious actor can craft a big empty DER payload, resulting in an unnecessarily large allocation of memory. This can be a way to cause memory exhaustion. To prevent this, we now use SliceCapWithSize within internal/saferio to enforce a memory allocation cap. Thanks to Jakub Ciolek for reporting this issue. For #75671 Fixes CVE-2025-58185 Change-Id: Id50e76187eda43f594be75e516b9ca1d2ae6f428 Reviewed-on: https://go-internal-review.googlesource.com/c/go/+/2700 Reviewed-by: Roland Shoemaker Reviewed-by: Damien Neil Reviewed-on: https://go-review.googlesource.com/c/go/+/709856 Reviewed-by: Carlos Amedee LUCI-TryBot-Result: Go LUCI Auto-Submit: Michael Pratt --- src/encoding/asn1/asn1.go | 10 ++++++++- src/encoding/asn1/asn1_test.go | 38 ++++++++++++++++++++++++++++++++++ src/go/build/deps_test.go | 2 +- 3 files changed, 48 insertions(+), 2 deletions(-) diff --git a/src/encoding/asn1/asn1.go b/src/encoding/asn1/asn1.go index 0b64f06d368b53..f4be515b98ef1c 100644 --- a/src/encoding/asn1/asn1.go +++ b/src/encoding/asn1/asn1.go @@ -22,6 +22,7 @@ package asn1 import ( "errors" "fmt" + "internal/saferio" "math" "math/big" "reflect" @@ -666,10 +667,17 @@ func parseSequenceOf(bytes []byte, sliceType reflect.Type, elemType reflect.Type offset += t.length numElements++ } - ret = reflect.MakeSlice(sliceType, numElements, numElements) + elemSize := uint64(elemType.Size()) + safeCap := saferio.SliceCapWithSize(elemSize, uint64(numElements)) + if safeCap < 0 { + err = SyntaxError{fmt.Sprintf("%s slice too big: %d elements of %d bytes", elemType.Kind(), numElements, elemSize)} + return + } + ret = reflect.MakeSlice(sliceType, 0, safeCap) params := fieldParameters{} offset := 0 for i := 0; i < numElements; i++ { + ret = reflect.Append(ret, reflect.Zero(elemType)) offset, err = parseField(ret.Index(i), bytes, offset, params) if err != nil { return diff --git a/src/encoding/asn1/asn1_test.go b/src/encoding/asn1/asn1_test.go index 0597740bd5e5ec..41cc0ba50ec304 100644 --- a/src/encoding/asn1/asn1_test.go +++ b/src/encoding/asn1/asn1_test.go @@ -7,10 +7,12 @@ package asn1 import ( "bytes" "encoding/hex" + "errors" "fmt" "math" "math/big" "reflect" + "runtime" "strings" "testing" "time" @@ -1216,3 +1218,39 @@ func TestImplicitTypeRoundtrip(t *testing.T) { t.Fatalf("Unexpected diff after roundtripping struct\na: %#v\nb: %#v", a, b) } } + +func TestParsingMemoryConsumption(t *testing.T) { + // Craft a syntactically valid, but empty, ~10 MB DER bomb. A successful + // unmarshal of this bomb should yield ~280 MB. However, the parsing should + // fail due to the empty content; and, in such cases, we want to make sure + // that we do not unnecessarily allocate memory.
+ derBomb := make([]byte, 10_000_000) + for i := range derBomb { + derBomb[i] = 0x30 + } + derBomb = append([]byte{0x30, 0x83, 0x98, 0x96, 0x80}, derBomb...) + + var m runtime.MemStats + runtime.GC() + runtime.ReadMemStats(&m) + memBefore := m.TotalAlloc + + var out []struct { + Id []int + Critical bool `asn1:"optional"` + Value []byte + } + _, err := Unmarshal(derBomb, &out) + if !errors.As(err, &SyntaxError{}) { + t.Fatalf("Incorrect error result: want (%v), but got (%v) instead", &SyntaxError{}, err) + } + + runtime.ReadMemStats(&m) + memDiff := m.TotalAlloc - memBefore + + // Ensure that the memory allocated does not exceed 10<<21 (~20 MB) when + // the parsing fails. + if memDiff > 10<<21 { + t.Errorf("Too much memory allocated while parsing DER: %v MiB", memDiff/1024/1024) + } +} diff --git a/src/go/build/deps_test.go b/src/go/build/deps_test.go index c76b254b23ffc8..d6a18c19fab024 100644 --- a/src/go/build/deps_test.go +++ b/src/go/build/deps_test.go @@ -564,7 +564,7 @@ var depsRules = ` # CRYPTO-MATH is crypto that exposes math/big APIs - no cgo, net; fmt now ok. - CRYPTO, FMT, math/big + CRYPTO, FMT, math/big, internal/saferio < crypto/internal/boring/bbig < crypto/internal/fips140cache < crypto/rand From 3ee761739b0cbb074f5a6e8b28b491664ec1414a Mon Sep 17 00:00:00 2001 From: Michael Pratt Date: Mon, 6 Oct 2025 17:28:37 -0400 Subject: [PATCH 083/152] runtime: free spanQueue on P destroy Span queues must be empty when destroying a P since we are outside of the mark phase. But we don't actually free them, so they simply sit around using memory. More importantly, they are still in work.spanSPMCs.all, so freeDeadSpanSPMCs must continue traversing past them until the end of time. Prior to CL 709575, keeping them in work.spanSPMCs.all allowed programs with low GOMAXPROCS to continue triggering the bug if they ever had high GOMAXPROCS in the past. The spanSPMCs list is singly-linked, so it is not efficient to remove a random element from the middle. Instead, we simply mark it as dead to all freeDeadSpanSPMCs to free it when it scans the full list. For #75771. Change-Id: I6a6a636cfa22a4bdef0c273d083c91553e923fe5 Reviewed-on: https://go-review.googlesource.com/c/go/+/709656 LUCI-TryBot-Result: Go LUCI Reviewed-by: Michael Knyszek --- src/runtime/mgcmark_greenteagc.go | 34 +++++++++++++++++++++++++++++ src/runtime/mgcmark_nogreenteagc.go | 3 +++ src/runtime/proc.go | 2 ++ 3 files changed, 39 insertions(+) diff --git a/src/runtime/mgcmark_greenteagc.go b/src/runtime/mgcmark_greenteagc.go index 7f8d60349ffb67..6ebd7ced81b901 100644 --- a/src/runtime/mgcmark_greenteagc.go +++ b/src/runtime/mgcmark_greenteagc.go @@ -618,6 +618,40 @@ func (q *spanQueue) refill(r *spanSPMC) objptr { return q.tryGetFast() } +// destroy frees all chains in an empty spanQueue. +// +// Preconditions: +// - World is stopped. +// - GC is outside of the mark phase. +// - (Therefore) the queue is empty. +func (q *spanQueue) destroy() { + assertWorldStopped() + if gcphase != _GCoff { + throw("spanQueue.destroy during the mark phase") + } + if !q.empty() { + throw("spanQueue.destroy on non-empty queue") + } + + lock(&work.spanSPMCs.lock) + + // Mark each ring as dead. The sweeper will actually free them. + // + // N.B., we could free directly here, but work.spanSPMCs.all is a + // singly-linked list, so we'd need to walk the entire list to find the + // previous node. If the list becomes doubly-linked, we can free + // directly. 
+ for r := (*spanSPMC)(q.chain.tail.Load()); r != nil; r = (*spanSPMC)(r.prev.Load()) { + r.dead.Store(true) + } + + q.chain.head = nil + q.chain.tail.Store(nil) + q.putsSinceDrain = 0 + + unlock(&work.spanSPMCs.lock) +} + // spanSPMC is a ring buffer of objptrs that represent spans. // Accessed without a lock. // diff --git a/src/runtime/mgcmark_nogreenteagc.go b/src/runtime/mgcmark_nogreenteagc.go index 883c3451abec6b..024565ef3e250d 100644 --- a/src/runtime/mgcmark_nogreenteagc.go +++ b/src/runtime/mgcmark_nogreenteagc.go @@ -63,6 +63,9 @@ func (q *spanQueue) empty() bool { return true } +func (q *spanQueue) destroy() { +} + type spanSPMC struct { _ sys.NotInHeap } diff --git a/src/runtime/proc.go b/src/runtime/proc.go index d36895b046c8c6..fadd9a59639f87 100644 --- a/src/runtime/proc.go +++ b/src/runtime/proc.go @@ -5824,6 +5824,8 @@ func (pp *p) destroy() { println("runtime: p id", pp.id, "destroyed during GC phase", phase) throw("P destroyed while GC is running") } + // We should free the queues though. + pp.gcw.spanq.destroy() clear(pp.sudogbuf[:]) pp.sudogcache = pp.sudogbuf[:0] From 7dd54e1fd7f3a25fccbb5c6ab7066e2baad23e66 Mon Sep 17 00:00:00 2001 From: Michael Pratt Date: Mon, 6 Oct 2025 17:55:06 -0400 Subject: [PATCH 084/152] runtime: make work.spanSPMCs.all doubly-linked Making this a doubly-linked list allows spanQueue.destroy to immediately remove and free rings rather than simply marking them as dead and waiting for the sweeper to deal with them. For #75771. Change-Id: I6a6a636c0fb6be08ee967cb6d8f0577511a33c13 Reviewed-on: https://go-review.googlesource.com/c/go/+/709657 LUCI-TryBot-Result: Go LUCI Reviewed-by: Michael Knyszek --- src/runtime/mgcmark_greenteagc.go | 61 +++++++++++++++++++++---------- 1 file changed, 42 insertions(+), 19 deletions(-) diff --git a/src/runtime/mgcmark_greenteagc.go b/src/runtime/mgcmark_greenteagc.go index 6ebd7ced81b901..7b78611cf7b6fe 100644 --- a/src/runtime/mgcmark_greenteagc.go +++ b/src/runtime/mgcmark_greenteagc.go @@ -635,14 +635,22 @@ func (q *spanQueue) destroy() { lock(&work.spanSPMCs.lock) - // Mark each ring as dead. The sweeper will actually free them. - // - // N.B., we could free directly here, but work.spanSPMCs.all is a - // singly-linked list, so we'd need to walk the entire list to find the - // previous node. If the list becomes doubly-linked, we can free - // directly. + // Remove and free each ring. for r := (*spanSPMC)(q.chain.tail.Load()); r != nil; r = (*spanSPMC)(r.prev.Load()) { - r.dead.Store(true) + prev := r.allprev + next := r.allnext + if prev != nil { + prev.allnext = next + } + if next != nil { + next.allprev = prev + } + if work.spanSPMCs.all == r { + work.spanSPMCs.all = next + } + + r.deinit() + mheap_.spanSPMCAlloc.free(unsafe.Pointer(r)) } q.chain.head = nil @@ -685,6 +693,11 @@ type spanSPMC struct { // work.spanSPMCs.lock. allnext *spanSPMC + // allprev is the link to the previous spanSPMC on the work.spanSPMCs + // list. This is used to find and free dead spanSPMCs. Protected by + // work.spanSPMCs.lock. + allprev *spanSPMC + // dead indicates whether the spanSPMC is no longer in use. // Protected by the CAS to the prev field of the spanSPMC pointing // to this spanSPMC. 
That is, whoever wins that CAS takes ownership @@ -711,7 +724,11 @@ type spanSPMC struct { func newSpanSPMC(cap uint32) *spanSPMC { lock(&work.spanSPMCs.lock) r := (*spanSPMC)(mheap_.spanSPMCAlloc.alloc()) - r.allnext = work.spanSPMCs.all + next := work.spanSPMCs.all + r.allnext = next + if next != nil { + next.allprev = r + } work.spanSPMCs.all = r unlock(&work.spanSPMCs.lock) @@ -748,6 +765,8 @@ func (r *spanSPMC) deinit() { r.head.Store(0) r.tail.Store(0) r.cap = 0 + r.allnext = nil + r.allprev = nil } // slot returns a pointer to slot i%r.cap. @@ -780,22 +799,26 @@ func freeDeadSpanSPMCs() { unlock(&work.spanSPMCs.lock) return } - rp := &work.spanSPMCs.all - for { - r := *rp - if r == nil { - break - } + r := work.spanSPMCs.all + for r != nil { + next := r.allnext if r.dead.Load() { // It's dead. Deinitialize and free it. - *rp = r.allnext + prev := r.allprev + if prev != nil { + prev.allnext = next + } + if next != nil { + next.allprev = prev + } + if work.spanSPMCs.all == r { + work.spanSPMCs.all = next + } + r.deinit() mheap_.spanSPMCAlloc.free(unsafe.Pointer(r)) - } else { - // Still alive, likely in some P's chain. - // Skip it. - rp = &r.allnext } + r = next } unlock(&work.spanSPMCs.lock) } From f6f4e8b3ef21299db1ea3a343c3e55e91365a7fd Mon Sep 17 00:00:00 2001 From: Ethan Lee Date: Fri, 29 Aug 2025 17:35:55 +0000 Subject: [PATCH 085/152] net/url: enforce stricter parsing of bracketed IPv6 hostnames - Previously, url.Parse did not enforce validation of hostnames within square brackets. - RFC 3986 stipulates that only IPv6 hostnames can be embedded within square brackets in a URL. - Now, the parsing logic should strictly enforce that only IPv6 hostnames can be resolved when in square brackets. IPv4, IPv4-mapped addresses and other input will be rejected. - Update url_test to add test cases that cover the above scenarios. Thanks to Enze Wang, Jingcheng Yang and Zehui Miao of Tsinghua University for reporting this issue. 
Fixes CVE-2025-47912 Fixes #75678 Change-Id: Iaa41432bf0ee86de95a39a03adae5729e4deb46c Reviewed-on: https://go-internal-review.googlesource.com/c/go/+/2680 Reviewed-by: Damien Neil Reviewed-by: Roland Shoemaker Reviewed-on: https://go-review.googlesource.com/c/go/+/709857 TryBot-Bypass: Michael Pratt Reviewed-by: Carlos Amedee Auto-Submit: Michael Pratt --- src/go/build/deps_test.go | 10 ++++++---- src/net/url/url.go | 42 +++++++++++++++++++++++++++++---------- src/net/url/url_test.go | 39 ++++++++++++++++++++++++++++++++++++ 3 files changed, 77 insertions(+), 14 deletions(-) diff --git a/src/go/build/deps_test.go b/src/go/build/deps_test.go index d6a18c19fab024..8966254d0d9110 100644 --- a/src/go/build/deps_test.go +++ b/src/go/build/deps_test.go @@ -237,7 +237,6 @@ var depsRules = ` internal/types/errors, mime/quotedprintable, net/internal/socktest, - net/url, runtime/trace, text/scanner, text/tabwriter; @@ -300,6 +299,12 @@ var depsRules = ` FMT < text/template/parse; + internal/bytealg, internal/itoa, math/bits, slices, strconv, unique + < net/netip; + + FMT, net/netip + < net/url; + net/url, text/template/parse < text/template < internal/lazytemplate; @@ -414,9 +419,6 @@ var depsRules = ` < golang.org/x/net/dns/dnsmessage, golang.org/x/net/lif; - internal/bytealg, internal/itoa, math/bits, slices, strconv, unique - < net/netip; - os, net/netip < internal/routebsd; diff --git a/src/net/url/url.go b/src/net/url/url.go index 015c5b2751974a..292bc6bb12187a 100644 --- a/src/net/url/url.go +++ b/src/net/url/url.go @@ -16,6 +16,7 @@ import ( "errors" "fmt" "maps" + "net/netip" "path" "slices" "strconv" @@ -642,40 +643,61 @@ func parseAuthority(authority string) (user *Userinfo, host string, err error) { // parseHost parses host as an authority without user // information. That is, as host[:port]. func parseHost(host string) (string, error) { - if strings.HasPrefix(host, "[") { + if openBracketIdx := strings.LastIndex(host, "["); openBracketIdx != -1 { // Parse an IP-Literal in RFC 3986 and RFC 6874. // E.g., "[fe80::1]", "[fe80::1%25en0]", "[fe80::1]:80". - i := strings.LastIndex(host, "]") - if i < 0 { + closeBracketIdx := strings.LastIndex(host, "]") + if closeBracketIdx < 0 { return "", errors.New("missing ']' in host") } - colonPort := host[i+1:] + + colonPort := host[closeBracketIdx+1:] if !validOptionalPort(colonPort) { return "", fmt.Errorf("invalid port %q after host", colonPort) } + unescapedColonPort, err := unescape(colonPort, encodeHost) + if err != nil { + return "", err + } + hostname := host[openBracketIdx+1 : closeBracketIdx] + var unescapedHostname string // RFC 6874 defines that %25 (%-encoded percent) introduces // the zone identifier, and the zone identifier can use basically // any %-encoding it likes. That's different from the host, which // can only %-encode non-ASCII bytes. // We do impose some restrictions on the zone, to avoid stupidity // like newlines. 
- zone := strings.Index(host[:i], "%25") - if zone >= 0 { - host1, err := unescape(host[:zone], encodeHost) + zoneIdx := strings.Index(hostname, "%25") + if zoneIdx >= 0 { + hostPart, err := unescape(hostname[:zoneIdx], encodeHost) if err != nil { return "", err } - host2, err := unescape(host[zone:i], encodeZone) + zonePart, err := unescape(hostname[zoneIdx:], encodeZone) if err != nil { return "", err } - host3, err := unescape(host[i:], encodeHost) + unescapedHostname = hostPart + zonePart + } else { + var err error + unescapedHostname, err = unescape(hostname, encodeHost) if err != nil { return "", err } - return host1 + host2 + host3, nil } + + // Per RFC 3986, only a host identified by a valid + // IPv6 address can be enclosed by square brackets. + // This excludes any IPv4 or IPv4-mapped addresses. + addr, err := netip.ParseAddr(unescapedHostname) + if err != nil { + return "", fmt.Errorf("invalid host: %w", err) + } + if addr.Is4() || addr.Is4In6() { + return "", errors.New("invalid IPv6 host") + } + return "[" + unescapedHostname + "]" + unescapedColonPort, nil } else if i := strings.LastIndex(host, ":"); i != -1 { colonPort := host[i:] if !validOptionalPort(colonPort) { diff --git a/src/net/url/url_test.go b/src/net/url/url_test.go index 16e08b63c6d098..32065583f27dd7 100644 --- a/src/net/url/url_test.go +++ b/src/net/url/url_test.go @@ -383,6 +383,16 @@ var urltests = []URLTest{ }, "", }, + // valid IPv6 host with port and path + { + "https://[2001:db8::1]:8443/test/path", + &URL{ + Scheme: "https", + Host: "[2001:db8::1]:8443", + Path: "/test/path", + }, + "", + }, // host subcomponent; IPv6 address with zone identifier in RFC 6874 { "http://[fe80::1%25en0]/", // alphanum zone identifier @@ -707,6 +717,24 @@ var parseRequestURLTests = []struct { // RFC 6874. 
{"http://[fe80::1%en0]/", false}, {"http://[fe80::1%en0]:8080/", false}, + + // Tests exercising RFC 3986 compliance + {"https://[1:2:3:4:5:6:7:8]", true}, // full IPv6 address + {"https://[2001:db8::a:b:c:d]", true}, // compressed IPv6 address + {"https://[fe80::1%25eth0]", true}, // link-local address with zone ID (interface name) + {"https://[fe80::abc:def%254]", true}, // link-local address with zone ID (interface index) + {"https://[2001:db8::1]/path", true}, // compressed IPv6 address with path + {"https://[fe80::1%25eth0]/path?query=1", true}, // link-local with zone, path, and query + + {"https://[::ffff:192.0.2.1]", false}, + {"https://[:1] ", false}, + {"https://[1:2:3:4:5:6:7:8:9]", false}, + {"https://[1::1::1]", false}, + {"https://[1:2:3:]", false}, + {"https://[ffff::127.0.0.4000]", false}, + {"https://[0:0::test.com]:80", false}, + {"https://[2001:db8::test.com]", false}, + {"https://[test.com]", false}, } func TestParseRequestURI(t *testing.T) { @@ -1643,6 +1671,17 @@ func TestParseErrors(t *testing.T) { {"cache_object:foo", true}, {"cache_object:foo/bar", true}, {"cache_object/:foo/bar", false}, + + {"http://[192.168.0.1]/", true}, // IPv4 in brackets + {"http://[192.168.0.1]:8080/", true}, // IPv4 in brackets with port + {"http://[::ffff:192.168.0.1]/", true}, // IPv4-mapped IPv6 in brackets + {"http://[::ffff:192.168.0.1]:8080/", true}, // IPv4-mapped IPv6 in brackets with port + {"http://[::ffff:c0a8:1]/", true}, // IPv4-mapped IPv6 in brackets (hex) + {"http://[not-an-ip]/", true}, // invalid IP string in brackets + {"http://[fe80::1%foo]/", true}, // invalid zone format in brackets + {"http://[fe80::1", true}, // missing closing bracket + {"http://fe80::1]/", true}, // missing opening bracket + {"http://[test.com]/", true}, // domain name in brackets } for _, tt := range tests { u, err := Parse(tt.in) From 5ce8cd16f3859ec5ac4106ad8ec15d6236f4501b Mon Sep 17 00:00:00 2001 From: Roland Shoemaker Date: Tue, 30 Sep 2025 11:16:56 -0700 Subject: [PATCH 086/152] encoding/pem: make Decode complexity linear Because Decode scanned the input first for the first BEGIN line, and then the first END line, the complexity of Decode is quadratic. If the input contained a large number of BEGINs and then a single END right at the end of the input, we would find the first BEGIN, and then scan the entire input for the END, and fail to parse the block, so move onto the next BEGIN, scan the entire input for the END, etc. Instead, look for the first END in the input, and then the first BEGIN that precedes the found END. We then process the bytes between the BEGIN and END, and move onto the bytes after the END for further processing. This gives us linear complexity. Fixes CVE-2025-61723 Fixes #75676 Change-Id: I813c4f63e78bca4054226c53e13865c781564ccf Reviewed-on: https://go-internal-review.googlesource.com/c/go/+/2921 Reviewed-by: Nicholas Husin Reviewed-by: Damien Neil Reviewed-on: https://go-review.googlesource.com/c/go/+/709858 TryBot-Bypass: Michael Pratt Auto-Submit: Michael Pratt Reviewed-by: Carlos Amedee --- src/encoding/pem/pem.go | 67 ++++++++++++++++++++---------------- src/encoding/pem/pem_test.go | 13 +++---- 2 files changed, 44 insertions(+), 36 deletions(-) diff --git a/src/encoding/pem/pem.go b/src/encoding/pem/pem.go index dcc7416ee21ffe..21887008ca2182 100644 --- a/src/encoding/pem/pem.go +++ b/src/encoding/pem/pem.go @@ -37,7 +37,7 @@ type Block struct { // line bytes. 
The remainder of the byte array (also not including the new line // bytes) is also returned and this will always be smaller than the original // argument. -func getLine(data []byte) (line, rest []byte) { +func getLine(data []byte) (line, rest []byte, consumed int) { i := bytes.IndexByte(data, '\n') var j int if i < 0 { @@ -49,7 +49,7 @@ func getLine(data []byte) (line, rest []byte) { i-- } } - return bytes.TrimRight(data[0:i], " \t"), data[j:] + return bytes.TrimRight(data[0:i], " \t"), data[j:], j } // removeSpacesAndTabs returns a copy of its input with all spaces and tabs @@ -90,20 +90,32 @@ func Decode(data []byte) (p *Block, rest []byte) { // pemStart begins with a newline. However, at the very beginning of // the byte array, we'll accept the start string without it. rest = data + for { - if bytes.HasPrefix(rest, pemStart[1:]) { - rest = rest[len(pemStart)-1:] - } else if _, after, ok := bytes.Cut(rest, pemStart); ok { - rest = after - } else { + // Find the first END line, and then find the last BEGIN line before + // the end line. This lets us skip any repeated BEGIN lines that don't + // have a matching END. + endIndex := bytes.Index(rest, pemEnd) + if endIndex < 0 { + return nil, data + } + endTrailerIndex := endIndex + len(pemEnd) + beginIndex := bytes.LastIndex(rest[:endIndex], pemStart[1:]) + if beginIndex < 0 || beginIndex > 0 && rest[beginIndex-1] != '\n' { return nil, data } + rest = rest[beginIndex+len(pemStart)-1:] + endIndex -= beginIndex + len(pemStart) - 1 + endTrailerIndex -= beginIndex + len(pemStart) - 1 var typeLine []byte - typeLine, rest = getLine(rest) + var consumed int + typeLine, rest, consumed = getLine(rest) if !bytes.HasSuffix(typeLine, pemEndOfLine) { continue } + endIndex -= consumed + endTrailerIndex -= consumed typeLine = typeLine[0 : len(typeLine)-len(pemEndOfLine)] p = &Block{ @@ -117,7 +129,7 @@ func Decode(data []byte) (p *Block, rest []byte) { if len(rest) == 0 { return nil, data } - line, next := getLine(rest) + line, next, consumed := getLine(rest) key, val, ok := bytes.Cut(line, colon) if !ok { @@ -129,21 +141,13 @@ func Decode(data []byte) (p *Block, rest []byte) { val = bytes.TrimSpace(val) p.Headers[string(key)] = string(val) rest = next + endIndex -= consumed + endTrailerIndex -= consumed } - var endIndex, endTrailerIndex int - - // If there were no headers, the END line might occur - // immediately, without a leading newline. - if len(p.Headers) == 0 && bytes.HasPrefix(rest, pemEnd[1:]) { - endIndex = 0 - endTrailerIndex = len(pemEnd) - 1 - } else { - endIndex = bytes.Index(rest, pemEnd) - endTrailerIndex = endIndex + len(pemEnd) - } - - if endIndex < 0 { + // If there were headers, there must be a newline between the headers + // and the END line, so endIndex should be >= 0. + if len(p.Headers) > 0 && endIndex < 0 { continue } @@ -163,21 +167,24 @@ func Decode(data []byte) (p *Block, rest []byte) { } // The line must end with only whitespace. 
- if s, _ := getLine(restOfEndLine); len(s) != 0 { + if s, _, _ := getLine(restOfEndLine); len(s) != 0 { continue } - base64Data := removeSpacesAndTabs(rest[:endIndex]) - p.Bytes = make([]byte, base64.StdEncoding.DecodedLen(len(base64Data))) - n, err := base64.StdEncoding.Decode(p.Bytes, base64Data) - if err != nil { - continue + p.Bytes = []byte{} + if endIndex > 0 { + base64Data := removeSpacesAndTabs(rest[:endIndex]) + p.Bytes = make([]byte, base64.StdEncoding.DecodedLen(len(base64Data))) + n, err := base64.StdEncoding.Decode(p.Bytes, base64Data) + if err != nil { + continue + } + p.Bytes = p.Bytes[:n] } - p.Bytes = p.Bytes[:n] // the -1 is because we might have only matched pemEnd without the // leading newline if the PEM block was empty. - _, rest = getLine(rest[endIndex+len(pemEnd)-1:]) + _, rest, _ = getLine(rest[endIndex+len(pemEnd)-1:]) return p, rest } } diff --git a/src/encoding/pem/pem_test.go b/src/encoding/pem/pem_test.go index e252ffd8ed1613..2c9b3eabcd1c12 100644 --- a/src/encoding/pem/pem_test.go +++ b/src/encoding/pem/pem_test.go @@ -34,7 +34,7 @@ var getLineTests = []GetLineTest{ func TestGetLine(t *testing.T) { for i, test := range getLineTests { - x, y := getLine([]byte(test.in)) + x, y, _ := getLine([]byte(test.in)) if string(x) != test.out1 || string(y) != test.out2 { t.Errorf("#%d got:%+v,%+v want:%s,%s", i, x, y, test.out1, test.out2) } @@ -46,6 +46,7 @@ func TestDecode(t *testing.T) { if !reflect.DeepEqual(result, certificate) { t.Errorf("#0 got:%#v want:%#v", result, certificate) } + result, remainder = Decode(remainder) if !reflect.DeepEqual(result, privateKey) { t.Errorf("#1 got:%#v want:%#v", result, privateKey) @@ -68,7 +69,7 @@ func TestDecode(t *testing.T) { } result, remainder = Decode(remainder) - if result == nil || result.Type != "HEADERS" || len(result.Headers) != 1 { + if result == nil || result.Type != "VALID HEADERS" || len(result.Headers) != 1 { t.Errorf("#5 expected single header block but got :%v", result) } @@ -381,15 +382,15 @@ ZWAaUoVtWIQ52aKS0p19G99hhb+IVANC4akkdHV4SP8i7MVNZhfUmg== # This shouldn't be recognised because of the missing newline after the headers. ------BEGIN HEADERS----- +-----BEGIN INVALID HEADERS----- Header: 1 ------END HEADERS----- +-----END INVALID HEADERS----- # This should be valid, however. ------BEGIN HEADERS----- +-----BEGIN VALID HEADERS----- Header: 1 ------END HEADERS-----`) +-----END VALID HEADERS-----`) var certificate = &Block{Type: "CERTIFICATE", Headers: map[string]string{}, From 5ede095649db7783726c28390812bca9ce2c684a Mon Sep 17 00:00:00 2001 From: Damien Neil Date: Tue, 30 Sep 2025 15:11:16 -0700 Subject: [PATCH 087/152] net/textproto: avoid quadratic complexity in Reader.ReadResponse Reader.ReadResponse constructed a response string from repeated string concatenation, permitting a malicious sender to cause excessive memory allocation and CPU consumption by sending a response consisting of many short lines. Use a strings.Builder to construct the string instead. Thanks to Jakub Ciolek for reporting this issue. 
Fixes CVE-2025-61724 Fixes #75716 Change-Id: I1a98ce85a21b830cb25799f9ac9333a67400d736 Reviewed-on: https://go-internal-review.googlesource.com/c/go/+/2940 Reviewed-by: Roland Shoemaker Reviewed-by: Nicholas Husin Reviewed-on: https://go-review.googlesource.com/c/go/+/709859 TryBot-Bypass: Michael Pratt Auto-Submit: Michael Pratt Reviewed-by: Carlos Amedee --- src/net/textproto/reader.go | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/src/net/textproto/reader.go b/src/net/textproto/reader.go index 668c06c24c1372..6df3a630917d78 100644 --- a/src/net/textproto/reader.go +++ b/src/net/textproto/reader.go @@ -285,8 +285,10 @@ func (r *Reader) ReadCodeLine(expectCode int) (code int, message string, err err // // An expectCode <= 0 disables the check of the status code. func (r *Reader) ReadResponse(expectCode int) (code int, message string, err error) { - code, continued, message, err := r.readCodeLine(expectCode) + code, continued, first, err := r.readCodeLine(expectCode) multi := continued + var messageBuilder strings.Builder + messageBuilder.WriteString(first) for continued { line, err := r.ReadLine() if err != nil { @@ -297,12 +299,15 @@ func (r *Reader) ReadResponse(expectCode int) (code int, message string, err err var moreMessage string code2, continued, moreMessage, err = parseCodeLine(line, 0) if err != nil || code2 != code { - message += "\n" + strings.TrimRight(line, "\r\n") + messageBuilder.WriteByte('\n') + messageBuilder.WriteString(strings.TrimRight(line, "\r\n")) continued = true continue } - message += "\n" + moreMessage + messageBuilder.WriteByte('\n') + messageBuilder.WriteString(moreMessage) } + message = messageBuilder.String() if err != nil && multi && message != "" { // replace one line error message with all lines (full message) err = &Error{code, message} From 463165699d874ef0ac7965fc5788fe1693eaae9a Mon Sep 17 00:00:00 2001 From: Damien Neil Date: Thu, 25 Sep 2025 14:41:53 -0700 Subject: [PATCH 088/152] net/mail: avoid quadratic behavior in mail address parsing MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit RFC 5322 domain-literal parsing built the dtext value one character at a time with string concatenation, resulting in excessive resource consumption when parsing very large domain-literal values. Replace with a subslice. Benchmark not included in this CL because it's too narrow to be of general ongoing use, but for: ParseAddress("alice@[" + strings.Repeat("a", 0x40000) + "]") goos: darwin goarch: arm64 pkg: net/mail cpu: Apple M4 Pro │ /tmp/bench.0 │ /tmp/bench.1 │ │ sec/op │ sec/op vs base │ ParseAddress-14 1987.732m ± 9% 1.524m ± 5% -99.92% (p=0.000 n=10) │ /tmp/bench.0 │ /tmp/bench.1 │ │ B/op │ B/op vs base │ ParseAddress-14 33692.767Mi ± 0% 1.282Mi ± 0% -100.00% (p=0.000 n=10) │ /tmp/bench.0 │ /tmp/bench.1 │ │ allocs/op │ allocs/op vs base │ ParseAddress-14 263711.00 ± 0% 17.00 ± 0% -99.99% (p=0.000 n=10) Thanks to Philippe Antoine (Catena cyber) for reporting this issue. 
Fixes CVE-2025-61725 Fixes #75680 Change-Id: Id971c2d5b59882bb476e22fceb7e01ec08234bb7 Reviewed-on: https://go-internal-review.googlesource.com/c/go/+/2840 Reviewed-by: Roland Shoemaker Reviewed-by: Nicholas Husin Reviewed-on: https://go-review.googlesource.com/c/go/+/709860 Reviewed-by: Carlos Amedee TryBot-Bypass: Michael Pratt Auto-Submit: Michael Pratt --- src/net/mail/message.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/net/mail/message.go b/src/net/mail/message.go index 14f839a03077c1..1502b3596252ba 100644 --- a/src/net/mail/message.go +++ b/src/net/mail/message.go @@ -724,7 +724,8 @@ func (p *addrParser) consumeDomainLiteral() (string, error) { } // Parse the dtext - var dtext string + dtext := p.s + dtextLen := 0 for { if p.empty() { return "", errors.New("mail: unclosed domain-literal") @@ -741,9 +742,10 @@ func (p *addrParser) consumeDomainLiteral() (string, error) { return "", fmt.Errorf("mail: bad character in domain-literal: %q", r) } - dtext += p.s[:size] + dtextLen += size p.s = p.s[size:] } + dtext = dtext[:dtextLen] // Skip the trailing ] if !p.consume(']') { From f7a68d3804efabd271f0338391858bc1e7e57422 Mon Sep 17 00:00:00 2001 From: Damien Neil Date: Thu, 11 Sep 2025 13:32:10 -0700 Subject: [PATCH 089/152] archive/tar: set a limit on the size of GNU sparse file 1.0 regions Sparse files in tar archives contain only the non-zero components of the file. There are several different encodings for sparse files. When reading GNU tar pax 1.0 sparse files, archive/tar did not set a limit on the size of the sparse region data. A malicious archive containing a large number of sparse blocks could cause archive/tar to read an unbounded amount of data from the archive into memory. Since a malicious input can be highly compressable, a small compressed input could cause very large allocations. Cap the size of the sparse block data to the same limit used for PAX headers (1 MiB). Thanks to Harshit Gupta (Mr HAX) (https://www.linkedin.com/in/iam-harshit-gupta/) for reporting this issue. 
Fixes CVE-2025-58183 Fixes #75677 Change-Id: I70b907b584a7b8676df8a149a1db728ae681a770 Reviewed-on: https://go-internal-review.googlesource.com/c/go/+/2800 Reviewed-by: Roland Shoemaker Reviewed-by: Nicholas Husin Reviewed-on: https://go-review.googlesource.com/c/go/+/709861 Auto-Submit: Michael Pratt TryBot-Bypass: Michael Pratt Reviewed-by: Carlos Amedee --- src/archive/tar/common.go | 1 + src/archive/tar/reader.go | 9 +++++++-- src/archive/tar/reader_test.go | 5 +++++ .../tar/testdata/gnu-sparse-many-zeros.tar.bz2 | Bin 0 -> 1642 bytes 4 files changed, 13 insertions(+), 2 deletions(-) create mode 100644 src/archive/tar/testdata/gnu-sparse-many-zeros.tar.bz2 diff --git a/src/archive/tar/common.go b/src/archive/tar/common.go index 7b3945ff153144..ad31bbb64aaa5c 100644 --- a/src/archive/tar/common.go +++ b/src/archive/tar/common.go @@ -39,6 +39,7 @@ var ( errMissData = errors.New("archive/tar: sparse file references non-existent data") errUnrefData = errors.New("archive/tar: sparse file contains unreferenced data") errWriteHole = errors.New("archive/tar: write non-NUL byte in sparse hole") + errSparseTooLong = errors.New("archive/tar: sparse map too long") ) type headerError []string diff --git a/src/archive/tar/reader.go b/src/archive/tar/reader.go index 8483fb52a28f66..16ac2f5b17c28b 100644 --- a/src/archive/tar/reader.go +++ b/src/archive/tar/reader.go @@ -531,12 +531,17 @@ func readGNUSparseMap1x0(r io.Reader) (sparseDatas, error) { cntNewline int64 buf bytes.Buffer blk block + totalSize int ) // feedTokens copies data in blocks from r into buf until there are // at least cnt newlines in buf. It will not read more blocks than needed. feedTokens := func(n int64) error { for cntNewline < n { + totalSize += len(blk) + if totalSize > maxSpecialFileSize { + return errSparseTooLong + } if _, err := mustReadFull(r, blk[:]); err != nil { return err } @@ -569,8 +574,8 @@ func readGNUSparseMap1x0(r io.Reader) (sparseDatas, error) { } // Parse for all member entries. - // numEntries is trusted after this since a potential attacker must have - // committed resources proportional to what this library used. + // numEntries is trusted after this since feedTokens limits the number of + // tokens based on maxSpecialFileSize. if err := feedTokens(2 * numEntries); err != nil { return nil, err } diff --git a/src/archive/tar/reader_test.go b/src/archive/tar/reader_test.go index 99340a30471914..fca53dae741bd5 100644 --- a/src/archive/tar/reader_test.go +++ b/src/archive/tar/reader_test.go @@ -621,6 +621,11 @@ func TestReader(t *testing.T) { }, Format: FormatPAX, }}, + }, { + // Small compressed file that uncompresses to + // a file with a very large GNU 1.0 sparse map. 
+ file: "testdata/gnu-sparse-many-zeros.tar.bz2", + err: errSparseTooLong, }} for _, v := range vectors { diff --git a/src/archive/tar/testdata/gnu-sparse-many-zeros.tar.bz2 b/src/archive/tar/testdata/gnu-sparse-many-zeros.tar.bz2 new file mode 100644 index 0000000000000000000000000000000000000000..751d7fd4b68be1a7439413b4089dbbde33a2900a GIT binary patch literal 1642 zcmZ>Y%CIzaj8qGb%wCwXiS2H0{DC_Y9GL$z2|RUTNGrcrz`)SKVZdrPcO01{ps1pvVeGD$*OEWa_73ksV;zBwoQh0BPEH%vtSITeAlMvtlB3E`^y)8{ zw~&+g8{-?Ahs_oW3u(QcHog6FpP~f~~=z-9g zf_wTZ7^XHZn&-}Zc8R{Fft9C2PEyk*pJkiw?fk^YcK6r>1_p%&1_y>jhFQI$94?0z za?Cl(nljTjt?TS76W-mu3JeU63=9nnhZs#ILKKBvodhkrxK0|)I$ibrgoSju6wr@N z42%qnElgP^KzXhfrD74$NvWo@w9QvsFnedxz`)4Dz{J4J!Ez-rpv4zx#WM|Ul~dWe zujeZ~$i0&Z3?dE&76vu}&J`B}T70wwI?n`zc}}^OvF6@lhub?YF)*+QFmN#NCX1lVw7II6Iq$It1A~AA zg93v=gRdLVZlD!@quw8l{n5lfn)hi|^v!SyiA^y0F{gnCT=X?DusWMCWpyr3vv&I) ZCg2!yghOkJkj3svnul7NKE7EI0RUq%CTjoy literal 0 HcmV?d00001 From f2d0d05d28c3493a8f2b5d4e3c0080e18b9a3bdc Mon Sep 17 00:00:00 2001 From: Ian Alexander Date: Wed, 20 Aug 2025 19:56:29 -0400 Subject: [PATCH 090/152] cmd/go: refactor usage of `MainModules` This commit refactors usage of the global variable `MainModules` to the global LoaderState variable of the same name. This commit is part of the overall effort to eliminate global modloader state. [git-generate] cd src/cmd/go/internal/modload rf 'mv State.mainModules State.MainModules' rf 'ex { MainModules -> LoaderState.MainModules }' for dir in load modcmd modget test tool workcmd ; do cd ../${dir} rf 'ex { import "cmd/go/internal/modload" modload.MainModules -> modload.LoaderState.MainModules }' done cd ../modload rf 'rm MainModules' Change-Id: I15644c84190717d62ae953747a288ec6495ef168 Reviewed-on: https://go-review.googlesource.com/c/go/+/698060 Reviewed-by: Michael Matloob Reviewed-by: Michael Matloob LUCI-TryBot-Result: Go LUCI --- src/cmd/go/internal/load/godebug.go | 4 +- src/cmd/go/internal/load/pkg.go | 2 +- src/cmd/go/internal/load/search.go | 4 +- src/cmd/go/internal/modcmd/download.go | 6 +- src/cmd/go/internal/modcmd/vendor.go | 20 +++---- src/cmd/go/internal/modcmd/verify.go | 2 +- src/cmd/go/internal/modget/get.go | 30 +++++----- src/cmd/go/internal/modget/query.go | 2 +- src/cmd/go/internal/modload/build.go | 4 +- src/cmd/go/internal/modload/buildlist.go | 36 ++++++------ src/cmd/go/internal/modload/edit.go | 6 +- src/cmd/go/internal/modload/import.go | 30 +++++----- src/cmd/go/internal/modload/init.go | 72 ++++++++++++------------ src/cmd/go/internal/modload/list.go | 2 +- src/cmd/go/internal/modload/load.go | 44 +++++++-------- src/cmd/go/internal/modload/modfile.go | 36 ++++++------ src/cmd/go/internal/modload/mvs.go | 4 +- src/cmd/go/internal/modload/query.go | 28 ++++----- src/cmd/go/internal/modload/search.go | 20 +++---- src/cmd/go/internal/modload/vendor.go | 4 +- src/cmd/go/internal/test/test.go | 2 +- src/cmd/go/internal/tool/tool.go | 4 +- src/cmd/go/internal/workcmd/sync.go | 4 +- 23 files changed, 182 insertions(+), 184 deletions(-) diff --git a/src/cmd/go/internal/load/godebug.go b/src/cmd/go/internal/load/godebug.go index ff184384567afe..c795d42f117f0e 100644 --- a/src/cmd/go/internal/load/godebug.go +++ b/src/cmd/go/internal/load/godebug.go @@ -49,7 +49,7 @@ func defaultGODEBUG(p *Package, directives, testDirectives, xtestDirectives []bu if p.Name != "main" { return "" } - goVersion := modload.MainModules.GoVersion() + goVersion := modload.LoaderState.MainModules.GoVersion() if modload.LoaderState.RootMode == modload.NoRoot && p.Module != nil { // 
This is go install pkg@version or go run pkg@version. // Use the Go version from the package. @@ -73,7 +73,7 @@ func defaultGODEBUG(p *Package, directives, testDirectives, xtestDirectives []bu } // Add directives from main module go.mod. - for _, g := range modload.MainModules.Godebugs() { + for _, g := range modload.LoaderState.MainModules.Godebugs() { if m == nil { m = make(map[string]string) } diff --git a/src/cmd/go/internal/load/pkg.go b/src/cmd/go/internal/load/pkg.go index 27a1fbfff836fa..135d7579d65dff 100644 --- a/src/cmd/go/internal/load/pkg.go +++ b/src/cmd/go/internal/load/pkg.go @@ -1552,7 +1552,7 @@ func disallowInternal(ctx context.Context, srcDir string, importer *Package, imp // directory containing them. // If the directory is outside the main modules, this will resolve to ".", // which is not a prefix of any valid module. - importerPath, _ = modload.MainModules.DirImportPath(ctx, importer.Dir) + importerPath, _ = modload.LoaderState.MainModules.DirImportPath(ctx, importer.Dir) } parentOfInternal := p.ImportPath[:i] if str.HasPathPrefix(importerPath, parentOfInternal) { diff --git a/src/cmd/go/internal/load/search.go b/src/cmd/go/internal/load/search.go index 941cfb77a2ec08..51c8cc0932406e 100644 --- a/src/cmd/go/internal/load/search.go +++ b/src/cmd/go/internal/load/search.go @@ -56,11 +56,11 @@ func MatchPackage(pattern, cwd string) func(*Package) bool { return func(p *Package) bool { return p.Standard && strings.HasPrefix(p.ImportPath, "cmd/") } case pattern == "tool" && modload.Enabled(): return func(p *Package) bool { - return modload.MainModules.Tools()[p.ImportPath] + return modload.LoaderState.MainModules.Tools()[p.ImportPath] } case pattern == "work" && modload.Enabled(): return func(p *Package) bool { - return p.Module != nil && modload.MainModules.Contains(p.Module.Path) + return p.Module != nil && modload.LoaderState.MainModules.Contains(p.Module.Path) } default: diff --git a/src/cmd/go/internal/modcmd/download.go b/src/cmd/go/internal/modcmd/download.go index 6d12d689f0ff51..8df11bfa59fa43 100644 --- a/src/cmd/go/internal/modcmd/download.go +++ b/src/cmd/go/internal/modcmd/download.go @@ -120,7 +120,7 @@ func runDownload(ctx context.Context, cmd *base.Command, args []string) { modload.LoadModFile(ctx) // to fill MainModules if haveExplicitArgs { - for _, mainModule := range modload.MainModules.Versions() { + for _, mainModule := range modload.LoaderState.MainModules.Versions() { targetAtUpgrade := mainModule.Path + "@upgrade" targetAtPatch := mainModule.Path + "@patch" for _, arg := range args { @@ -136,8 +136,8 @@ func runDownload(ctx context.Context, cmd *base.Command, args []string) { // https://go-review.googlesource.com/c/go/+/359794/comments/ce946a80_6cf53992. 
args = []string{"all"} } else { - mainModule := modload.MainModules.Versions()[0] - modFile := modload.MainModules.ModFile(mainModule) + mainModule := modload.LoaderState.MainModules.Versions()[0] + modFile := modload.LoaderState.MainModules.ModFile(mainModule) if modFile.Go == nil || gover.Compare(modFile.Go.Version, gover.ExplicitIndirectVersion) < 0 { if len(modFile.Require) > 0 { args = []string{"all"} diff --git a/src/cmd/go/internal/modcmd/vendor.go b/src/cmd/go/internal/modcmd/vendor.go index dfea571c0e0317..df673e885c1473 100644 --- a/src/cmd/go/internal/modcmd/vendor.go +++ b/src/cmd/go/internal/modcmd/vendor.go @@ -106,7 +106,7 @@ func RunVendor(ctx context.Context, vendorE bool, vendorO string, args []string) modpkgs := make(map[module.Version][]string) for _, pkg := range pkgs { m := modload.PackageModule(pkg) - if m.Path == "" || modload.MainModules.Contains(m.Path) { + if m.Path == "" || modload.LoaderState.MainModules.Contains(m.Path) { continue } modpkgs[m] = append(modpkgs[m], pkg) @@ -116,13 +116,13 @@ func RunVendor(ctx context.Context, vendorE bool, vendorO string, args []string) includeAllReplacements := false includeGoVersions := false isExplicit := map[module.Version]bool{} - gv := modload.MainModules.GoVersion() + gv := modload.LoaderState.MainModules.GoVersion() if gover.Compare(gv, "1.14") >= 0 && (modload.FindGoWork(base.Cwd()) != "" || modload.ModFile().Go != nil) { // If the Go version is at least 1.14, annotate all explicit 'require' and // 'replace' targets found in the go.mod file so that we can perform a // stronger consistency check when -mod=vendor is set. - for _, m := range modload.MainModules.Versions() { - if modFile := modload.MainModules.ModFile(m); modFile != nil { + for _, m := range modload.LoaderState.MainModules.Versions() { + if modFile := modload.LoaderState.MainModules.ModFile(m); modFile != nil { for _, r := range modFile.Require { isExplicit[r.Mod] = true } @@ -156,7 +156,7 @@ func RunVendor(ctx context.Context, vendorE bool, vendorO string, args []string) w = io.MultiWriter(&buf, os.Stderr) } - if modload.MainModules.WorkFile() != nil { + if modload.LoaderState.MainModules.WorkFile() != nil { fmt.Fprintf(w, "## workspace\n") } @@ -192,8 +192,8 @@ func RunVendor(ctx context.Context, vendorE bool, vendorO string, args []string) // Record unused and wildcard replacements at the end of the modules.txt file: // without access to the complete build list, the consumer of the vendor // directory can't otherwise determine that those replacements had no effect. - for _, m := range modload.MainModules.Versions() { - if workFile := modload.MainModules.WorkFile(); workFile != nil { + for _, m := range modload.LoaderState.MainModules.Versions() { + if workFile := modload.LoaderState.MainModules.WorkFile(); workFile != nil { for _, r := range workFile.Replace { if replacementWritten[r.Old] { // We already recorded this replacement. @@ -208,7 +208,7 @@ func RunVendor(ctx context.Context, vendorE bool, vendorO string, args []string) } } } - if modFile := modload.MainModules.ModFile(m); modFile != nil { + if modFile := modload.LoaderState.MainModules.ModFile(m); modFile != nil { for _, r := range modFile.Replace { if replacementWritten[r.Old] { // We already recorded this replacement. 
@@ -315,7 +315,7 @@ func vendorPkg(vdir, pkg string) { } } var embedPatterns []string - if gover.Compare(modload.MainModules.GoVersion(), "1.22") >= 0 { + if gover.Compare(modload.LoaderState.MainModules.GoVersion(), "1.22") >= 0 { embedPatterns = bp.EmbedPatterns } else { // Maintain the behavior of https://github.com/golang/go/issues/63473 @@ -431,7 +431,7 @@ func matchPotentialSourceFile(dir string, info fs.DirEntry) bool { return false } if info.Name() == "go.mod" || info.Name() == "go.sum" { - if gv := modload.MainModules.GoVersion(); gover.Compare(gv, "1.17") >= 0 { + if gv := modload.LoaderState.MainModules.GoVersion(); gover.Compare(gv, "1.17") >= 0 { // As of Go 1.17, we strip go.mod and go.sum files from dependency modules. // Otherwise, 'go' commands invoked within the vendor subtree may misidentify // an arbitrary directory within the vendor tree as a module root. diff --git a/src/cmd/go/internal/modcmd/verify.go b/src/cmd/go/internal/modcmd/verify.go index 157c920c067321..8de444ff06ad1d 100644 --- a/src/cmd/go/internal/modcmd/verify.go +++ b/src/cmd/go/internal/modcmd/verify.go @@ -94,7 +94,7 @@ func verifyMod(ctx context.Context, mod module.Version) []error { // "go" and "toolchain" have no disk footprint; nothing to verify. return nil } - if modload.MainModules.Contains(mod.Path) { + if modload.LoaderState.MainModules.Contains(mod.Path) { return nil } var errs []error diff --git a/src/cmd/go/internal/modget/get.go b/src/cmd/go/internal/modget/get.go index 167f515be98a7d..d8b1f83bf1d132 100644 --- a/src/cmd/go/internal/modget/get.go +++ b/src/cmd/go/internal/modget/get.go @@ -426,7 +426,7 @@ func runGet(ctx context.Context, cmd *base.Command, args []string) { if gowork := modload.FindGoWork(base.Cwd()); gowork != "" { wf, err := modload.ReadWorkFile(gowork) - if err == nil && modload.UpdateWorkGoVersion(wf, modload.MainModules.GoVersion()) { + if err == nil && modload.UpdateWorkGoVersion(wf, modload.LoaderState.MainModules.GoVersion()) { modload.WriteWorkFile(gowork, wf) } } @@ -722,7 +722,7 @@ func (r *resolver) queryNone(ctx context.Context, q *query) { if !q.isWildcard() { q.pathOnce(q.pattern, func() pathSet { hasModRoot := modload.HasModRoot() - if hasModRoot && modload.MainModules.Contains(q.pattern) { + if hasModRoot && modload.LoaderState.MainModules.Contains(q.pattern) { v := module.Version{Path: q.pattern} // The user has explicitly requested to downgrade their own module to // version "none". This is not an entirely unreasonable request: it @@ -746,7 +746,7 @@ func (r *resolver) queryNone(ctx context.Context, q *query) { continue } q.pathOnce(curM.Path, func() pathSet { - if modload.HasModRoot() && curM.Version == "" && modload.MainModules.Contains(curM.Path) { + if modload.HasModRoot() && curM.Version == "" && modload.LoaderState.MainModules.Contains(curM.Path) { return errSet(&modload.QueryMatchesMainModulesError{MainModules: []module.Version{curM}, Pattern: q.pattern, Query: q.version}) } return pathSet{mod: module.Version{Path: curM.Path, Version: "none"}} @@ -766,13 +766,13 @@ func (r *resolver) performLocalQueries(ctx context.Context) { // Absolute paths like C:\foo and relative paths like ../foo... are // restricted to matching packages in the main module. - pkgPattern, mainModule := modload.MainModules.DirImportPath(ctx, q.pattern) + pkgPattern, mainModule := modload.LoaderState.MainModules.DirImportPath(ctx, q.pattern) if pkgPattern == "." 
{ modload.MustHaveModRoot() - versions := modload.MainModules.Versions() + versions := modload.LoaderState.MainModules.Versions() modRoots := make([]string, 0, len(versions)) for _, m := range versions { - modRoots = append(modRoots, modload.MainModules.ModRoot(m)) + modRoots = append(modRoots, modload.LoaderState.MainModules.ModRoot(m)) } var plural string if len(modRoots) != 1 { @@ -792,7 +792,7 @@ func (r *resolver) performLocalQueries(ctx context.Context) { } if !q.isWildcard() { modload.MustHaveModRoot() - return errSet(fmt.Errorf("%s%s is not a package in module rooted at %s", q.pattern, absDetail, modload.MainModules.ModRoot(mainModule))) + return errSet(fmt.Errorf("%s%s is not a package in module rooted at %s", q.pattern, absDetail, modload.LoaderState.MainModules.ModRoot(mainModule))) } search.WarnUnmatched([]*search.Match{match}) return pathSet{} @@ -848,7 +848,7 @@ func (r *resolver) queryWildcard(ctx context.Context, q *query) { return pathSet{} } - if modload.MainModules.Contains(curM.Path) && !versionOkForMainModule(q.version) { + if modload.LoaderState.MainModules.Contains(curM.Path) && !versionOkForMainModule(q.version) { if q.matchesPath(curM.Path) { return errSet(&modload.QueryMatchesMainModulesError{ MainModules: []module.Version{curM}, @@ -1065,7 +1065,7 @@ func (r *resolver) queryPath(ctx context.Context, q *query) { // pattern is "tool". func (r *resolver) performToolQueries(ctx context.Context) { for _, q := range r.toolQueries { - for tool := range modload.MainModules.Tools() { + for tool := range modload.LoaderState.MainModules.Tools() { q.pathOnce(tool, func() pathSet { pkgMods, err := r.queryPackages(ctx, tool, q.version, r.initialSelected) return pathSet{pkgMods: pkgMods, err: err} @@ -1082,10 +1082,10 @@ func (r *resolver) performWorkQueries(ctx context.Context) { // TODO(matloob): Maybe export MainModules.mustGetSingleMainModule and call that. // There are a few other places outside the modload package where we expect // a single main module. - if len(modload.MainModules.Versions()) != 1 { + if len(modload.LoaderState.MainModules.Versions()) != 1 { panic("internal error: number of main modules is not exactly one in resolution phase of go get") } - mainModule := modload.MainModules.Versions()[0] + mainModule := modload.LoaderState.MainModules.Versions()[0] // We know what the result is going to be, assuming the main module is not // empty, (it's the main module itself) but first check to see that there @@ -1496,7 +1496,7 @@ func (r *resolver) disambiguate(cs pathSet) (filtered pathSet, isPackage bool, m continue } - if modload.MainModules.Contains(m.Path) { + if modload.LoaderState.MainModules.Contains(m.Path) { if m.Version == "" { return pathSet{}, true, m, true } @@ -1612,7 +1612,7 @@ func (r *resolver) checkPackageProblems(ctx context.Context, pkgPatterns []strin // info, but switch back to single module mode when fetching sums so that we update // the single module's go.sum file. 
var exitWorkspace func() - if r.workspace != nil && r.workspace.hasModule(modload.MainModules.Versions()[0].Path) { + if r.workspace != nil && r.workspace.hasModule(modload.LoaderState.MainModules.Versions()[0].Path) { var err error exitWorkspace, err = modload.EnterWorkspace(ctx) if err != nil { @@ -1951,7 +1951,7 @@ func (r *resolver) resolve(q *query, m module.Version) { panic("internal error: resolving a module.Version with an empty path") } - if modload.MainModules.Contains(m.Path) && m.Version != "" { + if modload.LoaderState.MainModules.Contains(m.Path) && m.Version != "" { reportError(q, &modload.QueryMatchesMainModulesError{ MainModules: []module.Version{{Path: m.Path}}, Pattern: q.pattern, @@ -1983,7 +1983,7 @@ func (r *resolver) updateBuildList(ctx context.Context, additions []module.Versi resolved := make([]module.Version, 0, len(r.resolvedVersion)) for mPath, rv := range r.resolvedVersion { - if !modload.MainModules.Contains(mPath) { + if !modload.LoaderState.MainModules.Contains(mPath) { resolved = append(resolved, module.Version{Path: mPath, Version: rv.version}) } } diff --git a/src/cmd/go/internal/modget/query.go b/src/cmd/go/internal/modget/query.go index 05872d52ec4e04..db09947293c6dc 100644 --- a/src/cmd/go/internal/modget/query.go +++ b/src/cmd/go/internal/modget/query.go @@ -192,7 +192,7 @@ func (q *query) validate() error { // request that we remove all module requirements, leaving only the main // module and standard library. Perhaps we should implement that someday. return &modload.QueryUpgradesAllError{ - MainModules: modload.MainModules.Versions(), + MainModules: modload.LoaderState.MainModules.Versions(), Query: q.version, } } diff --git a/src/cmd/go/internal/modload/build.go b/src/cmd/go/internal/modload/build.go index 6e30afd5247b36..cb168d58a227bc 100644 --- a/src/cmd/go/internal/modload/build.go +++ b/src/cmd/go/internal/modload/build.go @@ -290,7 +290,7 @@ func addDeprecation(ctx context.Context, m *modinfo.ModulePublic) { // in rs (which may be nil to indicate that m was not loaded from a requirement // graph). 
func moduleInfo(ctx context.Context, rs *Requirements, m module.Version, mode ListMode, reuse map[module.Version]*modinfo.ModulePublic) *modinfo.ModulePublic { - if m.Version == "" && MainModules.Contains(m.Path) { + if m.Version == "" && LoaderState.MainModules.Contains(m.Path) { info := &modinfo.ModulePublic{ Path: m.Path, Version: m.Version, @@ -301,7 +301,7 @@ func moduleInfo(ctx context.Context, rs *Requirements, m module.Version, mode Li } else { panic("internal error: GoVersion not set for main module") } - if modRoot := MainModules.ModRoot(m); modRoot != "" { + if modRoot := LoaderState.MainModules.ModRoot(m); modRoot != "" { info.Dir = modRoot info.GoMod = modFilePath(modRoot) } diff --git a/src/cmd/go/internal/modload/buildlist.go b/src/cmd/go/internal/modload/buildlist.go index 2ba04f707b5472..8afea0b205c4d7 100644 --- a/src/cmd/go/internal/modload/buildlist.go +++ b/src/cmd/go/internal/modload/buildlist.go @@ -128,7 +128,7 @@ func newRequirements(pruning modPruning, rootModules []module.Version, direct ma panic("in workspace mode, but pruning is not workspace in newRequirements") } for i, m := range rootModules { - if m.Version == "" && MainModules.Contains(m.Path) { + if m.Version == "" && LoaderState.MainModules.Contains(m.Path) { panic(fmt.Sprintf("newRequirements called with untrimmed build list: rootModules[%v] is a main module", i)) } if m.Path == "" || m.Version == "" { @@ -174,7 +174,7 @@ func (rs *Requirements) String() string { // requirements. func (rs *Requirements) initVendor(vendorList []module.Version) { rs.graphOnce.Do(func() { - roots := MainModules.Versions() + roots := LoaderState.MainModules.Versions() if inWorkspaceMode() { // Use rs.rootModules to pull in the go and toolchain roots // from the go.work file and preserve the invariant that all @@ -186,7 +186,7 @@ func (rs *Requirements) initVendor(vendorList []module.Version) { } if rs.pruning == pruned { - mainModule := MainModules.mustGetSingleMainModule() + mainModule := LoaderState.MainModules.mustGetSingleMainModule() // The roots of a single pruned module should already include every module in the // vendor list, because the vendored modules are the same as those needed // for graph pruning. @@ -219,14 +219,14 @@ func (rs *Requirements) initVendor(vendorList []module.Version) { // dependencies. vendorMod := module.Version{Path: "vendor/modules.txt", Version: ""} if inWorkspaceMode() { - for _, m := range MainModules.Versions() { - reqs, _ := rootsFromModFile(m, MainModules.ModFile(m), omitToolchainRoot) + for _, m := range LoaderState.MainModules.Versions() { + reqs, _ := rootsFromModFile(m, LoaderState.MainModules.ModFile(m), omitToolchainRoot) mg.g.Require(m, append(reqs, vendorMod)) } mg.g.Require(vendorMod, vendorList) } else { - mainModule := MainModules.mustGetSingleMainModule() + mainModule := LoaderState.MainModules.mustGetSingleMainModule() mg.g.Require(mainModule, append(rs.rootModules, vendorMod)) mg.g.Require(vendorMod, vendorList) } @@ -249,7 +249,7 @@ func (rs *Requirements) GoVersion() string { // path, or the zero module.Version and ok=false if the module is not a root // dependency. func (rs *Requirements) rootSelected(path string) (version string, ok bool) { - if MainModules.Contains(path) { + if LoaderState.MainModules.Contains(path) { return "", true } if v, ok := rs.maxRootVersion[path]; ok { @@ -264,7 +264,7 @@ func (rs *Requirements) rootSelected(path string) (version string, ok bool) { // selection. 
func (rs *Requirements) hasRedundantRoot() bool { for i, m := range rs.rootModules { - if MainModules.Contains(m.Path) || (i > 0 && m.Path == rs.rootModules[i-1].Path) { + if LoaderState.MainModules.Contains(m.Path) || (i > 0 && m.Path == rs.rootModules[i-1].Path) { return true } } @@ -346,7 +346,7 @@ func readModGraph(ctx context.Context, pruning modPruning, roots []module.Versio if inWorkspaceMode() { graphRoots = roots } else { - graphRoots = MainModules.Versions() + graphRoots = LoaderState.MainModules.Versions() } var ( mu sync.Mutex // guards mg.g and hasError during loading @@ -360,7 +360,7 @@ func readModGraph(ctx context.Context, pruning modPruning, roots []module.Versio if inWorkspaceMode() { panic("pruning is not workspace in workspace mode") } - mg.g.Require(MainModules.mustGetSingleMainModule(), roots) + mg.g.Require(LoaderState.MainModules.mustGetSingleMainModule(), roots) } type dedupKey struct { @@ -540,9 +540,9 @@ func (mg *ModuleGraph) findError() error { func (mg *ModuleGraph) allRootsSelected() bool { var roots []module.Version if inWorkspaceMode() { - roots = MainModules.Versions() + roots = LoaderState.MainModules.Versions() } else { - roots, _ = mg.g.RequiredBy(MainModules.mustGetSingleMainModule()) + roots, _ = mg.g.RequiredBy(LoaderState.MainModules.mustGetSingleMainModule()) } for _, m := range roots { if mg.Selected(m.Path) != m.Version { @@ -776,7 +776,7 @@ func (c Conflict) String() string { // both retain the same versions of all packages in pkgs and satisfy the // graph-pruning invariants (if applicable). func tidyRoots(ctx context.Context, rs *Requirements, pkgs []*loadPkg) (*Requirements, error) { - mainModule := MainModules.mustGetSingleMainModule() + mainModule := LoaderState.MainModules.mustGetSingleMainModule() if rs.pruning == unpruned { return tidyUnprunedRoots(ctx, mainModule, rs, pkgs) } @@ -1168,7 +1168,7 @@ func updatePrunedRoots(ctx context.Context, direct map[string]bool, rs *Requirem roots = make([]module.Version, 0, len(rs.rootModules)) rootsUpgraded = false inRootPaths := make(map[string]bool, len(rs.rootModules)+1) - for _, mm := range MainModules.Versions() { + for _, mm := range LoaderState.MainModules.Versions() { inRootPaths[mm.Path] = true } for _, m := range rs.rootModules { @@ -1445,7 +1445,7 @@ func updateUnprunedRoots(ctx context.Context, direct map[string]bool, rs *Requir // This is only for convenience and clarity for end users: in an unpruned module, // the choice of explicit vs. implicit dependency has no impact on MVS // selection (for itself or any other module). - keep := append(mg.BuildList()[MainModules.Len():], add...) + keep := append(mg.BuildList()[LoaderState.MainModules.Len():], add...) for _, m := range keep { if direct[m.Path] && !inRootPaths[m.Path] { rootPaths = append(rootPaths, m.Path) @@ -1454,14 +1454,14 @@ func updateUnprunedRoots(ctx context.Context, direct map[string]bool, rs *Requir } var roots []module.Version - for _, mainModule := range MainModules.Versions() { + for _, mainModule := range LoaderState.MainModules.Versions() { min, err := mvs.Req(mainModule, rootPaths, &mvsReqs{roots: keep}) if err != nil { return rs, err } roots = append(roots, min...) 
} - if MainModules.Len() > 1 { + if LoaderState.MainModules.Len() > 1 { gover.ModSort(roots) } if rs.pruning == unpruned && slices.Equal(roots, rs.rootModules) && maps.Equal(direct, rs.direct) { @@ -1501,5 +1501,5 @@ func convertPruning(ctx context.Context, rs *Requirements, pruning modPruning) ( if err != nil { return rs, err } - return newRequirements(pruned, mg.BuildList()[MainModules.Len():], rs.direct), nil + return newRequirements(pruned, mg.BuildList()[LoaderState.MainModules.Len():], rs.direct), nil } diff --git a/src/cmd/go/internal/modload/edit.go b/src/cmd/go/internal/modload/edit.go index b406193dc5a673..153c21a90cc423 100644 --- a/src/cmd/go/internal/modload/edit.go +++ b/src/cmd/go/internal/modload/edit.go @@ -106,7 +106,7 @@ func editRequirements(ctx context.Context, rs *Requirements, tryUpgrade, mustSel // to begin with, so we can't edit those requirements in a coherent way. return orig, false, err } - bl := mg.BuildList()[MainModules.Len():] + bl := mg.BuildList()[LoaderState.MainModules.Len():] selectedRoot = make(map[string]string, len(bl)) for _, m := range bl { selectedRoot[m.Path] = m.Version @@ -513,7 +513,7 @@ func editRequirements(ctx context.Context, rs *Requirements, tryUpgrade, mustSel // The modules in mustSelect are always promoted to be explicit. for _, m := range mustSelect { - if m.Version != "none" && !MainModules.Contains(m.Path) { + if m.Version != "none" && !LoaderState.MainModules.Contains(m.Path) { rootPaths = append(rootPaths, m.Path) } } @@ -530,7 +530,7 @@ func editRequirements(ctx context.Context, rs *Requirements, tryUpgrade, mustSel } } - roots, err = mvs.Req(MainModules.mustGetSingleMainModule(), rootPaths, &mvsReqs{roots: roots}) + roots, err = mvs.Req(LoaderState.MainModules.mustGetSingleMainModule(), rootPaths, &mvsReqs{roots: roots}) if err != nil { return nil, false, err } diff --git a/src/cmd/go/internal/modload/import.go b/src/cmd/go/internal/modload/import.go index 171d9d692fbb82..83e7e037117a4d 100644 --- a/src/cmd/go/internal/modload/import.go +++ b/src/cmd/go/internal/modload/import.go @@ -82,8 +82,8 @@ func (e *ImportMissingError) Error() string { if e.QueryErr != nil { return fmt.Sprintf("%s: %v", message, e.QueryErr) } - if e.ImportingMainModule.Path != "" && e.ImportingMainModule != MainModules.ModContainingCWD() { - return fmt.Sprintf("%s; to add it:\n\tcd %s\n\tgo get %s", message, MainModules.ModRoot(e.ImportingMainModule), e.Path) + if e.ImportingMainModule.Path != "" && e.ImportingMainModule != LoaderState.MainModules.ModContainingCWD() { + return fmt.Sprintf("%s; to add it:\n\tcd %s\n\tgo get %s", message, LoaderState.MainModules.ModRoot(e.ImportingMainModule), e.Path) } return fmt.Sprintf("%s; to add it:\n\tgo get %s", message, e.Path) } @@ -299,12 +299,12 @@ func importFromModules(ctx context.Context, path string, rs *Requirements, mg *M // Is the package in the standard library? 
pathIsStd := search.IsStandardImportPath(path) if pathIsStd && modindex.IsStandardPackage(cfg.GOROOT, cfg.BuildContext.Compiler, path) { - for _, mainModule := range MainModules.Versions() { - if MainModules.InGorootSrc(mainModule) { - if dir, ok, err := dirInModule(path, MainModules.PathPrefix(mainModule), MainModules.ModRoot(mainModule), true); err != nil { - return module.Version{}, MainModules.ModRoot(mainModule), dir, nil, err + for _, mainModule := range LoaderState.MainModules.Versions() { + if LoaderState.MainModules.InGorootSrc(mainModule) { + if dir, ok, err := dirInModule(path, LoaderState.MainModules.PathPrefix(mainModule), LoaderState.MainModules.ModRoot(mainModule), true); err != nil { + return module.Version{}, LoaderState.MainModules.ModRoot(mainModule), dir, nil, err } else if ok { - return mainModule, MainModules.ModRoot(mainModule), dir, nil, nil + return mainModule, LoaderState.MainModules.ModRoot(mainModule), dir, nil, nil } } } @@ -321,10 +321,10 @@ func importFromModules(ctx context.Context, path string, rs *Requirements, mg *M // Everything must be in the main modules or the main module's or workspace's vendor directory. if cfg.BuildMod == "vendor" { var mainErr error - for _, mainModule := range MainModules.Versions() { - modRoot := MainModules.ModRoot(mainModule) + for _, mainModule := range LoaderState.MainModules.Versions() { + modRoot := LoaderState.MainModules.ModRoot(mainModule) if modRoot != "" { - dir, mainOK, err := dirInModule(path, MainModules.PathPrefix(mainModule), modRoot, true) + dir, mainOK, err := dirInModule(path, LoaderState.MainModules.PathPrefix(mainModule), modRoot, true) if mainErr == nil { mainErr = err } @@ -345,7 +345,7 @@ func importFromModules(ctx context.Context, path string, rs *Requirements, mg *M // vendor/modules.txt does not exist or the user manually added directories to the vendor directory. // Go 1.23 and later require vendored packages to be present in modules.txt to be imported. _, ok := vendorPkgModule[path] - if ok || (gover.Compare(MainModules.GoVersion(), gover.ExplicitModulesTxtImportVersion) < 0) { + if ok || (gover.Compare(LoaderState.MainModules.GoVersion(), gover.ExplicitModulesTxtImportVersion) < 0) { mods = append(mods, vendorPkgModule[path]) dirs = append(dirs, dir) roots = append(roots, vendorDir) @@ -471,7 +471,7 @@ func importFromModules(ctx context.Context, path string, rs *Requirements, mg *M // If the module graph is pruned and this is a test-only dependency // of a package in "all", we didn't necessarily load that file // when we read the module graph, so do it now to be sure. - if !skipModFile && cfg.BuildMod != "vendor" && mods[0].Path != "" && !MainModules.Contains(mods[0].Path) { + if !skipModFile && cfg.BuildMod != "vendor" && mods[0].Path != "" && !LoaderState.MainModules.Contains(mods[0].Path) { if _, err := goModSummary(mods[0]); err != nil { return module.Version{}, "", "", nil, err } @@ -511,8 +511,8 @@ func queryImport(ctx context.Context, path string, rs *Requirements) (module.Ver // To avoid spurious remote fetches, try the latest replacement for each // module (golang.org/issue/26241). var mods []module.Version - if MainModules != nil { // TODO(#48912): Ensure MainModules exists at this point, and remove the check. - for mp, mv := range MainModules.HighestReplaced() { + if LoaderState.MainModules != nil { // TODO(#48912): Ensure MainModules exists at this point, and remove the check. 
+ for mp, mv := range LoaderState.MainModules.HighestReplaced() { if !maybeInModule(path, mp) { continue } @@ -748,7 +748,7 @@ func dirInModule(path, mpath, mdir string, isLocal bool) (dir string, haveGoFile // The isLocal return value reports whether the replacement, // if any, is local to the filesystem. func fetch(ctx context.Context, mod module.Version) (dir string, isLocal bool, err error) { - if modRoot := MainModules.ModRoot(mod); modRoot != "" { + if modRoot := LoaderState.MainModules.ModRoot(mod); modRoot != "" { return modRoot, true, nil } if r := Replacement(mod); r.Path != "" { diff --git a/src/cmd/go/internal/modload/init.go b/src/cmd/go/internal/modload/init.go index dc0d78499b9cd0..28f55a621d60de 100644 --- a/src/cmd/go/internal/modload/init.go +++ b/src/cmd/go/internal/modload/init.go @@ -58,7 +58,7 @@ var ( // EnterModule resets MainModules and requirements to refer to just this one module. func EnterModule(ctx context.Context, enterModroot string) { - MainModules = nil // reset MainModules + LoaderState.MainModules = nil // reset MainModules requirements = nil workFilePath = "" // Force module mode modfetch.Reset() @@ -73,7 +73,7 @@ func EnterModule(ctx context.Context, enterModroot string) { // EnterWorkspace will modify the global state they depend on in a non-thread-safe way. func EnterWorkspace(ctx context.Context) (exit func(), err error) { // Find the identity of the main module that will be updated before we reset modload state. - mm := MainModules.mustGetSingleMainModule() + mm := LoaderState.MainModules.mustGetSingleMainModule() // Get the updated modfile we will use for that module. _, _, updatedmodfile, err := UpdateGoModFromReqs(ctx, WriteOpts{}) if err != nil { @@ -89,8 +89,8 @@ func EnterWorkspace(ctx context.Context) (exit func(), err error) { LoadModFile(ctx) // Update the content of the previous main module, and recompute the requirements. - *MainModules.ModFile(mm) = *updatedmodfile - requirements = requirementsFromModFiles(ctx, MainModules.workFile, slices.Collect(maps.Values(MainModules.modFiles)), nil) + *LoaderState.MainModules.ModFile(mm) = *updatedmodfile + requirements = requirementsFromModFiles(ctx, LoaderState.MainModules.workFile, slices.Collect(maps.Values(LoaderState.MainModules.modFiles)), nil) return func() { setState(oldstate) @@ -294,8 +294,6 @@ func (mms *MainModuleSet) WorkFileReplaceMap() map[module.Version]module.Version return mms.workFileReplaceMap } -var MainModules *MainModuleSet - type Root int const ( @@ -324,7 +322,7 @@ const ( // in go.mod, edit it before loading. 
func ModFile() *modfile.File { Init() - modFile := MainModules.ModFile(MainModules.mustGetSingleMainModule()) + modFile := LoaderState.MainModules.ModFile(LoaderState.MainModules.mustGetSingleMainModule()) if modFile == nil { die() } @@ -396,7 +394,7 @@ func setState(s State) State { RootMode: LoaderState.RootMode, modRoots: LoaderState.modRoots, modulesEnabled: cfg.ModulesEnabled, - mainModules: MainModules, + MainModules: LoaderState.MainModules, requirements: requirements, } LoaderState.initialized = s.initialized @@ -404,7 +402,7 @@ func setState(s State) State { LoaderState.RootMode = s.RootMode LoaderState.modRoots = s.modRoots cfg.ModulesEnabled = s.modulesEnabled - MainModules = s.mainModules + LoaderState.MainModules = s.MainModules requirements = s.requirements workFilePath = s.workFilePath // The modfetch package's global state is used to compute @@ -431,7 +429,7 @@ type State struct { // modRoots != nil implies len(modRoots) > 0 modRoots []string modulesEnabled bool - mainModules *MainModuleSet + MainModules *MainModuleSet requirements *Requirements workFilePath string modfetchState modfetch.State @@ -628,7 +626,7 @@ func VendorDir() string { // Even if -mod=vendor, we could be operating with no mod root (and thus no // vendor directory). As long as there are no dependencies that is expected // to work. See script/vendor_outside_module.txt. - modRoot := MainModules.ModRoot(MainModules.mustGetSingleMainModule()) + modRoot := LoaderState.MainModules.ModRoot(LoaderState.MainModules.mustGetSingleMainModule()) if modRoot == "" { panic("vendor directory does not exist when in single module mode outside of a module") } @@ -914,7 +912,7 @@ func loadModFile(ctx context.Context, opts *PackageOpts) (*Requirements, error) // make MainModules.Len() == 0 mean that we're in module mode but not inside // any module. mainModule := module.Version{Path: "command-line-arguments"} - MainModules = makeMainModules([]module.Version{mainModule}, []string{""}, []*modfile.File{nil}, []*modFileIndex{nil}, nil) + LoaderState.MainModules = makeMainModules([]module.Version{mainModule}, []string{""}, []*modfile.File{nil}, []*modFileIndex{nil}, nil) var ( goVersion string pruning modPruning @@ -925,7 +923,7 @@ func loadModFile(ctx context.Context, opts *PackageOpts) (*Requirements, error) // Since we are in a workspace, the Go version for the synthetic // "command-line-arguments" module must not exceed the Go version // for the workspace. - goVersion = MainModules.GoVersion() + goVersion = LoaderState.MainModules.GoVersion() pruning = workspace roots = []module.Version{ mainModule, @@ -1016,20 +1014,20 @@ func loadModFile(ctx context.Context, opts *PackageOpts) (*Requirements, error) return nil, errors.Join(errs...) 
} - MainModules = makeMainModules(mainModules, LoaderState.modRoots, modFiles, indices, workFile) + LoaderState.MainModules = makeMainModules(mainModules, LoaderState.modRoots, modFiles, indices, workFile) setDefaultBuildMod() // possibly enable automatic vendoring rs := requirementsFromModFiles(ctx, workFile, modFiles, opts) if cfg.BuildMod == "vendor" { readVendorList(VendorDir()) - versions := MainModules.Versions() + versions := LoaderState.MainModules.Versions() indexes := make([]*modFileIndex, 0, len(versions)) modFiles := make([]*modfile.File, 0, len(versions)) modRoots := make([]string, 0, len(versions)) for _, m := range versions { - indexes = append(indexes, MainModules.Index(m)) - modFiles = append(modFiles, MainModules.ModFile(m)) - modRoots = append(modRoots, MainModules.ModRoot(m)) + indexes = append(indexes, LoaderState.MainModules.Index(m)) + modFiles = append(modFiles, LoaderState.MainModules.ModFile(m)) + modRoots = append(modRoots, LoaderState.MainModules.ModRoot(m)) } checkVendorConsistency(indexes, modFiles, modRoots) rs.initVendor(vendorList) @@ -1041,7 +1039,7 @@ func loadModFile(ctx context.Context, opts *PackageOpts) (*Requirements, error) return rs, nil } - mainModule := MainModules.mustGetSingleMainModule() + mainModule := LoaderState.MainModules.mustGetSingleMainModule() if rs.hasRedundantRoot() { // If any module path appears more than once in the roots, we know that the @@ -1054,7 +1052,7 @@ func loadModFile(ctx context.Context, opts *PackageOpts) (*Requirements, error) } } - if MainModules.Index(mainModule).goVersion == "" && rs.pruning != workspace { + if LoaderState.MainModules.Index(mainModule).goVersion == "" && rs.pruning != workspace { // TODO(#45551): Do something more principled instead of checking // cfg.CmdName directly here. if cfg.BuildMod == "mod" && cfg.CmdName != "mod graph" && cfg.CmdName != "mod why" { @@ -1063,7 +1061,7 @@ func loadModFile(ctx context.Context, opts *PackageOpts) (*Requirements, error) if opts != nil && opts.TidyGoVersion != "" { v = opts.TidyGoVersion } - addGoStmt(MainModules.ModFile(mainModule), mainModule, v) + addGoStmt(LoaderState.MainModules.ModFile(mainModule), mainModule, v) rs = overrideRoots(ctx, rs, []module.Version{{Path: "go", Version: v}}) // We need to add a 'go' version to the go.mod file, but we must assume @@ -1156,7 +1154,7 @@ func CreateModFile(ctx context.Context, modPath string) { fmt.Fprintf(os.Stderr, "go: creating new go.mod: module %s\n", modPath) modFile := new(modfile.File) modFile.AddModuleStmt(modPath) - MainModules = makeMainModules([]module.Version{modFile.Module.Mod}, []string{modRoot}, []*modfile.File{modFile}, []*modFileIndex{nil}, nil) + LoaderState.MainModules = makeMainModules([]module.Version{modFile.Module.Mod}, []string{modRoot}, []*modfile.File{modFile}, []*modFileIndex{nil}, nil) addGoStmt(modFile, modFile.Module.Mod, gover.Local()) // Add the go directive before converted module requirements. 
rs := requirementsFromModFiles(ctx, nil, []*modfile.File{modFile}, nil) @@ -1380,8 +1378,8 @@ func requirementsFromModFiles(ctx context.Context, workFile *modfile.WorkFile, m var pruning modPruning if inWorkspaceMode() { pruning = workspace - roots = make([]module.Version, len(MainModules.Versions()), 2+len(MainModules.Versions())) - copy(roots, MainModules.Versions()) + roots = make([]module.Version, len(LoaderState.MainModules.Versions()), 2+len(LoaderState.MainModules.Versions())) + copy(roots, LoaderState.MainModules.Versions()) goVersion := gover.FromGoWork(workFile) var toolchain string if workFile.Toolchain != nil { @@ -1390,12 +1388,12 @@ func requirementsFromModFiles(ctx context.Context, workFile *modfile.WorkFile, m roots = appendGoAndToolchainRoots(roots, goVersion, toolchain, direct) direct = directRequirements(modFiles) } else { - pruning = pruningForGoVersion(MainModules.GoVersion()) + pruning = pruningForGoVersion(LoaderState.MainModules.GoVersion()) if len(modFiles) != 1 { panic(fmt.Errorf("requirementsFromModFiles called with %v modfiles outside workspace mode", len(modFiles))) } modFile := modFiles[0] - roots, direct = rootsFromModFile(MainModules.mustGetSingleMainModule(), modFile, withToolchainRoot) + roots, direct = rootsFromModFile(LoaderState.MainModules.mustGetSingleMainModule(), modFile, withToolchainRoot) } gover.ModSort(roots) @@ -1430,7 +1428,7 @@ func rootsFromModFile(m module.Version, modFile *modfile.File, addToolchainRoot } roots = make([]module.Version, 0, padding+len(modFile.Require)) for _, r := range modFile.Require { - if index := MainModules.Index(m); index != nil && index.exclude[r.Mod] { + if index := LoaderState.MainModules.Index(m); index != nil && index.exclude[r.Mod] { if cfg.BuildMod == "mod" { fmt.Fprintf(os.Stderr, "go: dropping requirement on excluded version %s %s\n", r.Mod.Path, r.Mod.Version) } else { @@ -1522,12 +1520,12 @@ func setDefaultBuildMod() { var versionSource string if inWorkspaceMode() { versionSource = "go.work" - if wfg := MainModules.WorkFile().Go; wfg != nil { + if wfg := LoaderState.MainModules.WorkFile().Go; wfg != nil { goVersion = wfg.Version } } else { versionSource = "go.mod" - index := MainModules.GetSingleIndexOrNil() + index := LoaderState.MainModules.GetSingleIndexOrNil() if index != nil { goVersion = index.goVersion } @@ -1812,12 +1810,12 @@ var errNoChange = errors.New("no update needed") // UpdateGoModFromReqs returns a modified go.mod file using the current // requirements. It does not commit these changes to disk. func UpdateGoModFromReqs(ctx context.Context, opts WriteOpts) (before, after []byte, modFile *modfile.File, err error) { - if MainModules.Len() != 1 || MainModules.ModRoot(MainModules.Versions()[0]) == "" { + if LoaderState.MainModules.Len() != 1 || LoaderState.MainModules.ModRoot(LoaderState.MainModules.Versions()[0]) == "" { // We aren't in a module, so we don't have anywhere to write a go.mod file. return nil, nil, nil, errNoChange } - mainModule := MainModules.mustGetSingleMainModule() - modFile = MainModules.ModFile(mainModule) + mainModule := LoaderState.MainModules.mustGetSingleMainModule() + modFile = LoaderState.MainModules.ModFile(mainModule) if modFile == nil { // command-line-arguments has no .mod file to write. 
return nil, nil, nil, errNoChange @@ -1925,7 +1923,7 @@ func commitRequirements(ctx context.Context, opts WriteOpts) (err error) { return err } - index := MainModules.GetSingleIndexOrNil() + index := LoaderState.MainModules.GetSingleIndexOrNil() dirty := index.modFileIsDirty(modFile) || len(opts.DropTools) > 0 || len(opts.AddTools) > 0 if dirty && cfg.BuildMod != "mod" { // If we're about to fail due to -mod=readonly, @@ -1946,8 +1944,8 @@ func commitRequirements(ctx context.Context, opts WriteOpts) (err error) { return nil } - mainModule := MainModules.mustGetSingleMainModule() - modFilePath := modFilePath(MainModules.ModRoot(mainModule)) + mainModule := LoaderState.MainModules.mustGetSingleMainModule() + modFilePath := modFilePath(LoaderState.MainModules.ModRoot(mainModule)) if fsys.Replaced(modFilePath) { if dirty { return errors.New("updates to go.mod needed, but go.mod is part of the overlay specified with -overlay") @@ -1956,7 +1954,7 @@ func commitRequirements(ctx context.Context, opts WriteOpts) (err error) { } defer func() { // At this point we have determined to make the go.mod file on disk equal to new. - MainModules.SetIndex(mainModule, indexModFile(updatedGoMod, modFile, mainModule, false)) + LoaderState.MainModules.SetIndex(mainModule, indexModFile(updatedGoMod, modFile, mainModule, false)) // Update go.sum after releasing the side lock and refreshing the index. // 'go mod init' shouldn't write go.sum, since it will be incomplete. @@ -2018,7 +2016,7 @@ func keepSums(ctx context.Context, ld *loader, rs *Requirements, which whichSums // ambiguous import errors the next time we load the package. keepModSumsForZipSums := true if ld == nil { - if gover.Compare(MainModules.GoVersion(), gover.TidyGoModSumVersion) < 0 && cfg.BuildMod != "mod" { + if gover.Compare(LoaderState.MainModules.GoVersion(), gover.TidyGoModSumVersion) < 0 && cfg.BuildMod != "mod" { keepModSumsForZipSums = false } } else { diff --git a/src/cmd/go/internal/modload/list.go b/src/cmd/go/internal/modload/list.go index 53cb6c2ffe1406..b2d071dcf69723 100644 --- a/src/cmd/go/internal/modload/list.go +++ b/src/cmd/go/internal/modload/list.go @@ -126,7 +126,7 @@ func ListModules(ctx context.Context, args []string, mode ListMode, reuseFile st func listModules(ctx context.Context, rs *Requirements, args []string, mode ListMode, reuse map[module.Version]*modinfo.ModulePublic) (_ *Requirements, mods []*modinfo.ModulePublic, mgErr error) { if len(args) == 0 { var ms []*modinfo.ModulePublic - for _, m := range MainModules.Versions() { + for _, m := range LoaderState.MainModules.Versions() { if gover.IsToolchain(m.Path) { continue } diff --git a/src/cmd/go/internal/modload/load.go b/src/cmd/go/internal/modload/load.go index 7fba712f952e2a..54e1da902cfd00 100644 --- a/src/cmd/go/internal/modload/load.go +++ b/src/cmd/go/internal/modload/load.go @@ -273,7 +273,7 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma if m.Dirs == nil { matchModRoots := LoaderState.modRoots if opts.MainModule != (module.Version{}) { - matchModRoots = []string{MainModules.ModRoot(opts.MainModule)} + matchModRoots = []string{LoaderState.MainModules.ModRoot(opts.MainModule)} } matchLocalDirs(ctx, matchModRoots, m, rs) } @@ -324,7 +324,7 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma matchPackages(ctx, m, opts.Tags, includeStd, mg.BuildList()) case m.Pattern() == "work": - matchModules := MainModules.Versions() + matchModules := LoaderState.MainModules.Versions() if opts.MainModule != 
(module.Version{}) { matchModules = []module.Version{opts.MainModule} } @@ -335,12 +335,12 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma // The initial roots are the packages and tools in the main module. // loadFromRoots will expand that to "all". m.Errs = m.Errs[:0] - matchModules := MainModules.Versions() + matchModules := LoaderState.MainModules.Versions() if opts.MainModule != (module.Version{}) { matchModules = []module.Version{opts.MainModule} } matchPackages(ctx, m, opts.Tags, omitStd, matchModules) - for tool := range MainModules.Tools() { + for tool := range LoaderState.MainModules.Tools() { m.Pkgs = append(m.Pkgs, tool) } } else { @@ -355,7 +355,7 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma } case m.Pattern() == "tool": - for tool := range MainModules.Tools() { + for tool := range LoaderState.MainModules.Tools() { m.Pkgs = append(m.Pkgs, tool) } default: @@ -596,13 +596,13 @@ func resolveLocalPackage(ctx context.Context, dir string, rs *Requirements) (str } } - for _, mod := range MainModules.Versions() { - modRoot := MainModules.ModRoot(mod) + for _, mod := range LoaderState.MainModules.Versions() { + modRoot := LoaderState.MainModules.ModRoot(mod) if modRoot != "" && absDir == modRoot { if absDir == cfg.GOROOTsrc { return "", errPkgIsGorootSrc } - return MainModules.PathPrefix(mod), nil + return LoaderState.MainModules.PathPrefix(mod), nil } } @@ -611,8 +611,8 @@ func resolveLocalPackage(ctx context.Context, dir string, rs *Requirements) (str // It's not strictly necessary but helpful to keep the checks. var pkgNotFoundErr error pkgNotFoundLongestPrefix := "" - for _, mainModule := range MainModules.Versions() { - modRoot := MainModules.ModRoot(mainModule) + for _, mainModule := range LoaderState.MainModules.Versions() { + modRoot := LoaderState.MainModules.ModRoot(mainModule) if modRoot != "" && str.HasFilePathPrefix(absDir, modRoot) && !strings.Contains(absDir[len(modRoot):], "@") { suffix := filepath.ToSlash(str.TrimFilePathPrefix(absDir, modRoot)) if pkg, found := strings.CutPrefix(suffix, "vendor/"); found { @@ -627,7 +627,7 @@ func resolveLocalPackage(ctx context.Context, dir string, rs *Requirements) (str return pkg, nil } - mainModulePrefix := MainModules.PathPrefix(mainModule) + mainModulePrefix := LoaderState.MainModules.PathPrefix(mainModule) if mainModulePrefix == "" { pkg := suffix if pkg == "builtin" { @@ -820,7 +820,7 @@ func (mms *MainModuleSet) DirImportPath(ctx context.Context, dir string) (path s return mms.PathPrefix(v), v } if str.HasFilePathPrefix(dir, modRoot) { - pathPrefix := MainModules.PathPrefix(v) + pathPrefix := LoaderState.MainModules.PathPrefix(v) if pathPrefix > longestPrefix { longestPrefix = pathPrefix longestPrefixVersion = v @@ -1068,7 +1068,7 @@ func (pkg *loadPkg) fromExternalModule() bool { if pkg.mod.Path == "" { return false // loaded from the standard library, not a module } - return !MainModules.Contains(pkg.mod.Path) + return !LoaderState.MainModules.Contains(pkg.mod.Path) } var errMissing = errors.New("cannot find package") @@ -1390,7 +1390,7 @@ func (ld *loader) updateRequirements(ctx context.Context) (changed bool, err err } } } - if pkg.mod.Version != "" || !MainModules.Contains(pkg.mod.Path) { + if pkg.mod.Version != "" || !LoaderState.MainModules.Contains(pkg.mod.Path) { continue } @@ -1587,7 +1587,7 @@ func (ld *loader) resolveMissingImports(ctx context.Context) (modAddedBy map[mod var ime *ImportMissingError if errors.As(err, &ime) { for curstack 
:= pkg.stack; curstack != nil; curstack = curstack.stack { - if MainModules.Contains(curstack.mod.Path) { + if LoaderState.MainModules.Contains(curstack.mod.Path) { ime.ImportingMainModule = curstack.mod break } @@ -1709,7 +1709,7 @@ func (ld *loader) applyPkgFlags(ctx context.Context, pkg *loadPkg, flags loadPkg // so it's ok if we call it more than is strictly necessary. wantTest := false switch { - case ld.allPatternIsRoot && MainModules.Contains(pkg.mod.Path): + case ld.allPatternIsRoot && LoaderState.MainModules.Contains(pkg.mod.Path): // We are loading the "all" pattern, which includes packages imported by // tests in the main module. This package is in the main module, so we // need to identify the imports of its test even if LoadTests is not set. @@ -1730,7 +1730,7 @@ func (ld *loader) applyPkgFlags(ctx context.Context, pkg *loadPkg, flags loadPkg if wantTest { var testFlags loadPkgFlags - if MainModules.Contains(pkg.mod.Path) || (ld.allClosesOverTests && new.has(pkgInAll)) { + if LoaderState.MainModules.Contains(pkg.mod.Path) || (ld.allClosesOverTests && new.has(pkgInAll)) { // Tests of packages in the main module are in "all", in the sense that // they cause the packages they import to also be in "all". So are tests // of packages in "all" if "all" closes over test dependencies. @@ -1858,7 +1858,7 @@ func (ld *loader) load(ctx context.Context, pkg *loadPkg) { var modroot string pkg.mod, modroot, pkg.dir, pkg.altMods, pkg.err = importFromModules(ctx, pkg.path, ld.requirements, mg, ld.skipImportModFiles) - if MainModules.Tools()[pkg.path] { + if LoaderState.MainModules.Tools()[pkg.path] { // Tools declared by main modules are always in "all". // We apply the package flags before returning so that missing // tool dependencies report an error https://go.dev/issue/70582 @@ -1867,7 +1867,7 @@ func (ld *loader) load(ctx context.Context, pkg *loadPkg) { if pkg.dir == "" { return } - if MainModules.Contains(pkg.mod.Path) { + if LoaderState.MainModules.Contains(pkg.mod.Path) { // Go ahead and mark pkg as in "all". This provides the invariant that a // package that is *only* imported by other packages in "all" is always // marked as such before loading its imports. @@ -1975,14 +1975,14 @@ func (ld *loader) stdVendor(parentPath, path string) string { } if str.HasPathPrefix(parentPath, "cmd") { - if !ld.VendorModulesInGOROOTSrc || !MainModules.Contains("cmd") { + if !ld.VendorModulesInGOROOTSrc || !LoaderState.MainModules.Contains("cmd") { vendorPath := pathpkg.Join("cmd", "vendor", path) if _, err := os.Stat(filepath.Join(cfg.GOROOTsrc, filepath.FromSlash(vendorPath))); err == nil { return vendorPath } } - } else if !ld.VendorModulesInGOROOTSrc || !MainModules.Contains("std") || str.HasPathPrefix(parentPath, "vendor") { + } else if !ld.VendorModulesInGOROOTSrc || !LoaderState.MainModules.Contains("std") || str.HasPathPrefix(parentPath, "vendor") { // If we are outside of the 'std' module, resolve imports from within 'std' // to the vendor directory. 
// @@ -2067,7 +2067,7 @@ func (ld *loader) checkTidyCompatibility(ctx context.Context, rs *Requirements, fmt.Fprintln(os.Stderr) goFlag := "" - if goVersion != MainModules.GoVersion() { + if goVersion != LoaderState.MainModules.GoVersion() { goFlag = " -go=" + goVersion } diff --git a/src/cmd/go/internal/modload/modfile.go b/src/cmd/go/internal/modload/modfile.go index 04e204cc984c59..6b432ed9637ec5 100644 --- a/src/cmd/go/internal/modload/modfile.go +++ b/src/cmd/go/internal/modload/modfile.go @@ -156,8 +156,8 @@ var ErrDisallowed = errors.New("disallowed module version") // CheckExclusions returns an error equivalent to ErrDisallowed if module m is // excluded by the main module's go.mod file. func CheckExclusions(ctx context.Context, m module.Version) error { - for _, mainModule := range MainModules.Versions() { - if index := MainModules.Index(mainModule); index != nil && index.exclude[m] { + for _, mainModule := range LoaderState.MainModules.Versions() { + if index := LoaderState.MainModules.Index(mainModule); index != nil && index.exclude[m] { return module.VersionError(m, errExcluded) } } @@ -349,19 +349,19 @@ func Replacement(mod module.Version) module.Version { // and the source of the replacement. The replacement is relative to the go.work or go.mod file it appears in. func replacementFrom(mod module.Version) (r module.Version, modroot string, fromFile string) { foundFrom, found, foundModRoot := "", module.Version{}, "" - if MainModules == nil { + if LoaderState.MainModules == nil { return module.Version{}, "", "" - } else if MainModules.Contains(mod.Path) && mod.Version == "" { + } else if LoaderState.MainModules.Contains(mod.Path) && mod.Version == "" { // Don't replace the workspace version of the main module. return module.Version{}, "", "" } - if _, r, ok := replacement(mod, MainModules.WorkFileReplaceMap()); ok { + if _, r, ok := replacement(mod, LoaderState.MainModules.WorkFileReplaceMap()); ok { return r, "", workFilePath } - for _, v := range MainModules.Versions() { - if index := MainModules.Index(v); index != nil { + for _, v := range LoaderState.MainModules.Versions() { + if index := LoaderState.MainModules.Index(v); index != nil { if from, r, ok := replacement(mod, index.replace); ok { - modRoot := MainModules.ModRoot(v) + modRoot := LoaderState.MainModules.ModRoot(v) if foundModRoot != "" && foundFrom != from && found != r { base.Errorf("conflicting replacements found for %v in workspace modules defined by %v and %v", mod, modFilePath(foundModRoot), modFilePath(modRoot)) @@ -378,7 +378,7 @@ func replaceRelativeTo() string { if workFilePath := WorkFilePath(); workFilePath != "" { return filepath.Dir(workFilePath) } - return MainModules.ModRoot(MainModules.mustGetSingleMainModule()) + return LoaderState.MainModules.ModRoot(LoaderState.MainModules.mustGetSingleMainModule()) } // canonicalizeReplacePath ensures that relative, on-disk, replaced module paths @@ -572,7 +572,7 @@ type retraction struct { // // The caller must not modify the returned summary. 
func goModSummary(m module.Version) (*modFileSummary, error) { - if m.Version == "" && !inWorkspaceMode() && MainModules.Contains(m.Path) { + if m.Version == "" && !inWorkspaceMode() && LoaderState.MainModules.Contains(m.Path) { panic("internal error: goModSummary called on a main module") } if gover.IsToolchain(m.Path) { @@ -639,8 +639,8 @@ func goModSummary(m module.Version) (*modFileSummary, error) { } } - for _, mainModule := range MainModules.Versions() { - if index := MainModules.Index(mainModule); index != nil && len(index.exclude) > 0 { + for _, mainModule := range LoaderState.MainModules.Versions() { + if index := LoaderState.MainModules.Index(mainModule); index != nil && len(index.exclude) > 0 { // Drop any requirements on excluded versions. // Don't modify the cached summary though, since we might need the raw // summary separately. @@ -684,7 +684,7 @@ func rawGoModSummary(m module.Version) (*modFileSummary, error) { } return &modFileSummary{module: m}, nil } - if m.Version == "" && !inWorkspaceMode() && MainModules.Contains(m.Path) { + if m.Version == "" && !inWorkspaceMode() && LoaderState.MainModules.Contains(m.Path) { // Calling rawGoModSummary implies that we are treating m as a module whose // requirements aren't the roots of the module graph and can't be modified. // @@ -697,13 +697,13 @@ func rawGoModSummary(m module.Version) (*modFileSummary, error) { // If there are no modules in the workspace, we synthesize an empty // command-line-arguments module, which rawGoModData cannot read a go.mod for. return &modFileSummary{module: m}, nil - } else if m.Version == "" && inWorkspaceMode() && MainModules.Contains(m.Path) { + } else if m.Version == "" && inWorkspaceMode() && LoaderState.MainModules.Contains(m.Path) { // When go get uses EnterWorkspace to check that the workspace loads properly, // it will update the contents of the workspace module's modfile in memory. To use the updated // contents of the modfile when doing the load, don't read from disk and instead // recompute a summary using the updated contents of the modfile. - if mf := MainModules.ModFile(m); mf != nil { - return summaryFromModFile(m, MainModules.modFiles[m]) + if mf := LoaderState.MainModules.ModFile(m); mf != nil { + return summaryFromModFile(m, LoaderState.MainModules.modFiles[m]) } } return rawGoModSummaryCache.Do(m, func() (*modFileSummary, error) { @@ -783,8 +783,8 @@ func rawGoModData(m module.Version) (name string, data []byte, err error) { if m.Version == "" { dir := m.Path if !filepath.IsAbs(dir) { - if inWorkspaceMode() && MainModules.Contains(m.Path) { - dir = MainModules.ModRoot(m) + if inWorkspaceMode() && LoaderState.MainModules.Contains(m.Path) { + dir = LoaderState.MainModules.ModRoot(m) } else { // m is a replacement module with only a file path. dir = filepath.Join(replaceRelativeTo(), dir) diff --git a/src/cmd/go/internal/modload/mvs.go b/src/cmd/go/internal/modload/mvs.go index 8ae2dbff1e8887..97e6fe44dd7545 100644 --- a/src/cmd/go/internal/modload/mvs.go +++ b/src/cmd/go/internal/modload/mvs.go @@ -43,7 +43,7 @@ type mvsReqs struct { } func (r *mvsReqs) Required(mod module.Version) ([]module.Version, error) { - if mod.Version == "" && MainModules.Contains(mod.Path) { + if mod.Version == "" && LoaderState.MainModules.Contains(mod.Path) { // Use the build list as it existed when r was constructed, not the current // global build list. 
return r.roots, nil @@ -112,7 +112,7 @@ func versions(ctx context.Context, path string, allowed AllowedFunc) (versions [ // Since the version of a main module is not found in the version list, // it has no previous version. func previousVersion(ctx context.Context, m module.Version) (module.Version, error) { - if m.Version == "" && MainModules.Contains(m.Path) { + if m.Version == "" && LoaderState.MainModules.Contains(m.Path) { return module.Version{Path: m.Path, Version: "none"}, nil } diff --git a/src/cmd/go/internal/modload/query.go b/src/cmd/go/internal/modload/query.go index 65934b0d69e517..94ee8bc955918c 100644 --- a/src/cmd/go/internal/modload/query.go +++ b/src/cmd/go/internal/modload/query.go @@ -211,7 +211,7 @@ func queryProxy(ctx context.Context, proxy, path, query, current string, allowed allowed = func(context.Context, module.Version) error { return nil } } - if MainModules.Contains(path) && (query == "upgrade" || query == "patch") { + if LoaderState.MainModules.Contains(path) && (query == "upgrade" || query == "patch") { m := module.Version{Path: path} if err := allowed(ctx, m); err != nil { return nil, fmt.Errorf("internal error: main module version is not allowed: %w", err) @@ -700,8 +700,8 @@ func QueryPattern(ctx context.Context, pattern, query string, current func(strin match = func(mod module.Version, roots []string, isLocal bool) *search.Match { m := search.NewMatch(pattern) prefix := mod.Path - if MainModules.Contains(mod.Path) { - prefix = MainModules.PathPrefix(module.Version{Path: mod.Path}) + if LoaderState.MainModules.Contains(mod.Path) { + prefix = LoaderState.MainModules.PathPrefix(module.Version{Path: mod.Path}) } for _, root := range roots { if _, ok, err := dirInModule(pattern, prefix, root, isLocal); err != nil { @@ -715,7 +715,7 @@ func QueryPattern(ctx context.Context, pattern, query string, current func(strin } var mainModuleMatches []module.Version - for _, mainModule := range MainModules.Versions() { + for _, mainModule := range LoaderState.MainModules.Versions() { m := match(mainModule, LoaderState.modRoots, true) if len(m.Pkgs) > 0 { if query != "upgrade" && query != "patch" { @@ -842,7 +842,7 @@ func modulePrefixesExcludingTarget(path string) []string { prefixes := make([]string, 0, strings.Count(path, "/")+1) mainModulePrefixes := make(map[string]bool) - for _, m := range MainModules.Versions() { + for _, m := range LoaderState.MainModules.Versions() { mainModulePrefixes[m.Path] = true } @@ -905,7 +905,7 @@ func queryPrefixModules(ctx context.Context, candidateModules []string, queryMod case *PackageNotInModuleError: // Given the option, prefer to attribute “package not in module” // to modules other than the main one. 
- if noPackage == nil || MainModules.Contains(noPackage.Mod.Path) { + if noPackage == nil || LoaderState.MainModules.Contains(noPackage.Mod.Path) { noPackage = rErr } case *NoMatchingVersionError: @@ -1127,9 +1127,9 @@ func lookupRepo(ctx context.Context, proxy, path string) (repo versionRepo, err repo = emptyRepo{path: path, err: err} } - if MainModules == nil { + if LoaderState.MainModules == nil { return repo, err - } else if _, ok := MainModules.HighestReplaced()[path]; ok { + } else if _, ok := LoaderState.MainModules.HighestReplaced()[path]; ok { return &replacementRepo{repo: repo}, nil } @@ -1186,8 +1186,8 @@ func (rr *replacementRepo) Versions(ctx context.Context, prefix string) (*modfet } versions := repoVersions.List - for _, mm := range MainModules.Versions() { - if index := MainModules.Index(mm); index != nil && len(index.replace) > 0 { + for _, mm := range LoaderState.MainModules.Versions() { + if index := LoaderState.MainModules.Index(mm); index != nil && len(index.replace) > 0 { path := rr.ModulePath() for m := range index.replace { if m.Path == path && strings.HasPrefix(m.Version, prefix) && m.Version != "" && !module.IsPseudoVersion(m.Version) { @@ -1215,8 +1215,8 @@ func (rr *replacementRepo) Stat(ctx context.Context, rev string) (*modfetch.RevI return info, err } var hasReplacements bool - for _, v := range MainModules.Versions() { - if index := MainModules.Index(v); index != nil && len(index.replace) > 0 { + for _, v := range LoaderState.MainModules.Versions() { + if index := LoaderState.MainModules.Index(v); index != nil && len(index.replace) > 0 { hasReplacements = true } } @@ -1249,7 +1249,7 @@ func (rr *replacementRepo) Latest(ctx context.Context) (*modfetch.RevInfo, error info, err := rr.repo.Latest(ctx) path := rr.ModulePath() - if v, ok := MainModules.HighestReplaced()[path]; ok { + if v, ok := LoaderState.MainModules.HighestReplaced()[path]; ok { if v == "" { // The only replacement is a wildcard that doesn't specify a version, so // synthesize a pseudo-version with an appropriate major version and a @@ -1290,7 +1290,7 @@ type QueryMatchesMainModulesError struct { } func (e *QueryMatchesMainModulesError) Error() string { - if MainModules.Contains(e.Pattern) { + if LoaderState.MainModules.Contains(e.Pattern) { return fmt.Sprintf("can't request version %q of the main module (%s)", e.Query, e.Pattern) } diff --git a/src/cmd/go/internal/modload/search.go b/src/cmd/go/internal/modload/search.go index 9ff9738e281118..205db3e8f7c8db 100644 --- a/src/cmd/go/internal/modload/search.go +++ b/src/cmd/go/internal/modload/search.go @@ -171,9 +171,9 @@ func matchPackages(ctx context.Context, m *search.Match, tags map[string]bool, f } if cfg.BuildMod == "vendor" { - for _, mod := range MainModules.Versions() { - if modRoot := MainModules.ModRoot(mod); modRoot != "" { - walkPkgs(modRoot, MainModules.PathPrefix(mod), pruneGoMod|pruneVendor) + for _, mod := range LoaderState.MainModules.Versions() { + if modRoot := LoaderState.MainModules.ModRoot(mod); modRoot != "" { + walkPkgs(modRoot, LoaderState.MainModules.PathPrefix(mod), pruneGoMod|pruneVendor) } } if HasModRoot() { @@ -191,12 +191,12 @@ func matchPackages(ctx context.Context, m *search.Match, tags map[string]bool, f root, modPrefix string isLocal bool ) - if MainModules.Contains(mod.Path) { - if MainModules.ModRoot(mod) == "" { + if LoaderState.MainModules.Contains(mod.Path) { + if LoaderState.MainModules.ModRoot(mod) == "" { continue // If there is no main module, we can't search in it. 
} - root = MainModules.ModRoot(mod) - modPrefix = MainModules.PathPrefix(mod) + root = LoaderState.MainModules.ModRoot(mod) + modPrefix = LoaderState.MainModules.PathPrefix(mod) isLocal = true } else { var err error @@ -330,12 +330,12 @@ func parseIgnorePatterns(ctx context.Context, treeCanMatch func(string) bool, mo } var modRoot string var ignorePatterns []string - if MainModules.Contains(mod.Path) { - modRoot = MainModules.ModRoot(mod) + if LoaderState.MainModules.Contains(mod.Path) { + modRoot = LoaderState.MainModules.ModRoot(mod) if modRoot == "" { continue } - modIndex := MainModules.Index(mod) + modIndex := LoaderState.MainModules.Index(mod) if modIndex == nil { continue } diff --git a/src/cmd/go/internal/modload/vendor.go b/src/cmd/go/internal/modload/vendor.go index c7fe73193582ab..d3f055acf64f50 100644 --- a/src/cmd/go/internal/modload/vendor.go +++ b/src/cmd/go/internal/modload/vendor.go @@ -236,8 +236,8 @@ func checkVendorConsistency(indexes []*modFileIndex, modFiles []*modfile.File, m for _, modFile := range modFiles { checkReplace(modFile.Replace) } - if MainModules.workFile != nil { - checkReplace(MainModules.workFile.Replace) + if LoaderState.MainModules.workFile != nil { + checkReplace(LoaderState.MainModules.workFile.Replace) } for _, mod := range vendorList { diff --git a/src/cmd/go/internal/test/test.go b/src/cmd/go/internal/test/test.go index 8bfb3c149b6c69..7a2963ff29b06e 100644 --- a/src/cmd/go/internal/test/test.go +++ b/src/cmd/go/internal/test/test.go @@ -730,7 +730,7 @@ func runTest(ctx context.Context, cmd *base.Command, args []string) { // the module cache (or permanently alter the behavior of std tests for all // users) by writing the failing input to the package's testdata directory. // (See https://golang.org/issue/48495 and test_fuzz_modcache.txt.) 
- mainMods := modload.MainModules + mainMods := modload.LoaderState.MainModules if m := pkgs[0].Module; m != nil && m.Path != "" { if !mainMods.Contains(m.Path) { base.Fatalf("cannot use -fuzz flag on package outside the main module") diff --git a/src/cmd/go/internal/tool/tool.go b/src/cmd/go/internal/tool/tool.go index ef25d17b54d892..e25c06a8f046bd 100644 --- a/src/cmd/go/internal/tool/tool.go +++ b/src/cmd/go/internal/tool/tool.go @@ -163,7 +163,7 @@ func listTools(ctx context.Context) { modload.InitWorkfile() modload.LoadModFile(ctx) - modTools := slices.Sorted(maps.Keys(modload.MainModules.Tools())) + modTools := slices.Sorted(maps.Keys(modload.LoaderState.MainModules.Tools())) for _, tool := range modTools { fmt.Println(tool) } @@ -256,7 +256,7 @@ func loadModTool(ctx context.Context, name string) string { modload.LoadModFile(ctx) matches := []string{} - for tool := range modload.MainModules.Tools() { + for tool := range modload.LoaderState.MainModules.Tools() { if tool == name || defaultExecName(tool) == name { matches = append(matches, tool) } diff --git a/src/cmd/go/internal/workcmd/sync.go b/src/cmd/go/internal/workcmd/sync.go index 800dd15dd6f6d1..640771d8f75996 100644 --- a/src/cmd/go/internal/workcmd/sync.go +++ b/src/cmd/go/internal/workcmd/sync.go @@ -60,7 +60,7 @@ func runSync(ctx context.Context, cmd *base.Command, args []string) { } mustSelectFor := map[module.Version][]module.Version{} - mms := modload.MainModules + mms := modload.LoaderState.MainModules opts := modload.PackageOpts{ Tags: imports.AnyTags(), @@ -131,7 +131,7 @@ func runSync(ctx context.Context, cmd *base.Command, args []string) { }, "all") modload.WriteGoMod(ctx, modload.WriteOpts{}) } - goV = gover.Max(goV, modload.MainModules.GoVersion()) + goV = gover.Max(goV, modload.LoaderState.MainModules.GoVersion()) } wf, err := modload.ReadWorkFile(workFilePath) From cb81270113968408d7cc41c0b1530adb51dd8496 Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Tue, 7 Oct 2025 10:12:43 -0700 Subject: [PATCH 091/152] Revert "crypto/internal/fips140/subtle: add assembly implementation of xorBytes for mipsx" This reverts commit 343e486bfdbf9ca614d3e197afd79ad7ed5fef3e. Reason for revert: doesn't handle unaligned accesses correctly. Update #74998 Change-Id: I1d6210eeca9336f2ce311e99944cb270565563aa Reviewed-on: https://go-review.googlesource.com/c/go/+/709795 Reviewed-by: Cherry Mui Reviewed-by: Michael Knyszek LUCI-TryBot-Result: Go LUCI Reviewed-by: Keith Randall --- src/crypto/internal/fips140/subtle/xor_asm.go | 2 +- .../internal/fips140/subtle/xor_generic.go | 2 +- .../internal/fips140/subtle/xor_mipsx.s | 212 ------------------ 3 files changed, 2 insertions(+), 214 deletions(-) delete mode 100644 src/crypto/internal/fips140/subtle/xor_mipsx.s diff --git a/src/crypto/internal/fips140/subtle/xor_asm.go b/src/crypto/internal/fips140/subtle/xor_asm.go index b07239da3e31c1..e10ea8b441429b 100644 --- a/src/crypto/internal/fips140/subtle/xor_asm.go +++ b/src/crypto/internal/fips140/subtle/xor_asm.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-//go:build (amd64 || arm64 || mips || mipsle || mips64 || mips64le || ppc64 || ppc64le || riscv64) && !purego +//go:build (amd64 || arm64 || mips64 || mips64le || ppc64 || ppc64le || riscv64) && !purego package subtle diff --git a/src/crypto/internal/fips140/subtle/xor_generic.go b/src/crypto/internal/fips140/subtle/xor_generic.go index ed484bc630e98d..08af84de2a3dab 100644 --- a/src/crypto/internal/fips140/subtle/xor_generic.go +++ b/src/crypto/internal/fips140/subtle/xor_generic.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build (!amd64 && !arm64 && !loong64 && !mips && !mipsle && !mips64 && !mips64le && !ppc64 && !ppc64le && !riscv64) || purego +//go:build (!amd64 && !arm64 && !loong64 && !mips64 && !mips64le && !ppc64 && !ppc64le && !riscv64) || purego package subtle diff --git a/src/crypto/internal/fips140/subtle/xor_mipsx.s b/src/crypto/internal/fips140/subtle/xor_mipsx.s deleted file mode 100644 index 1a6b3f409dddc9..00000000000000 --- a/src/crypto/internal/fips140/subtle/xor_mipsx.s +++ /dev/null @@ -1,212 +0,0 @@ -// Copyright 2025 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build (mips || mipsle) && !purego - -#include "textflag.h" - -// func xorBytes(dst, a, b *byte, n int) -TEXT ·xorBytes(SB), NOSPLIT|NOFRAME, $0 - MOVW dst+0(FP), R1 - MOVW a+4(FP), R2 - MOVW b+8(FP), R3 - MOVW n+12(FP), R4 - - SGTU $64, R4, R5 // R5 = 1 if (64 > R4) - BNE R5, xor_32_check -xor_64: - MOVW (R2), R6 - MOVW 4(R2), R7 - MOVW 8(R2), R8 - MOVW 12(R2), R9 - MOVW (R3), R10 - MOVW 4(R3), R11 - MOVW 8(R3), R12 - MOVW 12(R3), R13 - XOR R6, R10 - XOR R7, R11 - XOR R8, R12 - XOR R9, R13 - MOVW R10, (R1) - MOVW R11, 4(R1) - MOVW R12, 8(R1) - MOVW R13, 12(R1) - MOVW 16(R2), R6 - MOVW 20(R2), R7 - MOVW 24(R2), R8 - MOVW 28(R2), R9 - MOVW 16(R3), R10 - MOVW 20(R3), R11 - MOVW 24(R3), R12 - MOVW 28(R3), R13 - XOR R6, R10 - XOR R7, R11 - XOR R8, R12 - XOR R9, R13 - MOVW R10, 16(R1) - MOVW R11, 20(R1) - MOVW R12, 24(R1) - MOVW R13, 28(R1) - MOVW 32(R2), R6 - MOVW 36(R2), R7 - MOVW 40(R2), R8 - MOVW 44(R2), R9 - MOVW 32(R3), R10 - MOVW 36(R3), R11 - MOVW 40(R3), R12 - MOVW 44(R3), R13 - XOR R6, R10 - XOR R7, R11 - XOR R8, R12 - XOR R9, R13 - MOVW R10, 32(R1) - MOVW R11, 36(R1) - MOVW R12, 40(R1) - MOVW R13, 44(R1) - MOVW 48(R2), R6 - MOVW 52(R2), R7 - MOVW 56(R2), R8 - MOVW 60(R2), R9 - MOVW 48(R3), R10 - MOVW 52(R3), R11 - MOVW 56(R3), R12 - MOVW 60(R3), R13 - XOR R6, R10 - XOR R7, R11 - XOR R8, R12 - XOR R9, R13 - MOVW R10, 48(R1) - MOVW R11, 52(R1) - MOVW R12, 56(R1) - MOVW R13, 60(R1) - ADD $64, R2 - ADD $64, R3 - ADD $64, R1 - SUB $64, R4 - SGTU $64, R4, R5 - BEQ R0, R5, xor_64 - BEQ R0, R4, end - -xor_32_check: - SGTU $32, R4, R5 - BNE R5, xor_16_check -xor_32: - MOVW (R2), R6 - MOVW 4(R2), R7 - MOVW 8(R2), R8 - MOVW 12(R2), R9 - MOVW (R3), R10 - MOVW 4(R3), R11 - MOVW 8(R3), R12 - MOVW 12(R3), R13 - XOR R6, R10 - XOR R7, R11 - XOR R8, R12 - XOR R9, R13 - MOVW R10, (R1) - MOVW R11, 4(R1) - MOVW R12, 8(R1) - MOVW R13, 12(R1) - MOVW 16(R2), R6 - MOVW 20(R2), R7 - MOVW 24(R2), R8 - MOVW 28(R2), R9 - MOVW 16(R3), R10 - MOVW 20(R3), R11 - MOVW 24(R3), R12 - MOVW 28(R3), R13 - XOR R6, R10 - XOR R7, R11 - XOR R8, R12 - XOR R9, R13 - MOVW R10, 16(R1) - MOVW R11, 20(R1) - MOVW R12, 24(R1) - MOVW R13, 28(R1) - ADD $32, R2 - ADD $32, R3 - ADD $32, R1 - SUB $32, R4 - BEQ R0, R4, end - -xor_16_check: - SGTU $16, R4, R5 - BNE R5, 
xor_8_check -xor_16: - MOVW (R2), R6 - MOVW 4(R2), R7 - MOVW 8(R2), R8 - MOVW 12(R2), R9 - MOVW (R3), R10 - MOVW 4(R3), R11 - MOVW 8(R3), R12 - MOVW 12(R3), R13 - XOR R6, R10 - XOR R7, R11 - XOR R8, R12 - XOR R9, R13 - MOVW R10, (R1) - MOVW R11, 4(R1) - MOVW R12, 8(R1) - MOVW R13, 12(R1) - ADD $16, R2 - ADD $16, R3 - ADD $16, R1 - SUB $16, R4 - BEQ R0, R4, end - -xor_8_check: - SGTU $8, R4, R5 - BNE R5, xor_4_check -xor_8: - MOVW (R2), R6 - MOVW 4(R2), R7 - MOVW (R3), R8 - MOVW 4(R3), R9 - XOR R6, R8 - XOR R7, R9 - MOVW R8, (R1) - MOVW R9, 4(R1) - ADD $8, R1 - ADD $8, R2 - ADD $8, R3 - SUB $8, R4 - BEQ R0, R4, end - -xor_4_check: - SGTU $4, R4, R5 - BNE R5, xor_2_check -xor_4: - MOVW (R2), R6 - MOVW (R3), R7 - XOR R6, R7 - MOVW R7, (R1) - ADD $4, R2 - ADD $4, R3 - ADD $4, R1 - SUB $4, R4 - BEQ R0, R4, end - -xor_2_check: - SGTU $2, R4, R5 - BNE R5, xor_1 -xor_2: - MOVH (R2), R6 - MOVH (R3), R7 - XOR R6, R7 - MOVH R7, (R1) - ADD $2, R2 - ADD $2, R3 - ADD $2, R1 - SUB $2, R4 - BEQ R0, R4, end - -xor_1: - MOVB (R2), R6 - MOVB (R3), R7 - XOR R6, R7 - MOVB R7, (R1) - -end: - RET From a1661e776f57602b4d4470389a0246f9784fd722 Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Tue, 7 Oct 2025 10:15:43 -0700 Subject: [PATCH 092/152] Revert "crypto/internal/fips140/subtle: add assembly implementation of xorBytes for mips64x" This reverts commit 49d6777d87a0abb3eda032da95eff024156835f7. Reason for revert: doesn't handle unaligned accesses correctly Fixes #74998 Change-Id: Ia272245a6a2a91b305d411207430bad660ee355b Reviewed-on: https://go-review.googlesource.com/c/go/+/709757 Reviewed-by: Keith Randall Reviewed-by: Cherry Mui LUCI-TryBot-Result: Go LUCI --- src/crypto/internal/fips140/subtle/xor_asm.go | 2 +- .../internal/fips140/subtle/xor_generic.go | 2 +- .../internal/fips140/subtle/xor_mips64x.s | 153 ------------------ 3 files changed, 2 insertions(+), 155 deletions(-) delete mode 100644 src/crypto/internal/fips140/subtle/xor_mips64x.s diff --git a/src/crypto/internal/fips140/subtle/xor_asm.go b/src/crypto/internal/fips140/subtle/xor_asm.go index e10ea8b441429b..bb85aefef4013e 100644 --- a/src/crypto/internal/fips140/subtle/xor_asm.go +++ b/src/crypto/internal/fips140/subtle/xor_asm.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build (amd64 || arm64 || mips64 || mips64le || ppc64 || ppc64le || riscv64) && !purego +//go:build (amd64 || arm64 || ppc64 || ppc64le || riscv64) && !purego package subtle diff --git a/src/crypto/internal/fips140/subtle/xor_generic.go b/src/crypto/internal/fips140/subtle/xor_generic.go index 08af84de2a3dab..0b31eec60197d3 100644 --- a/src/crypto/internal/fips140/subtle/xor_generic.go +++ b/src/crypto/internal/fips140/subtle/xor_generic.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build (!amd64 && !arm64 && !loong64 && !mips64 && !mips64le && !ppc64 && !ppc64le && !riscv64) || purego +//go:build (!amd64 && !arm64 && !loong64 && !ppc64 && !ppc64le && !riscv64) || purego package subtle diff --git a/src/crypto/internal/fips140/subtle/xor_mips64x.s b/src/crypto/internal/fips140/subtle/xor_mips64x.s deleted file mode 100644 index e580235914aeaf..00000000000000 --- a/src/crypto/internal/fips140/subtle/xor_mips64x.s +++ /dev/null @@ -1,153 +0,0 @@ -// Copyright 2025 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build (mips64 || mips64le) && !purego - -#include "textflag.h" - -// func xorBytes(dst, a, b *byte, n int) -TEXT ·xorBytes(SB), NOSPLIT|NOFRAME, $0 - MOVV dst+0(FP), R1 - MOVV a+8(FP), R2 - MOVV b+16(FP), R3 - MOVV n+24(FP), R4 - -xor_64_check: - SGTU $64, R4, R5 // R5 = 1 if (64 > R4) - BNE R5, xor_32_check -xor_64: - MOVV (R2), R6 - MOVV 8(R2), R7 - MOVV 16(R2), R8 - MOVV 24(R2), R9 - MOVV (R3), R10 - MOVV 8(R3), R11 - MOVV 16(R3), R12 - MOVV 24(R3), R13 - XOR R6, R10 - XOR R7, R11 - XOR R8, R12 - XOR R9, R13 - MOVV R10, (R1) - MOVV R11, 8(R1) - MOVV R12, 16(R1) - MOVV R13, 24(R1) - MOVV 32(R2), R6 - MOVV 40(R2), R7 - MOVV 48(R2), R8 - MOVV 56(R2), R9 - MOVV 32(R3), R10 - MOVV 40(R3), R11 - MOVV 48(R3), R12 - MOVV 56(R3), R13 - XOR R6, R10 - XOR R7, R11 - XOR R8, R12 - XOR R9, R13 - MOVV R10, 32(R1) - MOVV R11, 40(R1) - MOVV R12, 48(R1) - MOVV R13, 56(R1) - ADDV $64, R2 - ADDV $64, R3 - ADDV $64, R1 - SUBV $64, R4 - SGTU $64, R4, R5 - BEQ R0, R5, xor_64 - BEQ R0, R4, end - -xor_32_check: - SGTU $32, R4, R5 - BNE R5, xor_16_check -xor_32: - MOVV (R2), R6 - MOVV 8(R2), R7 - MOVV 16(R2), R8 - MOVV 24(R2), R9 - MOVV (R3), R10 - MOVV 8(R3), R11 - MOVV 16(R3), R12 - MOVV 24(R3), R13 - XOR R6, R10 - XOR R7, R11 - XOR R8, R12 - XOR R9, R13 - MOVV R10, (R1) - MOVV R11, 8(R1) - MOVV R12, 16(R1) - MOVV R13, 24(R1) - ADDV $32, R2 - ADDV $32, R3 - ADDV $32, R1 - SUBV $32, R4 - BEQ R0, R4, end - -xor_16_check: - SGTU $16, R4, R5 - BNE R5, xor_8_check -xor_16: - MOVV (R2), R6 - MOVV 8(R2), R7 - MOVV (R3), R8 - MOVV 8(R3), R9 - XOR R6, R8 - XOR R7, R9 - MOVV R8, (R1) - MOVV R9, 8(R1) - ADDV $16, R2 - ADDV $16, R3 - ADDV $16, R1 - SUBV $16, R4 - BEQ R0, R4, end - -xor_8_check: - SGTU $8, R4, R5 - BNE R5, xor_4_check -xor_8: - MOVV (R2), R6 - MOVV (R3), R7 - XOR R6, R7 - MOVV R7, (R1) - ADDV $8, R1 - ADDV $8, R2 - ADDV $8, R3 - SUBV $8, R4 - BEQ R0, R4, end - -xor_4_check: - SGTU $4, R4, R5 - BNE R5, xor_2_check -xor_4: - MOVW (R2), R6 - MOVW (R3), R7 - XOR R6, R7 - MOVW R7, (R1) - ADDV $4, R2 - ADDV $4, R3 - ADDV $4, R1 - SUBV $4, R4 - BEQ R0, R4, end - -xor_2_check: - SGTU $2, R4, R5 - BNE R5, xor_1 -xor_2: - MOVH (R2), R6 - MOVH (R3), R7 - XOR R6, R7 - MOVH R7, (R1) - ADDV $2, R2 - ADDV $2, R3 - ADDV $2, R1 - SUBV $2, R4 - BEQ R0, R4, end - -xor_1: - MOVB (R2), R6 - MOVB (R3), R7 - XOR R6, R7 - MOVB R7, (R1) - -end: - RET From 162392773085d4cc12072200853a0424117983c0 Mon Sep 17 00:00:00 2001 From: Ian Alexander Date: Wed, 20 Aug 2025 20:14:59 -0400 Subject: [PATCH 093/152] cmd/go: refactor usage of `requirements` This commit refactors usage of the global variable `requirements` to the global LoaderState field of the same name. This commit is part of the overall effort to eliminate global modloader state. [git-generate] cd src/cmd/go/internal/modload rf 'ex { requirements -> LoaderState.requirements }' rf 'add State.MainModules \ // requirements is the requirement graph for the main module.\ //\ // It is always non-nil if the main module'\\\''s go.mod file has been\ // loaded.\ //\ // This variable should only be read from the loadModFile\ // function, and should only be written in the loadModFile and\ // commitRequirements functions. All other functions that need or\ // produce a *Requirements should accept and/or return an explicit\ // parameter.' 
rf 'rm requirements' Change-Id: I9d7d1d301a9e89f9214ce632fa5b656dd2940f39 Reviewed-on: https://go-review.googlesource.com/c/go/+/698061 LUCI-TryBot-Result: Go LUCI Reviewed-by: Michael Matloob Reviewed-by: Michael Matloob --- src/cmd/go/internal/modload/buildlist.go | 14 +----- src/cmd/go/internal/modload/init.go | 55 ++++++++++++++---------- src/cmd/go/internal/modload/list.go | 2 +- src/cmd/go/internal/modload/load.go | 8 ++-- 4 files changed, 40 insertions(+), 39 deletions(-) diff --git a/src/cmd/go/internal/modload/buildlist.go b/src/cmd/go/internal/modload/buildlist.go index 8afea0b205c4d7..b73cb43d0dc3cf 100644 --- a/src/cmd/go/internal/modload/buildlist.go +++ b/src/cmd/go/internal/modload/buildlist.go @@ -85,16 +85,6 @@ type cachedGraph struct { err error // If err is non-nil, mg may be incomplete (but must still be non-nil). } -// requirements is the requirement graph for the main module. -// -// It is always non-nil if the main module's go.mod file has been loaded. -// -// This variable should only be read from the loadModFile function, and should -// only be written in the loadModFile and commitRequirements functions. -// All other functions that need or produce a *Requirements should -// accept and/or return an explicit parameter. -var requirements *Requirements - func mustHaveGoRoot(roots []module.Version) { for _, m := range roots { if m.Path == "go" { @@ -589,7 +579,7 @@ func LoadModGraph(ctx context.Context, goVersion string) (*ModuleGraph, error) { if err != nil { return nil, err } - requirements = rs + LoaderState.requirements = rs return mg, nil } @@ -654,7 +644,7 @@ func EditBuildList(ctx context.Context, add, mustSelect []module.Version) (chang if err != nil { return false, err } - requirements = rs + LoaderState.requirements = rs return changed, nil } diff --git a/src/cmd/go/internal/modload/init.go b/src/cmd/go/internal/modload/init.go index 28f55a621d60de..1c8aa379d3cf78 100644 --- a/src/cmd/go/internal/modload/init.go +++ b/src/cmd/go/internal/modload/init.go @@ -59,7 +59,7 @@ var ( // EnterModule resets MainModules and requirements to refer to just this one module. func EnterModule(ctx context.Context, enterModroot string) { LoaderState.MainModules = nil // reset MainModules - requirements = nil + LoaderState.requirements = nil workFilePath = "" // Force module mode modfetch.Reset() @@ -90,7 +90,7 @@ func EnterWorkspace(ctx context.Context) (exit func(), err error) { // Update the content of the previous main module, and recompute the requirements. 
*LoaderState.MainModules.ModFile(mm) = *updatedmodfile - requirements = requirementsFromModFiles(ctx, LoaderState.MainModules.workFile, slices.Collect(maps.Values(LoaderState.MainModules.modFiles)), nil) + LoaderState.requirements = requirementsFromModFiles(ctx, LoaderState.MainModules.workFile, slices.Collect(maps.Values(LoaderState.MainModules.modFiles)), nil) return func() { setState(oldstate) @@ -395,7 +395,7 @@ func setState(s State) State { modRoots: LoaderState.modRoots, modulesEnabled: cfg.ModulesEnabled, MainModules: LoaderState.MainModules, - requirements: requirements, + requirements: LoaderState.requirements, } LoaderState.initialized = s.initialized LoaderState.ForceUseModules = s.ForceUseModules @@ -403,7 +403,7 @@ func setState(s State) State { LoaderState.modRoots = s.modRoots cfg.ModulesEnabled = s.modulesEnabled LoaderState.MainModules = s.MainModules - requirements = s.requirements + LoaderState.requirements = s.requirements workFilePath = s.workFilePath // The modfetch package's global state is used to compute // the go.sum file, so save and restore it along with the @@ -430,9 +430,20 @@ type State struct { modRoots []string modulesEnabled bool MainModules *MainModuleSet - requirements *Requirements - workFilePath string - modfetchState modfetch.State + + // requirements is the requirement graph for the main module. + // + // It is always non-nil if the main module's go.mod file has been + // loaded. + // + // This variable should only be read from the loadModFile + // function, and should only be written in the loadModFile and + // commitRequirements functions. All other functions that need or + // produce a *Requirements should accept and/or return an explicit + // parameter. + requirements *Requirements + workFilePath string + modfetchState modfetch.State } func NewState() *State { return &State{} } @@ -869,8 +880,8 @@ func LoadModFile(ctx context.Context) *Requirements { } func loadModFile(ctx context.Context, opts *PackageOpts) (*Requirements, error) { - if requirements != nil { - return requirements, nil + if LoaderState.requirements != nil { + return LoaderState.requirements, nil } Init() @@ -939,14 +950,14 @@ func loadModFile(ctx context.Context, opts *PackageOpts) (*Requirements, error) } } rawGoVersion.Store(mainModule, goVersion) - requirements = newRequirements(pruning, roots, direct) + LoaderState.requirements = newRequirements(pruning, roots, direct) if cfg.BuildMod == "vendor" { // For issue 56536: Some users may have GOFLAGS=-mod=vendor set. // Make sure it behaves as though the fake module is vendored // with no dependencies. - requirements.initVendor(nil) + LoaderState.requirements.initVendor(nil) } - return requirements, nil + return LoaderState.requirements, nil } var modFiles []*modfile.File @@ -1035,7 +1046,7 @@ func loadModFile(ctx context.Context, opts *PackageOpts) (*Requirements, error) if inWorkspaceMode() { // We don't need to update the mod file so return early. 
- requirements = rs + LoaderState.requirements = rs return rs, nil } @@ -1081,8 +1092,8 @@ func loadModFile(ctx context.Context, opts *PackageOpts) (*Requirements, error) } } - requirements = rs - return requirements, nil + LoaderState.requirements = rs + return LoaderState.requirements, nil } func errWorkTooOld(gomod string, wf *modfile.WorkFile, goVers string) error { @@ -1162,7 +1173,7 @@ func CreateModFile(ctx context.Context, modPath string) { if err != nil { base.Fatal(err) } - requirements = rs + LoaderState.requirements = rs if err := commitRequirements(ctx, WriteOpts{}); err != nil { base.Fatal(err) } @@ -1801,7 +1812,7 @@ type WriteOpts struct { // WriteGoMod writes the current build list back to go.mod. func WriteGoMod(ctx context.Context, opts WriteOpts) error { - requirements = LoadModFile(ctx) + LoaderState.requirements = LoadModFile(ctx) return commitRequirements(ctx, opts) } @@ -1828,7 +1839,7 @@ func UpdateGoModFromReqs(ctx context.Context, opts WriteOpts) (before, after []b var list []*modfile.Require toolchain := "" goVersion := "" - for _, m := range requirements.rootModules { + for _, m := range LoaderState.requirements.rootModules { if m.Path == "go" { goVersion = m.Version continue @@ -1839,7 +1850,7 @@ func UpdateGoModFromReqs(ctx context.Context, opts WriteOpts) (before, after []b } list = append(list, &modfile.Require{ Mod: m, - Indirect: !requirements.direct[m.Path], + Indirect: !LoaderState.requirements.direct[m.Path], }) } @@ -1913,7 +1924,7 @@ func commitRequirements(ctx context.Context, opts WriteOpts) (err error) { if inWorkspaceMode() { // go.mod files aren't updated in workspace mode, but we still want to // update the go.work.sum file. - return modfetch.WriteGoSum(ctx, keepSums(ctx, loaded, requirements, addBuildListZipSums), mustHaveCompleteRequirements()) + return modfetch.WriteGoSum(ctx, keepSums(ctx, loaded, LoaderState.requirements, addBuildListZipSums), mustHaveCompleteRequirements()) } _, updatedGoMod, modFile, err := UpdateGoModFromReqs(ctx, opts) if err != nil { @@ -1937,7 +1948,7 @@ func commitRequirements(ctx context.Context, opts WriteOpts) (err error) { // Don't write go.mod, but write go.sum in case we added or trimmed sums. // 'go mod init' shouldn't write go.sum, since it will be incomplete. if cfg.CmdName != "mod init" { - if err := modfetch.WriteGoSum(ctx, keepSums(ctx, loaded, requirements, addBuildListZipSums), mustHaveCompleteRequirements()); err != nil { + if err := modfetch.WriteGoSum(ctx, keepSums(ctx, loaded, LoaderState.requirements, addBuildListZipSums), mustHaveCompleteRequirements()); err != nil { return err } } @@ -1960,7 +1971,7 @@ func commitRequirements(ctx context.Context, opts WriteOpts) (err error) { // 'go mod init' shouldn't write go.sum, since it will be incomplete. 
if cfg.CmdName != "mod init" { if err == nil { - err = modfetch.WriteGoSum(ctx, keepSums(ctx, loaded, requirements, addBuildListZipSums), mustHaveCompleteRequirements()) + err = modfetch.WriteGoSum(ctx, keepSums(ctx, loaded, LoaderState.requirements, addBuildListZipSums), mustHaveCompleteRequirements()) } } }() diff --git a/src/cmd/go/internal/modload/list.go b/src/cmd/go/internal/modload/list.go index b2d071dcf69723..b66e73a112cedd 100644 --- a/src/cmd/go/internal/modload/list.go +++ b/src/cmd/go/internal/modload/list.go @@ -109,7 +109,7 @@ func ListModules(ctx context.Context, args []string, mode ListMode, reuseFile st } if err == nil { - requirements = rs + LoaderState.requirements = rs // TODO(#61605): The extra ListU clause fixes a problem with Go 1.21rc3 // where "go mod tidy" and "go list -m -u all" fight over whether the go.sum // should be considered up-to-date. The fix for now is to always treat the diff --git a/src/cmd/go/internal/modload/load.go b/src/cmd/go/internal/modload/load.go index 54e1da902cfd00..413de8148fb79e 100644 --- a/src/cmd/go/internal/modload/load.go +++ b/src/cmd/go/internal/modload/load.go @@ -455,7 +455,7 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma if opts.TidyDiff { cfg.BuildMod = "readonly" loaded = ld - requirements = loaded.requirements + LoaderState.requirements = loaded.requirements currentGoMod, updatedGoMod, _, err := UpdateGoModFromReqs(ctx, WriteOpts{}) if err != nil { base.Fatal(err) @@ -466,7 +466,7 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma // Dropping compatibility for 1.16 may result in a strictly smaller go.sum. // Update the keep map with only the loaded.requirements. if gover.Compare(compatVersion, "1.16") > 0 { - keep = keepSums(ctx, loaded, requirements, addBuildListZipSums) + keep = keepSums(ctx, loaded, LoaderState.requirements, addBuildListZipSums) } currentGoSum, tidyGoSum := modfetch.TidyGoSum(keep) goSumDiff := diff.Diff("current/go.sum", currentGoSum, "tidy/go.sum", tidyGoSum) @@ -505,7 +505,7 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma // to call WriteGoMod itself) or if ResolveMissingImports is false (the // command wants to examine the package graph as-is). loaded = ld - requirements = loaded.requirements + LoaderState.requirements = loaded.requirements for _, pkg := range ld.pkgs { if !pkg.isTest() { @@ -788,7 +788,7 @@ func ImportFromFiles(ctx context.Context, gofiles []string) { return roots }, }) - requirements = loaded.requirements + LoaderState.requirements = loaded.requirements if !ExplicitWriteGoMod { if err := commitRequirements(ctx, WriteOpts{}); err != nil { From bb1ca7ae81ea8ca49a2773ace8ccff8fbc7f4dfd Mon Sep 17 00:00:00 2001 From: Damien Neil Date: Fri, 15 Aug 2025 15:24:05 -0700 Subject: [PATCH 094/152] cmd/go, testing: add TB.ArtifactDir and -artifacts flag Add TB.ArtifactDir, which returns a directory for a test to store output files in. Add a -artifacts testflag which enables persistent storage of artifacts in the output directory (-outputdir, or the current directory by default). 
Fixes #71287 Change-Id: I5f6515a6cd6c103f88588f4c033d5ea11ffd0c3c Reviewed-on: https://go-review.googlesource.com/c/go/+/696399 LUCI-TryBot-Result: Go LUCI Reviewed-by: Alan Donovan --- api/next/71287.txt | 4 + doc/next/6-stdlib/99-minor/testing/71287.md | 18 ++ src/cmd/go/alldocs.go | 12 +- src/cmd/go/internal/load/test.go | 9 + src/cmd/go/internal/test/flagdefs.go | 1 + src/cmd/go/internal/test/test.go | 13 +- src/cmd/go/internal/test/testflag.go | 4 +- src/cmd/internal/test2json/test2json.go | 24 ++- src/testing/internal/testdeps/deps.go | 6 + src/testing/testing.go | 207 +++++++++++++++----- src/testing/testing_test.go | 106 +++++++++- 11 files changed, 333 insertions(+), 71 deletions(-) create mode 100644 api/next/71287.txt create mode 100644 doc/next/6-stdlib/99-minor/testing/71287.md diff --git a/api/next/71287.txt b/api/next/71287.txt new file mode 100644 index 00000000000000..c1e09a1f523082 --- /dev/null +++ b/api/next/71287.txt @@ -0,0 +1,4 @@ +pkg testing, method (*B) ArtifactDir() string #71287 +pkg testing, method (*F) ArtifactDir() string #71287 +pkg testing, method (*T) ArtifactDir() string #71287 +pkg testing, type TB interface, ArtifactDir() string #71287 diff --git a/doc/next/6-stdlib/99-minor/testing/71287.md b/doc/next/6-stdlib/99-minor/testing/71287.md new file mode 100644 index 00000000000000..82cac638101099 --- /dev/null +++ b/doc/next/6-stdlib/99-minor/testing/71287.md @@ -0,0 +1,18 @@ +The new methods [T.ArtifactDir], [B.ArtifactDir], and [F.ArtifactDir] +return a directory in which to write test output files (artifacts). + +When the `-artifacts` flag is provided to `go test`, +this directory will be located under the output directory +(specified with `-outputdir`, or the current directory by default). +Otherwise, artifacts are stored in a temporary directory +which is removed after the test completes. + +The first call to `ArtifactDir` when `-artifacts` is provided +writes the location of the directory to the test log. + +For example, in a test named `TestArtifacts`, +`t.ArtifactDir()` emits: + +``` +=== ARTIFACTS Test /path/to/artifact/dir +``` diff --git a/src/cmd/go/alldocs.go b/src/cmd/go/alldocs.go index 19b48f0579bb29..51f2223283b175 100644 --- a/src/cmd/go/alldocs.go +++ b/src/cmd/go/alldocs.go @@ -3244,6 +3244,10 @@ // The following flags are recognized by the 'go test' command and // control the execution of any test: // +// -artifacts +// Save test artifacts in the directory specified by -outputdir. +// See 'go doc testing.T.ArtifactDir'. +// // -bench regexp // Run only those benchmarks matching a regular expression. // By default, no benchmarks are run. @@ -3338,6 +3342,10 @@ // This will only list top-level tests. No subtest or subbenchmarks will be // shown. // +// -outputdir directory +// Place output files from profiling and test artifacts in the +// specified directory, by default the directory in which "go test" is running. +// // -parallel n // Allow parallel execution of test functions that call t.Parallel, and // fuzz targets that call t.Parallel when running the seed corpus. @@ -3449,10 +3457,6 @@ // Sample 1 in n stack traces of goroutines holding a // contended mutex. // -// -outputdir directory -// Place output files from profiling in the specified directory, -// by default the directory in which "go test" is running. -// // -trace trace.out // Write an execution trace to the specified file before exiting. 
// diff --git a/src/cmd/go/internal/load/test.go b/src/cmd/go/internal/load/test.go index f895e3a2461d9e..9849ee138a5781 100644 --- a/src/cmd/go/internal/load/test.go +++ b/src/cmd/go/internal/load/test.go @@ -649,6 +649,14 @@ func (t *testFuncs) ImportPath() string { return pkg } +func (t *testFuncs) ModulePath() string { + m := t.Package.Module + if m == nil { + return "" + } + return m.Path +} + // Covered returns a string describing which packages are being tested for coverage. // If the covered package is the same as the tested package, it returns the empty string. // Otherwise it is a comma-separated human-readable list of packages beginning with @@ -836,6 +844,7 @@ func init() { testdeps.CoverMarkProfileEmittedFunc = cfile.MarkProfileEmitted {{end}} + testdeps.ModulePath = {{.ModulePath | printf "%q"}} testdeps.ImportPath = {{.ImportPath | printf "%q"}} } diff --git a/src/cmd/go/internal/test/flagdefs.go b/src/cmd/go/internal/test/flagdefs.go index 8aa0bfc2bf3120..b8b4bf649e42e7 100644 --- a/src/cmd/go/internal/test/flagdefs.go +++ b/src/cmd/go/internal/test/flagdefs.go @@ -9,6 +9,7 @@ package test // passFlagToTest contains the flags that should be forwarded to // the test binary with the prefix "test.". var passFlagToTest = map[string]bool{ + "artifacts": true, "bench": true, "benchmem": true, "benchtime": true, diff --git a/src/cmd/go/internal/test/test.go b/src/cmd/go/internal/test/test.go index 7a2963ff29b06e..15ffc618c65dab 100644 --- a/src/cmd/go/internal/test/test.go +++ b/src/cmd/go/internal/test/test.go @@ -192,6 +192,10 @@ and -show_bytes options of pprof control how the information is presented. The following flags are recognized by the 'go test' command and control the execution of any test: + -artifacts + Save test artifacts in the directory specified by -outputdir. + See 'go doc testing.T.ArtifactDir'. + -bench regexp Run only those benchmarks matching a regular expression. By default, no benchmarks are run. @@ -286,6 +290,10 @@ control the execution of any test: This will only list top-level tests. No subtest or subbenchmarks will be shown. + -outputdir directory + Place output files from profiling and test artifacts in the + specified directory, by default the directory in which "go test" is running. + -parallel n Allow parallel execution of test functions that call t.Parallel, and fuzz targets that call t.Parallel when running the seed corpus. @@ -397,10 +405,6 @@ profile the tests during execution: Sample 1 in n stack traces of goroutines holding a contended mutex. - -outputdir directory - Place output files from profiling in the specified directory, - by default the directory in which "go test" is running. - -trace trace.out Write an execution trace to the specified file before exiting. @@ -540,6 +544,7 @@ See the documentation of the testing package for more information. } var ( + testArtifacts bool // -artifacts flag testBench string // -bench flag testC bool // -c flag testCoverPkgs []*load.Package // -coverpkg flag diff --git a/src/cmd/go/internal/test/testflag.go b/src/cmd/go/internal/test/testflag.go index 983e8f56e9af09..fc2b22cb56a9ee 100644 --- a/src/cmd/go/internal/test/testflag.go +++ b/src/cmd/go/internal/test/testflag.go @@ -44,6 +44,7 @@ func init() { // some of them so that cmd/go knows what to do with the test output, or knows // to build the test in a way that supports the use of the flag. 
+ cf.BoolVar(&testArtifacts, "artifacts", false, "") cf.StringVar(&testBench, "bench", "", "") cf.Bool("benchmem", false, "") cf.String("benchtime", "", "") @@ -392,7 +393,8 @@ func testFlags(args []string) (packageNames, passToTest []string) { // directory, but 'go test' defaults it to the working directory of the 'go' // command. Set it explicitly if it is needed due to some other flag that // requests output. - if testProfile() != "" && !outputDirSet { + needOutputDir := testProfile() != "" || testArtifacts + if needOutputDir && !outputDirSet { injectedFlags = append(injectedFlags, "-test.outputdir="+testOutputDir.getAbs()) } diff --git a/src/cmd/internal/test2json/test2json.go b/src/cmd/internal/test2json/test2json.go index d08ef389f82a21..f28051e1771db8 100644 --- a/src/cmd/internal/test2json/test2json.go +++ b/src/cmd/internal/test2json/test2json.go @@ -38,6 +38,7 @@ type event struct { FailedBuild string `json:",omitempty"` Key string `json:",omitempty"` Value string `json:",omitempty"` + Path string `json:",omitempty"` } // textBytes is a hack to get JSON to emit a []byte as a string @@ -180,6 +181,7 @@ var ( []byte("=== FAIL "), []byte("=== SKIP "), []byte("=== ATTR "), + []byte("=== ARTIFACTS "), } reports = [][]byte{ @@ -251,7 +253,6 @@ func (c *Converter) handleInputLine(line []byte) { // "=== RUN " // "=== PAUSE " // "=== CONT " - actionColon := false origLine := line ok := false indent := 0 @@ -273,7 +274,6 @@ func (c *Converter) handleInputLine(line []byte) { } for _, magic := range reports { if bytes.HasPrefix(line, magic) { - actionColon = true ok = true break } @@ -296,16 +296,11 @@ func (c *Converter) handleInputLine(line []byte) { return } - // Parse out action and test name. - i := 0 - if actionColon { - i = bytes.IndexByte(line, ':') + 1 - } - if i == 0 { - i = len(updates[0]) - } - action := strings.ToLower(strings.TrimSuffix(strings.TrimSpace(string(line[4:i])), ":")) - name := strings.TrimSpace(string(line[i:])) + // Parse out action and test name from "=== ACTION: Name". + action, name, _ := strings.Cut(string(line[len("=== "):]), " ") + action = strings.TrimSuffix(action, ":") + action = strings.ToLower(action) + name = strings.TrimSpace(name) e := &event{Action: action} if line[0] == '-' { // PASS or FAIL report @@ -336,7 +331,10 @@ func (c *Converter) handleInputLine(line []byte) { c.output.write(origLine) return } - if action == "attr" { + switch action { + case "artifacts": + name, e.Path, _ = strings.Cut(name, " ") + case "attr": var rest string name, rest, _ = strings.Cut(name, " ") e.Key, e.Value, _ = strings.Cut(rest, " ") diff --git a/src/testing/internal/testdeps/deps.go b/src/testing/internal/testdeps/deps.go index 6f42d4722ca00c..5ab377daeb61ed 100644 --- a/src/testing/internal/testdeps/deps.go +++ b/src/testing/internal/testdeps/deps.go @@ -66,6 +66,12 @@ func (TestDeps) ImportPath() string { return ImportPath } +var ModulePath string + +func (TestDeps) ModulePath() string { + return ModulePath +} + // testLog implements testlog.Interface, logging actions by package os. type testLog struct { mu sync.Mutex diff --git a/src/testing/testing.go b/src/testing/testing.go index 3f76446549364a..0d1d08ca89a5e6 100644 --- a/src/testing/testing.go +++ b/src/testing/testing.go @@ -420,7 +420,6 @@ import ( "sync/atomic" "time" "unicode" - "unicode/utf8" _ "unsafe" // for linkname ) @@ -456,6 +455,7 @@ func Init() { // this flag lets "go test" tell the binary to write the files in the directory where // the "go test" command is run. 
outputDir = flag.String("test.outputdir", "", "write profiles to `dir`") + artifacts = flag.Bool("test.artifacts", false, "store test artifacts in test.,outputdir") // Report as tests are run; default is silent for success. flag.Var(&chatty, "test.v", "verbose: print additional output") count = flag.Uint("test.count", 1, "run tests and benchmarks `n` times") @@ -489,6 +489,7 @@ var ( short *bool failFast *bool outputDir *string + artifacts *bool chatty chattyFlag count *uint coverProfile *string @@ -516,6 +517,7 @@ var ( cpuList []int testlogFile *os.File + artifactDir string numFailed atomic.Uint32 // number of test failures @@ -653,15 +655,17 @@ type common struct { runner string // Function name of tRunner running the test. isParallel bool // Whether the test is parallel. - parent *common - level int // Nesting depth of test or benchmark. - creator []uintptr // If level > 0, the stack trace at the point where the parent called t.Run. - name string // Name of test or benchmark. - start highPrecisionTime // Time test or benchmark started - duration time.Duration - barrier chan bool // To signal parallel subtests they may start. Nil when T.Parallel is not present (B) or not usable (when fuzzing). - signal chan bool // To signal a test is done. - sub []*T // Queue of subtests to be run in parallel. + parent *common + level int // Nesting depth of test or benchmark. + creator []uintptr // If level > 0, the stack trace at the point where the parent called t.Run. + modulePath string + importPath string + name string // Name of test or benchmark. + start highPrecisionTime // Time test or benchmark started + duration time.Duration + barrier chan bool // To signal parallel subtests they may start. Nil when T.Parallel is not present (B) or not usable (when fuzzing). + signal chan bool // To signal a test is done. + sub []*T // Queue of subtests to be run in parallel. lastRaceErrors atomic.Int64 // Max value of race.Errors seen during the test or its subtests. raceErrorLogged atomic.Bool @@ -671,6 +675,10 @@ type common struct { tempDirErr error tempDirSeq int32 + artifactDirOnce sync.Once + artifactDir string + artifactDirErr error + ctx context.Context cancelCtx context.CancelFunc } @@ -879,6 +887,7 @@ func fmtDuration(d time.Duration) string { // TB is the interface common to [T], [B], and [F]. type TB interface { + ArtifactDir() string Attr(key, value string) Cleanup(func()) Error(args ...any) @@ -1313,6 +1322,96 @@ func (c *common) Cleanup(f func()) { c.cleanups = append(c.cleanups, fn) } +// ArtifactDir returns a directory in which the test should store output files. +// When the -artifacts flag is provided, this directory is located +// under the output directory. Otherwise, ArtifactDir returns a temporary directory +// that is removed after the test completes. +// +// Each test or subtest within each test package has a unique artifact directory. +// Repeated calls to ArtifactDir in the same test or subtest return the same directory. +// Subtest outputs are not located under the parent test's output directory. +func (c *common) ArtifactDir() string { + c.checkFuzzFn("ArtifactDir") + c.artifactDirOnce.Do(func() { + c.artifactDir, c.artifactDirErr = c.makeArtifactDir() + }) + if c.artifactDirErr != nil { + c.Fatalf("ArtifactDir: %v", c.artifactDirErr) + } + return c.artifactDir +} + +func hashString(s string) (h uint64) { + // FNV, used here to avoid a dependency on maphash. 
+ for i := 0; i < len(s); i++ { + h ^= uint64(s[i]) + h *= 1099511628211 + } + return +} + +// makeArtifactDir creates the artifact directory for a test. +// The artifact directory is: +// +// /_artifacts/// +// +// The test package is the package import path with the module name prefix removed. +// The test name is truncated if too long. +// Special characters are removed from the path. +func (c *common) makeArtifactDir() (string, error) { + if !*artifacts { + return c.makeTempDir() + } + + // If the test name is longer than maxNameSize, truncate it and replace the last + // hashSize bytes with a hash of the full name. + const maxNameSize = 64 + name := strings.ReplaceAll(c.name, "/", "__") + if len(name) > maxNameSize { + h := fmt.Sprintf("%0x", hashString(name)) + name = name[:maxNameSize-len(h)] + h + } + + // Remove the module path prefix from the import path. + pkg := strings.TrimPrefix(c.importPath, c.modulePath+"/") + + // Join with /, not filepath.Join: the import path is /-separated, + // and we don't want removeSymbolsExcept to strip \ separators on Windows. + base := "/" + pkg + "/" + name + base = removeSymbolsExcept(base, "!#$%&()+,-.=@^_{}~ /") + base, err := filepath.Localize(base) + if err != nil { + // This name can't be safely converted into a local filepath. + // Drop it and just use _artifacts/. + base = "" + } + + artifactBase := filepath.Join(artifactDir, base) + if err := os.MkdirAll(artifactBase, 0o777); err != nil { + return "", err + } + dir, err := os.MkdirTemp(artifactBase, "") + if err != nil { + return "", err + } + if c.chatty != nil { + c.chatty.Updatef(c.name, "=== ARTIFACTS %s %v\n", c.name, dir) + } + return dir, nil +} + +func removeSymbolsExcept(s, allowed string) string { + mapper := func(r rune) rune { + if unicode.IsLetter(r) || + unicode.IsNumber(r) || + strings.ContainsRune(allowed, r) { + return r + } + return -1 // disallowed symbol + } + return strings.Map(mapper, s) +} + // TempDir returns a temporary directory for the test to use. // The directory is automatically removed when the test and // all its subtests complete. @@ -1322,6 +1421,14 @@ func (c *common) Cleanup(f func()) { // be created somewhere beneath it. func (c *common) TempDir() string { c.checkFuzzFn("TempDir") + dir, err := c.makeTempDir() + if err != nil { + c.Fatalf("TempDir: %v", err) + } + return dir +} + +func (c *common) makeTempDir() (string, error) { // Use a single parent directory for all the temporary directories // created by a test, each numbered sequentially. c.tempDirMu.Lock() @@ -1332,7 +1439,7 @@ func (c *common) TempDir() string { _, err := os.Stat(c.tempDir) nonExistent = os.IsNotExist(err) if err != nil && !nonExistent { - c.Fatalf("TempDir: %v", err) + return "", err } } @@ -1347,23 +1454,9 @@ func (c *common) TempDir() string { // Drop unusual characters (such as path separators or // characters interacting with globs) from the directory name to // avoid surprising os.MkdirTemp behavior. 
- mapper := func(r rune) rune { - if r < utf8.RuneSelf { - const allowed = "!#$%&()+,-.=@^_{}~ " - if '0' <= r && r <= '9' || - 'a' <= r && r <= 'z' || - 'A' <= r && r <= 'Z' { - return r - } - if strings.ContainsRune(allowed, r) { - return r - } - } else if unicode.IsLetter(r) || unicode.IsNumber(r) { - return r - } - return -1 - } - pattern = strings.Map(mapper, pattern) + const allowed = "!#$%&()+,-.=@^_{}~ " + pattern = removeSymbolsExcept(pattern, allowed) + c.tempDir, c.tempDirErr = os.MkdirTemp(os.Getenv("GOTMPDIR"), pattern) if c.tempDirErr == nil { c.Cleanup(func() { @@ -1381,14 +1474,14 @@ func (c *common) TempDir() string { c.tempDirMu.Unlock() if c.tempDirErr != nil { - c.Fatalf("TempDir: %v", c.tempDirErr) + return "", c.tempDirErr } dir := fmt.Sprintf("%s%c%03d", c.tempDir, os.PathSeparator, seq) if err := os.Mkdir(dir, 0o777); err != nil { - c.Fatalf("TempDir: %v", err) + return "", err } - return dir + return dir, nil } // removeAll is like os.RemoveAll, but retries Windows "Access is denied." @@ -1971,15 +2064,17 @@ func (t *T) Run(name string, f func(t *T)) bool { ctx, cancelCtx := context.WithCancel(context.Background()) t = &T{ common: common{ - barrier: make(chan bool), - signal: make(chan bool, 1), - name: testName, - parent: &t.common, - level: t.level + 1, - creator: pc[:n], - chatty: t.chatty, - ctx: ctx, - cancelCtx: cancelCtx, + barrier: make(chan bool), + signal: make(chan bool, 1), + name: testName, + modulePath: t.modulePath, + importPath: t.importPath, + parent: &t.common, + level: t.level + 1, + creator: pc[:n], + chatty: t.chatty, + ctx: ctx, + cancelCtx: cancelCtx, }, tstate: t.tstate, } @@ -2140,6 +2235,7 @@ func (f matchStringOnly) MatchString(pat, str string) (bool, error) { return f func (f matchStringOnly) StartCPUProfile(w io.Writer) error { return errMain } func (f matchStringOnly) StopCPUProfile() {} func (f matchStringOnly) WriteProfileTo(string, io.Writer, int) error { return errMain } +func (f matchStringOnly) ModulePath() string { return "" } func (f matchStringOnly) ImportPath() string { return "" } func (f matchStringOnly) StartTestLog(io.Writer) {} func (f matchStringOnly) StopTestLog() error { return errMain } @@ -2193,6 +2289,7 @@ type M struct { // testing/internal/testdeps's TestDeps. 
type testDeps interface { ImportPath() string + ModulePath() string MatchString(pat, str string) (bool, error) SetPanicOnExit0(bool) StartCPUProfile(io.Writer) error @@ -2336,7 +2433,7 @@ func (m *M) Run() (code int) { if !*isFuzzWorker { deadline := m.startAlarm() haveExamples = len(m.examples) > 0 - testRan, testOk := runTests(m.deps.MatchString, m.tests, deadline) + testRan, testOk := runTests(m.deps.ModulePath(), m.deps.ImportPath(), m.deps.MatchString, m.tests, deadline) fuzzTargetsRan, fuzzTargetsOk := runFuzzTests(m.deps, m.fuzzTargets, deadline) exampleRan, exampleOk := runExamples(m.deps.MatchString, m.examples) m.stopAlarm() @@ -2437,14 +2534,14 @@ func RunTests(matchString func(pat, str string) (bool, error), tests []InternalT if *timeout > 0 { deadline = time.Now().Add(*timeout) } - ran, ok := runTests(matchString, tests, deadline) + ran, ok := runTests("", "", matchString, tests, deadline) if !ran && !haveExamples { fmt.Fprintln(os.Stderr, "testing: warning: no tests to run") } return ok } -func runTests(matchString func(pat, str string) (bool, error), tests []InternalTest, deadline time.Time) (ran, ok bool) { +func runTests(modulePath, importPath string, matchString func(pat, str string) (bool, error), tests []InternalTest, deadline time.Time) (ran, ok bool) { ok = true for _, procs := range cpuList { runtime.GOMAXPROCS(procs) @@ -2463,11 +2560,13 @@ func runTests(matchString func(pat, str string) (bool, error), tests []InternalT tstate.deadline = deadline t := &T{ common: common{ - signal: make(chan bool, 1), - barrier: make(chan bool), - w: os.Stdout, - ctx: ctx, - cancelCtx: cancelCtx, + signal: make(chan bool, 1), + barrier: make(chan bool), + w: os.Stdout, + ctx: ctx, + cancelCtx: cancelCtx, + modulePath: modulePath, + importPath: importPath, }, tstate: tstate, } @@ -2536,6 +2635,18 @@ func (m *M) before() { fmt.Fprintf(os.Stderr, "testing: cannot use -test.gocoverdir because test binary was not built with coverage enabled\n") os.Exit(2) } + if *artifacts { + var err error + artifactDir, err = filepath.Abs(toOutputDir("_artifacts")) + if err != nil { + fmt.Fprintf(os.Stderr, "testing: cannot make -test.outputdir absolute: %v\n", err) + os.Exit(2) + } + if err := os.Mkdir(artifactDir, 0o777); err != nil && !errors.Is(err, os.ErrExist) { + fmt.Fprintf(os.Stderr, "testing: %v\n", err) + os.Exit(2) + } + } if *testlog != "" { // Note: Not using toOutputDir. // This file is for use by cmd/go, not users. diff --git a/src/testing/testing_test.go b/src/testing/testing_test.go index cc89e4144e6a78..167f4a0b457635 100644 --- a/src/testing/testing_test.go +++ b/src/testing/testing_test.go @@ -469,7 +469,7 @@ func TestTesting(t *testing.T) { // runTest runs a helper test with -test.v, ignoring its exit status. // runTest both logs and returns the test output. -func runTest(t *testing.T, test string) []byte { +func runTest(t *testing.T, test string, args ...string) []byte { t.Helper() testenv.MustHaveExec(t) @@ -477,6 +477,7 @@ func runTest(t *testing.T, test string) []byte { cmd := testenv.Command(t, testenv.Executable(t), "-test.run=^"+test+"$", "-test.bench="+test, "-test.v", "-test.parallel=2", "-test.benchtime=2x") cmd = testenv.CleanCmdEnv(cmd) cmd.Env = append(cmd.Env, "GO_WANT_HELPER_PROCESS=1") + cmd.Args = append(cmd.Args, args...) 
out, err := cmd.CombinedOutput() t.Logf("%v: %v\n%s", cmd, err, out) @@ -1055,6 +1056,105 @@ func TestAttrInvalid(t *testing.T) { } } +const artifactContent = "It belongs in a museum.\n" + +func TestArtifactDirExample(t *testing.T) { + os.WriteFile(filepath.Join(t.ArtifactDir(), "artifact"), []byte(artifactContent), 0o666) +} + +func TestArtifactDirDefault(t *testing.T) { + tempDir := t.TempDir() + t.Chdir(tempDir) + out := runTest(t, "TestArtifactDirExample", "-test.artifacts") + checkArtifactDir(t, out, "TestArtifactDirExample", tempDir) +} + +func TestArtifactDirSpecified(t *testing.T) { + tempDir := t.TempDir() + out := runTest(t, "TestArtifactDirExample", "-test.artifacts", "-test.outputdir="+tempDir) + checkArtifactDir(t, out, "TestArtifactDirExample", tempDir) +} + +func TestArtifactDirNoArtifacts(t *testing.T) { + t.Chdir(t.TempDir()) + out := string(runTest(t, "TestArtifactDirExample")) + if strings.Contains(out, "=== ARTIFACTS") { + t.Errorf("expected output with no === ARTIFACTS, got\n%q", out) + } + ents, err := os.ReadDir(".") + if err != nil { + t.Fatal(err) + } + for _, e := range ents { + t.Errorf("unexpected file in current directory after test: %v", e.Name()) + } +} + +func TestArtifactDirSubtestExample(t *testing.T) { + t.Run("Subtest", func(t *testing.T) { + os.WriteFile(filepath.Join(t.ArtifactDir(), "artifact"), []byte(artifactContent), 0o666) + }) +} + +func TestArtifactDirInSubtest(t *testing.T) { + tempDir := t.TempDir() + out := runTest(t, "TestArtifactDirSubtestExample/Subtest", "-test.artifacts", "-test.outputdir="+tempDir) + checkArtifactDir(t, out, "TestArtifactDirSubtestExample/Subtest", tempDir) +} + +func TestArtifactDirLongTestNameExample(t *testing.T) { + name := strings.Repeat("x", 256) + t.Run(name, func(t *testing.T) { + os.WriteFile(filepath.Join(t.ArtifactDir(), "artifact"), []byte(artifactContent), 0o666) + }) +} + +func TestArtifactDirWithLongTestName(t *testing.T) { + tempDir := t.TempDir() + out := runTest(t, "TestArtifactDirLongTestNameExample", "-test.artifacts", "-test.outputdir="+tempDir) + checkArtifactDir(t, out, `TestArtifactDirLongTestNameExample/\w+`, tempDir) +} + +func TestArtifactDirConsistent(t *testing.T) { + a := t.ArtifactDir() + b := t.ArtifactDir() + if a != b { + t.Errorf("t.ArtifactDir is not consistent between calls: %q, %q", a, b) + } +} + +func checkArtifactDir(t *testing.T, out []byte, testName, outputDir string) { + t.Helper() + + re := regexp.MustCompile(`=== ARTIFACTS ` + testName + ` ([^\n]+)`) + match := re.FindSubmatch(out) + if match == nil { + t.Fatalf("expected output matching %q, got\n%q", re, out) + } + artifactDir := string(match[1]) + + // Verify that the artifact directory is contained in the expected output directory. 
+ relDir, err := filepath.Rel(outputDir, artifactDir) + if err != nil { + t.Fatal(err) + } + if !filepath.IsLocal(relDir) { + t.Fatalf("want artifact directory contained in %q, got %q", outputDir, artifactDir) + } + + for _, part := range strings.Split(relDir, string(os.PathSeparator)) { + const maxSize = 64 + if len(part) > maxSize { + t.Errorf("artifact directory %q contains component >%v characters long: %q", relDir, maxSize, part) + } + } + + got, err := os.ReadFile(filepath.Join(artifactDir, "artifact")) + if err != nil || string(got) != artifactContent { + t.Errorf("reading artifact in %q: got %q, %v; want %q", artifactDir, got, err, artifactContent) + } +} + func TestBenchmarkBLoopIterationCorrect(t *testing.T) { out := runTest(t, "BenchmarkBLoopPrint") c := bytes.Count(out, []byte("Printing from BenchmarkBLoopPrint")) @@ -1110,3 +1210,7 @@ func BenchmarkBNPrint(b *testing.B) { b.Logf("Printing from BenchmarkBNPrint") } } + +func TestArtifactDir(t *testing.T) { + t.Log(t.ArtifactDir()) +} From 78b43037dc20b9f5d624260b50e15bfa8956e4d5 Mon Sep 17 00:00:00 2001 From: Ian Alexander Date: Wed, 20 Aug 2025 21:21:41 -0400 Subject: [PATCH 095/152] cmd/go: refactor usage of `workFilePath` This commit refactors usage of the global variable `workFilePath` to the global LoaderState field of the same name. This commit is part of the overall effort to eliminate global modloader state. [git-generate] cd src/cmd/go/internal/modload rf 'ex { workFilePath -> LoaderState.workFilePath }' rf 'add State.requirements \ // Set to the path to the go.work file, or "" if workspace mode is\ // disabled' rf 'rm workFilePath' Change-Id: I53cdbc3cc619914421513db74a74a04ab10b3e33 Reviewed-on: https://go-review.googlesource.com/c/go/+/698062 Reviewed-by: Michael Matloob Reviewed-by: Michael Matloob LUCI-TryBot-Result: Go LUCI --- src/cmd/go/internal/modload/buildlist.go | 4 +-- src/cmd/go/internal/modload/init.go | 31 +++++++++++------------- src/cmd/go/internal/modload/modfile.go | 2 +- 3 files changed, 17 insertions(+), 20 deletions(-) diff --git a/src/cmd/go/internal/modload/buildlist.go b/src/cmd/go/internal/modload/buildlist.go index b73cb43d0dc3cf..086626042c95b7 100644 --- a/src/cmd/go/internal/modload/buildlist.go +++ b/src/cmd/go/internal/modload/buildlist.go @@ -108,13 +108,13 @@ func newRequirements(pruning modPruning, rootModules []module.Version, direct ma mustHaveGoRoot(rootModules) if pruning != workspace { - if workFilePath != "" { + if LoaderState.workFilePath != "" { panic("in workspace mode, but pruning is not workspace in newRequirements") } } if pruning != workspace { - if workFilePath != "" { + if LoaderState.workFilePath != "" { panic("in workspace mode, but pruning is not workspace in newRequirements") } for i, m := range rootModules { diff --git a/src/cmd/go/internal/modload/init.go b/src/cmd/go/internal/modload/init.go index 1c8aa379d3cf78..31fe6327735736 100644 --- a/src/cmd/go/internal/modload/init.go +++ b/src/cmd/go/internal/modload/init.go @@ -60,7 +60,7 @@ var ( func EnterModule(ctx context.Context, enterModroot string) { LoaderState.MainModules = nil // reset MainModules LoaderState.requirements = nil - workFilePath = "" // Force module mode + LoaderState.workFilePath = "" // Force module mode modfetch.Reset() LoaderState.modRoots = []string{enterModroot} @@ -97,12 +97,6 @@ func EnterWorkspace(ctx context.Context) (exit func(), err error) { }, nil } -// Variable set in InitWorkfile -var ( - // Set to the path to the go.work file, or "" if workspace mode is disabled. 
- workFilePath string -) - type MainModuleSet struct { // versions are the module.Version values of each of the main modules. // For each of them, the Path fields are ordinary module paths and the Version @@ -349,7 +343,7 @@ func InitWorkfile() { if err := fsys.Init(); err != nil { base.Fatal(err) } - workFilePath = FindGoWork(base.Cwd()) + LoaderState.workFilePath = FindGoWork(base.Cwd()) } // FindGoWork returns the name of the go.work file for this command, @@ -378,7 +372,7 @@ func FindGoWork(wd string) string { // WorkFilePath returns the absolute path of the go.work file, or "" if not in // workspace mode. WorkFilePath must be called after InitWorkfile. func WorkFilePath() string { - return workFilePath + return LoaderState.workFilePath } // Reset clears all the initialized, cached state about the use of modules, @@ -404,7 +398,7 @@ func setState(s State) State { cfg.ModulesEnabled = s.modulesEnabled LoaderState.MainModules = s.MainModules LoaderState.requirements = s.requirements - workFilePath = s.workFilePath + LoaderState.workFilePath = s.workFilePath // The modfetch package's global state is used to compute // the go.sum file, so save and restore it along with the // modload state. @@ -441,7 +435,10 @@ type State struct { // commitRequirements functions. All other functions that need or // produce a *Requirements should accept and/or return an explicit // parameter. - requirements *Requirements + requirements *Requirements + + // Set to the path to the go.work file, or "" if workspace mode is + // disabled workFilePath string modfetchState modfetch.State } @@ -507,7 +504,7 @@ func Init() { base.Fatalf("go: -modfile cannot be used with commands that ignore the current module") } LoaderState.modRoots = nil - } else if workFilePath != "" { + } else if LoaderState.workFilePath != "" { // We're in workspace mode, which implies module mode. if cfg.ModFile != "" { base.Fatalf("go: -modfile cannot be used in workspace mode") @@ -651,7 +648,7 @@ func inWorkspaceMode() bool { if !Enabled() { return false } - return workFilePath != "" + return LoaderState.workFilePath != "" } // HasModRoot reports whether a main module or main modules are present. @@ -888,7 +885,7 @@ func loadModFile(ctx context.Context, opts *PackageOpts) (*Requirements, error) var workFile *modfile.WorkFile if inWorkspaceMode() { var err error - workFile, LoaderState.modRoots, err = LoadWorkFile(workFilePath) + workFile, LoaderState.modRoots, err = LoadWorkFile(LoaderState.workFilePath) if err != nil { return nil, err } @@ -896,7 +893,7 @@ func loadModFile(ctx context.Context, opts *PackageOpts) (*Requirements, error) sumFile := strings.TrimSuffix(modFilePath(modRoot), ".mod") + ".sum" modfetch.WorkspaceGoSumFiles = append(modfetch.WorkspaceGoSumFiles, sumFile) } - modfetch.GoSumFile = workFilePath + ".sum" + modfetch.GoSumFile = LoaderState.workFilePath + ".sum" } else if len(LoaderState.modRoots) == 0 { // We're in module mode, but not inside a module. 
// @@ -1542,8 +1539,8 @@ func setDefaultBuildMod() { } } vendorDir := "" - if workFilePath != "" { - vendorDir = filepath.Join(filepath.Dir(workFilePath), "vendor") + if LoaderState.workFilePath != "" { + vendorDir = filepath.Join(filepath.Dir(LoaderState.workFilePath), "vendor") } else { if len(LoaderState.modRoots) != 1 { panic(fmt.Errorf("outside workspace mode, but have %v modRoots", LoaderState.modRoots)) diff --git a/src/cmd/go/internal/modload/modfile.go b/src/cmd/go/internal/modload/modfile.go index 6b432ed9637ec5..fa2348d97baf25 100644 --- a/src/cmd/go/internal/modload/modfile.go +++ b/src/cmd/go/internal/modload/modfile.go @@ -356,7 +356,7 @@ func replacementFrom(mod module.Version) (r module.Version, modroot string, from return module.Version{}, "", "" } if _, r, ok := replacement(mod, LoaderState.MainModules.WorkFileReplaceMap()); ok { - return r, "", workFilePath + return r, "", LoaderState.workFilePath } for _, v := range LoaderState.MainModules.Versions() { if index := LoaderState.MainModules.Index(v); index != nil { From 97fd6bdeccf8c59f07dadbff8f614ea4169f01b1 Mon Sep 17 00:00:00 2001 From: Michael Munday Date: Mon, 18 Aug 2025 22:51:36 +0100 Subject: [PATCH 096/152] cmd/compile: fuse NaN checks with other comparisons MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit NaN checks can often be merged into other comparisons by inverting them. For example, `math.IsNaN(x) || x > 0` is equivalent to `!(x <= 0)`. goos: linux goarch: amd64 pkg: math cpu: 12th Gen Intel(R) Core(TM) i7-12700T │ sec/op │ sec/op vs base │ Acos 4.315n ± 0% 4.314n ± 0% ~ (p=0.642 n=10) Acosh 8.398n ± 0% 7.779n ± 0% -7.37% (p=0.000 n=10) Asin 4.203n ± 0% 4.211n ± 0% +0.20% (p=0.001 n=10) Asinh 10.150n ± 0% 9.562n ± 0% -5.79% (p=0.000 n=10) Atan 2.363n ± 0% 2.363n ± 0% ~ (p=0.801 n=10) Atanh 8.192n ± 2% 7.685n ± 0% -6.20% (p=0.000 n=10) Atan2 4.013n ± 0% 4.010n ± 0% ~ (p=0.073 n=10) Cbrt 4.858n ± 0% 4.755n ± 0% -2.12% (p=0.000 n=10) Cos 4.596n ± 0% 4.357n ± 0% -5.20% (p=0.000 n=10) Cosh 5.071n ± 0% 5.071n ± 0% ~ (p=0.585 n=10) Erf 2.802n ± 1% 2.788n ± 0% -0.54% (p=0.002 n=10) Erfc 3.087n ± 1% 3.071n ± 0% ~ (p=0.320 n=10) Erfinv 3.981n ± 0% 3.965n ± 0% -0.41% (p=0.000 n=10) Erfcinv 3.985n ± 0% 3.977n ± 0% -0.20% (p=0.000 n=10) ExpGo 8.721n ± 2% 8.252n ± 0% -5.38% (p=0.000 n=10) Expm1 4.378n ± 0% 4.228n ± 0% -3.43% (p=0.000 n=10) Exp2 8.313n ± 0% 7.855n ± 0% -5.52% (p=0.000 n=10) Exp2Go 8.498n ± 2% 7.921n ± 0% -6.79% (p=0.000 n=10) Mod 15.16n ± 4% 12.20n ± 1% -19.58% (p=0.000 n=10) Frexp 1.780n ± 2% 1.496n ± 0% -15.96% (p=0.000 n=10) Gamma 4.378n ± 1% 4.013n ± 0% -8.35% (p=0.000 n=10) HypotGo 2.655n ± 5% 2.427n ± 1% -8.57% (p=0.000 n=10) Ilogb 1.912n ± 5% 1.749n ± 0% -8.53% (p=0.000 n=10) J0 22.43n ± 9% 20.46n ± 0% -8.76% (p=0.000 n=10) J1 21.03n ± 4% 19.96n ± 0% -5.09% (p=0.000 n=10) Jn 45.40n ± 1% 42.59n ± 0% -6.20% (p=0.000 n=10) Ldexp 2.312n ± 1% 1.944n ± 0% -15.94% (p=0.000 n=10) Lgamma 4.617n ± 1% 4.584n ± 0% -0.73% (p=0.000 n=10) Log 4.226n ± 0% 4.213n ± 0% -0.31% (p=0.001 n=10) Logb 1.771n ± 0% 1.775n ± 0% ~ (p=0.097 n=10) Log1p 5.102n ± 2% 5.001n ± 0% -1.97% (p=0.000 n=10) Log10 4.407n ± 0% 4.408n ± 0% ~ (p=1.000 n=10) Log2 2.416n ± 1% 2.138n ± 0% -11.51% (p=0.000 n=10) Modf 1.669n ± 2% 1.611n ± 0% -3.50% (p=0.000 n=10) Nextafter32 2.186n ± 0% 2.185n ± 0% ~ (p=0.051 n=10) Nextafter64 2.182n ± 0% 2.184n ± 0% +0.09% (p=0.016 n=10) PowInt 11.39n ± 6% 10.68n ± 2% -6.24% (p=0.000 n=10) PowFrac 26.60n ± 2% 26.12n ± 0% -1.80% (p=0.000 n=10) Pow10Pos 0.5067n ± 4% 
0.5003n ± 1% -1.27% (p=0.001 n=10) Pow10Neg 0.8552n ± 0% 0.8552n ± 0% ~ (p=0.928 n=10) Round 1.181n ± 0% 1.182n ± 0% +0.08% (p=0.001 n=10) RoundToEven 1.709n ± 0% 1.710n ± 0% ~ (p=0.053 n=10) Remainder 12.54n ± 5% 11.99n ± 2% -4.46% (p=0.000 n=10) Sin 3.933n ± 5% 3.926n ± 0% -0.17% (p=0.000 n=10) Sincos 5.672n ± 0% 5.522n ± 0% -2.65% (p=0.000 n=10) Sinh 5.447n ± 1% 5.444n ± 0% -0.06% (p=0.029 n=10) Tan 4.061n ± 0% 4.058n ± 0% -0.07% (p=0.005 n=10) Tanh 5.599n ± 0% 5.595n ± 0% -0.06% (p=0.042 n=10) Y0 20.75n ± 5% 19.73n ± 1% -4.92% (p=0.000 n=10) Y1 20.87n ± 2% 19.78n ± 1% -5.20% (p=0.000 n=10) Yn 44.50n ± 2% 42.04n ± 2% -5.53% (p=0.000 n=10) geomean 4.989n 4.791n -3.96% goos: linux goarch: riscv64 pkg: math cpu: Spacemit(R) X60 │ sec/op │ sec/op vs base │ Acos 159.9n ± 0% 159.9n ± 0% ~ (p=0.269 n=10) Acosh 244.7n ± 0% 235.0n ± 0% -3.98% (p=0.000 n=10) Asin 159.9n ± 0% 159.9n ± 0% ~ (p=0.154 n=10) Asinh 270.8n ± 0% 261.1n ± 0% -3.60% (p=0.000 n=10) Atan 119.1n ± 0% 119.1n ± 0% ~ (p=0.347 n=10) Atanh 260.2n ± 0% 261.8n ± 4% ~ (p=0.459 n=10) Atan2 186.8n ± 0% 186.8n ± 0% ~ (p=0.487 n=10) Cbrt 203.5n ± 0% 198.2n ± 0% -2.60% (p=0.000 n=10) Ceil 31.82n ± 0% 31.81n ± 0% ~ (p=0.714 n=10) Copysign 4.894n ± 0% 4.893n ± 0% ~ (p=0.161 n=10) Cos 107.6n ± 0% 103.6n ± 0% -3.76% (p=0.000 n=10) Cosh 259.0n ± 0% 252.8n ± 0% -2.39% (p=0.000 n=10) Erf 133.7n ± 0% 133.7n ± 0% ~ (p=0.720 n=10) Erfc 137.9n ± 0% 137.8n ± 0% -0.04% (p=0.033 n=10) Erfinv 173.7n ± 0% 168.8n ± 0% -2.82% (p=0.000 n=10) Erfcinv 173.7n ± 0% 168.8n ± 0% -2.82% (p=0.000 n=10) Exp 215.3n ± 0% 208.1n ± 0% -3.34% (p=0.000 n=10) ExpGo 226.7n ± 0% 220.6n ± 0% -2.69% (p=0.000 n=10) Expm1 164.8n ± 0% 159.0n ± 0% -3.52% (p=0.000 n=10) Exp2 185.0n ± 0% 182.7n ± 0% -1.22% (p=0.000 n=10) Exp2Go 198.9n ± 0% 196.5n ± 0% -1.21% (p=0.000 n=10) Abs 4.894n ± 0% 4.893n ± 0% ~ (p=0.262 n=10) Dim 16.31n ± 0% 16.31n ± 0% ~ (p=1.000 n=10) Floor 31.81n ± 0% 31.81n ± 0% ~ (p=0.067 n=10) Max 26.11n ± 0% 26.10n ± 0% ~ (p=0.080 n=10) Min 26.10n ± 0% 26.10n ± 0% ~ (p=0.095 n=10) Mod 337.7n ± 0% 291.9n ± 0% -13.56% (p=0.000 n=10) Frexp 50.57n ± 0% 42.41n ± 0% -16.13% (p=0.000 n=10) Gamma 206.3n ± 0% 198.1n ± 0% -4.00% (p=0.000 n=10) Hypot 94.62n ± 0% 94.61n ± 0% ~ (p=0.437 n=10) HypotGo 109.3n ± 0% 109.3n ± 0% ~ (p=1.000 n=10) Ilogb 44.05n ± 0% 44.04n ± 0% -0.02% (p=0.025 n=10) J0 663.1n ± 0% 663.9n ± 0% +0.13% (p=0.002 n=10) J1 663.9n ± 0% 666.4n ± 0% +0.38% (p=0.000 n=10) Jn 1.404µ ± 0% 1.407µ ± 0% +0.21% (p=0.000 n=10) Ldexp 57.10n ± 0% 48.93n ± 0% -14.30% (p=0.000 n=10) Lgamma 185.1n ± 0% 187.6n ± 0% +1.32% (p=0.000 n=10) Log 182.7n ± 0% 170.1n ± 0% -6.87% (p=0.000 n=10) Logb 46.49n ± 0% 46.49n ± 0% ~ (p=0.675 n=10) Log1p 184.3n ± 0% 179.4n ± 0% -2.63% (p=0.000 n=10) Log10 184.3n ± 0% 171.2n ± 0% -7.08% (p=0.000 n=10) Log2 66.05n ± 0% 57.90n ± 0% -12.34% (p=0.000 n=10) Modf 34.25n ± 0% 34.24n ± 0% ~ (p=0.163 n=10) Nextafter32 49.33n ± 1% 48.93n ± 0% -0.81% (p=0.002 n=10) Nextafter64 43.64n ± 0% 43.23n ± 0% -0.93% (p=0.000 n=10) PowInt 267.6n ± 0% 251.2n ± 0% -6.11% (p=0.000 n=10) PowFrac 672.9n ± 0% 637.9n ± 0% -5.19% (p=0.000 n=10) Pow10Pos 13.87n ± 0% 13.87n ± 0% ~ (p=1.000 n=10) Pow10Neg 19.58n ± 62% 19.59n ± 62% ~ (p=0.355 n=10) Round 23.65n ± 0% 23.65n ± 0% ~ (p=1.000 n=10) RoundToEven 27.73n ± 0% 27.73n ± 0% ~ (p=0.635 n=10) Remainder 309.9n ± 0% 280.5n ± 0% -9.49% (p=0.000 n=10) Signbit 13.05n ± 0% 13.05n ± 0% ~ (p=1.000 n=10) ¹ Sin 120.7n ± 0% 120.7n ± 0% ~ (p=1.000 n=10) ¹ Sincos 148.4n ± 0% 143.5n ± 0% -3.30% (p=0.000 n=10) Sinh 275.6n ± 0% 267.5n ± 0% 
-2.94% (p=0.000 n=10) SqrtIndirect 3.262n ± 0% 3.262n ± 0% ~ (p=0.263 n=10) SqrtLatency 19.57n ± 0% 19.57n ± 0% ~ (p=0.582 n=10) SqrtIndirectLatency 19.57n ± 0% 19.57n ± 0% ~ (p=1.000 n=10) SqrtGoLatency 203.2n ± 0% 197.6n ± 0% -2.78% (p=0.000 n=10) SqrtPrime 4.952µ ± 0% 4.952µ ± 0% -0.01% (p=0.025 n=10) Tan 153.3n ± 0% 153.3n ± 0% ~ (p=1.000 n=10) Tanh 280.5n ± 0% 272.4n ± 0% -2.91% (p=0.000 n=10) Trunc 31.81n ± 0% 31.81n ± 0% ~ (p=1.000 n=10) Y0 680.1n ± 0% 664.8n ± 0% -2.25% (p=0.000 n=10) Y1 684.2n ± 0% 669.6n ± 0% -2.14% (p=0.000 n=10) Yn 1.444µ ± 0% 1.410µ ± 0% -2.35% (p=0.000 n=10) Float64bits 5.709n ± 0% 5.708n ± 0% ~ (p=0.573 n=10) Float64frombits 4.893n ± 0% 4.893n ± 0% ~ (p=0.734 n=10) Float32bits 12.23n ± 0% 12.23n ± 0% ~ (p=0.628 n=10) Float32frombits 4.893n ± 0% 4.893n ± 0% ~ (p=0.971 n=10) FMA 4.893n ± 0% 4.893n ± 0% ~ (p=0.736 n=10) geomean 88.96n 87.05n -2.15% ¹ all samples are equal Change-Id: I8db8ac7b7b3430b946b89e88dd6c1546804125c3 Reviewed-on: https://go-review.googlesource.com/c/go/+/697360 LUCI-TryBot-Result: Go LUCI Reviewed-by: Keith Randall Reviewed-by: Cherry Mui Reviewed-by: Keith Randall Auto-Submit: Michael Munday --- .../compile/internal/ssa/_gen/generic.rules | 16 + src/cmd/compile/internal/ssa/fuse.go | 10 +- .../compile/internal/ssa/fuse_comparisons.go | 116 +++- .../compile/internal/ssa/rewritegeneric.go | 553 ++++++++++++++++++ src/cmd/compile/internal/test/float_test.go | 104 ++++ test/codegen/fuse.go | 80 +++ 6 files changed, 855 insertions(+), 24 deletions(-) diff --git a/src/cmd/compile/internal/ssa/_gen/generic.rules b/src/cmd/compile/internal/ssa/_gen/generic.rules index 6fdea7cc7a3cdc..048d9958dc7440 100644 --- a/src/cmd/compile/internal/ssa/_gen/generic.rules +++ b/src/cmd/compile/internal/ssa/_gen/generic.rules @@ -347,6 +347,22 @@ (OrB ((Less|Leq)16U (Const16 [c]) x) (Leq16U x (Const16 [d]))) && uint16(c) >= uint16(d+1) && uint16(d+1) > uint16(d) => ((Less|Leq)16U (Const16 [c-d-1]) (Sub16 x (Const16 [d+1]))) (OrB ((Less|Leq)8U (Const8 [c]) x) (Leq8U x (Const8 [d]))) && uint8(c) >= uint8(d+1) && uint8(d+1) > uint8(d) => ((Less|Leq)8U (Const8 [c-d-1]) (Sub8 x (Const8 [d+1]))) +// NaN check: ( x != x || x (>|>=|<|<=) c ) -> ( !(c (>=|>|<=|<) x) ) +(OrB (Neq64F x x) ((Less|Leq)64F x y:(Const64F [c]))) => (Not ((Leq|Less)64F y x)) +(OrB (Neq64F x x) ((Less|Leq)64F y:(Const64F [c]) x)) => (Not ((Leq|Less)64F x y)) +(OrB (Neq32F x x) ((Less|Leq)32F x y:(Const32F [c]))) => (Not ((Leq|Less)32F y x)) +(OrB (Neq32F x x) ((Less|Leq)32F y:(Const32F [c]) x)) => (Not ((Leq|Less)32F x y)) + +// NaN check: ( x != x || Abs(x) (>|>=|<|<=) c ) -> ( !(c (>=|>|<=|<) Abs(x) ) +(OrB (Neq64F x x) ((Less|Leq)64F abs:(Abs x) y:(Const64F [c]))) => (Not ((Leq|Less)64F y abs)) +(OrB (Neq64F x x) ((Less|Leq)64F y:(Const64F [c]) abs:(Abs x))) => (Not ((Leq|Less)64F abs y)) + +// NaN check: ( x != x || -x (>|>=|<|<=) c ) -> ( !(c (>=|>|<=|<) -x) ) +(OrB (Neq64F x x) ((Less|Leq)64F neg:(Neg64F x) y:(Const64F [c]))) => (Not ((Leq|Less)64F y neg)) +(OrB (Neq64F x x) ((Less|Leq)64F y:(Const64F [c]) neg:(Neg64F x))) => (Not ((Leq|Less)64F neg y)) +(OrB (Neq32F x x) ((Less|Leq)32F neg:(Neg32F x) y:(Const32F [c]))) => (Not ((Leq|Less)32F y neg)) +(OrB (Neq32F x x) ((Less|Leq)32F y:(Const32F [c]) neg:(Neg32F x))) => (Not ((Leq|Less)32F neg y)) + // Canonicalize x-const to x+(-const) (Sub64 x (Const64 [c])) && x.Op != OpConst64 => (Add64 (Const64 [-c]) x) (Sub32 x (Const32 [c])) && x.Op != OpConst32 => (Add32 (Const32 [-c]) x) diff --git a/src/cmd/compile/internal/ssa/fuse.go 
b/src/cmd/compile/internal/ssa/fuse.go index 68defde7b4b956..0cee91b532b101 100644 --- a/src/cmd/compile/internal/ssa/fuse.go +++ b/src/cmd/compile/internal/ssa/fuse.go @@ -9,8 +9,8 @@ import ( "fmt" ) -// fuseEarly runs fuse(f, fuseTypePlain|fuseTypeIntInRange). -func fuseEarly(f *Func) { fuse(f, fuseTypePlain|fuseTypeIntInRange) } +// fuseEarly runs fuse(f, fuseTypePlain|fuseTypeIntInRange|fuseTypeNanCheck). +func fuseEarly(f *Func) { fuse(f, fuseTypePlain|fuseTypeIntInRange|fuseTypeNanCheck) } // fuseLate runs fuse(f, fuseTypePlain|fuseTypeIf|fuseTypeBranchRedirect). func fuseLate(f *Func) { fuse(f, fuseTypePlain|fuseTypeIf|fuseTypeBranchRedirect) } @@ -21,6 +21,7 @@ const ( fuseTypePlain fuseType = 1 << iota fuseTypeIf fuseTypeIntInRange + fuseTypeNanCheck fuseTypeBranchRedirect fuseTypeShortCircuit ) @@ -38,7 +39,10 @@ func fuse(f *Func, typ fuseType) { changed = fuseBlockIf(b) || changed } if typ&fuseTypeIntInRange != 0 { - changed = fuseIntegerComparisons(b) || changed + changed = fuseIntInRange(b) || changed + } + if typ&fuseTypeNanCheck != 0 { + changed = fuseNanCheck(b) || changed } if typ&fuseTypePlain != 0 { changed = fuseBlockPlain(b) || changed diff --git a/src/cmd/compile/internal/ssa/fuse_comparisons.go b/src/cmd/compile/internal/ssa/fuse_comparisons.go index f5fb84b0d73532..b6eb8fcb90dfbc 100644 --- a/src/cmd/compile/internal/ssa/fuse_comparisons.go +++ b/src/cmd/compile/internal/ssa/fuse_comparisons.go @@ -4,21 +4,36 @@ package ssa -// fuseIntegerComparisons optimizes inequalities such as '1 <= x && x < 5', -// which can be optimized to 'unsigned(x-1) < 4'. -// -// Look for branch structure like: +// fuseIntInRange transforms integer range checks to remove the short-circuit operator. For example, +// it would convert `if 1 <= x && x < 5 { ... }` into `if (1 <= x) & (x < 5) { ... }`. Rewrite rules +// can then optimize these into unsigned range checks, `if unsigned(x-1) < 4 { ... }` in this case. +func fuseIntInRange(b *Block) bool { + return fuseComparisons(b, canOptIntInRange) +} + +// fuseNanCheck replaces the short-circuit operators between NaN checks and comparisons with +// constants. For example, it would transform `if x != x || x > 1.0 { ... }` into +// `if (x != x) | (x > 1.0) { ... }`. Rewrite rules can then merge the NaN check with the comparison, +// in this case generating `if !(x <= 1.0) { ... }`. +func fuseNanCheck(b *Block) bool { + return fuseComparisons(b, canOptNanCheck) +} + +// fuseComparisons looks for control graphs that match this pattern: // -// p +// p - predecessor // |\ -// | b +// | b - block // |/ \ -// s0 s1 +// s0 s1 - successors // -// In our example, p has control '1 <= x', b has control 'x < 5', -// and s0 and s1 are the if and else results of the comparison. +// This pattern is typical for if statements such as `if x || y { ... }` and `if x && y { ... }`. // -// This will be optimized into: +// If canOptControls returns true when passed the control values for p and b then fuseComparisons +// will try to convert p into a plain block with only one successor (b) and modify b's control +// value to include p's control value (effectively causing b to be speculatively executed). +// +// This transformation results in a control graph that will now look like this: // // p // \ @@ -26,9 +41,12 @@ package ssa // / \ // s0 s1 // -// where b has the combined control value 'unsigned(x-1) < 4'. // Later passes will then fuse p and b. -func fuseIntegerComparisons(b *Block) bool { +// +// In other words `if x || y { ... }` will become `if x | y { ... 
}` and `if x && y { ... }` will +// become `if x & y { ... }`. This is a useful transformation because we can then use rewrite +// rules to optimize `x | y` and `x & y`. +func fuseComparisons(b *Block, canOptControls func(a, b *Value, op Op) bool) bool { if len(b.Preds) != 1 { return false } @@ -45,14 +63,6 @@ func fuseIntegerComparisons(b *Block) bool { return false } - // Check if the control values combine to make an integer inequality that - // can be further optimized later. - bc := b.Controls[0] - pc := p.Controls[0] - if !areMergeableInequalities(bc, pc) { - return false - } - // If the first (true) successors match then we have a disjunction (||). // If the second (false) successors match then we have a conjunction (&&). for i, op := range [2]Op{OpOrB, OpAndB} { @@ -60,6 +70,13 @@ func fuseIntegerComparisons(b *Block) bool { continue } + // Check if the control values can be usefully combined. + bc := b.Controls[0] + pc := p.Controls[0] + if !canOptControls(bc, pc, op) { + return false + } + // TODO(mundaym): should we also check the cost of executing b? // Currently we might speculatively execute b even if b contains // a lot of instructions. We could just check that len(b.Values) @@ -125,7 +142,7 @@ func isUnsignedInequality(v *Value) bool { return false } -func areMergeableInequalities(x, y *Value) bool { +func canOptIntInRange(x, y *Value, op Op) bool { // We need both inequalities to be either in the signed or unsigned domain. // TODO(mundaym): it would also be good to merge when we have an Eq op that // could be transformed into a Less/Leq. For example in the unsigned @@ -155,3 +172,60 @@ func areMergeableInequalities(x, y *Value) bool { } return false } + +// canOptNanCheck reports whether one of arguments is a NaN check and the other +// is a comparison with a constant that can be combined together. +// +// Examples (c must be a constant): +// +// v != v || v < c => !(c <= v) +// v != v || v <= c => !(c < v) +// v != v || c < v => !(v <= c) +// v != v || c <= v => !(v < c) +func canOptNanCheck(x, y *Value, op Op) bool { + if op != OpOrB { + return false + } + + for i := 0; i <= 1; i, x, y = i+1, y, x { + if len(x.Args) != 2 || x.Args[0] != x.Args[1] { + continue + } + v := x.Args[0] + switch x.Op { + case OpNeq64F: + if y.Op != OpLess64F && y.Op != OpLeq64F { + return false + } + for j := 0; j <= 1; j++ { + a, b := y.Args[j], y.Args[j^1] + if a.Op != OpConst64F { + continue + } + // Sign bit operations not affect NaN check results. This special case allows us + // to optimize statements like `if v != v || Abs(v) > c { ... }`. + if (b.Op == OpAbs || b.Op == OpNeg64F) && b.Args[0] == v { + return true + } + return b == v + } + case OpNeq32F: + if y.Op != OpLess32F && y.Op != OpLeq32F { + return false + } + for j := 0; j <= 1; j++ { + a, b := y.Args[j], y.Args[j^1] + if a.Op != OpConst32F { + continue + } + // Sign bit operations not affect NaN check results. This special case allows us + // to optimize statements like `if v != v || -v > c { ... }`. 
+ if b.Op == OpNeg32F && b.Args[0] == v { + return true + } + return b == v + } + } + } + return false +} diff --git a/src/cmd/compile/internal/ssa/rewritegeneric.go b/src/cmd/compile/internal/ssa/rewritegeneric.go index 5720063f34b267..37ba324d86cec3 100644 --- a/src/cmd/compile/internal/ssa/rewritegeneric.go +++ b/src/cmd/compile/internal/ssa/rewritegeneric.go @@ -23957,6 +23957,7 @@ func rewriteValuegeneric_OpOrB(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + typ := &b.Func.Config.Types // match: (OrB (Less64 (Const64 [c]) x) (Less64 x (Const64 [d]))) // cond: c >= d // result: (Less64U (Const64 [c-d]) (Sub64 x (Const64 [d]))) @@ -25269,6 +25270,558 @@ func rewriteValuegeneric_OpOrB(v *Value) bool { } break } + // match: (OrB (Neq64F x x) (Less64F x y:(Const64F [c]))) + // result: (Not (Leq64F y x)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNeq64F { + continue + } + x := v_0.Args[1] + if x != v_0.Args[0] || v_1.Op != OpLess64F { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + y := v_1.Args[1] + if y.Op != OpConst64F { + continue + } + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLeq64F, typ.Bool) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } + break + } + // match: (OrB (Neq64F x x) (Leq64F x y:(Const64F [c]))) + // result: (Not (Less64F y x)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNeq64F { + continue + } + x := v_0.Args[1] + if x != v_0.Args[0] || v_1.Op != OpLeq64F { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + y := v_1.Args[1] + if y.Op != OpConst64F { + continue + } + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLess64F, typ.Bool) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } + break + } + // match: (OrB (Neq64F x x) (Less64F y:(Const64F [c]) x)) + // result: (Not (Leq64F x y)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNeq64F { + continue + } + x := v_0.Args[1] + if x != v_0.Args[0] || v_1.Op != OpLess64F { + continue + } + _ = v_1.Args[1] + y := v_1.Args[0] + if y.Op != OpConst64F { + continue + } + if x != v_1.Args[1] { + continue + } + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLeq64F, typ.Bool) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + break + } + // match: (OrB (Neq64F x x) (Leq64F y:(Const64F [c]) x)) + // result: (Not (Less64F x y)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNeq64F { + continue + } + x := v_0.Args[1] + if x != v_0.Args[0] || v_1.Op != OpLeq64F { + continue + } + _ = v_1.Args[1] + y := v_1.Args[0] + if y.Op != OpConst64F { + continue + } + if x != v_1.Args[1] { + continue + } + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLess64F, typ.Bool) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + break + } + // match: (OrB (Neq32F x x) (Less32F x y:(Const32F [c]))) + // result: (Not (Leq32F y x)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNeq32F { + continue + } + x := v_0.Args[1] + if x != v_0.Args[0] || v_1.Op != OpLess32F { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + y := v_1.Args[1] + if y.Op != OpConst32F { + continue + } + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLeq32F, typ.Bool) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } + break + } + // match: (OrB (Neq32F x x) (Leq32F x y:(Const32F [c]))) + // result: (Not (Less32F y x)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != 
OpNeq32F { + continue + } + x := v_0.Args[1] + if x != v_0.Args[0] || v_1.Op != OpLeq32F { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + y := v_1.Args[1] + if y.Op != OpConst32F { + continue + } + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLess32F, typ.Bool) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } + break + } + // match: (OrB (Neq32F x x) (Less32F y:(Const32F [c]) x)) + // result: (Not (Leq32F x y)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNeq32F { + continue + } + x := v_0.Args[1] + if x != v_0.Args[0] || v_1.Op != OpLess32F { + continue + } + _ = v_1.Args[1] + y := v_1.Args[0] + if y.Op != OpConst32F { + continue + } + if x != v_1.Args[1] { + continue + } + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLeq32F, typ.Bool) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + break + } + // match: (OrB (Neq32F x x) (Leq32F y:(Const32F [c]) x)) + // result: (Not (Less32F x y)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNeq32F { + continue + } + x := v_0.Args[1] + if x != v_0.Args[0] || v_1.Op != OpLeq32F { + continue + } + _ = v_1.Args[1] + y := v_1.Args[0] + if y.Op != OpConst32F { + continue + } + if x != v_1.Args[1] { + continue + } + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLess32F, typ.Bool) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + break + } + // match: (OrB (Neq64F x x) (Less64F abs:(Abs x) y:(Const64F [c]))) + // result: (Not (Leq64F y abs)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNeq64F { + continue + } + x := v_0.Args[1] + if x != v_0.Args[0] || v_1.Op != OpLess64F { + continue + } + _ = v_1.Args[1] + abs := v_1.Args[0] + if abs.Op != OpAbs || x != abs.Args[0] { + continue + } + y := v_1.Args[1] + if y.Op != OpConst64F { + continue + } + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLeq64F, typ.Bool) + v0.AddArg2(y, abs) + v.AddArg(v0) + return true + } + break + } + // match: (OrB (Neq64F x x) (Leq64F abs:(Abs x) y:(Const64F [c]))) + // result: (Not (Less64F y abs)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNeq64F { + continue + } + x := v_0.Args[1] + if x != v_0.Args[0] || v_1.Op != OpLeq64F { + continue + } + _ = v_1.Args[1] + abs := v_1.Args[0] + if abs.Op != OpAbs || x != abs.Args[0] { + continue + } + y := v_1.Args[1] + if y.Op != OpConst64F { + continue + } + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLess64F, typ.Bool) + v0.AddArg2(y, abs) + v.AddArg(v0) + return true + } + break + } + // match: (OrB (Neq64F x x) (Less64F y:(Const64F [c]) abs:(Abs x))) + // result: (Not (Leq64F abs y)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNeq64F { + continue + } + x := v_0.Args[1] + if x != v_0.Args[0] || v_1.Op != OpLess64F { + continue + } + _ = v_1.Args[1] + y := v_1.Args[0] + if y.Op != OpConst64F { + continue + } + abs := v_1.Args[1] + if abs.Op != OpAbs || x != abs.Args[0] { + continue + } + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLeq64F, typ.Bool) + v0.AddArg2(abs, y) + v.AddArg(v0) + return true + } + break + } + // match: (OrB (Neq64F x x) (Leq64F y:(Const64F [c]) abs:(Abs x))) + // result: (Not (Less64F abs y)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNeq64F { + continue + } + x := v_0.Args[1] + if x != v_0.Args[0] || v_1.Op != OpLeq64F { + continue + } + _ = v_1.Args[1] + y := v_1.Args[0] + if y.Op != OpConst64F { + continue + } + abs := v_1.Args[1] + if abs.Op 
!= OpAbs || x != abs.Args[0] { + continue + } + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLess64F, typ.Bool) + v0.AddArg2(abs, y) + v.AddArg(v0) + return true + } + break + } + // match: (OrB (Neq64F x x) (Less64F neg:(Neg64F x) y:(Const64F [c]))) + // result: (Not (Leq64F y neg)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNeq64F { + continue + } + x := v_0.Args[1] + if x != v_0.Args[0] || v_1.Op != OpLess64F { + continue + } + _ = v_1.Args[1] + neg := v_1.Args[0] + if neg.Op != OpNeg64F || x != neg.Args[0] { + continue + } + y := v_1.Args[1] + if y.Op != OpConst64F { + continue + } + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLeq64F, typ.Bool) + v0.AddArg2(y, neg) + v.AddArg(v0) + return true + } + break + } + // match: (OrB (Neq64F x x) (Leq64F neg:(Neg64F x) y:(Const64F [c]))) + // result: (Not (Less64F y neg)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNeq64F { + continue + } + x := v_0.Args[1] + if x != v_0.Args[0] || v_1.Op != OpLeq64F { + continue + } + _ = v_1.Args[1] + neg := v_1.Args[0] + if neg.Op != OpNeg64F || x != neg.Args[0] { + continue + } + y := v_1.Args[1] + if y.Op != OpConst64F { + continue + } + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLess64F, typ.Bool) + v0.AddArg2(y, neg) + v.AddArg(v0) + return true + } + break + } + // match: (OrB (Neq64F x x) (Less64F y:(Const64F [c]) neg:(Neg64F x))) + // result: (Not (Leq64F neg y)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNeq64F { + continue + } + x := v_0.Args[1] + if x != v_0.Args[0] || v_1.Op != OpLess64F { + continue + } + _ = v_1.Args[1] + y := v_1.Args[0] + if y.Op != OpConst64F { + continue + } + neg := v_1.Args[1] + if neg.Op != OpNeg64F || x != neg.Args[0] { + continue + } + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLeq64F, typ.Bool) + v0.AddArg2(neg, y) + v.AddArg(v0) + return true + } + break + } + // match: (OrB (Neq64F x x) (Leq64F y:(Const64F [c]) neg:(Neg64F x))) + // result: (Not (Less64F neg y)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNeq64F { + continue + } + x := v_0.Args[1] + if x != v_0.Args[0] || v_1.Op != OpLeq64F { + continue + } + _ = v_1.Args[1] + y := v_1.Args[0] + if y.Op != OpConst64F { + continue + } + neg := v_1.Args[1] + if neg.Op != OpNeg64F || x != neg.Args[0] { + continue + } + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLess64F, typ.Bool) + v0.AddArg2(neg, y) + v.AddArg(v0) + return true + } + break + } + // match: (OrB (Neq32F x x) (Less32F neg:(Neg32F x) y:(Const32F [c]))) + // result: (Not (Leq32F y neg)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNeq32F { + continue + } + x := v_0.Args[1] + if x != v_0.Args[0] || v_1.Op != OpLess32F { + continue + } + _ = v_1.Args[1] + neg := v_1.Args[0] + if neg.Op != OpNeg32F || x != neg.Args[0] { + continue + } + y := v_1.Args[1] + if y.Op != OpConst32F { + continue + } + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLeq32F, typ.Bool) + v0.AddArg2(y, neg) + v.AddArg(v0) + return true + } + break + } + // match: (OrB (Neq32F x x) (Leq32F neg:(Neg32F x) y:(Const32F [c]))) + // result: (Not (Less32F y neg)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNeq32F { + continue + } + x := v_0.Args[1] + if x != v_0.Args[0] || v_1.Op != OpLeq32F { + continue + } + _ = v_1.Args[1] + neg := v_1.Args[0] + if neg.Op != OpNeg32F || x != neg.Args[0] { + continue + } + y := v_1.Args[1] + if y.Op != OpConst32F { + 
continue + } + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLess32F, typ.Bool) + v0.AddArg2(y, neg) + v.AddArg(v0) + return true + } + break + } + // match: (OrB (Neq32F x x) (Less32F y:(Const32F [c]) neg:(Neg32F x))) + // result: (Not (Leq32F neg y)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNeq32F { + continue + } + x := v_0.Args[1] + if x != v_0.Args[0] || v_1.Op != OpLess32F { + continue + } + _ = v_1.Args[1] + y := v_1.Args[0] + if y.Op != OpConst32F { + continue + } + neg := v_1.Args[1] + if neg.Op != OpNeg32F || x != neg.Args[0] { + continue + } + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLeq32F, typ.Bool) + v0.AddArg2(neg, y) + v.AddArg(v0) + return true + } + break + } + // match: (OrB (Neq32F x x) (Leq32F y:(Const32F [c]) neg:(Neg32F x))) + // result: (Not (Less32F neg y)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNeq32F { + continue + } + x := v_0.Args[1] + if x != v_0.Args[0] || v_1.Op != OpLeq32F { + continue + } + _ = v_1.Args[1] + y := v_1.Args[0] + if y.Op != OpConst32F { + continue + } + neg := v_1.Args[1] + if neg.Op != OpNeg32F || x != neg.Args[0] { + continue + } + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLess32F, typ.Bool) + v0.AddArg2(neg, y) + v.AddArg(v0) + return true + } + break + } return false } func rewriteValuegeneric_OpPhi(v *Value) bool { diff --git a/src/cmd/compile/internal/test/float_test.go b/src/cmd/compile/internal/test/float_test.go index 9e61148c5297e5..7a5e27870f97f0 100644 --- a/src/cmd/compile/internal/test/float_test.go +++ b/src/cmd/compile/internal/test/float_test.go @@ -623,6 +623,110 @@ func TestInf(t *testing.T) { } } +//go:noinline +func isNaNOrGtZero64(x float64) bool { + return math.IsNaN(x) || x > 0 +} + +//go:noinline +func isNaNOrGteZero64(x float64) bool { + return x >= 0 || math.IsNaN(x) +} + +//go:noinline +func isNaNOrLtZero64(x float64) bool { + return x < 0 || math.IsNaN(x) +} + +//go:noinline +func isNaNOrLteZero64(x float64) bool { + return math.IsNaN(x) || x <= 0 +} + +func TestFusedNaNChecks64(t *testing.T) { + tests := []struct { + value float64 + isZero bool + isGreaterThanZero bool + isLessThanZero bool + isNaN bool + }{ + {value: 0.0, isZero: true}, + {value: math.Copysign(0, -1), isZero: true}, + {value: 1.0, isGreaterThanZero: true}, + {value: -1.0, isLessThanZero: true}, + {value: math.Inf(1), isGreaterThanZero: true}, + {value: math.Inf(-1), isLessThanZero: true}, + {value: math.NaN(), isNaN: true}, + } + + check := func(name string, f func(x float64) bool, value float64, want bool) { + got := f(value) + if got != want { + t.Errorf("%v(%g): want %v, got %v", name, value, want, got) + } + } + + for _, test := range tests { + check("isNaNOrGtZero64", isNaNOrGtZero64, test.value, test.isNaN || test.isGreaterThanZero) + check("isNaNOrGteZero64", isNaNOrGteZero64, test.value, test.isNaN || test.isGreaterThanZero || test.isZero) + check("isNaNOrLtZero64", isNaNOrLtZero64, test.value, test.isNaN || test.isLessThanZero) + check("isNaNOrLteZero64", isNaNOrLteZero64, test.value, test.isNaN || test.isLessThanZero || test.isZero) + } +} + +//go:noinline +func isNaNOrGtZero32(x float32) bool { + return x > 0 || x != x +} + +//go:noinline +func isNaNOrGteZero32(x float32) bool { + return x != x || x >= 0 +} + +//go:noinline +func isNaNOrLtZero32(x float32) bool { + return x != x || x < 0 +} + +//go:noinline +func isNaNOrLteZero32(x float32) bool { + return x <= 0 || x != x +} + +func TestFusedNaNChecks32(t *testing.T) { + tests := []struct { + 
value float32 + isZero bool + isGreaterThanZero bool + isLessThanZero bool + isNaN bool + }{ + {value: 0.0, isZero: true}, + {value: float32(math.Copysign(0, -1)), isZero: true}, + {value: 1.0, isGreaterThanZero: true}, + {value: -1.0, isLessThanZero: true}, + {value: float32(math.Inf(1)), isGreaterThanZero: true}, + {value: float32(math.Inf(-1)), isLessThanZero: true}, + {value: float32(math.NaN()), isNaN: true}, + } + + check := func(name string, f func(x float32) bool, value float32, want bool) { + got := f(value) + if got != want { + t.Errorf("%v(%g): want %v, got %v", name, value, want, got) + } + } + + for _, test := range tests { + check("isNaNOrGtZero32", isNaNOrGtZero32, test.value, test.isNaN || test.isGreaterThanZero) + check("isNaNOrGteZero32", isNaNOrGteZero32, test.value, test.isNaN || test.isGreaterThanZero || test.isZero) + check("isNaNOrLtZero32", isNaNOrLtZero32, test.value, test.isNaN || test.isLessThanZero) + check("isNaNOrLteZero32", isNaNOrLteZero32, test.value, test.isNaN || test.isLessThanZero || test.isZero) + } +} + var sinkFloat float64 func BenchmarkMul2(b *testing.B) { diff --git a/test/codegen/fuse.go b/test/codegen/fuse.go index 8d6ea3c5c74664..561bac7224728a 100644 --- a/test/codegen/fuse.go +++ b/test/codegen/fuse.go @@ -6,6 +6,8 @@ package codegen +import "math" + // Notes: // - these examples use channels to provide a source of // unknown values that cannot be optimized away @@ -196,6 +198,84 @@ func ui4d(c <-chan uint8) { } } +// -------------------------------------// +// merge NaN checks // +// ------------------------------------ // + +func f64NaNOrPosInf(c <-chan float64) { + // This test assumes IsInf(x, 1) is implemented as x > MaxFloat rather than x == Inf(1). + + // amd64:"JCS",-"JNE",-"JPS",-"JPC" + // riscv64:"FCLASSD",-"FLED",-"FLTD",-"FNED",-"FEQD" + for x := <-c; math.IsNaN(x) || math.IsInf(x, 1); x = <-c { + } +} + +func f64NaNOrNegInf(c <-chan float64) { + // This test assumes IsInf(x, -1) is implemented as x < -MaxFloat rather than x == Inf(-1). 
+ + // amd64:"JCS",-"JNE",-"JPS",-"JPC" + // riscv64:"FCLASSD",-"FLED",-"FLTD",-"FNED",-"FEQD" + for x := <-c; math.IsNaN(x) || math.IsInf(x, -1); x = <-c { + } +} + +func f64NaNOrLtOne(c <-chan float64) { + // amd64:"JCS",-"JNE",-"JPS",-"JPC" + // riscv64:"FLED",-"FLTD",-"FNED",-"FEQD" + for x := <-c; math.IsNaN(x) || x < 1; x = <-c { + } +} + +func f64NaNOrLteOne(c <-chan float64) { + // amd64:"JLS",-"JNE",-"JPS",-"JPC" + // riscv64:"FLTD",-"FLED",-"FNED",-"FEQD" + for x := <-c; x <= 1 || math.IsNaN(x); x = <-c { + } +} + +func f64NaNOrGtOne(c <-chan float64) { + // amd64:"JCS",-"JNE",-"JPS",-"JPC" + // riscv64:"FLED",-"FLTD",-"FNED",-"FEQD" + for x := <-c; math.IsNaN(x) || x > 1; x = <-c { + } +} + +func f64NaNOrGteOne(c <-chan float64) { + // amd64:"JLS",-"JNE",-"JPS",-"JPC" + // riscv64:"FLTD",-"FLED",-"FNED",-"FEQD" + for x := <-c; x >= 1 || math.IsNaN(x); x = <-c { + } +} + +func f32NaNOrLtOne(c <-chan float32) { + // amd64:"JCS",-"JNE",-"JPS",-"JPC" + // riscv64:"FLES",-"FLTS",-"FNES",-"FEQS" + for x := <-c; x < 1 || x != x; x = <-c { + } +} + +func f32NaNOrLteOne(c <-chan float32) { + // amd64:"JLS",-"JNE",-"JPS",-"JPC" + // riscv64:"FLTS",-"FLES",-"FNES",-"FEQS" + for x := <-c; x != x || x <= 1; x = <-c { + } +} + +func f32NaNOrGtOne(c <-chan float32) { + // amd64:"JCS",-"JNE",-"JPS",-"JPC" + // riscv64:"FLES",-"FLTS",-"FNES",-"FEQS" + for x := <-c; x > 1 || x != x; x = <-c { + } +} + +func f32NaNOrGteOne(c <-chan float32) { + // amd64:"JLS",-"JNE",-"JPS",-"JPC" + // riscv64:"FLTS",-"FLES",-"FNES",-"FEQS" + for x := <-c; x != x || x >= 1; x = <-c { + } +} + // ------------------------------------ // // regressions // // ------------------------------------ // From e5d004c7a8777c8333b965266994a8fd2cd45394 Mon Sep 17 00:00:00 2001 From: Damien Neil Date: Tue, 7 Oct 2025 11:06:18 -0700 Subject: [PATCH 097/152] net/http: update HTTP/2 documentation to reference new config features Update the package docs to point users at the modern HTTP/2 configuration APIs. Mention in the TLSNextProto documentation that this field is superseded by the Protocols field for most user-facing purposes. Change-Id: I30cd9a85a27e1174338f0d6b887f98c28eac5b5d Reviewed-on: https://go-review.googlesource.com/c/go/+/709797 Reviewed-by: Nicholas Husin Reviewed-by: Nicholas Husin Reviewed-by: Brad Fitzpatrick LUCI-TryBot-Result: Go LUCI --- src/net/http/doc.go | 31 +++++++++++++++---------------- src/net/http/server.go | 6 +++--- src/net/http/transport.go | 6 +++--- 3 files changed, 21 insertions(+), 22 deletions(-) diff --git a/src/net/http/doc.go b/src/net/http/doc.go index f7ad3ae762fb6c..24e07352ca726d 100644 --- a/src/net/http/doc.go +++ b/src/net/http/doc.go @@ -84,27 +84,26 @@ custom Server: # HTTP/2 -Starting with Go 1.6, the http package has transparent support for the -HTTP/2 protocol when using HTTPS. Programs that must disable HTTP/2 -can do so by setting [Transport.TLSNextProto] (for clients) or -[Server.TLSNextProto] (for servers) to a non-nil, empty -map. Alternatively, the following GODEBUG settings are -currently supported: +The http package has transparent support for the HTTP/2 protocol. + +[Server] and [DefaultTransport] automatically enable HTTP/2 support +when using HTTPS. [Transport] does not enable HTTP/2 by default. + +To enable or disable support for HTTP/1, HTTP/2, and/or unencrypted HTTP/2, +see the [Server.Protocols] and [Transport.Protocols] configuration fields. + +To configure advanced HTTP/2 features, see the [Server.HTTP2] and +[Transport.HTTP2] configuration fields. 
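A minimal sketch of how the Protocols and HTTP2 fields mentioned above fit together on the server side (the address, certificate paths, and the MaxConcurrentStreams value are placeholder assumptions, not taken from this CL):

	package main

	import (
		"log"
		"net/http"
	)

	func main() {
		var protocols http.Protocols
		protocols.SetHTTP1(true)
		protocols.SetHTTP2(true)

		srv := &http.Server{
			Addr:      ":8443",
			Protocols: &protocols, // accept HTTP/1 and HTTP/2 only
			HTTP2: &http.HTTP2Config{
				MaxConcurrentStreams: 250, // one example of an advanced HTTP/2 setting
			},
		}
		// Placeholder certificate and key paths; HTTP/2 here is negotiated over TLS.
		log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
	}

The corresponding Protocols and HTTP2 fields on Transport configure the client side in the same way.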
+ +Alternatively, the following GODEBUG settings are currently supported: GODEBUG=http2client=0 # disable HTTP/2 client support GODEBUG=http2server=0 # disable HTTP/2 server support GODEBUG=http2debug=1 # enable verbose HTTP/2 debug logs GODEBUG=http2debug=2 # ... even more verbose, with frame dumps -Please report any issues before disabling HTTP/2 support: https://golang.org/s/http2bug - -The http package's [Transport] and [Server] both automatically enable -HTTP/2 support for simple configurations. To enable HTTP/2 for more -complex configurations, to use lower-level HTTP/2 features, or to use -a newer version of Go's http2 package, import "golang.org/x/net/http2" -directly and use its ConfigureTransport and/or ConfigureServer -functions. Manually configuring HTTP/2 via the golang.org/x/net/http2 -package takes precedence over the net/http package's built-in HTTP/2 -support. +The "omithttp2" build tag may be used to disable the HTTP/2 implementation +contained in the http package. */ + package http diff --git a/src/net/http/server.go b/src/net/http/server.go index 4078c899061409..02554d1a201afd 100644 --- a/src/net/http/server.go +++ b/src/net/http/server.go @@ -3066,6 +3066,9 @@ type Server struct { // automatically closed when the function returns. // If TLSNextProto is not nil, HTTP/2 support is not enabled // automatically. + // + // Historically, TLSNextProto was used to disable HTTP/2 support. + // The Server.Protocols field now provides a simpler way to do this. TLSNextProto map[string]func(*Server, *tls.Conn, Handler) // ConnState specifies an optional callback function that is @@ -3094,9 +3097,6 @@ type Server struct { ConnContext func(ctx context.Context, c net.Conn) context.Context // HTTP2 configures HTTP/2 connections. - // - // This field does not yet have any effect. - // See https://go.dev/issue/67813. HTTP2 *HTTP2Config // Protocols is the set of protocols accepted by the server. diff --git a/src/net/http/transport.go b/src/net/http/transport.go index 5cef9be487a4e2..a560765d331d65 100644 --- a/src/net/http/transport.go +++ b/src/net/http/transport.go @@ -249,6 +249,9 @@ type Transport struct { // must return a RoundTripper that then handles the request. // If TLSNextProto is not nil, HTTP/2 support is not enabled // automatically. + // + // Historically, TLSNextProto was used to disable HTTP/2 support. + // The Transport.Protocols field now provides a simpler way to do this. TLSNextProto map[string]func(authority string, c *tls.Conn) RoundTripper // ProxyConnectHeader optionally specifies headers to send to @@ -296,9 +299,6 @@ type Transport struct { ForceAttemptHTTP2 bool // HTTP2 configures HTTP/2 connections. - // - // This field does not yet have any effect. - // See https://go.dev/issue/67813. HTTP2 *HTTP2Config // Protocols is the set of protocols supported by the transport. From e1ca1de1234aa0f6be85c97db5492a94b099a305 Mon Sep 17 00:00:00 2001 From: Vlad Saioc Date: Mon, 6 Oct 2025 15:10:52 +0000 Subject: [PATCH 098/152] net/http: format pprof.go Properly formatted net/http/pprof.go to correct inconsistent whitespaces between keys and values for profileSupportsDelta. 
Change-Id: Iea1515b4289de95862d7eb3af5b8d8d13df2b990 GitHub-Last-Rev: 381d2d3ee746fafdf688b96c8b56a081a1283381 GitHub-Pull-Request: golang/go#75769 Reviewed-on: https://go-review.googlesource.com/c/go/+/709415 Reviewed-by: Dmitri Shuralyov LUCI-TryBot-Result: Go LUCI Reviewed-by: Cherry Mui Auto-Submit: Dmitri Shuralyov --- src/net/http/pprof/pprof.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/net/http/pprof/pprof.go b/src/net/http/pprof/pprof.go index e5a46ed253cf8b..71aade67d32046 100644 --- a/src/net/http/pprof/pprof.go +++ b/src/net/http/pprof/pprof.go @@ -352,13 +352,13 @@ func collectProfile(p *pprof.Profile) (*profile.Profile, error) { } var profileSupportsDelta = map[handler]bool{ - "allocs": true, - "block": true, - "goroutineleak": true, - "goroutine": true, - "heap": true, - "mutex": true, - "threadcreate": true, + "allocs": true, + "block": true, + "goroutineleak": true, + "goroutine": true, + "heap": true, + "mutex": true, + "threadcreate": true, } var profileDescriptions = map[string]string{ From d4830c61301a32ad9373bc30c5fd6196c3567f61 Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Wed, 8 Oct 2025 12:19:14 -0400 Subject: [PATCH 099/152] cmd/internal/obj: fix Link.Diag printf errors go1.26's vet printf checker can associate the printf-wrapper property with local vars and struct fields if they are assigned from a printf-like func literal (CL 706635). This leads to better detection of mistakes. Change-Id: I604be1e200aa1aba75e09d4f36ab68c1dba3b8a3 Reviewed-on: https://go-review.googlesource.com/c/go/+/710195 Auto-Submit: Alan Donovan LUCI-TryBot-Result: Go LUCI Reviewed-by: Cherry Mui --- src/cmd/internal/obj/arm/asm5.go | 2 +- src/cmd/internal/obj/arm64/asm7.go | 2 +- src/cmd/internal/obj/link.go | 7 +++++++ src/cmd/internal/obj/loong64/asm.go | 4 ++-- src/cmd/internal/obj/mips/asm0.go | 2 +- src/cmd/internal/obj/plist.go | 4 ++-- src/cmd/internal/obj/riscv/obj.go | 2 +- 7 files changed, 15 insertions(+), 8 deletions(-) diff --git a/src/cmd/internal/obj/arm/asm5.go b/src/cmd/internal/obj/arm/asm5.go index 0ef13b81f6f3f1..1e2891de0a7ddc 100644 --- a/src/cmd/internal/obj/arm/asm5.go +++ b/src/cmd/internal/obj/arm/asm5.go @@ -579,7 +579,7 @@ func span5(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { } if int64(pc) > p.Pc { - ctxt.Diag("PC padding invalid: want %#d, has %#d: %v", p.Pc, pc, p) + ctxt.Diag("PC padding invalid: want %d, has %d: %v", p.Pc, pc, p) } for int64(pc) != p.Pc { // emit 0xe1a00000 (MOVW R0, R0) diff --git a/src/cmd/internal/obj/arm64/asm7.go b/src/cmd/internal/obj/arm64/asm7.go index 743d09a319087d..172c2256d7eede 100644 --- a/src/cmd/internal/obj/arm64/asm7.go +++ b/src/cmd/internal/obj/arm64/asm7.go @@ -4354,7 +4354,7 @@ func (c *ctxt7) asmout(p *obj.Prog, out []uint32) (count int) { // remove the NOTUSETMP flag in optab. op := c.opirr(p, p.As) if op&Sbit != 0 { - c.ctxt.Diag("can not break addition/subtraction when S bit is set", p) + c.ctxt.Diag("can not break addition/subtraction when S bit is set (%v)", p) } rt, r := p.To.Reg, p.Reg if r == obj.REG_NONE { diff --git a/src/cmd/internal/obj/link.go b/src/cmd/internal/obj/link.go index 816fed026f35ad..b7e116bae39dc4 100644 --- a/src/cmd/internal/obj/link.go +++ b/src/cmd/internal/obj/link.go @@ -1216,6 +1216,13 @@ type Link struct { Fingerprint goobj.FingerprintType // fingerprint of symbol indices, to catch index mismatch } +// Assert to vet's printf checker that Link.DiagFunc is a printf-like. 
+func _(ctxt *Link) { + ctxt.DiagFunc = func(format string, args ...any) { + _ = fmt.Sprintf(format, args...) + } +} + func (ctxt *Link) Diag(format string, args ...interface{}) { ctxt.Errors++ ctxt.DiagFunc(format, args...) diff --git a/src/cmd/internal/obj/loong64/asm.go b/src/cmd/internal/obj/loong64/asm.go index ca6e2be4aa9eb6..ccf093ca9e8d54 100644 --- a/src/cmd/internal/obj/loong64/asm.go +++ b/src/cmd/internal/obj/loong64/asm.go @@ -2057,7 +2057,7 @@ func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) { switch o.type_ { default: - c.ctxt.Diag("unknown type %d %v", o.type_) + c.ctxt.Diag("unknown type %d", o.type_) prasm(p) case 0: // pseudo ops @@ -4438,7 +4438,7 @@ func (c *ctxt0) specialFpMovInst(a obj.As, fclass int, tclass int) uint32 { } } - c.ctxt.Diag("bad class combination: %s %s,%s\n", a, fclass, tclass) + c.ctxt.Diag("bad class combination: %s %d,%s\n", a, fclass, tclass) return 0 } diff --git a/src/cmd/internal/obj/mips/asm0.go b/src/cmd/internal/obj/mips/asm0.go index 2de5a4d6c0b8fc..a55953e741432a 100644 --- a/src/cmd/internal/obj/mips/asm0.go +++ b/src/cmd/internal/obj/mips/asm0.go @@ -1172,7 +1172,7 @@ func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) { } switch o.type_ { default: - c.ctxt.Diag("unknown type %d %v", o.type_) + c.ctxt.Diag("unknown type %d", o.type_) prasm(p) case 0: /* pseudo ops */ diff --git a/src/cmd/internal/obj/plist.go b/src/cmd/internal/obj/plist.go index 698e5ace9ccca8..69914b1c1f36d5 100644 --- a/src/cmd/internal/obj/plist.go +++ b/src/cmd/internal/obj/plist.go @@ -63,12 +63,12 @@ func Flushplist(ctxt *Link, plist *Plist, newprog ProgAlloc) { switch p.To.Sym.Name { case "go_args_stackmap": if p.From.Type != TYPE_CONST || p.From.Offset != abi.FUNCDATA_ArgsPointerMaps { - ctxt.Diag("%s: FUNCDATA use of go_args_stackmap(SB) without FUNCDATA_ArgsPointerMaps", p.Pos) + ctxt.Diag("%v: FUNCDATA use of go_args_stackmap(SB) without FUNCDATA_ArgsPointerMaps", p) } p.To.Sym = ctxt.LookupDerived(curtext, curtext.Name+".args_stackmap") case "no_pointers_stackmap": if p.From.Type != TYPE_CONST || p.From.Offset != abi.FUNCDATA_LocalsPointerMaps { - ctxt.Diag("%s: FUNCDATA use of no_pointers_stackmap(SB) without FUNCDATA_LocalsPointerMaps", p.Pos) + ctxt.Diag("%v: FUNCDATA use of no_pointers_stackmap(SB) without FUNCDATA_LocalsPointerMaps", p) } // funcdata for functions with no local variables in frame. // Define two zero-length bitmaps, because the same index is used diff --git a/src/cmd/internal/obj/riscv/obj.go b/src/cmd/internal/obj/riscv/obj.go index 91642ffbcb0e50..4a9c54a3967c4e 100644 --- a/src/cmd/internal/obj/riscv/obj.go +++ b/src/cmd/internal/obj/riscv/obj.go @@ -3026,7 +3026,7 @@ func instructionsForOpImmediate(p *obj.Prog, as obj.As, rs int16) []*instruction low, high, err := Split32BitImmediate(ins.imm) if err != nil { - p.Ctxt.Diag("%v: constant %d too large", p, ins.imm, err) + p.Ctxt.Diag("%v: constant %d too large: %v", p, ins.imm, err) return nil } if high == 0 { From d945600d060e7a0b7c5e72ac606a017d105a17f3 Mon Sep 17 00:00:00 2001 From: Robbie McMichael Date: Mon, 6 Oct 2025 12:47:34 +0000 Subject: [PATCH 100/152] cmd/gofmt: change -d to exit 1 if diffs exist When using the -d flag, set the exit code to 1 if there is a diff. 
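For illustration, a CI wrapper (hypothetical, not included in this CL) could branch on the three exit codes like this:

	package main

	import (
		"errors"
		"fmt"
		"os"
		"os/exec"
	)

	func main() {
		cmd := exec.Command("gofmt", "-d", ".")
		out, err := cmd.Output() // gofmt -d writes diffs to stdout
		var exitErr *exec.ExitError
		switch {
		case err == nil:
			fmt.Println("all files already formatted") // exit code 0
		case errors.As(err, &exitErr) && exitErr.ExitCode() == 1:
			fmt.Print(string(out)) // exit code 1: formatting differs, diff captured above
			os.Exit(1)
		default:
			fmt.Fprintln(os.Stderr, "gofmt failed:", err) // exit code 2 or exec failure
			os.Exit(2)
		}
	}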
Fixes #46289 Change-Id: I802e8ccd1798ed7f4448696bec4bc82835ec71a2 GitHub-Last-Rev: db2207fba9a8f7a2f50138ec1f086ac6a74e1b10 GitHub-Pull-Request: golang/go#75649 Reviewed-on: https://go-review.googlesource.com/c/go/+/707635 Reviewed-by: Carlos Amedee Reviewed-by: Michael Pratt Reviewed-by: Sean Liao Auto-Submit: Sean Liao LUCI-TryBot-Result: Go LUCI --- src/cmd/gofmt/gofmt.go | 12 +++++- src/cmd/gofmt/gofmt_test.go | 55 ++++++++++++++++++++++++-- src/cmd/gofmt/testdata/exitcode.golden | 1 + src/cmd/gofmt/testdata/exitcode.input | 1 + 4 files changed, 64 insertions(+), 5 deletions(-) create mode 100644 src/cmd/gofmt/testdata/exitcode.golden create mode 100644 src/cmd/gofmt/testdata/exitcode.input diff --git a/src/cmd/gofmt/gofmt.go b/src/cmd/gofmt/gofmt.go index bbb8b4fd15c2f7..ad6ad636524479 100644 --- a/src/cmd/gofmt/gofmt.go +++ b/src/cmd/gofmt/gofmt.go @@ -41,6 +41,9 @@ var ( // debugging cpuprofile = flag.String("cpuprofile", "", "write cpu profile to this file") + + // errors + errFormattingDiffers = fmt.Errorf("formatting differs from gofmt's") ) // Keep these in sync with go/format/format.go. @@ -218,8 +221,12 @@ func (r *reporter) Report(err error) { panic("Report with nil error") } st := r.getState() - scanner.PrintError(st.err, err) - st.exitCode = 2 + if err == errFormattingDiffers { + st.exitCode = 1 + } else { + scanner.PrintError(st.err, err) + st.exitCode = 2 + } } func (r *reporter) ExitCode() int { @@ -281,6 +288,7 @@ func processFile(filename string, info fs.FileInfo, in io.Reader, r *reporter) e newName := filepath.ToSlash(filename) oldName := newName + ".orig" r.Write(diff.Diff(oldName, src, newName, res)) + return errFormattingDiffers } } diff --git a/src/cmd/gofmt/gofmt_test.go b/src/cmd/gofmt/gofmt_test.go index 6b80673af148f5..2aba0f03ff09e9 100644 --- a/src/cmd/gofmt/gofmt_test.go +++ b/src/cmd/gofmt/gofmt_test.go @@ -53,10 +53,19 @@ func gofmtFlags(filename string, maxLines int) string { return "" } -func runTest(t *testing.T, in, out string) { - // process flags - *simplifyAST = false +// Reset global variables for all flags to their default value. +func resetFlags() { + *list = false + *write = false *rewriteRule = "" + *simplifyAST = false + *doDiff = false + *allErrors = false + *cpuprofile = "" +} + +func runTest(t *testing.T, in, out string) { + resetFlags() info, err := os.Lstat(in) if err != nil { t.Error(err) @@ -159,6 +168,46 @@ func TestRewrite(t *testing.T) { } } +// TestDiff runs gofmt with the -d flag on the input files and checks that the +// expected exit code is set. +func TestDiff(t *testing.T) { + tests := []struct { + in string + exitCode int + }{ + {in: "testdata/exitcode.input", exitCode: 1}, + {in: "testdata/exitcode.golden", exitCode: 0}, + } + + for _, tt := range tests { + resetFlags() + *doDiff = true + + initParserMode() + initRewrite() + + info, err := os.Lstat(tt.in) + if err != nil { + t.Error(err) + return + } + + const maxWeight = 2 << 20 + var buf, errBuf bytes.Buffer + s := newSequencer(maxWeight, &buf, &errBuf) + s.Add(fileWeight(tt.in, info), func(r *reporter) error { + return processFile(tt.in, info, nil, r) + }) + if errBuf.Len() > 0 { + t.Logf("%q", errBuf.Bytes()) + } + + if s.GetExitCode() != tt.exitCode { + t.Errorf("%s: expected exit code %d, got %d", tt.in, tt.exitCode, s.GetExitCode()) + } + } +} + // Test case for issue 3961. 
func TestCRLF(t *testing.T) { const input = "testdata/crlf.input" // must contain CR/LF's diff --git a/src/cmd/gofmt/testdata/exitcode.golden b/src/cmd/gofmt/testdata/exitcode.golden new file mode 100644 index 00000000000000..06ab7d0f9a35a7 --- /dev/null +++ b/src/cmd/gofmt/testdata/exitcode.golden @@ -0,0 +1 @@ +package main diff --git a/src/cmd/gofmt/testdata/exitcode.input b/src/cmd/gofmt/testdata/exitcode.input new file mode 100644 index 00000000000000..4f2f092ce508de --- /dev/null +++ b/src/cmd/gofmt/testdata/exitcode.input @@ -0,0 +1 @@ + package main From 941e5917c113e8414ad8dc5f36c8ebae33497be4 Mon Sep 17 00:00:00 2001 From: Paul Murphy Date: Wed, 1 Oct 2025 09:23:18 -0500 Subject: [PATCH 101/152] runtime: cleanup comments from asm_ppc64x.s improvements CL 706395 consolidated the aix and elf startup code, further update the comments to reflect this. Change-Id: Iccb98008b3fe4a4b08e55ee822924fad76846cc2 Reviewed-on: https://go-review.googlesource.com/c/go/+/708355 LUCI-TryBot-Result: Go LUCI Auto-Submit: Paul Murphy Reviewed-by: Quim Muntal Reviewed-by: Michael Pratt Reviewed-by: Keith Randall --- src/runtime/asm_ppc64x.s | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/src/runtime/asm_ppc64x.s b/src/runtime/asm_ppc64x.s index b42e0b62f850ff..88d03dcc72c1e6 100644 --- a/src/runtime/asm_ppc64x.s +++ b/src/runtime/asm_ppc64x.s @@ -11,10 +11,10 @@ #include "asm_ppc64x.h" #include "cgo/abi_ppc64x.h" - +// This is called using the host ABI. argc and argv arguments +// should be in R3 and R4 respectively. TEXT _rt0_ppc64x_lib(SB),NOSPLIT|NOFRAME,$0 - // This is called with ELFv2 calling conventions. Convert to Go. - // Allocate argument storage for call to newosproc0. + // Convert to Go ABI, and Allocate argument storage for call to newosproc0. STACK_AND_SAVE_HOST_TO_GO_ABI(16) MOVD R3, _rt0_ppc64x_lib_argc<>(SB) @@ -48,7 +48,6 @@ nocgo: BL (CTR) done: - // Restore and return to ELFv2 caller. 
UNSTACK_AND_RESTORE_GO_TO_HOST_ABI(16) RET From ae094a1397d03aafde380cdb79e0d6a5731c8dbc Mon Sep 17 00:00:00 2001 From: Filippo Valsorda Date: Mon, 29 Sep 2025 13:36:28 +0200 Subject: [PATCH 102/152] crypto/internal/fips140test: make entropy file pair names match Change-Id: I6a6a69642d00e3994277d9b5631d1d7f18f3f356 Reviewed-on: https://go-review.googlesource.com/c/go/+/710055 Reviewed-by: Roland Shoemaker Reviewed-by: Daniel McCarney LUCI-TryBot-Result: Go LUCI Auto-Submit: Filippo Valsorda Reviewed-by: Michael Pratt --- src/crypto/internal/fips140test/entropy_test.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/crypto/internal/fips140test/entropy_test.go b/src/crypto/internal/fips140test/entropy_test.go index 76c24289520e17..a84b50c62b22b6 100644 --- a/src/crypto/internal/fips140test/entropy_test.go +++ b/src/crypto/internal/fips140test/entropy_test.go @@ -31,11 +31,12 @@ var flagNISTSP80090B = flag.Bool("nist-sp800-90b", false, "run NIST SP 800-90B t func TestEntropySamples(t *testing.T) { cryptotest.MustSupportFIPS140(t) + now := time.Now().UTC() var seqSamples [1_000_000]uint8 samplesOrTryAgain(t, seqSamples[:]) seqSamplesName := fmt.Sprintf("entropy_samples_sequential_%s_%s_%s_%s_%s.bin", entropy.Version(), - runtime.GOOS, runtime.GOARCH, *flagEntropySamples, time.Now().Format("20060102T150405Z")) + runtime.GOOS, runtime.GOARCH, *flagEntropySamples, now.Format("20060102T150405Z")) if *flagEntropySamples != "" { if err := os.WriteFile(seqSamplesName, seqSamples[:], 0644); err != nil { t.Fatalf("failed to write samples to %q: %v", seqSamplesName, err) @@ -50,7 +51,7 @@ func TestEntropySamples(t *testing.T) { copy(restartSamples[i][:], samples[:]) } restartSamplesName := fmt.Sprintf("entropy_samples_restart_%s_%s_%s_%s_%s.bin", entropy.Version(), - runtime.GOOS, runtime.GOARCH, *flagEntropySamples, time.Now().Format("20060102T150405Z")) + runtime.GOOS, runtime.GOARCH, *flagEntropySamples, now.Format("20060102T150405Z")) if *flagEntropySamples != "" { f, err := os.Create(restartSamplesName) if err != nil { From de9da0de30377532370a09a311851afc0616c185 Mon Sep 17 00:00:00 2001 From: Mateusz Poliwczak Date: Tue, 7 Oct 2025 17:57:59 +0000 Subject: [PATCH 103/152] cmd/compile/internal/devirtualize: improve concrete type analysis This change improves the concrete type analysis in the devirtualizer, it not longer relies on ir.Reassigned, it now statically tries to determine the concrete type of an interface, even when assigned multiple times, following type assertions and iface conversions. 
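For illustration (the types below are hypothetical, not taken from the CL's tests), the improved analysis can devirtualize a call even when the interface variable is assigned on several paths, as long as every assignment has the same concrete type; assignments of nil are ignored, and a nil check is kept so that calling a method on a nil interface still panics:

	package main

	type Iface interface{ M() int }

	type Impl struct{ v int }

	func (i *Impl) M() int { return i.v }

	func f(cond bool) int {
		var v Iface // the zero-value (nil) assignment is ignored by the analysis
		if cond {
			v = &Impl{v: 1}
		} else {
			v = &Impl{v: 2} // same concrete type on every path
		}
		// v.M() can be rewritten to v.(*Impl).M(), guarded by a nil check,
		// and the resulting direct call may then be inlined.
		return v.M()
	}

	func main() { println(f(true)) }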
Alternative to CL 649195 Updates #69521 Fixes #64824 Change-Id: Ib1656e19f3619ab2e1e6b2c78346cc320490b2af GitHub-Last-Rev: e8fa0b12f0a7b1d7ae00e5edb54ce04d1f702c09 GitHub-Pull-Request: golang/go#71935 Reviewed-on: https://go-review.googlesource.com/c/go/+/652036 Reviewed-by: Michael Pratt Reviewed-by: Keith Randall Reviewed-by: Keith Randall LUCI-TryBot-Result: Go LUCI Auto-Submit: Keith Randall --- .../internal/devirtualize/devirtualize.go | 463 +++++- .../inline/interleaved/interleaved.go | 37 +- src/cmd/compile/internal/ir/expr.go | 5 + src/cmd/compile/internal/noder/reader.go | 1 + src/cmd/compile/internal/ssagen/ssa.go | 19 + src/crypto/sha256/sha256_test.go | 14 + src/runtime/pprof/pprof_test.go | 5 + test/devirtualization.go | 1277 +++++++++++++++++ test/devirtualization_nil_panics.go | 100 ++ ...zation_with_type_assertions_interleaved.go | 139 ++ test/fixedbugs/issue42284.dir/a.go | 7 +- test/fixedbugs/issue42284.dir/b.go | 7 +- 12 files changed, 2039 insertions(+), 35 deletions(-) create mode 100644 test/devirtualization.go create mode 100644 test/devirtualization_nil_panics.go create mode 100644 test/devirtualization_with_type_assertions_interleaved.go diff --git a/src/cmd/compile/internal/devirtualize/devirtualize.go b/src/cmd/compile/internal/devirtualize/devirtualize.go index 372d05809401ff..9d4160085edde9 100644 --- a/src/cmd/compile/internal/devirtualize/devirtualize.go +++ b/src/cmd/compile/internal/devirtualize/devirtualize.go @@ -18,9 +18,11 @@ import ( "cmd/compile/internal/types" ) +const go126ImprovedConcreteTypeAnalysis = true + // StaticCall devirtualizes the given call if possible when the concrete callee // is available statically. -func StaticCall(call *ir.CallExpr) { +func StaticCall(s *State, call *ir.CallExpr) { // For promoted methods (including value-receiver methods promoted // to pointer-receivers), the interface method wrapper may contain // expressions that can panic (e.g., ODEREF, ODOTPTR, @@ -40,15 +42,31 @@ func StaticCall(call *ir.CallExpr) { } sel := call.Fun.(*ir.SelectorExpr) - r := ir.StaticValue(sel.X) - if r.Op() != ir.OCONVIFACE { - return - } - recv := r.(*ir.ConvExpr) + var typ *types.Type + if go126ImprovedConcreteTypeAnalysis { + typ = concreteType(s, sel.X) + if typ == nil { + return + } - typ := recv.X.Type() - if typ.IsInterface() { - return + // Don't create type-assertions that would be impossible at compile-time. + // This can happen in such case: any(0).(interface {A()}).A(), this typechecks without + // any errors, but will cause a runtime panic. We statically know that int(0) does not + // implement that interface, thus we skip the devirtualization, as it is not possible + // to make an assertion: any(0).(interface{A()}).(int) (int does not implement interface{A()}). + if !typecheck.Implements(typ, sel.X.Type()) { + return + } + } else { + r := ir.StaticValue(sel.X) + if r.Op() != ir.OCONVIFACE { + return + } + recv := r.(*ir.ConvExpr) + typ = recv.X.Type() + if typ.IsInterface() { + return + } } // If typ is a shape type, then it was a type argument originally @@ -99,8 +117,27 @@ func StaticCall(call *ir.CallExpr) { return } - dt := ir.NewTypeAssertExpr(sel.Pos(), sel.X, nil) - dt.SetType(typ) + dt := ir.NewTypeAssertExpr(sel.Pos(), sel.X, typ) + + if go126ImprovedConcreteTypeAnalysis { + // Consider: + // + // var v Iface + // v.A() + // v = &Impl{} + // + // Here in the devirtualizer, we determine the concrete type of v as being an *Impl, + // but it can still be a nil interface, we have not detected that. 
The v.(*Impl) + // type assertion that we make here would also have failed, but with a different + // panic "pkg.Iface is nil, not *pkg.Impl", where previously we would get a nil panic. + // We fix this, by introducing an additional nilcheck on the itab. + // Calling a method on an nil interface (in most cases) is a bug in a program, so it is fine + // to devirtualize and further (possibly) inline them, even though we would never reach + // the called function. + dt.UseNilPanic = true + dt.SetPos(call.Pos()) + } + x := typecheck.XDotMethod(sel.Pos(), dt, sel.Sel, true) switch x.Op() { case ir.ODOTMETH: @@ -138,3 +175,407 @@ func StaticCall(call *ir.CallExpr) { // Desugar OCALLMETH, if we created one (#57309). typecheck.FixMethodCall(call) } + +const concreteTypeDebug = false + +// concreteType determines the concrete type of n, following OCONVIFACEs and type asserts. +// Returns nil when the concrete type could not be determined, or when there are multiple +// (different) types assigned to an interface. +func concreteType(s *State, n ir.Node) (typ *types.Type) { + typ = concreteType1(s, n, make(map[*ir.Name]struct{})) + if typ == &noType { + return nil + } + if typ != nil && typ.IsInterface() { + base.Fatalf("typ.IsInterface() = true; want = false; typ = %v", typ) + } + return typ +} + +// noType is a sentinel value returned by [concreteType1]. +var noType types.Type + +// concreteType1 analyzes the node n and returns its concrete type if it is statically known. +// Otherwise, it returns a nil Type, indicating that a concrete type was not determined. +// When n is known to be statically nil or a self-assignment is detected, in returns a sentinel [noType] type instead. +func concreteType1(s *State, n ir.Node, seen map[*ir.Name]struct{}) (outT *types.Type) { + nn := n // for debug messages + + if concreteTypeDebug { + defer func() { + t := "&noType" + if outT != &noType { + t = outT.String() + } + base.Warn("concreteType1(%v) -> %v", nn, t) + }() + } + + for { + if concreteTypeDebug { + base.Warn("concreteType1(%v): analyzing %v", nn, n) + } + + if !n.Type().IsInterface() { + return n.Type() + } + + switch n1 := n.(type) { + case *ir.ConvExpr: + if n1.Op() == ir.OCONVNOP { + if !n1.Type().IsInterface() || !types.Identical(n1.Type(), n1.X.Type()) { + // As we check (directly before this switch) whether n is an interface, thus we should only reach + // here for iface conversions where both operands are the same. + base.Fatalf("not identical/interface types found n1.Type = %v; n1.X.Type = %v", n1.Type(), n1.X.Type()) + } + n = n1.X + continue + } + if n1.Op() == ir.OCONVIFACE { + n = n1.X + continue + } + case *ir.InlinedCallExpr: + if n1.Op() == ir.OINLCALL { + n = n1.SingleResult() + continue + } + case *ir.ParenExpr: + n = n1.X + continue + case *ir.TypeAssertExpr: + n = n1.X + continue + } + break + } + + if n.Op() != ir.ONAME { + return nil + } + + name := n.(*ir.Name).Canonical() + if name.Class != ir.PAUTO { + return nil + } + + if name.Op() != ir.ONAME { + base.Fatalf("name.Op = %v; want = ONAME", n.Op()) + } + + // name.Curfn must be set, as we checked name.Class != ir.PAUTO before. + if name.Curfn == nil { + base.Fatalf("name.Curfn = nil; want not nil") + } + + if name.Addrtaken() { + return nil // conservatively assume it's reassigned with a different type indirectly + } + + if _, ok := seen[name]; ok { + return &noType // Already analyzed assignments to name, no need to do that twice. 
+ } + seen[name] = struct{}{} + + if concreteTypeDebug { + base.Warn("concreteType1(%v): analyzing assignments to %v", nn, name) + } + + var typ *types.Type + for _, v := range s.assignments(name) { + var t *types.Type + switch v := v.(type) { + case *types.Type: + t = v + case ir.Node: + t = concreteType1(s, v, seen) + if t == &noType { + continue + } + } + if t == nil || (typ != nil && !types.Identical(typ, t)) { + return nil + } + typ = t + } + + if typ == nil { + // Variable either declared with zero value, or only assigned with nil. + return &noType + } + + return typ +} + +// assignment can be one of: +// - nil - assignment from an interface type. +// - *types.Type - assignment from a concrete type (non-interface). +// - ir.Node - assignment from a ir.Node. +// +// In most cases assignment should be an [ir.Node], but in cases where we +// do not follow the data-flow, we return either a concrete type (*types.Type) or a nil. +// For example in range over a slice, if the slice elem is of an interface type, then we return +// a nil, otherwise the elem's concrete type (We do so because we do not analyze assignment to the +// slice being ranged-over). +type assignment any + +// State holds precomputed state for use in [StaticCall]. +type State struct { + // ifaceAssignments maps interface variables to all their assignments + // defined inside functions stored in the analyzedFuncs set. + // Note: it does not include direct assignments to nil. + ifaceAssignments map[*ir.Name][]assignment + + // ifaceCallExprAssigns stores every [*ir.CallExpr], which has an interface + // result, that is assigned to a variable. + ifaceCallExprAssigns map[*ir.CallExpr][]ifaceAssignRef + + // analyzedFuncs is a set of Funcs that were analyzed for iface assignments. + analyzedFuncs map[*ir.Func]struct{} +} + +type ifaceAssignRef struct { + name *ir.Name // ifaceAssignments[name] + assignmentIndex int // ifaceAssignments[name][assignmentIndex] + returnIndex int // (*ir.CallExpr).Result(returnIndex) +} + +// InlinedCall updates the [State] to take into account a newly inlined call. +func (s *State) InlinedCall(fun *ir.Func, origCall *ir.CallExpr, inlinedCall *ir.InlinedCallExpr) { + if _, ok := s.analyzedFuncs[fun]; !ok { + // Full analyze has not been yet executed for the provided function, so we can skip it for now. + // When no devirtualization happens in a function, it is unnecessary to analyze it. + return + } + + // Analyze assignments in the newly inlined function. + s.analyze(inlinedCall.Init()) + s.analyze(inlinedCall.Body) + + refs, ok := s.ifaceCallExprAssigns[origCall] + if !ok { + return + } + delete(s.ifaceCallExprAssigns, origCall) + + // Update assignments to reference the new ReturnVars of the inlined call. + for _, ref := range refs { + vt := &s.ifaceAssignments[ref.name][ref.assignmentIndex] + if *vt != nil { + base.Fatalf("unexpected non-nil assignment") + } + if concreteTypeDebug { + base.Warn( + "InlinedCall(%v, %v): replacing interface node in (%v,%v) to %v (typ %v)", + origCall, inlinedCall, ref.name, ref.assignmentIndex, + inlinedCall.ReturnVars[ref.returnIndex], + inlinedCall.ReturnVars[ref.returnIndex].Type(), + ) + } + + // Update ifaceAssignments with an ir.Node from the inlined function’s ReturnVars. + // This may enable future devirtualization of calls that reference ref.name. + // We will get calls to [StaticCall] from the interleaved package, + // to try devirtualize such calls afterwards. 
+ *vt = inlinedCall.ReturnVars[ref.returnIndex] + } +} + +// assignments returns all assignments to n. +func (s *State) assignments(n *ir.Name) []assignment { + fun := n.Curfn + if fun == nil { + base.Fatalf("n.Curfn = ") + } + + if !n.Type().IsInterface() { + base.Fatalf("name passed to assignments is not of an interface type: %v", n.Type()) + } + + // Analyze assignments in func, if not analyzed before. + if _, ok := s.analyzedFuncs[fun]; !ok { + if concreteTypeDebug { + base.Warn("assignments(): analyzing assignments in %v func", fun) + } + if s.analyzedFuncs == nil { + s.ifaceAssignments = make(map[*ir.Name][]assignment) + s.ifaceCallExprAssigns = make(map[*ir.CallExpr][]ifaceAssignRef) + s.analyzedFuncs = make(map[*ir.Func]struct{}) + } + s.analyzedFuncs[fun] = struct{}{} + s.analyze(fun.Init()) + s.analyze(fun.Body) + } + + return s.ifaceAssignments[n] +} + +// analyze analyzes every assignment to interface variables in nodes, updating [State]. +func (s *State) analyze(nodes ir.Nodes) { + assign := func(name ir.Node, assignment assignment) (*ir.Name, int) { + if name == nil || name.Op() != ir.ONAME || ir.IsBlank(name) { + return nil, -1 + } + + n, ok := ir.OuterValue(name).(*ir.Name) + if !ok || n.Curfn == nil { + return nil, -1 + } + + // Do not track variables that are not of interface types. + // For devirtualization they are unnecessary, we will not even look them up. + if !n.Type().IsInterface() { + return nil, -1 + } + + n = n.Canonical() + if n.Op() != ir.ONAME { + base.Fatalf("n.Op = %v; want = ONAME", n.Op()) + } + + switch a := assignment.(type) { + case nil: + case *types.Type: + if a != nil && a.IsInterface() { + assignment = nil // non-concrete type + } + case ir.Node: + // nil assignment, we can safely ignore them, see [StaticCall]. + if ir.IsNil(a) { + return nil, -1 + } + default: + base.Fatalf("unexpected type: %v", assignment) + } + + if concreteTypeDebug { + base.Warn("analyze(): assignment found %v = %v", name, assignment) + } + + s.ifaceAssignments[n] = append(s.ifaceAssignments[n], assignment) + return n, len(s.ifaceAssignments[n]) - 1 + } + + var do func(n ir.Node) + do = func(n ir.Node) { + switch n.Op() { + case ir.OAS: + n := n.(*ir.AssignStmt) + if rhs := n.Y; rhs != nil { + for { + if r, ok := rhs.(*ir.ParenExpr); ok { + rhs = r.X + continue + } + break + } + if call, ok := rhs.(*ir.CallExpr); ok && call.Fun != nil { + retTyp := call.Fun.Type().Results()[0].Type + n, idx := assign(n.X, retTyp) + if n != nil && retTyp.IsInterface() { + // We have a call expression, that returns an interface, store it for later evaluation. + // In case this func gets inlined later, we will update the assignment (added before) + // with a reference to ReturnVars, see [State.InlinedCall], which might allow for future devirtualizing of n.X. 
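+ // For example (illustrative, following the pattern exercised by the
+ // interleaved devirtualization tests added below):
+ //
+ //	h := newHash() // newHash declares an interface result type
+ //	h.Sum()
+ //
+ // h.Sum cannot be devirtualized from the signature alone; once newHash is
+ // inlined, its ReturnVars may expose a single concrete type for h.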
+ s.ifaceCallExprAssigns[call] = append(s.ifaceCallExprAssigns[call], ifaceAssignRef{n, idx, 0}) + } + } else { + assign(n.X, rhs) + } + } + case ir.OAS2: + n := n.(*ir.AssignListStmt) + for i, p := range n.Lhs { + if n.Rhs[i] != nil { + assign(p, n.Rhs[i]) + } + } + case ir.OAS2DOTTYPE: + n := n.(*ir.AssignListStmt) + if n.Rhs[0] == nil { + base.Fatalf("n.Rhs[0] == nil; n = %v", n) + } + assign(n.Lhs[0], n.Rhs[0]) + assign(n.Lhs[1], nil) // boolean does not have methods to devirtualize + case ir.OAS2MAPR, ir.OAS2RECV, ir.OSELRECV2: + n := n.(*ir.AssignListStmt) + if n.Rhs[0] == nil { + base.Fatalf("n.Rhs[0] == nil; n = %v", n) + } + assign(n.Lhs[0], n.Rhs[0].Type()) + assign(n.Lhs[1], nil) // boolean does not have methods to devirtualize + case ir.OAS2FUNC: + n := n.(*ir.AssignListStmt) + rhs := n.Rhs[0] + for { + if r, ok := rhs.(*ir.ParenExpr); ok { + rhs = r.X + continue + } + break + } + if call, ok := rhs.(*ir.CallExpr); ok { + for i, p := range n.Lhs { + retTyp := call.Fun.Type().Results()[i].Type + n, idx := assign(p, retTyp) + if n != nil && retTyp.IsInterface() { + // We have a call expression, that returns an interface, store it for later evaluation. + // In case this func gets inlined later, we will update the assignment (added before) + // with a reference to ReturnVars, see [State.InlinedCall], which might allow for future devirtualizing of n.X. + s.ifaceCallExprAssigns[call] = append(s.ifaceCallExprAssigns[call], ifaceAssignRef{n, idx, i}) + } + } + } else if call, ok := rhs.(*ir.InlinedCallExpr); ok { + for i, p := range n.Lhs { + assign(p, call.ReturnVars[i]) + } + } else { + base.Fatalf("unexpected type %T in OAS2FUNC Rhs[0]", call) + } + case ir.ORANGE: + n := n.(*ir.RangeStmt) + xTyp := n.X.Type() + + // Range over an array pointer. + if xTyp.IsPtr() && xTyp.Elem().IsArray() { + xTyp = xTyp.Elem() + } + + if xTyp.IsArray() || xTyp.IsSlice() { + assign(n.Key, nil) // integer does not have methods to devirtualize + assign(n.Value, xTyp.Elem()) + } else if xTyp.IsChan() { + assign(n.Key, xTyp.Elem()) + base.Assertf(n.Value == nil, "n.Value != nil in range over chan") + } else if xTyp.IsMap() { + assign(n.Key, xTyp.Key()) + assign(n.Value, xTyp.Elem()) + } else if xTyp.IsInteger() || xTyp.IsString() { + // Range over int/string, results do not have methods, so nothing to devirtualize. + assign(n.Key, nil) + assign(n.Value, nil) + } else { + // We will not reach here in case of an range-over-func, as it is + // rewrtten to function calls in the noder package. 
+ base.Fatalf("range over unexpected type %v", n.X.Type()) + } + case ir.OSWITCH: + n := n.(*ir.SwitchStmt) + if guard, ok := n.Tag.(*ir.TypeSwitchGuard); ok { + for _, v := range n.Cases { + if v.Var == nil { + base.Assert(guard.Tag == nil) + continue + } + assign(v.Var, guard.X) + } + } + case ir.OCLOSURE: + n := n.(*ir.ClosureExpr) + if _, ok := s.analyzedFuncs[n.Func]; !ok { + s.analyzedFuncs[n.Func] = struct{}{} + ir.Visit(n.Func, do) + } + } + } + ir.VisitList(nodes, do) +} diff --git a/src/cmd/compile/internal/inline/interleaved/interleaved.go b/src/cmd/compile/internal/inline/interleaved/interleaved.go index 954cc306fc81d3..c83bbdb718df56 100644 --- a/src/cmd/compile/internal/inline/interleaved/interleaved.go +++ b/src/cmd/compile/internal/inline/interleaved/interleaved.go @@ -45,6 +45,8 @@ func DevirtualizeAndInlinePackage(pkg *ir.Package, profile *pgoir.Profile) { inlState := make(map[*ir.Func]*inlClosureState) calleeUseCounts := make(map[*ir.Func]int) + var state devirtualize.State + // Pre-process all the functions, adding parentheses around call sites and starting their "inl state". for _, fn := range typecheck.Target.Funcs { bigCaller := base.Flag.LowerL != 0 && inline.IsBigFunc(fn) @@ -58,7 +60,7 @@ func DevirtualizeAndInlinePackage(pkg *ir.Package, profile *pgoir.Profile) { // Do a first pass at counting call sites. for i := range s.parens { - s.resolve(i) + s.resolve(&state, i) } } @@ -102,10 +104,11 @@ func DevirtualizeAndInlinePackage(pkg *ir.Package, profile *pgoir.Profile) { for { for i := l0; i < l1; i++ { // can't use "range parens" here paren := s.parens[i] - if new := s.edit(i); new != nil { + if origCall, inlinedCall := s.edit(&state, i); inlinedCall != nil { // Update AST and recursively mark nodes. - paren.X = new - ir.EditChildren(new, s.mark) // mark may append to parens + paren.X = inlinedCall + ir.EditChildren(inlinedCall, s.mark) // mark may append to parens + state.InlinedCall(s.fn, origCall, inlinedCall) done = false } } @@ -114,7 +117,7 @@ func DevirtualizeAndInlinePackage(pkg *ir.Package, profile *pgoir.Profile) { break } for i := l0; i < l1; i++ { - s.resolve(i) + s.resolve(&state, i) } } @@ -188,7 +191,7 @@ type inlClosureState struct { // resolve attempts to resolve a call to a potentially inlineable callee // and updates use counts on the callees. Returns the call site count // for that callee. -func (s *inlClosureState) resolve(i int) (*ir.Func, int) { +func (s *inlClosureState) resolve(state *devirtualize.State, i int) (*ir.Func, int) { p := s.parens[i] if i < len(s.resolved) { if callee := s.resolved[i]; callee != nil { @@ -200,7 +203,7 @@ func (s *inlClosureState) resolve(i int) (*ir.Func, int) { if !ok { // previously inlined return nil, -1 } - devirtualize.StaticCall(call) + devirtualize.StaticCall(state, call) if callee := inline.InlineCallTarget(s.fn, call, s.profile); callee != nil { for len(s.resolved) <= i { s.resolved = append(s.resolved, nil) @@ -213,23 +216,23 @@ func (s *inlClosureState) resolve(i int) (*ir.Func, int) { return nil, 0 } -func (s *inlClosureState) edit(i int) ir.Node { +func (s *inlClosureState) edit(state *devirtualize.State, i int) (*ir.CallExpr, *ir.InlinedCallExpr) { n := s.parens[i].X call, ok := n.(*ir.CallExpr) if !ok { - return nil + return nil, nil } // This is redundant with earlier calls to // resolve, but because things can change it // must be re-checked. 
- callee, count := s.resolve(i) + callee, count := s.resolve(state, i) if count <= 0 { - return nil + return nil, nil } if inlCall := inline.TryInlineCall(s.fn, call, s.bigCaller, s.profile, count == 1 && callee.ClosureParent != nil); inlCall != nil { - return inlCall + return call, inlCall } - return nil + return nil, nil } // Mark inserts parentheses, and is called repeatedly. @@ -338,16 +341,18 @@ func (s *inlClosureState) unparenthesize() { // returns. func (s *inlClosureState) fixpoint() bool { changed := false + var state devirtualize.State ir.WithFunc(s.fn, func() { done := false for !done { done = true for i := 0; i < len(s.parens); i++ { // can't use "range parens" here paren := s.parens[i] - if new := s.edit(i); new != nil { + if origCall, inlinedCall := s.edit(&state, i); inlinedCall != nil { // Update AST and recursively mark nodes. - paren.X = new - ir.EditChildren(new, s.mark) // mark may append to parens + paren.X = inlinedCall + ir.EditChildren(inlinedCall, s.mark) // mark may append to parens + state.InlinedCall(s.fn, origCall, inlinedCall) done = false changed = true } diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go index 9d34f27ce53363..037957b676a033 100644 --- a/src/cmd/compile/internal/ir/expr.go +++ b/src/cmd/compile/internal/ir/expr.go @@ -677,6 +677,11 @@ type TypeAssertExpr struct { // An internal/abi.TypeAssert descriptor to pass to the runtime. Descriptor *obj.LSym + + // When set to true, if this assert would panic, then use a nil pointer panic + // instead of an interface conversion panic. + // It must not be set for type asserts using the commaok form. + UseNilPanic bool } func NewTypeAssertExpr(pos src.XPos, x Node, typ *types.Type) *TypeAssertExpr { diff --git a/src/cmd/compile/internal/noder/reader.go b/src/cmd/compile/internal/noder/reader.go index 41eb2dce1cc50f..a8a45b02697079 100644 --- a/src/cmd/compile/internal/noder/reader.go +++ b/src/cmd/compile/internal/noder/reader.go @@ -2961,6 +2961,7 @@ func (r *reader) multiExpr() []ir.Node { as.Def = true for i := range results { tmp := r.temp(pos, r.typ()) + tmp.Defn = as as.PtrInit().Append(ir.NewDecl(pos, ir.ODCL, tmp)) as.Lhs.Append(tmp) diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go index 1e2159579dfbf2..6c4afb78959791 100644 --- a/src/cmd/compile/internal/ssagen/ssa.go +++ b/src/cmd/compile/internal/ssagen/ssa.go @@ -5827,6 +5827,25 @@ func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Val if n.ITab != nil { targetItab = s.expr(n.ITab) } + + if n.UseNilPanic { + if commaok { + base.Fatalf("unexpected *ir.TypeAssertExpr with UseNilPanic == true && commaok == true") + } + if n.Type().IsInterface() { + // Currently we do not expect the compiler to emit type asserts with UseNilPanic, that assert to an interface type. + // If needed, this can be relaxed in the future, but for now we can assert that. 
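+ // (Today only devirtualize.StaticCall sets UseNilPanic, and it does so on an
+ // assert to the concrete receiver type, not to an interface type.)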
+ base.Fatalf("unexpected *ir.TypeAssertExpr with UseNilPanic == true && Type().IsInterface() == true") + } + typs := s.f.Config.Types + iface = s.newValue2( + ssa.OpIMake, + iface.Type, + s.nilCheck(s.newValue1(ssa.OpITab, typs.BytePtr, iface)), + s.newValue1(ssa.OpIData, typs.BytePtr, iface), + ) + } + return s.dottype1(n.Pos(), n.X.Type(), n.Type(), iface, nil, target, targetItab, commaok, n.Descriptor) } diff --git a/src/crypto/sha256/sha256_test.go b/src/crypto/sha256/sha256_test.go index 11b24db7d6b0a0..a18a536ba2896f 100644 --- a/src/crypto/sha256/sha256_test.go +++ b/src/crypto/sha256/sha256_test.go @@ -471,3 +471,17 @@ func BenchmarkHash256K(b *testing.B) { func BenchmarkHash1M(b *testing.B) { benchmarkSize(b, 1024*1024) } + +func TestAllocatonsWithTypeAsserts(t *testing.T) { + cryptotest.SkipTestAllocations(t) + allocs := testing.AllocsPerRun(100, func() { + h := New() + h.Write([]byte{1, 2, 3}) + marshaled, _ := h.(encoding.BinaryMarshaler).MarshalBinary() + marshaled, _ = h.(encoding.BinaryAppender).AppendBinary(marshaled[:0]) + h.(encoding.BinaryUnmarshaler).UnmarshalBinary(marshaled) + }) + if allocs != 0 { + t.Fatalf("allocs = %v; want = 0", allocs) + } +} diff --git a/src/runtime/pprof/pprof_test.go b/src/runtime/pprof/pprof_test.go index 25a2f3b32415d7..a538627099034e 100644 --- a/src/runtime/pprof/pprof_test.go +++ b/src/runtime/pprof/pprof_test.go @@ -344,6 +344,11 @@ func (h inlineWrapper) dump(pcs []uintptr) { func inlinedWrapperCallerDump(pcs []uintptr) { var h inlineWrapperInterface + + // Take the address of h, such that h.dump() call (below) + // does not get devirtualized by the compiler. + _ = &h + h = &inlineWrapper{} h.dump(pcs) } diff --git a/test/devirtualization.go b/test/devirtualization.go new file mode 100644 index 00000000000000..e3319052945e00 --- /dev/null +++ b/test/devirtualization.go @@ -0,0 +1,1277 @@ +// errorcheck -0 -m + +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
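+
+// This test uses the -m diagnostics of errorcheck to verify which interface
+// method calls the compiler devirtualizes (and then inlines), which ones it
+// must leave as dynamic calls, and the resulting escape-analysis decisions.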
+ +package escape + +type M interface{ M() } + +type A interface{ A() } + +type C interface{ C() } + +type Impl struct{} + +func (*Impl) M() {} // ERROR "can inline \(\*Impl\).M$" + +func (*Impl) A() {} // ERROR "can inline \(\*Impl\).A$" + +type Impl2 struct{} + +func (*Impl2) M() {} // ERROR "can inline \(\*Impl2\).M$" + +func (*Impl2) A() {} // ERROR "can inline \(\*Impl2\).A$" + +type CImpl struct{} + +func (CImpl) C() {} // ERROR "can inline CImpl.C$" + +func typeAsserts() { + var a M = &Impl{} // ERROR "&Impl{} does not escape$" + + a.(M).M() // ERROR "devirtualizing a.\(M\).M to \*Impl$" "inlining call to \(\*Impl\).M" + a.(A).A() // ERROR "devirtualizing a.\(A\).A to \*Impl$" "inlining call to \(\*Impl\).A" + a.(*Impl).M() // ERROR "inlining call to \(\*Impl\).M" + a.(*Impl).A() // ERROR "inlining call to \(\*Impl\).A" + + v := a.(M) + v.M() // ERROR "devirtualizing v.M to \*Impl$" "inlining call to \(\*Impl\).M" + v.(A).A() // ERROR "devirtualizing v.\(A\).A to \*Impl$" "inlining call to \(\*Impl\).A" + v.(*Impl).A() // ERROR "inlining call to \(\*Impl\).A" + v.(*Impl).M() // ERROR "inlining call to \(\*Impl\).M" + + v2 := a.(A) + v2.A() // ERROR "devirtualizing v2.A to \*Impl$" "inlining call to \(\*Impl\).A" + v2.(M).M() // ERROR "devirtualizing v2.\(M\).M to \*Impl$" "inlining call to \(\*Impl\).M" + v2.(*Impl).A() // ERROR "inlining call to \(\*Impl\).A" + v2.(*Impl).M() // ERROR "inlining call to \(\*Impl\).M" + + a.(M).(A).A() // ERROR "devirtualizing a.\(M\).\(A\).A to \*Impl$" "inlining call to \(\*Impl\).A" + a.(A).(M).M() // ERROR "devirtualizing a.\(A\).\(M\).M to \*Impl$" "inlining call to \(\*Impl\).M" + + a.(M).(A).(*Impl).A() // ERROR "inlining call to \(\*Impl\).A" + a.(A).(M).(*Impl).M() // ERROR "inlining call to \(\*Impl\).M" + + any(a).(M).M() // ERROR "devirtualizing any\(a\).\(M\).M to \*Impl$" "inlining call to \(\*Impl\).M" + any(a).(A).A() // ERROR "devirtualizing any\(a\).\(A\).A to \*Impl$" "inlining call to \(\*Impl\).A" + any(a).(M).(any).(A).A() // ERROR "devirtualizing any\(a\).\(M\).\(any\).\(A\).A to \*Impl$" "inlining call to \(\*Impl\).A" + + c := any(a) + c.(A).A() // ERROR "devirtualizing c.\(A\).A to \*Impl$" "inlining call to \(\*Impl\).A" + c.(M).M() // ERROR "devirtualizing c.\(M\).M to \*Impl$" "inlining call to \(\*Impl\).M" + + M(a).M() // ERROR "devirtualizing M\(a\).M to \*Impl$" "inlining call to \(\*Impl\).M" + M(M(a)).M() // ERROR "devirtualizing M\(M\(a\)\).M to \*Impl$" "inlining call to \(\*Impl\).M" + + a2 := a.(A) + A(a2).A() // ERROR "devirtualizing A\(a2\).A to \*Impl$" "inlining call to \(\*Impl\).A" + A(A(a2)).A() // ERROR "devirtualizing A\(A\(a2\)\).A to \*Impl$" "inlining call to \(\*Impl\).A" + + { + var a C = &CImpl{} // ERROR "&CImpl{} does not escape$" + a.(any).(C).C() // ERROR "devirtualizing a.\(any\).\(C\).C to \*CImpl$" "inlining call to CImpl.C" + a.(any).(*CImpl).C() // ERROR "inlining call to CImpl.C" + } +} + +func typeAssertsWithOkReturn() { + { + var a M = &Impl{} // ERROR "&Impl{} does not escape$" + if v, ok := a.(M); ok { + v.M() // ERROR "devirtualizing v.M to \*Impl$" "inlining call to \(\*Impl\).M" + } + } + { + var a M = &Impl{} // ERROR "&Impl{} does not escape$" + if v, ok := a.(A); ok { + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + } + { + var a M = &Impl{} // ERROR "&Impl{} does not escape$" + v, ok := a.(M) + if ok { + v.M() // ERROR "devirtualizing v.M to \*Impl$" "inlining call to \(\*Impl\).M" + } + } + { + var a M = &Impl{} // ERROR "&Impl{} does not 
escape$" + v, ok := a.(A) + if ok { + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + } + { + var a M = &Impl{} // ERROR "&Impl{} does not escape$" + v, ok := a.(*Impl) + if ok { + v.A() // ERROR "inlining call to \(\*Impl\).A" + v.M() // ERROR "inlining call to \(\*Impl\).M" + } + } + { + var a M = &Impl{} // ERROR "&Impl{} does not escape$" + v, _ := a.(M) + v.M() // ERROR "devirtualizing v.M to \*Impl$" "inlining call to \(\*Impl\).M" + } + { + var a M = &Impl{} // ERROR "&Impl{} does not escape$" + v, _ := a.(A) + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + { + var a M = &Impl{} // ERROR "&Impl{} does not escape$" + v, _ := a.(*Impl) + v.A() // ERROR "inlining call to \(\*Impl\).A" + v.M() // ERROR "inlining call to \(\*Impl\).M" + } + { + a := newM() // ERROR "&Impl{} does not escape$" "inlining call to newM" + callA(a) // ERROR "devirtualizing m.\(A\).A to \*Impl$" "inlining call to \(\*Impl\).A" "inlining call to callA" + callIfA(a) // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" "inlining call to callIfA" + } + { + _, a := newM2ret() // ERROR "&Impl{} does not escape$" "inlining call to newM2ret" + callA(a) // ERROR "devirtualizing m.\(A\).A to \*Impl$" "inlining call to \(\*Impl\).A" "inlining call to callA" + callIfA(a) // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" "inlining call to callIfA" + } + { + var a M = &Impl{} // ERROR "&Impl{} does not escape$" + // Note the !ok condition, devirtualizing here is fine. + if v, ok := a.(M); !ok { + v.M() // ERROR "devirtualizing v.M to \*Impl$" "inlining call to \(\*Impl\).M" + } + } + { + var a A = newImplNoInline() + if v, ok := a.(M); ok { + v.M() // ERROR "devirtualizing v.M to \*Impl$" "inlining call to \(\*Impl\).M" + } + } + { + var impl2InA A = &Impl2{} // ERROR "&Impl2{} does not escape$" + var a A + a, _ = impl2InA.(*Impl) + // a now contains the zero value of *Impl + a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + { + a := newANoInline() + a.A() + } + { + _, a := newANoInlineRet2() + a.A() + } +} + +func newM() M { // ERROR "can inline newM$" + return &Impl{} // ERROR "&Impl{} escapes to heap$" +} + +func newM2ret() (int, M) { // ERROR "can inline newM2ret$" + return -1, &Impl{} // ERROR "&Impl{} escapes to heap$" +} + +func callA(m M) { // ERROR "can inline callA$" "leaking param: m$" + m.(A).A() +} + +func callIfA(m M) { // ERROR "can inline callIfA$" "leaking param: m$" + if v, ok := m.(A); ok { + v.A() + } +} + +//go:noinline +func newImplNoInline() *Impl { + return &Impl{} // ERROR "&Impl{} escapes to heap$" +} + +//go:noinline +func newImpl2ret2() (string, *Impl2) { + return "str", &Impl2{} // ERROR "&Impl2{} escapes to heap$" +} + +//go:noinline +func newImpl2() *Impl2 { + return &Impl2{} // ERROR "&Impl2{} escapes to heap$" +} + +//go:noinline +func newANoInline() A { + return &Impl{} // ERROR "&Impl{} escapes to heap$" +} + +//go:noinline +func newANoInlineRet2() (string, A) { + return "", &Impl{} // ERROR "&Impl{} escapes to heap$" +} + +func testTypeSwitch() { + { + var v A = &Impl{} // ERROR "&Impl{} does not escape$" + switch v := v.(type) { + case A: + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + case M: + v.M() // ERROR "devirtualizing v.M to \*Impl$" "inlining call to \(\*Impl\).M" + } + } + { + var v A = &Impl{} // ERROR "&Impl{} does not escape$" + switch v := v.(type) { + case A: + v.A() // ERROR 
"devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + case M: + v.M() // ERROR "devirtualizing v.M to \*Impl$" "inlining call to \(\*Impl\).M" + v = &Impl{} // ERROR "&Impl{} does not escape$" + v.M() // ERROR "devirtualizing v.M to \*Impl$" "inlining call to \(\*Impl\).M" + } + v.(M).M() // ERROR "devirtualizing v.\(M\).M to \*Impl$" "inlining call to \(\*Impl\).M" + } + { + var v A = &Impl{} // ERROR "&Impl{} escapes to heap$" + switch v1 := v.(type) { + case A: + v1.A() + case M: + v1.M() + v = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + } + } + { + var v A = &Impl{} // ERROR "&Impl{} escapes to heap$" + switch v := v.(type) { + case A: + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + case M: + v.M() // ERROR "devirtualizing v.M to \*Impl$" "inlining call to \(\*Impl\).M" + case C: + v.C() + } + } + { + var v A = &Impl{} // ERROR "&Impl{} does not escape$" + switch v := v.(type) { + case M: + v.M() // ERROR "devirtualizing v.M to \*Impl$" "inlining call to \(\*Impl\).M" + default: + panic("does not implement M") // ERROR ".does not implement M. escapes to heap$" + } + } +} + +func differentTypeAssign() { + { + var a A + a = &Impl{} // ERROR "&Impl{} escapes to heap$" + a = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + a.A() + } + { + a := A(&Impl{}) // ERROR "&Impl{} escapes to heap$" + a = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + a.A() + } + { + a := A(&Impl{}) // ERROR "&Impl{} escapes to heap$" + a.A() + a = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + } + { + a := A(&Impl{}) // ERROR "&Impl{} escapes to heap$" + a = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + var asAny any = a + asAny.(A).A() + } + { + a := A(&Impl{}) // ERROR "&Impl{} escapes to heap$" + var asAny any = a + asAny = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + asAny.(A).A() + } + { + a := A(&Impl{}) // ERROR "&Impl{} escapes to heap$" + var asAny any = a + asAny.(A).A() + asAny = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + { + var a A + a = &Impl{} // ERROR "&Impl{} escapes to heap$" + a = newImpl2() + a.A() + } + { + var a A + a = &Impl{} // ERROR "&Impl{} escapes to heap$" + _, a = newImpl2ret2() + a.A() + } +} + +func assignWithTypeAssert() { + { + var i1 A = &Impl{} // ERROR "&Impl{} does not escape$" + var i2 A = &Impl2{} // ERROR "&Impl2{} does not escape$" + i1 = i2.(*Impl) // this will panic + i1.A() // ERROR "devirtualizing i1.A to \*Impl$" "inlining call to \(\*Impl\).A" + i2.A() // ERROR "devirtualizing i2.A to \*Impl2$" "inlining call to \(\*Impl2\).A" + } + { + var i1 A = &Impl{} // ERROR "&Impl{} does not escape$" + var i2 A = &Impl2{} // ERROR "&Impl2{} does not escape$" + i1, _ = i2.(*Impl) // i1 is going to be nil + i1.A() // ERROR "devirtualizing i1.A to \*Impl$" "inlining call to \(\*Impl\).A" + i2.A() // ERROR "devirtualizing i2.A to \*Impl2$" "inlining call to \(\*Impl2\).A" + } +} + +func nilIface() { + { + var v A = &Impl{} // ERROR "&Impl{} does not escape$" + v = nil + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + { + var v A = &Impl{} // ERROR "&Impl{} does not escape$" + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + v = nil + } + { + var nilIface A + var v A = &Impl{} // ERROR "&Impl{} does not escape$" + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + v = nilIface + } + { + var nilIface A + var v A = &Impl{} // ERROR "&Impl{} 
does not escape$" + v = nilIface + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + { + var v A + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + v = &Impl{} // ERROR "&Impl{} does not escape$" + } + { + var v A + var v2 A = v + v2.A() // ERROR "devirtualizing v2.A to \*Impl$" "inlining call to \(\*Impl\).A" + v2 = &Impl{} // ERROR "&Impl{} does not escape$" + } + { + var v A + v.A() + } + { + var v A + var v2 A = v + v2.A() + } + { + var v A + var v2 A + v2 = v + v2.A() + } +} + +func longDevirtTest() { + var a interface { + M + A + } = &Impl{} // ERROR "&Impl{} does not escape$" + + { + var b A = a + b.A() // ERROR "devirtualizing b.A to \*Impl$" "inlining call to \(\*Impl\).A" + b.(M).M() // ERROR "devirtualizing b.\(M\).M to \*Impl$" "inlining call to \(\*Impl\).M" + } + { + var b M = a + b.M() // ERROR "devirtualizing b.M to \*Impl$" "inlining call to \(\*Impl\).M" + b.(A).A() // ERROR "devirtualizing b.\(A\).A to \*Impl$" "inlining call to \(\*Impl\).A" + } + { + var b A = a.(M).(A) + b.A() // ERROR "devirtualizing b.A to \*Impl$" "inlining call to \(\*Impl\).A" + b.(M).M() // ERROR "devirtualizing b.\(M\).M to \*Impl$" "inlining call to \(\*Impl\).M" + } + { + var b M = a.(A).(M) + b.M() // ERROR "devirtualizing b.M to \*Impl$" "inlining call to \(\*Impl\).M" + b.(A).A() // ERROR "devirtualizing b.\(A\).A to \*Impl$" "inlining call to \(\*Impl\).A" + } + + if v, ok := a.(A); ok { + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + + if v, ok := a.(M); ok { + v.M() // ERROR "devirtualizing v.M to \*Impl$" "inlining call to \(\*Impl\).M" + } + + { + var c A = a + + if v, ok := c.(A); ok { + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + + c = &Impl{} // ERROR "&Impl{} does not escape$" + + if v, ok := c.(M); ok { + v.M() // ERROR "devirtualizing v.M to \*Impl$" "inlining call to \(\*Impl\).M" + } + + if v, ok := c.(interface { + A + M + }); ok { + v.M() // ERROR "devirtualizing v.M to \*Impl$" "inlining call to \(\*Impl\).M" + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + } +} + +func deferDevirt() { + var a A + defer func() { // ERROR "can inline deferDevirt.func1$" "func literal does not escape$" + a = &Impl{} // ERROR "&Impl{} escapes to heap$" + }() + a = &Impl{} // ERROR "&Impl{} does not escape$" + a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A" +} + +func deferNoDevirt() { + var a A + defer func() { // ERROR "can inline deferNoDevirt.func1$" "func literal does not escape$" + a = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + }() + a = &Impl{} // ERROR "&Impl{} escapes to heap$" + a.A() +} + +//go:noinline +func closureDevirt() { + var a A + func() { // ERROR "func literal does not escape$" + // defer so that it does not lnline. + defer func() {}() // ERROR "can inline closureDevirt.func1.1$" "func literal does not escape$" + a = &Impl{} // ERROR "&Impl{} escapes to heap$" + }() + a = &Impl{} // ERROR "&Impl{} does not escape$" + a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A" +} + +//go:noinline +func closureNoDevirt() { + var a A + func() { // ERROR "func literal does not escape$" + // defer so that it does not lnline. 
+ defer func() {}() // ERROR "can inline closureNoDevirt.func1.1$" "func literal does not escape$" + a = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + }() + a = &Impl{} // ERROR "&Impl{} escapes to heap$" + a.A() +} + +var global = "1" + +func closureDevirt2() { + var a A + a = &Impl{} // ERROR "&Impl{} does not escape$" + c := func() { // ERROR "can inline closureDevirt2.func1$" "func literal does not escape$" + a = &Impl{} // ERROR "&Impl{} escapes to heap$" + } + if global == "1" { + c = func() { // ERROR "can inline closureDevirt2.func2$" "func literal does not escape$" + a = &Impl{} // ERROR "&Impl{} escapes to heap$" + } + } + a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A" + c() +} + +func closureNoDevirt2() { + var a A + a = &Impl{} // ERROR "&Impl{} escapes to heap$" + c := func() { // ERROR "can inline closureNoDevirt2.func1$" "func literal does not escape$" + a = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + } + if global == "1" { + c = func() { // ERROR "can inline closureNoDevirt2.func2$" "func literal does not escape$" + a = &Impl{} // ERROR "&Impl{} escapes to heap$" + } + } + a.A() + c() +} + +//go:noinline +func closureDevirt3() { + var a A = &Impl{} // ERROR "&Impl{} does not escape$" + func() { // ERROR "func literal does not escape$" + // defer so that it does not lnline. + defer func() {}() // ERROR "can inline closureDevirt3.func1.1$" "func literal does not escape$" + a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A" + }() + func() { // ERROR "can inline closureDevirt3.func2$" + a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A" + }() // ERROR "inlining call to closureDevirt3.func2" "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A" +} + +//go:noinline +func closureNoDevirt3() { + var a A = &Impl{} // ERROR "&Impl{} escapes to heap$" + func() { // ERROR "func literal does not escape$" + // defer so that it does not lnline. 
+ defer func() {}() // ERROR "can inline closureNoDevirt3.func1.1$" "func literal does not escape$" + a.A() + }() + func() { // ERROR "can inline closureNoDevirt3.func2$" + a.A() + }() // ERROR "inlining call to closureNoDevirt3.func2" + a = &Impl2{} // ERROR "&Impl2{} escapes to heap$" +} + +//go:noinline +func varDeclaredInClosureReferencesOuter() { + var a A = &Impl{} // ERROR "&Impl{} does not escape$" + func() { // ERROR "func literal does not escape$" + // defer for noinline + defer func() {}() // ERROR "can inline varDeclaredInClosureReferencesOuter.func1.1$" "func literal does not escape$" + var v A = a + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + }() + func() { // ERROR "func literal does not escape$" + // defer for noinline + defer func() {}() // ERROR "can inline varDeclaredInClosureReferencesOuter.func2.1$" "func literal does not escape$" + var v A = a + v = &Impl{} // ERROR "&Impl{} does not escape$" + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + }() + + var b A = &Impl{} // ERROR "&Impl{} escapes to heap$" + func() { // ERROR "func literal does not escape$" + // defer for noinline + defer func() {}() // ERROR "can inline varDeclaredInClosureReferencesOuter.func3.1$" "func literal does not escape$" + var v A = b + v = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + v.A() + }() + func() { // ERROR "func literal does not escape$" + // defer for noinline + defer func() {}() // ERROR "can inline varDeclaredInClosureReferencesOuter.func4.1$" "func literal does not escape$" + var v A = b + v.A() + v = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + }() +} + +//go:noinline +func testNamedReturn0() (v A) { + v = &Impl{} // ERROR "&Impl{} escapes to heap$" + v.A() + return +} + +//go:noinline +func testNamedReturn1() (v A) { + v = &Impl{} // ERROR "&Impl{} escapes to heap$" + v.A() + return &Impl{} // ERROR "&Impl{} escapes to heap$" +} + +func testNamedReturns3() (v A) { + v = &Impl{} // ERROR "&Impl{} escapes to heap$" + defer func() { // ERROR "can inline testNamedReturns3.func1$" "func literal does not escape$" + v.A() + }() + v.A() + return &Impl2{} // ERROR "&Impl2{} escapes to heap$" +} + +var ( + globalImpl = &Impl{} + globalImpl2 = &Impl2{} + globalA A = &Impl{} + globalM M = &Impl{} +) + +func globals() { + { + globalA.A() + globalA.(M).M() + globalM.M() + globalM.(A).A() + + a := globalA + a.A() + a.(M).M() + + m := globalM + m.M() + m.(A).A() + } + { + var a A = &Impl{} // ERROR "&Impl{} does not escape$" + a = globalImpl + a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + { + var a A = &Impl{} // ERROR "&Impl{} does not escape$" + a = A(globalImpl) + a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + { + var a A = &Impl{} // ERROR "&Impl{} does not escape$" + a = M(globalImpl).(A) + a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + { + var a A = &Impl{} // ERROR "&Impl{} does not escape$" + a = globalA.(*Impl) + a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A" + a = globalM.(*Impl) + a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + { + var a A = &Impl{} // ERROR "&Impl{} escapes to heap$" + a = globalImpl2 + a.A() + } + { + var a A = &Impl{} // ERROR "&Impl{} escapes to heap$" + a = globalA + a.A() + } + { + var a A = &Impl{} // ERROR "&Impl{} escapes to heap$" + a = globalM.(A) + a.A() + } +} + +func mapsDevirt() { + { + m := 
make(map[int]*Impl) // ERROR "make\(map\[int\]\*Impl\) does not escape$" + var v A = m[0] + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + v.(M).M() // ERROR "devirtualizing v.\(M\).M to \*Impl$" "inlining call to \(\*Impl\).M" + } + { + m := make(map[int]*Impl) // ERROR "make\(map\[int\]\*Impl\) does not escape$" + var v A + var ok bool + if v, ok = m[0]; ok { + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + { + m := make(map[int]*Impl) // ERROR "make\(map\[int\]\*Impl\) does not escape$" + var v A + v, _ = m[0] + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + } +} + +func mapsNoDevirt() { + { + m := make(map[int]*Impl) // ERROR "make\(map\[int\]\*Impl\) does not escape$" + var v A = m[0] + v.A() + v = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + v.(M).M() + } + { + m := make(map[int]*Impl) // ERROR "make\(map\[int\]\*Impl\) does not escape$" + var v A + var ok bool + if v, ok = m[0]; ok { + v.A() + } + v = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + v.A() + } + { + m := make(map[int]*Impl) // ERROR "make\(map\[int\]\*Impl\) does not escape$" + var v A = &Impl{} // ERROR "&Impl{} escapes to heap$" + v, _ = m[0] + v = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + v.A() + } + + { + m := make(map[int]A) // ERROR "make\(map\[int\]A\) does not escape$" + var v A = &Impl{} // ERROR "&Impl{} escapes to heap$" + v = m[0] + v.A() + } + { + m := make(map[int]A) // ERROR "make\(map\[int\]A\) does not escape$" + var v A = &Impl{} // ERROR "&Impl{} escapes to heap$" + var ok bool + if v, ok = m[0]; ok { + v.A() + } + v.A() + } + { + m := make(map[int]A) // ERROR "make\(map\[int\]A\) does not escape$" + var v A = &Impl{} // ERROR "&Impl{} escapes to heap$" + v, _ = m[0] + v.A() + } +} + +func chanDevirt() { + { + m := make(chan *Impl) + var v A = <-m + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + { + m := make(chan *Impl) + var v A + v = <-m + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + { + m := make(chan *Impl) + var v A + v, _ = <-m + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + { + m := make(chan *Impl) + var v A + var ok bool + if v, ok = <-m; ok { + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + { + m := make(chan *Impl) + var v A + var ok bool + if v, ok = <-m; ok { + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + select { + case <-m: + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + case v = <-m: + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + case v, ok = <-m: + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + } +} + +func chanNoDevirt() { + { + m := make(chan *Impl) + var v A = <-m + v = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + v.A() + } + { + m := make(chan *Impl) + var v A + v = <-m + v = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + v.A() + } + { + m := make(chan *Impl) + var v A + v, _ = <-m + v = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + v.A() + } + { + m := make(chan *Impl) + var v A + var ok bool + if v, ok = <-m; ok { + v.A() + } + v = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + v.A() + } + { 
+ m := make(chan *Impl) + var v A = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + var ok bool + if v, ok = <-m; ok { + v.A() + } + } + { + m := make(chan *Impl) + var v A = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + select { + case v = <-m: + v.A() + } + v.A() + } + { + m := make(chan *Impl) + var v A = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + select { + case v, _ = <-m: + v.A() + } + v.A() + } + + { + m := make(chan A) + var v A = &Impl{} // ERROR "&Impl{} escapes to heap$" + v = <-m + v.A() + } + { + m := make(chan A) + var v A = &Impl{} // ERROR "&Impl{} escapes to heap$" + v, _ = <-m + v.A() + } + { + m := make(chan A) + var v A = &Impl{} // ERROR "&Impl{} escapes to heap$" + var ok bool + if v, ok = <-m; ok { + v.A() + } + } + { + m := make(chan A) + var v A = &Impl{} // ERROR "&Impl{} escapes to heap$" + select { + case v = <-m: + v.A() + } + v.A() + } + { + m := make(chan A) + var v A = &Impl{} // ERROR "&Impl{} escapes to heap$" + select { + case v, _ = <-m: + v.A() + } + v.A() + } +} + +func rangeDevirt() { + { + var v A + m := make(map[*Impl]struct{}) // ERROR "make\(map\[\*Impl\]struct {}\) does not escape$" + v = &Impl{} // ERROR "&Impl{} does not escape$" + for v = range m { + } + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + { + var v A + m := make(map[*Impl]*Impl) // ERROR "make\(map\[\*Impl\]\*Impl\) does not escape$" + v = &Impl{} // ERROR "&Impl{} does not escape$" + for v = range m { + } + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + { + var v A + m := make(map[*Impl]*Impl) // ERROR "make\(map\[\*Impl\]\*Impl\) does not escape$" + v = &Impl{} // ERROR "&Impl{} does not escape$" + for _, v = range m { + } + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + { + var v A + m := make(chan *Impl) + v = &Impl{} // ERROR "&Impl{} does not escape$" + for v = range m { + } + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + { + var v A + m := []*Impl{} // ERROR "\[\]\*Impl{} does not escape$" + v = &Impl{} // ERROR "&Impl{} does not escape$" + for _, v = range m { + } + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + { + var v A + v = &Impl{} // ERROR "&Impl{} does not escape$" + impl := &Impl{} // ERROR "&Impl{} does not escape$" + i := 0 + for v = impl; i < 10; i++ { + } + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + { + var v A + v = &Impl{} // ERROR "&Impl{} does not escape$" + impl := &Impl{} // ERROR "&Impl{} does not escape$" + i := 0 + for v = impl; i < 10; i++ { + } + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + { + var v A + m := [1]*Impl{&Impl{}} // ERROR "&Impl{} does not escape$" + v = &Impl{} // ERROR "&Impl{} does not escape$" + for _, v = range m { + } + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + { + var v A + m := [1]*Impl{&Impl{}} // ERROR "&Impl{} does not escape$" + v = &Impl{} // ERROR "&Impl{} does not escape$" + for _, v = range &m { + } + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + } +} + +func rangeNoDevirt() { + { + var v A = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + m := make(map[*Impl]struct{}) // ERROR "make\(map\[\*Impl\]struct {}\) does not escape$" + for v = range m { + } + v.A() + } + { + var v A = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + m := make(map[*Impl]*Impl) // ERROR 
"make\(map\[\*Impl\]\*Impl\) does not escape$" + for v = range m { + } + v.A() + } + { + var v A = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + m := make(map[*Impl]*Impl) // ERROR "make\(map\[\*Impl\]\*Impl\) does not escape$" + for _, v = range m { + } + v.A() + } + { + var v A = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + m := make(chan *Impl) + for v = range m { + } + v.A() + } + { + var v A = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + m := []*Impl{} // ERROR "\[\]\*Impl{} does not escape$" + for _, v = range m { + } + v.A() + } + { + var v A + v = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + impl := &Impl{} // ERROR "&Impl{} escapes to heap$" + i := 0 + for v = impl; i < 10; i++ { + } + v.A() + } + { + var v A + v = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + impl := &Impl{} // ERROR "&Impl{} escapes to heap$" + i := 0 + for v = impl; i < 10; i++ { + } + v.A() + } + { + var v A + m := [1]*Impl{&Impl{}} // ERROR "&Impl{} escapes to heap$" + v = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + for _, v = range m { + } + v.A() + } + { + var v A + m := [1]*Impl{&Impl{}} // ERROR "&Impl{} escapes to heap$" + v = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + for _, v = range &m { + } + v.A() + } + + { + var v A = &Impl{} // ERROR "&Impl{} escapes to heap$" + m := make(map[A]struct{}) // ERROR "make\(map\[A\]struct {}\) does not escape$" + for v = range m { + } + v.A() + } + { + var v A = &Impl{} // ERROR "&Impl{} escapes to heap$" + m := make(map[A]A) // ERROR "make\(map\[A\]A\) does not escape$" + for v = range m { + } + v.A() + } + { + var v A = &Impl{} // ERROR "&Impl{} escapes to heap$" + m := make(map[A]A) // ERROR "make\(map\[A\]A\) does not escape$" + for _, v = range m { + } + v.A() + } + { + var v A = &Impl{} // ERROR "&Impl{} escapes to heap$" + m := make(chan A) + for v = range m { + } + v.A() + } + { + var v A = &Impl{} // ERROR "&Impl{} escapes to heap$" + m := []A{} // ERROR "\[\]A{} does not escape$" + for _, v = range m { + } + v.A() + } + + { + var v A + m := [1]A{&Impl{}} // ERROR "&Impl{} escapes to heap$" + v = &Impl{} // ERROR "&Impl{} escapes to heap$" + for _, v = range m { + } + v.A() + } + { + var v A + m := [1]A{&Impl{}} // ERROR "&Impl{} escapes to heap$" + v = &Impl{} // ERROR "&Impl{} escapes to heap$" + for _, v = range &m { + } + v.A() + } +} + +var globalInt = 1 + +func testIfInit() { + { + var a A = &Impl{} // ERROR "&Impl{} does not escape$" + var i = &Impl{} // ERROR "&Impl{} does not escape$" + if a = i; globalInt == 1 { + a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A" + a.(M).M() // ERROR "devirtualizing a.\(M\).M to \*Impl$" "inlining call to \(\*Impl\).M" + } + { + var a A = &Impl{} // ERROR "&Impl{} escapes to heap$" + var i2 = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + if a = i2; globalInt == 1 { + a.A() + } + a.A() + } +} + +func testSwitchInit() { + { + var a A = &Impl{} // ERROR "&Impl{} does not escape$" + var i = &Impl{} // ERROR "&Impl{} does not escape$" + switch a = i; globalInt { + case 12: + a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A" + a.(M).M() // ERROR "devirtualizing a.\(M\).M to \*Impl$" "inlining call to \(\*Impl\).M" + } + { + var a A = &Impl{} // ERROR "&Impl{} escapes to heap$" + var i2 = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + switch a = i2; globalInt { + case 12: + 
a.A() + } + a.A() + } +} + +type implWrapper Impl + +func (implWrapper) A() {} // ERROR "can inline implWrapper.A$" + +//go:noinline +func devirtWrapperType() { + { + i := &Impl{} // ERROR "&Impl{} does not escape$" + // This is an OCONVNOP, so we have to be careful, not to devirtualize it to Impl.A. + var a A = (*implWrapper)(i) + a.A() // ERROR "devirtualizing a.A to \*implWrapper$" "inlining call to implWrapper.A" + } + { + i := Impl{} + // This is an OCONVNOP, so we have to be careful, not to devirtualize it to Impl.A. + var a A = (implWrapper)(i) // ERROR "implWrapper\(i\) does not escape$" + a.A() // ERROR "devirtualizing a.A to implWrapper$" "inlining call to implWrapper.A" + } +} + +func selfAssigns() { + { + var a A = &Impl{} // ERROR "&Impl{} does not escape$" + a = a + a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + { + var a A = &Impl{} // ERROR "&Impl{} does not escape" + var asAny any = a + asAny = asAny + asAny.(A).A() // ERROR "devirtualizing asAny.\(A\).A to \*Impl$" "inlining call to \(\*Impl\).A" + } + { + var a A = &Impl{} // ERROR "&Impl{} does not escape" + var asAny any = a + a = asAny.(A) + asAny.(A).A() // ERROR "devirtualizing asAny.\(A\).A to \*Impl$" "inlining call to \(\*Impl\).A" + a.(A).A() // ERROR "devirtualizing a.\(A\).A to \*Impl$" "inlining call to \(\*Impl\).A" + b := a + b.(A).A() // ERROR "devirtualizing b.\(A\).A to \*Impl$" "inlining call to \(\*Impl\).A" + } + { + var a A = &Impl{} // ERROR "&Impl{} does not escape" + var asAny any = a + asAny = asAny + a = asAny.(A) + asAny = a + asAny.(A).A() // ERROR "devirtualizing asAny.\(A\).A to \*Impl$" "inlining call to \(\*Impl\).A" + asAny.(M).M() // ERROR "devirtualizing asAny.\(M\).M to \*Impl$" "inlining call to \(\*Impl\).M" + } + { + var a A = &Impl{} // ERROR "&Impl{} does not escape" + var asAny A = a + a = asAny.(A) + a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + { + var a, b, c A + c = &Impl{} // ERROR "&Impl{} does not escape$" + a = c + c = b + b = c + a = b + b = a + c = a + a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A" + } +} + +func boolNoDevirt() { + { + m := make(map[int]*Impl) // ERROR "make\(map\[int\]\*Impl\) does not escape$" + var v any = &Impl{} // ERROR "&Impl{} escapes to heap$" + _, v = m[0] // ERROR ".autotmp_[0-9]+ escapes to heap$" + v.(A).A() + } + { + m := make(chan *Impl) + var v any = &Impl{} // ERROR "&Impl{} escapes to heap$" + select { + case _, v = <-m: // ERROR ".autotmp_[0-9]+ escapes to heap$" + } + v.(A).A() + } + { + m := make(chan *Impl) + var v any = &Impl{} // ERROR "&Impl{} escapes to heap$" + _, v = <-m // ERROR ".autotmp_[0-9]+ escapes to heap$" + v.(A).A() + } + { + var a any = 4 // ERROR "4 does not escape$" + var v any = &Impl{} // ERROR "&Impl{} escapes to heap$" + _, v = a.(int) // ERROR ".autotmp_[0-9]+ escapes to heap$" + v.(A).A() + } +} + +func addrTaken() { + { + var a A = &Impl{} // ERROR "&Impl{} escapes to heap$" + var ptrA = &a + a.A() + _ = ptrA + } + { + var a A = &Impl{} // ERROR "&Impl{} escapes to heap$" + var ptrA = &a + *ptrA = &Impl{} // ERROR "&Impl{} escapes to heap$" + a.A() + } + { + var a A = &Impl{} // ERROR "&Impl{} escapes to heap$" + var ptrA = &a + *ptrA = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + a.A() + } +} + +func testInvalidAsserts() { + any(0).(interface{ A() }).A() // ERROR "any\(0\) escapes to heap$" + { + var a M = &Impl{} // ERROR "&Impl{} escapes to heap$" + a.(C).C() // this will panic + 
a.(any).(C).C() // this will panic + } + { + var a C = &CImpl{} // ERROR "&CImpl{} escapes to heap$" + a.(M).M() // this will panic + a.(any).(M).M() // this will panic + } + { + var a C = &CImpl{} // ERROR "&CImpl{} does not escape$" + + // this will panic + a.(M).(*Impl).M() // ERROR "inlining call to \(\*Impl\).M" + + // this will panic + a.(any).(M).(*Impl).M() // ERROR "inlining call to \(\*Impl\).M" + } +} + +type namedBool bool + +func (namedBool) M() {} // ERROR "can inline namedBool.M$" + +//go:noinline +func namedBoolTest() { + m := map[int]int{} // ERROR "map\[int\]int{} does not escape" + var ok namedBool + _, ok = m[5] + var i M = ok // ERROR "ok does not escape" + i.M() // ERROR "devirtualizing i.M to namedBool$" "inlining call to namedBool.M" +} diff --git a/test/devirtualization_nil_panics.go b/test/devirtualization_nil_panics.go new file mode 100644 index 00000000000000..59da454be7f910 --- /dev/null +++ b/test/devirtualization_nil_panics.go @@ -0,0 +1,100 @@ +// run + +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "fmt" + "runtime" + "strings" +) + +type A interface{ A() } + +type Impl struct{} + +func (*Impl) A() {} + +type Impl2 struct{} + +func (*Impl2) A() {} + +func main() { + shouldNilPanic(28, func() { + var v A + v.A() + v = &Impl{} + }) + shouldNilPanic(36, func() { + var v A + defer func() { + v = &Impl{} + }() + v.A() + }) + shouldNilPanic(43, func() { + var v A + f := func() { + v = &Impl{} + } + v.A() + f() + }) + + // Make sure that both devirtualized and non devirtualized + // variants have the panic at the same line. + shouldNilPanic(55, func() { + var v A + defer func() { + v = &Impl{} + }() + v. // A() is on a sepearate line + A() + }) + shouldNilPanic(64, func() { + var v A + defer func() { + v = &Impl{} + v = &Impl2{} // assign different type, such that the call below does not get devirtualized + }() + v. // A() is on a sepearate line + A() + }) +} + +var cnt = 0 + +func shouldNilPanic(wantLine int, f func()) { + cnt++ + defer func() { + p := recover() + if p == nil { + panic("no nil deref panic") + } + if strings.Contains(fmt.Sprintf("%s", p), "invalid memory address or nil pointer dereference") { + callers := make([]uintptr, 128) + n := runtime.Callers(0, callers) + callers = callers[:n] + + frames := runtime.CallersFrames(callers) + line := -1 + for f, next := frames.Next(); next; f, next = frames.Next() { + if f.Func.Name() == fmt.Sprintf("main.main.func%v", cnt) { + line = f.Line + break + } + } + + if line != wantLine { + panic(fmt.Sprintf("invalid line number in panic = %v; want = %v", line, wantLine)) + } + + return + } + panic(p) + }() + f() +} diff --git a/test/devirtualization_with_type_assertions_interleaved.go b/test/devirtualization_with_type_assertions_interleaved.go new file mode 100644 index 00000000000000..6bad3beb9aa0d2 --- /dev/null +++ b/test/devirtualization_with_type_assertions_interleaved.go @@ -0,0 +1,139 @@ +// errorcheck -0 -m + +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
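+
+// This test checks that devirtualization still fires when the concrete type of
+// an interface value only becomes visible after another call (here a Clone
+// method reached through a type assertion) has been inlined by the interleaved
+// inliner/devirtualizer.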
+ +package escape + +type hashIface interface { + Sum() []byte +} + +type cloneableHashIface interface { + hashIface + Clone() hashIface +} + +type hash struct{ state [32]byte } + +func (h *hash) Sum() []byte { // ERROR "can inline \(\*hash\).Sum$" "h does not escape$" + return make([]byte, 32) // ERROR "make\(\[\]byte, 32\) escapes to heap$" +} + +func (h *hash) Clone() hashIface { // ERROR "can inline \(\*hash\).Clone$" "h does not escape$" + c := *h // ERROR "moved to heap: c$" + return &c +} + +type hash2 struct{ state [32]byte } + +func (h *hash2) Sum() []byte { // ERROR "can inline \(\*hash2\).Sum$" "h does not escape$" + return make([]byte, 32) // ERROR "make\(\[\]byte, 32\) escapes to heap$" +} + +func (h *hash2) Clone() hashIface { // ERROR "can inline \(\*hash2\).Clone$" "h does not escape$" + c := *h // ERROR "moved to heap: c$" + return &c +} + +func newHash() hashIface { // ERROR "can inline newHash$" + return &hash{} // ERROR "&hash{} escapes to heap$" +} + +func cloneHash1(h hashIface) hashIface { // ERROR "can inline cloneHash1$" "leaking param: h$" + if h, ok := h.(cloneableHashIface); ok { + return h.Clone() + } + return &hash{} // ERROR "&hash{} escapes to heap$" +} + +func cloneHash2(h hashIface) hashIface { // ERROR "can inline cloneHash2$" "leaking param: h$" + if h, ok := h.(cloneableHashIface); ok { + return h.Clone() + } + return nil +} + +func cloneHash3(h hashIface) hashIface { // ERROR "can inline cloneHash3$" "leaking param: h$" + if h, ok := h.(cloneableHashIface); ok { + return h.Clone() + } + return &hash2{} // ERROR "&hash2{} escapes to heap$" +} + +func cloneHashWithBool1(h hashIface) (hashIface, bool) { // ERROR "can inline cloneHashWithBool1$" "leaking param: h$" + if h, ok := h.(cloneableHashIface); ok { + return h.Clone(), true + } + return &hash{}, false // ERROR "&hash{} escapes to heap$" +} + +func cloneHashWithBool2(h hashIface) (hashIface, bool) { // ERROR "can inline cloneHashWithBool2$" "leaking param: h$" + if h, ok := h.(cloneableHashIface); ok { + return h.Clone(), true + } + return nil, false +} + +func cloneHashWithBool3(h hashIface) (hashIface, bool) { // ERROR "can inline cloneHashWithBool3$" "leaking param: h$" + if h, ok := h.(cloneableHashIface); ok { + return h.Clone(), true + } + return &hash2{}, false // ERROR "&hash2{} escapes to heap$" +} + +func interleavedWithTypeAssertions() { + h1 := newHash() // ERROR "&hash{} does not escape$" "inlining call to newHash" + _ = h1.Sum() // ERROR "devirtualizing h1.Sum to \*hash$" "inlining call to \(\*hash\).Sum" "make\(\[\]byte, 32\) does not escape$" + + h2 := cloneHash1(h1) // ERROR "&hash{} does not escape$" "devirtualizing h.Clone to \*hash$" "inlining call to \(\*hash\).Clone" "inlining call to cloneHash1" + _ = h2.Sum() // ERROR "devirtualizing h2.Sum to \*hash$" "inlining call to \(\*hash\).Sum" "make\(\[\]byte, 32\) does not escape$" + + h3 := cloneHash2(h1) // ERROR "devirtualizing h.Clone to \*hash$" "inlining call to \(\*hash\).Clone" "inlining call to cloneHash2" + _ = h3.Sum() // ERROR "devirtualizing h3.Sum to \*hash$" "inlining call to \(\*hash\).Sum" "make\(\[\]byte, 32\) does not escape$" + + h4 := cloneHash3(h1) // ERROR "&hash2{} escapes to heap$" "devirtualizing h.Clone to \*hash$" "inlining call to \(\*hash\).Clone" "inlining call to cloneHash3" "moved to heap: c$" + _ = h4.Sum() + + h5, _ := cloneHashWithBool1(h1) // ERROR "&hash{} does not escape$" "devirtualizing h.Clone to \*hash$" "inlining call to \(\*hash\).Clone" "inlining call to cloneHashWithBool1" + _ = 
h5.Sum() // ERROR "devirtualizing h5.Sum to \*hash$" "inlining call to \(\*hash\).Sum" "make\(\[\]byte, 32\) does not escape$" + + h6, _ := cloneHashWithBool2(h1) // ERROR "devirtualizing h.Clone to \*hash$" "inlining call to \(\*hash\).Clone" "inlining call to cloneHashWithBool2" + _ = h6.Sum() // ERROR "devirtualizing h6.Sum to \*hash$" "inlining call to \(\*hash\).Sum" "make\(\[\]byte, 32\) does not escape$" + + h7, _ := cloneHashWithBool3(h1) // ERROR "&hash2{} escapes to heap$" "devirtualizing h.Clone to \*hash$" "inlining call to \(\*hash\).Clone" "inlining call to cloneHashWithBool3" "moved to heap: c$" + _ = h7.Sum() +} + +type cloneableHashError interface { + hashIface + Clone() (hashIface, error) +} + +type hash3 struct{ state [32]byte } + +func newHash3() hashIface { // ERROR "can inline newHash3$" + return &hash3{} // ERROR "&hash3{} escapes to heap$" +} + +func (h *hash3) Sum() []byte { // ERROR "can inline \(\*hash3\).Sum$" "h does not escape$" + return make([]byte, 32) // ERROR "make\(\[\]byte, 32\) escapes to heap$" +} + +func (h *hash3) Clone() (hashIface, error) { // ERROR "can inline \(\*hash3\).Clone$" "h does not escape$" + c := *h // ERROR "moved to heap: c$" + return &c, nil +} + +func interleavedCloneableHashError() { + h1 := newHash3() // ERROR "&hash3{} does not escape$" "inlining call to newHash3" + _ = h1.Sum() // ERROR "devirtualizing h1.Sum to \*hash3$" "inlining call to \(\*hash3\).Sum" "make\(\[\]byte, 32\) does not escape$" + + if h1, ok := h1.(cloneableHashError); ok { + h2, err := h1.Clone() // ERROR "devirtualizing h1.Clone to \*hash3$" "inlining call to \(\*hash3\).Clone" + if err == nil { + _ = h2.Sum() // ERROR "devirtualizing h2.Sum to \*hash3$" "inlining call to \(\*hash3\).Sum" "make\(\[\]byte, 32\) does not escape$" + } + } +} diff --git a/test/fixedbugs/issue42284.dir/a.go b/test/fixedbugs/issue42284.dir/a.go index ccf54fad54a03c..e55f190d7ee571 100644 --- a/test/fixedbugs/issue42284.dir/a.go +++ b/test/fixedbugs/issue42284.dir/a.go @@ -22,9 +22,8 @@ func g() { h := E() // ERROR "inlining call to E" "T\(0\) does not escape" h.M() // ERROR "devirtualizing h.M to T" "inlining call to T.M" - // BAD: T(0) could be stack allocated. - i := F(T(0)) // ERROR "inlining call to F" "T\(0\) escapes to heap" + i := F(T(0)) // ERROR "inlining call to F" "T\(0\) does not escape" - // Testing that we do NOT devirtualize here: - i.M() + // It is fine that we devirtualize here, as we add an additional nilcheck. + i.M() // ERROR "devirtualizing i.M to T" "inlining call to T.M" } diff --git a/test/fixedbugs/issue42284.dir/b.go b/test/fixedbugs/issue42284.dir/b.go index 559de59184460a..4a0b7cea102e88 100644 --- a/test/fixedbugs/issue42284.dir/b.go +++ b/test/fixedbugs/issue42284.dir/b.go @@ -10,9 +10,8 @@ func g() { h := a.E() // ERROR "inlining call to a.E" "T\(0\) does not escape" h.M() // ERROR "devirtualizing h.M to a.T" "inlining call to a.T.M" - // BAD: T(0) could be stack allocated. - i := a.F(a.T(0)) // ERROR "inlining call to a.F" "a.T\(0\) escapes to heap" + i := a.F(a.T(0)) // ERROR "inlining call to a.F" "a.T\(0\) does not escape" - // Testing that we do NOT devirtualize here: - i.M() + // It is fine that we devirtualize here, as we add an additional nilcheck. 
+ i.M() // ERROR "devirtualizing i.M to a.T" "inlining call to a.T.M" } From ee163197a879cf19aa9758bc544c717445284311 Mon Sep 17 00:00:00 2001 From: Federico Guerinoni Date: Tue, 7 Oct 2025 01:13:14 +0200 Subject: [PATCH 104/152] path/filepath: return cleaned path from Rel As the doc says, Rel should return a cleaned path. Fixes #75763 Change-Id: Ic0f5a3b1da3cc4cf3c31fdb1a88ebcc4ea6f9169 Reviewed-on: https://go-review.googlesource.com/c/go/+/709675 Reviewed-by: Michael Pratt LUCI-TryBot-Result: Go LUCI Reviewed-by: Sean Liao Auto-Submit: Sean Liao Reviewed-by: Carlos Amedee --- src/path/filepath/path.go | 2 +- src/path/filepath/path_test.go | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/src/path/filepath/path.go b/src/path/filepath/path.go index 5ffd9f0b6c3fba..dc6975242a928c 100644 --- a/src/path/filepath/path.go +++ b/src/path/filepath/path.go @@ -248,7 +248,7 @@ func Rel(basepath, targpath string) (string, error) { buf[n] = Separator copy(buf[n+1:], targ[t0:]) } - return string(buf), nil + return Clean(string(buf)), nil } return targ[t0:], nil } diff --git a/src/path/filepath/path_test.go b/src/path/filepath/path_test.go index 7ea02a7c282378..efdb3c603592ee 100644 --- a/src/path/filepath/path_test.go +++ b/src/path/filepath/path_test.go @@ -1506,6 +1506,7 @@ var reltests = []RelTests{ {"/../../a/b", "/../../a/b/c/d", "c/d"}, {".", "a/b", "a/b"}, {".", "..", ".."}, + {"", "../../.", "../.."}, // can't do purely lexically {"..", ".", "err"}, From 4837fbe4145cd47b43eed66fee9eed9c2b988316 Mon Sep 17 00:00:00 2001 From: Sean Liao Date: Sun, 5 Oct 2025 23:09:03 +0100 Subject: [PATCH 105/152] net/http/httptest: check whether response bodies are allowed Fixes #75471 Change-Id: Ie8fc5fae4b2a9285501198d8379bbffe51ee63f7 Reviewed-on: https://go-review.googlesource.com/c/go/+/709335 Reviewed-by: Damien Neil Reviewed-by: Michael Pratt LUCI-TryBot-Result: Go LUCI --- src/net/http/httptest/recorder.go | 22 ++++++++++++++++++++++ src/net/http/httptest/recorder_test.go | 16 ++++++++++++++++ 2 files changed, 38 insertions(+) diff --git a/src/net/http/httptest/recorder.go b/src/net/http/httptest/recorder.go index 17aa70f06760a2..7890b5ef6bdf31 100644 --- a/src/net/http/httptest/recorder.go +++ b/src/net/http/httptest/recorder.go @@ -105,6 +105,10 @@ func (rw *ResponseRecorder) writeHeader(b []byte, str string) { // Write implements http.ResponseWriter. The data in buf is written to // rw.Body, if not nil. func (rw *ResponseRecorder) Write(buf []byte) (int, error) { + code := rw.Code + if !bodyAllowedForStatus(code) { + return 0, http.ErrBodyNotAllowed + } rw.writeHeader(buf, "") if rw.Body != nil { rw.Body.Write(buf) @@ -115,6 +119,10 @@ func (rw *ResponseRecorder) Write(buf []byte) (int, error) { // WriteString implements [io.StringWriter]. The data in str is written // to rw.Body, if not nil. func (rw *ResponseRecorder) WriteString(str string) (int, error) { + code := rw.Code + if !bodyAllowedForStatus(code) { + return 0, http.ErrBodyNotAllowed + } rw.writeHeader(nil, str) if rw.Body != nil { rw.Body.WriteString(str) @@ -122,6 +130,20 @@ func (rw *ResponseRecorder) WriteString(str string) (int, error) { return len(str), nil } +// bodyAllowedForStatus reports whether a given response status code +// permits a body. See RFC 7230, section 3.3. 
+func bodyAllowedForStatus(status int) bool { + switch { + case status >= 100 && status <= 199: + return false + case status == 204: + return false + case status == 304: + return false + } + return true +} + func checkWriteHeaderCode(code int) { // Issue 22880: require valid WriteHeader status codes. // For now we only enforce that it's three digits. diff --git a/src/net/http/httptest/recorder_test.go b/src/net/http/httptest/recorder_test.go index 4782eced43e6ce..abf8e118d6092e 100644 --- a/src/net/http/httptest/recorder_test.go +++ b/src/net/http/httptest/recorder_test.go @@ -5,6 +5,7 @@ package httptest import ( + "errors" "fmt" "io" "net/http" @@ -309,6 +310,21 @@ func TestRecorder(t *testing.T) { } } +func TestBodyNotAllowed(t *testing.T) { + rw := NewRecorder() + rw.WriteHeader(204) + + _, err := rw.Write([]byte("hello world")) + if !errors.Is(err, http.ErrBodyNotAllowed) { + t.Errorf("expected BodyNotAllowed for Write after 204, got: %v", err) + } + + _, err = rw.WriteString("hello world") + if !errors.Is(err, http.ErrBodyNotAllowed) { + t.Errorf("expected BodyNotAllowed for WriteString after 204, got: %v", err) + } +} + // issue 39017 - disallow Content-Length values such as "+3" func TestParseContentLength(t *testing.T) { tests := []struct { From 0e466a8d1d89e15e953c7d35bcd9e02d3c89f62b Mon Sep 17 00:00:00 2001 From: David Chase Date: Mon, 21 Jul 2025 13:30:08 -0400 Subject: [PATCH 106/152] cmd/compile: modify float-to-[u]int so that amd64 and arm64 match Eventual goal is that all the architectures agree, and are sensible. The test will be build-tagged to exclude not-yet-handled platforms. This change also bisects the conversion change in case of bugs. (`bisect -compile=convert ...`) Change-Id: I98528666b0a3fde17cbe8d69b612d01da18dce85 Reviewed-on: https://go-review.googlesource.com/c/go/+/691135 LUCI-TryBot-Result: Go LUCI Reviewed-by: Keith Randall Reviewed-by: Keith Randall --- src/cmd/compile/internal/base/debug.go | 1 + src/cmd/compile/internal/base/flag.go | 3 + src/cmd/compile/internal/base/hashdebug.go | 1 + src/cmd/compile/internal/ssa/_gen/AMD64.rules | 17 +- src/cmd/compile/internal/ssa/rewriteAMD64.go | 197 ++++++++++++- src/cmd/compile/internal/ssagen/ssa.go | 72 ++++- test/convert5.go | 268 ++++++++++++++++++ test/convert5.out | 37 +++ 8 files changed, 569 insertions(+), 27 deletions(-) create mode 100644 test/convert5.go create mode 100644 test/convert5.out diff --git a/src/cmd/compile/internal/base/debug.go b/src/cmd/compile/internal/base/debug.go index 85873dcc40e1b3..9e8ab2f488bb4d 100644 --- a/src/cmd/compile/internal/base/debug.go +++ b/src/cmd/compile/internal/base/debug.go @@ -20,6 +20,7 @@ type DebugFlags struct { Append int `help:"print information about append compilation"` Checkptr int `help:"instrument unsafe pointer conversions\n0: instrumentation disabled\n1: conversions involving unsafe.Pointer are instrumented\n2: conversions to unsafe.Pointer force heap allocation" concurrent:"ok"` Closure int `help:"print information about closure compilation"` + Converthash string `help:"hash value for use in debugging changes to platform-dependent float-to-[u]int conversion" concurrent:"ok"` Defer int `help:"print information about defer compilation"` DisableNil int `help:"disable nil checks" concurrent:"ok"` DumpInlFuncProps string `help:"dump function properties from inl heuristics to specified file"` diff --git a/src/cmd/compile/internal/base/flag.go b/src/cmd/compile/internal/base/flag.go index a0ed876cfc8e0e..1ac2cecc61ec7e 100644 --- 
a/src/cmd/compile/internal/base/flag.go +++ b/src/cmd/compile/internal/base/flag.go @@ -262,6 +262,9 @@ func ParseFlags() { Debug.LoopVar = 1 } + if Debug.Converthash != "" { + ConvertHash = NewHashDebug("converthash", Debug.Converthash, nil) + } if Debug.Fmahash != "" { FmaHash = NewHashDebug("fmahash", Debug.Fmahash, nil) } diff --git a/src/cmd/compile/internal/base/hashdebug.go b/src/cmd/compile/internal/base/hashdebug.go index fa63deb46a3c01..edf567457cb04b 100644 --- a/src/cmd/compile/internal/base/hashdebug.go +++ b/src/cmd/compile/internal/base/hashdebug.go @@ -53,6 +53,7 @@ func (d *HashDebug) SetInlineSuffixOnly(b bool) *HashDebug { // The default compiler-debugging HashDebug, for "-d=gossahash=..." var hashDebug *HashDebug +var ConvertHash *HashDebug // for debugging float-to-[u]int conversion changes var FmaHash *HashDebug // for debugging fused-multiply-add floating point changes var LoopVarHash *HashDebug // for debugging shared/private loop variable changes var PGOHash *HashDebug // for debugging PGO optimization decisions diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64.rules b/src/cmd/compile/internal/ssa/_gen/AMD64.rules index 7d3efef5cdc837..0bea99e38de1bc 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/AMD64.rules @@ -162,10 +162,19 @@ (Cvt64to32F ...) => (CVTSQ2SS ...) (Cvt64to64F ...) => (CVTSQ2SD ...) -(Cvt32Fto32 ...) => (CVTTSS2SL ...) -(Cvt32Fto64 ...) => (CVTTSS2SQ ...) -(Cvt64Fto32 ...) => (CVTTSD2SL ...) -(Cvt64Fto64 ...) => (CVTTSD2SQ ...) +// Float, to int. +// To make AMD64 "overflow" return max positive instead of max negative, compute +// y and not x, smear the sign bit, and xor. +(Cvt32Fto32 x) && base.ConvertHash.MatchPos(v.Pos, nil) => (XORL y (SARLconst [31] (ANDL y:(CVTTSS2SL x) (NOTL (MOVLf2i x))))) +(Cvt64Fto32 x) && base.ConvertHash.MatchPos(v.Pos, nil) => (XORL y (SARLconst [31] (ANDL y:(CVTTSD2SL x) (NOTL (MOVLf2i (CVTSD2SS x)))))) + +(Cvt32Fto64 x) && base.ConvertHash.MatchPos(v.Pos, nil) => (XORQ y (SARQconst [63] (ANDQ y:(CVTTSS2SQ x) (NOTQ (MOVQf2i (CVTSS2SD x))) ))) +(Cvt64Fto64 x) && base.ConvertHash.MatchPos(v.Pos, nil) => (XORQ y (SARQconst [63] (ANDQ y:(CVTTSD2SQ x) (NOTQ (MOVQf2i x))))) + +(Cvt32Fto32 x) && !base.ConvertHash.MatchPos(v.Pos, nil) => (CVTTSS2SL x) +(Cvt32Fto64 x) && !base.ConvertHash.MatchPos(v.Pos, nil) => (CVTTSS2SQ x) +(Cvt64Fto32 x) && !base.ConvertHash.MatchPos(v.Pos, nil) => (CVTTSD2SL x) +(Cvt64Fto64 x) && !base.ConvertHash.MatchPos(v.Pos, nil) => (CVTTSD2SQ x) (Cvt32Fto64F ...) => (CVTSS2SD ...) (Cvt64Fto32F ...) => (CVTSD2SS ...) 
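The comment on the new Cvt rules above ("compute y and not x, smear the sign bit, and xor") can be modeled in a few lines of Go. This is an illustrative sketch only, not code from this CL; the first argument of fixup stands in for the bare CVTTSS2SL result, which on x86 is math.MinInt32 (the "integer indefinite" value) whenever the input is out of range.

    package main

    import (
    	"fmt"
    	"math"
    )

    // fixup models the MOVLf2i/NOTL/ANDL/SARLconst/XORL sequence for the
    // 32-bit case: y is the raw CVTTSS2SL result and x the original operand.
    func fixup(y int32, x float32) int32 {
    	fx := int32(math.Float32bits(x)) // MOVLf2i: reinterpret the float's bits
    	m := (y &^ fx) >> 31             // ANDL of y with NOTL(fx); SARLconst 31 smears the sign bit
    	return y ^ m                     // XORL: flips MinInt32 to MaxInt32 only on positive overflow
    }

    func main() {
    	fmt.Println(fixup(math.MinInt32, float32(1e10)))  // 2147483647: positive overflow saturates high
    	fmt.Println(fixup(math.MinInt32, float32(-1e10))) // -2147483648: negative overflow stays at the minimum
    	fmt.Println(fixup(123, 123.5))                    // 123: in-range values pass through unchanged
    }

The 64-bit rules are the same trick with 63-bit shifts, and the mixed-width variants first convert the operand to the float width whose sign bit they need to inspect, which is why CVTSD2SS and CVTSS2SD feed the MOVLf2i/MOVQf2i operands above.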
diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index a7ee632ae1af72..e702925f5f3bac 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -5,6 +5,7 @@ package ssa import "internal/buildcfg" import "math" import "cmd/internal/obj" +import "cmd/compile/internal/base" import "cmd/compile/internal/types" func rewriteValueAMD64(v *Value) bool { @@ -694,11 +695,9 @@ func rewriteValueAMD64(v *Value) bool { case OpCtz8NonZero: return rewriteValueAMD64_OpCtz8NonZero(v) case OpCvt32Fto32: - v.Op = OpAMD64CVTTSS2SL - return true + return rewriteValueAMD64_OpCvt32Fto32(v) case OpCvt32Fto64: - v.Op = OpAMD64CVTTSS2SQ - return true + return rewriteValueAMD64_OpCvt32Fto64(v) case OpCvt32Fto64F: v.Op = OpAMD64CVTSS2SD return true @@ -709,14 +708,12 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64CVTSL2SD return true case OpCvt64Fto32: - v.Op = OpAMD64CVTTSD2SL - return true + return rewriteValueAMD64_OpCvt64Fto32(v) case OpCvt64Fto32F: v.Op = OpAMD64CVTSD2SS return true case OpCvt64Fto64: - v.Op = OpAMD64CVTTSD2SQ - return true + return rewriteValueAMD64_OpCvt64Fto64(v) case OpCvt64to32F: v.Op = OpAMD64CVTSQ2SS return true @@ -25511,6 +25508,190 @@ func rewriteValueAMD64_OpCtz8NonZero(v *Value) bool { } return false } +func rewriteValueAMD64_OpCvt32Fto32(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Cvt32Fto32 x) + // cond: base.ConvertHash.MatchPos(v.Pos, nil) + // result: (XORL y (SARLconst [31] (ANDL y:(CVTTSS2SL x) (NOTL (MOVLf2i x))))) + for { + t := v.Type + x := v_0 + if !(base.ConvertHash.MatchPos(v.Pos, nil)) { + break + } + v.reset(OpAMD64XORL) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64SARLconst, t) + v0.AuxInt = int8ToAuxInt(31) + v1 := b.NewValue0(v.Pos, OpAMD64ANDL, t) + y := b.NewValue0(v.Pos, OpAMD64CVTTSS2SL, t) + y.AddArg(x) + v3 := b.NewValue0(v.Pos, OpAMD64NOTL, typ.Int32) + v4 := b.NewValue0(v.Pos, OpAMD64MOVLf2i, typ.UInt32) + v4.AddArg(x) + v3.AddArg(v4) + v1.AddArg2(y, v3) + v0.AddArg(v1) + v.AddArg2(y, v0) + return true + } + // match: (Cvt32Fto32 x) + // cond: !base.ConvertHash.MatchPos(v.Pos, nil) + // result: (CVTTSS2SL x) + for { + t := v.Type + x := v_0 + if !(!base.ConvertHash.MatchPos(v.Pos, nil)) { + break + } + v.reset(OpAMD64CVTTSS2SL) + v.Type = t + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpCvt32Fto64(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Cvt32Fto64 x) + // cond: base.ConvertHash.MatchPos(v.Pos, nil) + // result: (XORQ y (SARQconst [63] (ANDQ y:(CVTTSS2SQ x) (NOTQ (MOVQf2i (CVTSS2SD x))) ))) + for { + t := v.Type + x := v_0 + if !(base.ConvertHash.MatchPos(v.Pos, nil)) { + break + } + v.reset(OpAMD64XORQ) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64SARQconst, t) + v0.AuxInt = int8ToAuxInt(63) + v1 := b.NewValue0(v.Pos, OpAMD64ANDQ, t) + y := b.NewValue0(v.Pos, OpAMD64CVTTSS2SQ, t) + y.AddArg(x) + v3 := b.NewValue0(v.Pos, OpAMD64NOTQ, typ.Int64) + v4 := b.NewValue0(v.Pos, OpAMD64MOVQf2i, typ.UInt64) + v5 := b.NewValue0(v.Pos, OpAMD64CVTSS2SD, typ.Float64) + v5.AddArg(x) + v4.AddArg(v5) + v3.AddArg(v4) + v1.AddArg2(y, v3) + v0.AddArg(v1) + v.AddArg2(y, v0) + return true + } + // match: (Cvt32Fto64 x) + // cond: !base.ConvertHash.MatchPos(v.Pos, nil) + // result: (CVTTSS2SQ x) + for { + t := v.Type + x := v_0 + if !(!base.ConvertHash.MatchPos(v.Pos, nil)) { + break + } + v.reset(OpAMD64CVTTSS2SQ) + v.Type = t 
+ v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpCvt64Fto32(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Cvt64Fto32 x) + // cond: base.ConvertHash.MatchPos(v.Pos, nil) + // result: (XORL y (SARLconst [31] (ANDL y:(CVTTSD2SL x) (NOTL (MOVLf2i (CVTSD2SS x)))))) + for { + t := v.Type + x := v_0 + if !(base.ConvertHash.MatchPos(v.Pos, nil)) { + break + } + v.reset(OpAMD64XORL) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64SARLconst, t) + v0.AuxInt = int8ToAuxInt(31) + v1 := b.NewValue0(v.Pos, OpAMD64ANDL, t) + y := b.NewValue0(v.Pos, OpAMD64CVTTSD2SL, t) + y.AddArg(x) + v3 := b.NewValue0(v.Pos, OpAMD64NOTL, typ.Int32) + v4 := b.NewValue0(v.Pos, OpAMD64MOVLf2i, typ.UInt32) + v5 := b.NewValue0(v.Pos, OpAMD64CVTSD2SS, typ.Float32) + v5.AddArg(x) + v4.AddArg(v5) + v3.AddArg(v4) + v1.AddArg2(y, v3) + v0.AddArg(v1) + v.AddArg2(y, v0) + return true + } + // match: (Cvt64Fto32 x) + // cond: !base.ConvertHash.MatchPos(v.Pos, nil) + // result: (CVTTSD2SL x) + for { + t := v.Type + x := v_0 + if !(!base.ConvertHash.MatchPos(v.Pos, nil)) { + break + } + v.reset(OpAMD64CVTTSD2SL) + v.Type = t + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpCvt64Fto64(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Cvt64Fto64 x) + // cond: base.ConvertHash.MatchPos(v.Pos, nil) + // result: (XORQ y (SARQconst [63] (ANDQ y:(CVTTSD2SQ x) (NOTQ (MOVQf2i x))))) + for { + t := v.Type + x := v_0 + if !(base.ConvertHash.MatchPos(v.Pos, nil)) { + break + } + v.reset(OpAMD64XORQ) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64SARQconst, t) + v0.AuxInt = int8ToAuxInt(63) + v1 := b.NewValue0(v.Pos, OpAMD64ANDQ, t) + y := b.NewValue0(v.Pos, OpAMD64CVTTSD2SQ, t) + y.AddArg(x) + v3 := b.NewValue0(v.Pos, OpAMD64NOTQ, typ.Int64) + v4 := b.NewValue0(v.Pos, OpAMD64MOVQf2i, typ.UInt64) + v4.AddArg(x) + v3.AddArg(v4) + v1.AddArg2(y, v3) + v0.AddArg(v1) + v.AddArg2(y, v0) + return true + } + // match: (Cvt64Fto64 x) + // cond: !base.ConvertHash.MatchPos(v.Pos, nil) + // result: (CVTTSD2SQ x) + for { + t := v.Type + x := v_0 + if !(!base.ConvertHash.MatchPos(v.Pos, nil)) { + break + } + v.reset(OpAMD64CVTTSD2SQ) + v.Type = t + v.AddArg(x) + return true + } + return false +} func rewriteValueAMD64_OpDiv16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go index 6c4afb78959791..2705195bd55791 100644 --- a/src/cmd/compile/internal/ssagen/ssa.go +++ b/src/cmd/compile/internal/ssagen/ssa.go @@ -2574,13 +2574,13 @@ var fpConvOpToSSA = map[twoTypes]twoOpsAndType{ {types.TFLOAT32, types.TUINT8}: {ssa.OpCvt32Fto32, ssa.OpTrunc32to8, types.TINT32}, {types.TFLOAT32, types.TUINT16}: {ssa.OpCvt32Fto32, ssa.OpTrunc32to16, types.TINT32}, - {types.TFLOAT32, types.TUINT32}: {ssa.OpCvt32Fto64, ssa.OpTrunc64to32, types.TINT64}, // go wide to dodge unsigned - {types.TFLOAT32, types.TUINT64}: {ssa.OpInvalid, ssa.OpCopy, types.TUINT64}, // Cvt32Fto64U, branchy code expansion instead + {types.TFLOAT32, types.TUINT32}: {ssa.OpInvalid, ssa.OpCopy, types.TINT64}, // Cvt64Fto32U, branchy code expansion instead + {types.TFLOAT32, types.TUINT64}: {ssa.OpInvalid, ssa.OpCopy, types.TUINT64}, // Cvt32Fto64U, branchy code expansion instead {types.TFLOAT64, types.TUINT8}: {ssa.OpCvt64Fto32, ssa.OpTrunc32to8, types.TINT32}, {types.TFLOAT64, types.TUINT16}: {ssa.OpCvt64Fto32, ssa.OpTrunc32to16, types.TINT32}, - {types.TFLOAT64, 
types.TUINT32}: {ssa.OpCvt64Fto64, ssa.OpTrunc64to32, types.TINT64}, // go wide to dodge unsigned - {types.TFLOAT64, types.TUINT64}: {ssa.OpInvalid, ssa.OpCopy, types.TUINT64}, // Cvt64Fto64U, branchy code expansion instead + {types.TFLOAT64, types.TUINT32}: {ssa.OpInvalid, ssa.OpCopy, types.TINT64}, // Cvt64Fto32U, branchy code expansion instead + {types.TFLOAT64, types.TUINT64}: {ssa.OpInvalid, ssa.OpCopy, types.TUINT64}, // Cvt64Fto64U, branchy code expansion instead // float {types.TFLOAT64, types.TFLOAT32}: {ssa.OpCvt64Fto32F, ssa.OpCopy, types.TFLOAT32}, @@ -2860,10 +2860,23 @@ func (s *state) conv(n ir.Node, v *ssa.Value, ft, tt *types.Type) *ssa.Value { } // ft is float32 or float64, and tt is unsigned integer if ft.Size() == 4 { - return s.float32ToUint64(n, v, ft, tt) + switch tt.Size() { + case 8: + return s.float32ToUint64(n, v, ft, tt) + case 4, 2, 1: + // TODO should 2 and 1 saturate or truncate? + return s.float32ToUint32(n, v, ft, tt) + } } if ft.Size() == 8 { - return s.float64ToUint64(n, v, ft, tt) + switch tt.Size() { + case 8: + return s.float64ToUint64(n, v, ft, tt) + case 4, 2, 1: + // TODO should 2 and 1 saturate or truncate? + return s.float64ToUint32(n, v, ft, tt) + } + } s.Fatalf("weird float to unsigned integer conversion %v -> %v", ft, tt) return nil @@ -5553,7 +5566,9 @@ func (s *state) uint64Tofloat(cvttab *u642fcvtTab, n ir.Node, x *ssa.Value, ft, // equal to 10000000001; that rounds up, and the 1 cannot // be lost else it would round down if the LSB of the // candidate mantissa is 0. + cmp := s.newValue2(cvttab.leq, types.Types[types.TBOOL], s.zeroVal(ft), x) + b := s.endBlock() b.Kind = ssa.BlockIf b.SetControl(cmp) @@ -5779,34 +5794,61 @@ func (s *state) float64ToUint32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ss func (s *state) floatToUint(cvttab *f2uCvtTab, n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { // cutoff:=1<<(intY_Size-1) // if x < floatX(cutoff) { - // result = uintY(x) + // result = uintY(x) // bThen + // if x < 0 { // unlikely + // result = 0 // bZero + // } // } else { - // y = x - floatX(cutoff) + // y = x - floatX(cutoff) // bElse // z = uintY(y) // result = z | -(cutoff) // } + cutoff := cvttab.floatValue(s, ft, float64(cvttab.cutoff)) - cmp := s.newValue2(cvttab.ltf, types.Types[types.TBOOL], x, cutoff) + cmp := s.newValueOrSfCall2(cvttab.ltf, types.Types[types.TBOOL], x, cutoff) b := s.endBlock() b.Kind = ssa.BlockIf b.SetControl(cmp) b.Likely = ssa.BranchLikely - bThen := s.f.NewBlock(ssa.BlockPlain) + var bThen, bZero *ssa.Block + newConversion := base.ConvertHash.MatchPos(n.Pos(), nil) + if newConversion { + bZero = s.f.NewBlock(ssa.BlockPlain) + bThen = s.f.NewBlock(ssa.BlockIf) + } else { + bThen = s.f.NewBlock(ssa.BlockPlain) + } + bElse := s.f.NewBlock(ssa.BlockPlain) bAfter := s.f.NewBlock(ssa.BlockPlain) b.AddEdgeTo(bThen) s.startBlock(bThen) - a0 := s.newValue1(cvttab.cvt2U, tt, x) + a0 := s.newValueOrSfCall1(cvttab.cvt2U, tt, x) s.vars[n] = a0 - s.endBlock() - bThen.AddEdgeTo(bAfter) + + if newConversion { + cmpz := s.newValueOrSfCall2(cvttab.ltf, types.Types[types.TBOOL], x, cvttab.floatValue(s, ft, 0.0)) + s.endBlock() + bThen.SetControl(cmpz) + bThen.AddEdgeTo(bZero) + bThen.Likely = ssa.BranchUnlikely + bThen.AddEdgeTo(bAfter) + + s.startBlock(bZero) + s.vars[n] = cvttab.intValue(s, tt, 0) + s.endBlock() + bZero.AddEdgeTo(bAfter) + } else { + s.endBlock() + bThen.AddEdgeTo(bAfter) + } b.AddEdgeTo(bElse) s.startBlock(bElse) - y := s.newValue2(cvttab.subf, ft, x, cutoff) - y = s.newValue1(cvttab.cvt2U, tt, 
y) + y := s.newValueOrSfCall2(cvttab.subf, ft, x, cutoff) + y = s.newValueOrSfCall1(cvttab.cvt2U, tt, y) z := cvttab.intValue(s, tt, int64(-cvttab.cutoff)) a1 := s.newValue2(cvttab.or, tt, y, z) s.vars[n] = a1 diff --git a/test/convert5.go b/test/convert5.go new file mode 100644 index 00000000000000..1bd74abdad0715 --- /dev/null +++ b/test/convert5.go @@ -0,0 +1,268 @@ +// run + +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !wasm && !386 && !arm && !mips + +// TODO fix this to work for wasm and 32-bit architectures. +// Doing more than this, however, expands the change. + +package main + +import ( + "fmt" + "runtime" +) + +// This test checks that conversion from floats to (unsigned) 32 and 64-bit +// integers has the same sensible behavior for corner cases, and that the +// conversions to smaller integers agree. Because outliers are platform- +// independent, the "golden test" for smaller integers is more like of +// a "gold-ish test" and subject to change. + +//go:noinline +func id[T any](x T) T { + return x +} + +//go:noinline +func want[T comparable](name string, x, y T) { + if x != y { + _, _, line, _ := runtime.Caller(1) + fmt.Println("FAIL at line", line, "var =", name, "got =", x, "want =", y) + } +} + +//go:noinline +func log[T comparable](name string, x T) { + fmt.Println(name, x) +} + +const ( + // pX = max positive signed X bit + // nX = min negative signed X bit + // uX = max unsigned X bit + // tX = two to the X + p32 = 2147483647 + n32 = -2147483648 + u32 = 4294967295 + p64 = 9223372036854775807 + n64 = -9223372036854775808 + u64 = 18446744073709551615 + t44 = 1 << 44 +) + +func main() { + one := 1.0 + minus1_32 := id(float32(-1.0)) + minus1_64 := id(float64(-1.0)) + p32_plus4k_plus1 := id(float32(p32 + 4096 + 1)) // want this to be precise and fit in 24 bits mantissa + p64_plus4k_plus1 := id(float64(p64 + 4096 + 1)) // want this to be precise and fit in 53 bits mantissa + n32_minus4k := id(float32(n32 - 4096)) + n64_minus4k := id(float64(n64 - 4096)) + inf_32 := id(float32(one / 0)) + inf_64 := id(float64(one / 0)) + ninf_32 := id(float32(-one / 0)) + ninf_64 := id(float64(-one / 0)) + + // int32 conversions + int32Tests := []struct { + name string + input any // Use any to handle both float32 and float64 + expected int32 + }{ + {"minus1_32", minus1_32, -1}, + {"minus1_64", minus1_64, -1}, + {"p32_plus4k_plus1", p32_plus4k_plus1, p32}, + {"p64_plus4k_plus1", p64_plus4k_plus1, p32}, + {"n32_minus4k", n32_minus4k, n32}, + {"n64_minus4k", n64_minus4k, n32}, + {"inf_32", inf_32, p32}, + {"inf_64", inf_64, p32}, + {"ninf_32", ninf_32, n32}, + {"ninf_64", ninf_64, n32}, + } + + for _, test := range int32Tests { + var converted int32 + switch v := test.input.(type) { + case float32: + converted = int32(v) + case float64: + converted = int32(v) + } + want(test.name, converted, test.expected) + } + + // int64 conversions + int64Tests := []struct { + name string + input any + expected int64 + }{ + {"minus1_32", minus1_32, -1}, + {"minus1_64", minus1_64, -1}, + {"p32_plus4k_plus1", p32_plus4k_plus1, p32 + 4096 + 1}, + {"p64_plus4k_plus1", p64_plus4k_plus1, p64}, + {"n32_minus4k", n32_minus4k, n32 - 4096}, + {"n64_minus4k", n64_minus4k, n64}, + {"inf_32", inf_32, p64}, + {"inf_64", inf_64, p64}, + {"ninf_32", ninf_32, n64}, + {"ninf_64", ninf_64, n64}, + } + + for _, test := range int64Tests { + var converted int64 + switch v := test.input.(type) { + case 
float32: + converted = int64(v) + case float64: + converted = int64(v) + } + want(test.name, converted, test.expected) + } + + // uint32 conversions + uint32Tests := []struct { + name string + input any + expected uint32 + }{ + {"minus1_32", minus1_32, 0}, + {"minus1_64", minus1_64, 0}, + {"p32_plus4k_plus1", p32_plus4k_plus1, p32 + 4096 + 1}, + {"p64_plus4k_plus1", p64_plus4k_plus1, u32}, + {"n32_minus4k", n32_minus4k, 0}, + {"n64_minus4k", n64_minus4k, 0}, + {"inf_32", inf_32, u32}, + {"inf_64", inf_64, u32}, + {"ninf_32", ninf_32, 0}, + {"ninf_64", ninf_64, 0}, + } + + for _, test := range uint32Tests { + var converted uint32 + switch v := test.input.(type) { + case float32: + converted = uint32(v) + case float64: + converted = uint32(v) + } + want(test.name, converted, test.expected) + } + + u64_plus4k_plus1_64 := id(float64(u64 + 4096 + 1)) + u64_plust44_plus1_32 := id(float32(u64 + t44 + 1)) + + // uint64 conversions + uint64Tests := []struct { + name string + input any + expected uint64 + }{ + {"minus1_32", minus1_32, 0}, + {"minus1_64", minus1_64, 0}, + {"p32_plus4k_plus1", p32_plus4k_plus1, p32 + 4096 + 1}, + {"p64_plus4k_plus1", p64_plus4k_plus1, p64 + 4096 + 1}, + {"n32_minus4k", n32_minus4k, 0}, + {"n64_minus4k", n64_minus4k, 0}, + {"inf_32", inf_32, u64}, + {"inf_64", inf_64, u64}, + {"ninf_32", ninf_32, 0}, + {"ninf_64", ninf_64, 0}, + {"u64_plus4k_plus1_64", u64_plus4k_plus1_64, u64}, + {"u64_plust44_plus1_32", u64_plust44_plus1_32, u64}, + } + + for _, test := range uint64Tests { + var converted uint64 + switch v := test.input.(type) { + case float32: + converted = uint64(v) + case float64: + converted = uint64(v) + } + want(test.name, converted, test.expected) + } + + // for smaller integer types + // TODO the overflow behavior is dubious, maybe we should fix it to be more sensible, e.g. saturating. + fmt.Println("Below this are 'golden' results to check for consistency across platforms. 
Overflow behavior is not necessarily what we want") + + u8plus2 := id(float64(257)) + p8minus1 := id(float32(126)) + n8plus2 := id(float64(-126)) + n8minusone := id(float32(-129)) + + fmt.Println("\nuint8 conversions") + uint8Tests := []struct { + name string + input any + }{ + {"minus1_32", minus1_32}, + {"minus1_64", minus1_64}, + {"p32_plus4k_plus1", p32_plus4k_plus1}, + {"p64_plus4k_plus1", p64_plus4k_plus1}, + {"n32_minus4k", n32_minus4k}, + {"n64_minus4k", n64_minus4k}, + {"inf_32", inf_32}, + {"inf_64", inf_64}, + {"ninf_32", ninf_32}, + {"ninf_64", ninf_64}, + {"u64_plus4k_plus1_64", u64_plus4k_plus1_64}, + {"u64_plust44_plus1_32", u64_plust44_plus1_32}, + {"u8plus2", u8plus2}, + {"p8minus1", p8minus1}, + {"n8plus2", n8plus2}, + {"n8minusone", n8minusone}, + } + + for _, test := range uint8Tests { + var converted uint8 + switch v := test.input.(type) { + case float32: + converted = uint8(v) + case float64: + converted = uint8(v) + } + log(test.name, converted) + } + + fmt.Println("\nint8 conversions") + int8Tests := []struct { + name string + input any + }{ + {"minus1_32", minus1_32}, + {"minus1_64", minus1_64}, + {"p32_plus4k_plus1", p32_plus4k_plus1}, + {"p64_plus4k_plus1", p64_plus4k_plus1}, + {"n32_minus4k", n32_minus4k}, + {"n64_minus4k", n64_minus4k}, + {"inf_32", inf_32}, + {"inf_64", inf_64}, + {"ninf_32", ninf_32}, + {"ninf_64", ninf_64}, + {"u64_plus4k_plus1_64", u64_plus4k_plus1_64}, + {"u64_plust44_plus1_32", u64_plust44_plus1_32}, + {"u8plus2", u8plus2}, + {"p8minus1", p8minus1}, + {"n8plus2", n8plus2}, + {"n8minusone", n8minusone}, + } + + for _, test := range int8Tests { + var converted int8 + switch v := test.input.(type) { + case float32: + converted = int8(v) + case float64: + converted = int8(v) + } + log(test.name, converted) + } + +} diff --git a/test/convert5.out b/test/convert5.out new file mode 100644 index 00000000000000..47a8af67f961f2 --- /dev/null +++ b/test/convert5.out @@ -0,0 +1,37 @@ +Below this are 'golden' results to check for consistency across platforms. 
Overflow behavior is not necessarily what we want + +uint8 conversions +minus1_32 255 +minus1_64 255 +p32_plus4k_plus1 255 +p64_plus4k_plus1 255 +n32_minus4k 0 +n64_minus4k 0 +inf_32 255 +inf_64 255 +ninf_32 0 +ninf_64 0 +u64_plus4k_plus1_64 255 +u64_plust44_plus1_32 255 +u8plus2 1 +p8minus1 126 +n8plus2 130 +n8minusone 127 + +int8 conversions +minus1_32 -1 +minus1_64 -1 +p32_plus4k_plus1 -1 +p64_plus4k_plus1 -1 +n32_minus4k 0 +n64_minus4k 0 +inf_32 -1 +inf_64 -1 +ninf_32 0 +ninf_64 0 +u64_plus4k_plus1_64 -1 +u64_plust44_plus1_32 -1 +u8plus2 1 +p8minus1 126 +n8plus2 -126 +n8minusone 127 From 78d75b37992be01326b9bd2666195aaba9bf2ae2 Mon Sep 17 00:00:00 2001 From: David Chase Date: Tue, 30 Sep 2025 16:27:32 -0400 Subject: [PATCH 107/152] cmd/compile: make 386 float-to-int conversions match amd64 Change-Id: Iff13b4471f94a6a91d8b159603a9338cb9c89747 Reviewed-on: https://go-review.googlesource.com/c/go/+/708295 Reviewed-by: Keith Randall Reviewed-by: Keith Randall LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/ssa/_gen/386.rules | 6 +- src/cmd/compile/internal/ssa/_gen/386Ops.go | 2 + src/cmd/compile/internal/ssa/opGen.go | 13 +++ src/cmd/compile/internal/ssa/rewrite386.go | 99 ++++++++++++++++++++- src/cmd/compile/internal/x86/ssa.go | 7 ++ src/runtime/asm_386.s | 10 --- src/runtime/stubs_386.go | 1 - src/runtime/vlrt.go | 63 ++++++++++++- test/convert5.go | 4 +- 9 files changed, 182 insertions(+), 23 deletions(-) diff --git a/src/cmd/compile/internal/ssa/_gen/386.rules b/src/cmd/compile/internal/ssa/_gen/386.rules index 5f1150241929eb..4e3d3203c79041 100644 --- a/src/cmd/compile/internal/ssa/_gen/386.rules +++ b/src/cmd/compile/internal/ssa/_gen/386.rules @@ -88,8 +88,10 @@ (Cvt32to32F ...) => (CVTSL2SS ...) (Cvt32to64F ...) => (CVTSL2SD ...) -(Cvt32Fto32 ...) => (CVTTSS2SL ...) -(Cvt64Fto32 ...) => (CVTTSD2SL ...) +(Cvt32Fto32 x) && base.ConvertHash.MatchPos(v.Pos, nil) => (XORL y (SARLconst [31] (ANDL y:(CVTTSS2SL x) (NOTL (MOVLf2i x))))) +(Cvt64Fto32 x) && base.ConvertHash.MatchPos(v.Pos, nil) => (XORL y (SARLconst [31] (ANDL y:(CVTTSD2SL x) (NOTL (MOVLf2i (CVTSD2SS x)))))) +(Cvt32Fto32 x) && !base.ConvertHash.MatchPos(v.Pos, nil) => (CVTTSS2SL x) +(Cvt64Fto32 x) && !base.ConvertHash.MatchPos(v.Pos, nil) => (CVTTSD2SL x) (Cvt32Fto64F ...) => (CVTSS2SD ...) (Cvt64Fto32F ...) => (CVTSD2SS ...) 
diff --git a/src/cmd/compile/internal/ssa/_gen/386Ops.go b/src/cmd/compile/internal/ssa/_gen/386Ops.go index 60599a33abb587..86c2f9c8f080f8 100644 --- a/src/cmd/compile/internal/ssa/_gen/386Ops.go +++ b/src/cmd/compile/internal/ssa/_gen/386Ops.go @@ -342,6 +342,8 @@ func init() { {name: "MOVWLSX", argLength: 1, reg: gp11, asm: "MOVWLSX"}, // sign extend arg0 from int16 to int32 {name: "MOVWLZX", argLength: 1, reg: gp11, asm: "MOVWLZX"}, // zero extend arg0 from int16 to int32 + {name: "MOVLf2i", argLength: 1, reg: fpgp, typ: "UInt32"}, // move 32 bits from float to int reg, zero extend + {name: "MOVLconst", reg: gp01, asm: "MOVL", typ: "UInt32", aux: "Int32", rematerializeable: true}, // 32 low bits of auxint {name: "CVTTSD2SL", argLength: 1, reg: fpgp, asm: "CVTTSD2SL"}, // convert float64 to int32 diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 92adf5341b2574..2a60398bc68711 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -499,6 +499,7 @@ const ( Op386MOVBLZX Op386MOVWLSX Op386MOVWLZX + Op386MOVLf2i Op386MOVLconst Op386CVTTSD2SL Op386CVTTSS2SL @@ -5601,6 +5602,18 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "MOVLf2i", + argLen: 1, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, { name: "MOVLconst", auxType: auxInt32, diff --git a/src/cmd/compile/internal/ssa/rewrite386.go b/src/cmd/compile/internal/ssa/rewrite386.go index 0495438710659e..4845f1e0250ea8 100644 --- a/src/cmd/compile/internal/ssa/rewrite386.go +++ b/src/cmd/compile/internal/ssa/rewrite386.go @@ -3,6 +3,7 @@ package ssa import "math" +import "cmd/compile/internal/base" import "cmd/compile/internal/types" func rewriteValue386(v *Value) bool { @@ -340,8 +341,7 @@ func rewriteValue386(v *Value) bool { v.Op = Op386BSFL return true case OpCvt32Fto32: - v.Op = Op386CVTTSS2SL - return true + return rewriteValue386_OpCvt32Fto32(v) case OpCvt32Fto64F: v.Op = Op386CVTSS2SD return true @@ -352,8 +352,7 @@ func rewriteValue386(v *Value) bool { v.Op = Op386CVTSL2SD return true case OpCvt64Fto32: - v.Op = Op386CVTTSD2SL - return true + return rewriteValue386_OpCvt64Fto32(v) case OpCvt64Fto32F: v.Op = Op386CVTSD2SS return true @@ -7964,6 +7963,98 @@ func rewriteValue386_OpCtz8(v *Value) bool { return true } } +func rewriteValue386_OpCvt32Fto32(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Cvt32Fto32 x) + // cond: base.ConvertHash.MatchPos(v.Pos, nil) + // result: (XORL y (SARLconst [31] (ANDL y:(CVTTSS2SL x) (NOTL (MOVLf2i x))))) + for { + t := v.Type + x := v_0 + if !(base.ConvertHash.MatchPos(v.Pos, nil)) { + break + } + v.reset(Op386XORL) + v.Type = t + v0 := b.NewValue0(v.Pos, Op386SARLconst, t) + v0.AuxInt = int32ToAuxInt(31) + v1 := b.NewValue0(v.Pos, Op386ANDL, t) + y := b.NewValue0(v.Pos, Op386CVTTSS2SL, t) + y.AddArg(x) + v3 := b.NewValue0(v.Pos, Op386NOTL, typ.Int32) + v4 := b.NewValue0(v.Pos, Op386MOVLf2i, typ.UInt32) + v4.AddArg(x) + v3.AddArg(v4) + v1.AddArg2(y, v3) + v0.AddArg(v1) + v.AddArg2(y, v0) + return true + } + // match: (Cvt32Fto32 x) + // cond: !base.ConvertHash.MatchPos(v.Pos, nil) + // result: (CVTTSS2SL x) + for { + t := v.Type + x := v_0 + if !(!base.ConvertHash.MatchPos(v.Pos, nil)) { + break + } + v.reset(Op386CVTTSS2SL) + v.Type = t + v.AddArg(x) + return true + } + return false +} +func rewriteValue386_OpCvt64Fto32(v *Value) bool { + v_0 := 
v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Cvt64Fto32 x) + // cond: base.ConvertHash.MatchPos(v.Pos, nil) + // result: (XORL y (SARLconst [31] (ANDL y:(CVTTSD2SL x) (NOTL (MOVLf2i (CVTSD2SS x)))))) + for { + t := v.Type + x := v_0 + if !(base.ConvertHash.MatchPos(v.Pos, nil)) { + break + } + v.reset(Op386XORL) + v.Type = t + v0 := b.NewValue0(v.Pos, Op386SARLconst, t) + v0.AuxInt = int32ToAuxInt(31) + v1 := b.NewValue0(v.Pos, Op386ANDL, t) + y := b.NewValue0(v.Pos, Op386CVTTSD2SL, t) + y.AddArg(x) + v3 := b.NewValue0(v.Pos, Op386NOTL, typ.Int32) + v4 := b.NewValue0(v.Pos, Op386MOVLf2i, typ.UInt32) + v5 := b.NewValue0(v.Pos, Op386CVTSD2SS, typ.Float32) + v5.AddArg(x) + v4.AddArg(v5) + v3.AddArg(v4) + v1.AddArg2(y, v3) + v0.AddArg(v1) + v.AddArg2(y, v0) + return true + } + // match: (Cvt64Fto32 x) + // cond: !base.ConvertHash.MatchPos(v.Pos, nil) + // result: (CVTTSD2SL x) + for { + t := v.Type + x := v_0 + if !(!base.ConvertHash.MatchPos(v.Pos, nil)) { + break + } + v.reset(Op386CVTTSD2SL) + v.Type = t + v.AddArg(x) + return true + } + return false +} func rewriteValue386_OpDiv8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] diff --git a/src/cmd/compile/internal/x86/ssa.go b/src/cmd/compile/internal/x86/ssa.go index d0aad088496351..2858a81b4b977b 100644 --- a/src/cmd/compile/internal/x86/ssa.go +++ b/src/cmd/compile/internal/x86/ssa.go @@ -538,6 +538,13 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p.To.Type = obj.TYPE_MEM p.To.Reg = v.Args[0].Reg() ssagen.AddAux(&p.To, v) + case ssa.Op386MOVLf2i: + var p *obj.Prog + p = s.Prog(x86.AMOVL) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() case ssa.Op386ADDLconstmodify: sc := v.AuxValAndOff() val := sc.Val() diff --git a/src/runtime/asm_386.s b/src/runtime/asm_386.s index df32e90fda8416..2a6de64f9fbdb3 100644 --- a/src/runtime/asm_386.s +++ b/src/runtime/asm_386.s @@ -1407,16 +1407,6 @@ TEXT runtime·uint32tofloat64(SB),NOSPLIT,$8-12 FMOVDP F0, ret+4(FP) RET -TEXT runtime·float64touint32(SB),NOSPLIT,$12-12 - FMOVD a+0(FP), F0 - FSTCW 0(SP) - FLDCW runtime·controlWord64trunc(SB) - FMOVVP F0, 4(SP) - FLDCW 0(SP) - MOVL 4(SP), AX - MOVL AX, ret+8(FP) - RET - // gcWriteBarrier informs the GC about heap pointer writes. // // gcWriteBarrier returns space in a write barrier buffer which diff --git a/src/runtime/stubs_386.go b/src/runtime/stubs_386.go index a1dd023974a0c6..4f3dcd4fd9b250 100644 --- a/src/runtime/stubs_386.go +++ b/src/runtime/stubs_386.go @@ -6,7 +6,6 @@ package runtime import "unsafe" -func float64touint32(a float64) uint32 func uint32tofloat64(a uint32) float64 // stackcheck checks that SP is in range [g->stack.lo, g->stack.hi). 
diff --git a/src/runtime/vlrt.go b/src/runtime/vlrt.go index 4b12f593c8a8ec..511eb0dd4edf40 100644 --- a/src/runtime/vlrt.go +++ b/src/runtime/vlrt.go @@ -40,10 +40,17 @@ func float64toint64(d float64) (y uint64) { } func float64touint64(d float64) (y uint64) { - _d2v(&y, d) + _d2vu(&y, d) return } +func float64touint32(a float64) uint32 { + if a >= 0xffffffff { + return 0xffffffff + } + return uint32(float64touint64(a)) +} + func int64tofloat64(y int64) float64 { if y < 0 { return -uint64tofloat64(-uint64(y)) @@ -117,12 +124,16 @@ func _d2v(y *uint64, d float64) { } else { /* v = (hi||lo) << -sh */ sh := uint32(-sh) - if sh <= 11 { + if sh <= 10 { ylo = xlo << sh yhi = xhi<<sh | xlo>>(32-sh) } else { - /* overflow */ - yhi = uint32(d) /* causes something awful */ + if x&sign64 != 0 { + *y = 0x8000000000000000 + } else { + *y = 0x7fffffffffffffff + } + return } } if x&sign64 != 0 { @@ -136,6 +147,50 @@ func _d2v(y *uint64, d float64) { *y = uint64(yhi)<<32 | uint64(ylo) } +func _d2vu(y *uint64, d float64) { + x := *(*uint64)(unsafe.Pointer(&d)) + if x&sign64 != 0 { + *y = 0 + return + } + + xhi := uint32(x>>32)&0xfffff | 0x100000 + xlo := uint32(x) + sh := 1075 - int32(uint32(x>>52)&0x7ff) + + var ylo, yhi uint32 + if sh >= 0 { + sh := uint32(sh) + /* v = (hi||lo) >> sh */ + if sh < 32 { + if sh == 0 { + ylo = xlo + yhi = xhi + } else { + ylo = xlo>>sh | xhi<<(32-sh) + yhi = xhi >> sh + } + } else { + if sh == 32 { + ylo = xhi + } else if sh < 64 { + ylo = xhi >> (sh - 32) + } + } + } else { + /* v = (hi||lo) << -sh */ + sh := uint32(-sh) + if sh <= 11 { + ylo = xlo << sh + yhi = xhi<<sh | xlo>>(32-sh) + } else { + /* overflow */ + *y = 0xffffffffffffffff + return + } + } + *y = uint64(yhi)<<32 | uint64(ylo) +} func uint64div(n, d uint64) uint64 { // Check for 32 bit operands if uint32(n>>32) == 0 && uint32(d>>32) == 0 { diff --git a/test/convert5.go b/test/convert5.go index 1bd74abdad0715..57585ef76e1673 100644 --- a/test/convert5.go +++ b/test/convert5.go @@ -4,9 +4,9 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !wasm && !386 && !arm && !mips +//go:build !wasm -// TODO fix this to work for wasm and 32-bit architectures. +// TODO fix this to work for wasm // Doing more than this, however, expands the change. package main From b9f3accdcf973ca41069e22e6859b9436801aae5 Mon Sep 17 00:00:00 2001 From: David Chase Date: Mon, 6 Oct 2025 15:01:03 -0400 Subject: [PATCH 108/152] runtime: adjust softfloat corner cases to match amd64/arm64 This chooses saturating behavior for over/underflow.
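To make the saturating corner cases concrete, the expected values in test/convert5.go above translate into behavior like the following. This is an illustrative program, not part of the CL, and the printed results assume a platform already using the new conversions:

    package main

    import (
    	"fmt"
    	"math"
    )

    func main() {
    	pinf := math.Inf(1)
    	ninf := math.Inf(-1)
    	neg := -1.0

    	fmt.Println(int64(pinf))  // 9223372036854775807 (saturates at max int64)
    	fmt.Println(int64(ninf))  // -9223372036854775808 (saturates at min int64)
    	fmt.Println(uint64(pinf)) // 18446744073709551615 (saturates at max uint64)
    	fmt.Println(uint64(neg))  // 0 (negative inputs clamp to zero)
    	fmt.Println(uint32(neg))  // 0
    }

Before this series the out-of-range results differed between architectures, and between the hardware and softfloat paths, which is exactly what convert5.go checks.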
Change-Id: I96a33ef73feacdafe8310f893de445060bc1a536 Reviewed-on: https://go-review.googlesource.com/c/go/+/709595 Reviewed-by: Keith Randall LUCI-TryBot-Result: Go LUCI Reviewed-by: Keith Randall --- src/runtime/export_test.go | 1 + src/runtime/softfloat64.go | 93 +++++++++++++++++++++++++-------- src/runtime/softfloat64_test.go | 33 +++++++++++- test/convert5.go | 5 ++ 4 files changed, 107 insertions(+), 25 deletions(-) diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go index 9f2fcacc30ee5c..f61cac763cef3d 100644 --- a/src/runtime/export_test.go +++ b/src/runtime/export_test.go @@ -25,6 +25,7 @@ var F32to64 = f32to64 var Fcmp64 = fcmp64 var Fintto64 = fintto64 var F64toint = f64toint +var F64touint = f64touint64 var Entersyscall = entersyscall var Exitsyscall = exitsyscall diff --git a/src/runtime/softfloat64.go b/src/runtime/softfloat64.go index 42ef0092970b3e..7b9409f75be380 100644 --- a/src/runtime/softfloat64.go +++ b/src/runtime/softfloat64.go @@ -26,6 +26,11 @@ const ( neg32 uint32 = 1 << (expbits32 + mantbits32) ) +// If F is not NaN and not Inf, then f == (-1)**sign * mantissa * 2**(exp-52) +// The mantissa and exp are adjusted from their stored representation so +// that the mantissa includes the formerly implicit 1, the exponent bias +// is removed, and denormalized floats to put a 1 in the expected +// (1< 63: // f >= 2^63 - if fs != 0 && fm == 0 { // f == -2^63 - return -1 << 63, true - } + case fi || fe >= 63: // |f| >= 2^63, including infinity if fs != 0 { - return 0, false + return -0x8000_0000_0000_0000, true } - return 0, false + return 0x7fff_ffff_ffff_ffff, true } for fe > int(mantbits64) { @@ -400,12 +406,51 @@ func f64toint(f uint64) (val int64, ok bool) { fm >>= 1 } val = int64(fm) + if val < 0 { + if fs != 0 { + return -0x8000_0000_0000_0000, true + } + return 0x7fff_ffff_ffff_ffff, true + } if fs != 0 { val = -val } return val, true } +// returns saturated-conversion uint64 value of f +// and whether the input was NaN (in which case it +// may not match the "hardware" conversion). 
+func f64touint(f uint64) (val uint64, isNan bool) { + fs, fm, fe, fi, fn := funpack64(f) + + switch { + + case fn: // NaN + return 0xffff_ffff_ffff_ffff, false + + case fs != 0: // all negative, including -Inf, are zero + return 0, true + + case fi || fe >= 64: // positive infinity or f >= 2^64 + return 0xffff_ffff_ffff_ffff, true + + case fe < -1: // f < 0.5 + return 0, true + } + + for fe > int(mantbits64) { + fe-- + fm <<= 1 + } + for fe < int(mantbits64) { + fe++ + fm >>= 1 + } + val = fm + return val, true +} + func fintto64(val int64) (f uint64) { fs := uint64(val) & (1 << 63) mant := uint64(val) @@ -564,6 +609,12 @@ func fint64to64(x int64) uint64 { func f32toint32(x uint32) int32 { val, _ := f64toint(f32to64(x)) + if val >= 0x7fffffff { + return 0x7fffffff + } + if val < -0x80000000 { + return -0x80000000 + } return int32(val) } @@ -574,6 +625,12 @@ func f32toint64(x uint32) int64 { func f64toint32(x uint64) int32 { val, _ := f64toint(x) + if val >= 0x7fffffff { + return 0x7fffffff + } + if val < -0x80000000 { + return -0x80000000 + } return int32(val) } @@ -583,23 +640,13 @@ func f64toint64(x uint64) int64 { } func f64touint64(x uint64) uint64 { - var m uint64 = 0x43e0000000000000 // float64 1<<63 - if fgt64(m, x) { - return uint64(f64toint64(x)) - } - y := fadd64(x, -m) - z := uint64(f64toint64(y)) - return z | (1 << 63) + val, _ := f64touint(x) + return val } func f32touint64(x uint32) uint64 { - var m uint32 = 0x5f000000 // float32 1<<63 - if fgt32(m, x) { - return uint64(f32toint64(x)) - } - y := fadd32(x, -m) - z := uint64(f32toint64(y)) - return z | (1 << 63) + val, _ := f64touint(f32to64(x)) + return val } func fuint64to64(x uint64) uint64 { diff --git a/src/runtime/softfloat64_test.go b/src/runtime/softfloat64_test.go index 3f53e8bc55810c..233d5e01c0ea60 100644 --- a/src/runtime/softfloat64_test.go +++ b/src/runtime/softfloat64_test.go @@ -28,6 +28,15 @@ func div(x, y float64) float64 { return x / y } func TestFloat64(t *testing.T) { base := []float64{ 0, + 1, + -9223372036854775808, + -9223372036854775808 + 4096, + 18446744073709551615, + 18446744073709551615 + 1, + 18446744073709551615 - 1, + 9223372036854775808 + 4096, + 0.5, + 0.75, math.Copysign(0, -1), -1, 1, @@ -35,6 +44,8 @@ func TestFloat64(t *testing.T) { math.Inf(+1), math.Inf(-1), 0.1, + 0.5, + 0.75, 1.5, 1.9999999999999998, // all 1s mantissa 1.3333333333333333, // 1.010101010101... @@ -70,7 +81,7 @@ func TestFloat64(t *testing.T) { 1e+307, 1e+308, } - all := make([]float64, 200) + all := make([]float64, 250) copy(all, base) for i := len(base); i < len(all); i++ { all[i] = rand.NormFloat64() @@ -82,6 +93,7 @@ func TestFloat64(t *testing.T) { test(t, "*", mul, fop(Fmul64), all) test(t, "/", div, fop(Fdiv64), all) } + } // 64 -hw-> 32 -hw-> 64 @@ -104,6 +116,11 @@ func hwint64(f float64) float64 { return float64(int64(f)) } +// float64 -hw-> uint64 -hw-> float64 +func hwuint64(f float64) float64 { + return float64(uint64(f)) +} + // float64 -hw-> int32 -hw-> float64 func hwint32(f float64) float64 { return float64(int32(f)) @@ -113,13 +130,23 @@ func hwint32(f float64) float64 { func toint64sw(f float64) float64 { i, ok := F64toint(math.Float64bits(f)) if !ok { - // There's no right answer for out of range. + // There's no right answer for NaN. // Match the hardware to pass the test. i = int64(f) } return float64(i) } +func touint64sw(f float64) float64 { + i := F64touint(math.Float64bits(f)) + if f != f { + // There's no right answer for NaN. + // Match the hardware to pass the test. 
+ i = uint64(f) + } + return float64(i) +} + // float64 -hw-> int64 -sw-> float64 func fromint64sw(f float64) float64 { return math.Float64frombits(Fintto64(int64(f))) @@ -150,6 +177,7 @@ func test(t *testing.T, op string, hw, sw func(float64, float64) float64, all [] testu(t, "to32", trunc32, to32sw, h) testu(t, "to64", trunc32, to64sw, h) testu(t, "toint64", hwint64, toint64sw, h) + testu(t, "touint64", hwuint64, touint64sw, h) testu(t, "fromint64", hwint64, fromint64sw, h) testcmp(t, f, h) testcmp(t, h, f) @@ -163,6 +191,7 @@ func testu(t *testing.T, op string, hw, sw func(float64) float64, v float64) { h := hw(v) s := sw(v) if !same(h, s) { + s = sw(v) // debug me err(t, "%s %g = sw %g, hw %g\n", op, v, s, h) } } diff --git a/test/convert5.go b/test/convert5.go index 57585ef76e1673..27aa7867f42824 100644 --- a/test/convert5.go +++ b/test/convert5.go @@ -62,6 +62,8 @@ func main() { p64_plus4k_plus1 := id(float64(p64 + 4096 + 1)) // want this to be precise and fit in 53 bits mantissa n32_minus4k := id(float32(n32 - 4096)) n64_minus4k := id(float64(n64 - 4096)) + n32_plus4k := id(float32(n32 + 4096)) + n64_plus4k := id(float64(n64 + 4096)) inf_32 := id(float32(one / 0)) inf_64 := id(float64(one / 0)) ninf_32 := id(float32(-one / 0)) @@ -79,6 +81,7 @@ func main() { {"p64_plus4k_plus1", p64_plus4k_plus1, p32}, {"n32_minus4k", n32_minus4k, n32}, {"n64_minus4k", n64_minus4k, n32}, + {"n32_plus4k", n32_plus4k, n32 + 4096}, {"inf_32", inf_32, p32}, {"inf_64", inf_64, p32}, {"ninf_32", ninf_32, n32}, @@ -108,6 +111,8 @@ func main() { {"p64_plus4k_plus1", p64_plus4k_plus1, p64}, {"n32_minus4k", n32_minus4k, n32 - 4096}, {"n64_minus4k", n64_minus4k, n64}, + {"n32_plus4k", n32_plus4k, n32 + 4096}, + {"n64_plus4k", n64_plus4k, n64 + 4096}, {"inf_32", inf_32, p64}, {"inf_64", inf_64, p64}, {"ninf_32", ninf_32, n64}, From 8d810286b3121b601480426159c04d178fa29166 Mon Sep 17 00:00:00 2001 From: David Chase Date: Wed, 1 Oct 2025 11:29:13 -0400 Subject: [PATCH 109/152] cmd/compile: make wasm match other platforms for FP->int32/64 conversions this change is for overflows, so that all the platforms agree. Change-Id: I9f459353615bf24ef8a5de641063d9ce34986241 Reviewed-on: https://go-review.googlesource.com/c/go/+/708358 LUCI-TryBot-Result: Go LUCI Reviewed-by: Keith Randall Reviewed-by: Keith Randall --- src/cmd/compile/internal/ssa/_gen/Wasm.rules | 9 ++-- src/cmd/compile/internal/ssa/_gen/WasmOps.go | 17 ++++-- src/cmd/compile/internal/ssa/opGen.go | 56 ++++++++++++++++++++ src/cmd/compile/internal/ssa/rewriteWasm.go | 8 +-- src/cmd/compile/internal/wasm/ssa.go | 11 ++++ test/convert5.go | 5 -- 6 files changed, 89 insertions(+), 17 deletions(-) diff --git a/src/cmd/compile/internal/ssa/_gen/Wasm.rules b/src/cmd/compile/internal/ssa/_gen/Wasm.rules index f632a01109f764..bb123bc3cc48e7 100644 --- a/src/cmd/compile/internal/ssa/_gen/Wasm.rules +++ b/src/cmd/compile/internal/ssa/_gen/Wasm.rules @@ -76,13 +76,14 @@ (Cvt32Uto(64|32)F x) => (F(64|32)ConvertI64U (ZeroExt32to64 x)) (Cvt64Uto(64|32)F ...) => (F(64|32)ConvertI64U ...) -(Cvt32Fto32 ...) => (I64TruncSatF32S ...) +(Cvt32Fto32 ...) => (I32TruncSatF32S ...) (Cvt32Fto64 ...) => (I64TruncSatF32S ...) -(Cvt64Fto32 ...) => (I64TruncSatF64S ...) +(Cvt64Fto32 ...) => (I32TruncSatF64S ...) (Cvt64Fto64 ...) => (I64TruncSatF64S ...) -(Cvt32Fto32U ...) => (I64TruncSatF32U ...) + +(Cvt32Fto32U ...) => (I32TruncSatF32U ...) (Cvt32Fto64U ...) => (I64TruncSatF32U ...) -(Cvt64Fto32U ...) => (I64TruncSatF64U ...) +(Cvt64Fto32U ...) => (I32TruncSatF64U ...) 
(Cvt64Fto64U ...) => (I64TruncSatF64U ...) (Cvt32Fto64F ...) => (F64PromoteF32 ...) diff --git a/src/cmd/compile/internal/ssa/_gen/WasmOps.go b/src/cmd/compile/internal/ssa/_gen/WasmOps.go index 45bbed5f520201..b63f28a2193a2f 100644 --- a/src/cmd/compile/internal/ssa/_gen/WasmOps.go +++ b/src/cmd/compile/internal/ssa/_gen/WasmOps.go @@ -222,10 +222,19 @@ func init() { {name: "F64Mul", asm: "F64Mul", argLength: 2, reg: fp64_21, typ: "Float64"}, // arg0 * arg1 {name: "F64Div", asm: "F64Div", argLength: 2, reg: fp64_21, typ: "Float64"}, // arg0 / arg1 - {name: "I64TruncSatF64S", asm: "I64TruncSatF64S", argLength: 1, reg: regInfo{inputs: []regMask{fp64}, outputs: []regMask{gp}}, typ: "Int64"}, // truncates the float arg0 to a signed integer (saturating) - {name: "I64TruncSatF64U", asm: "I64TruncSatF64U", argLength: 1, reg: regInfo{inputs: []regMask{fp64}, outputs: []regMask{gp}}, typ: "Int64"}, // truncates the float arg0 to an unsigned integer (saturating) - {name: "I64TruncSatF32S", asm: "I64TruncSatF32S", argLength: 1, reg: regInfo{inputs: []regMask{fp32}, outputs: []regMask{gp}}, typ: "Int64"}, // truncates the float arg0 to a signed integer (saturating) - {name: "I64TruncSatF32U", asm: "I64TruncSatF32U", argLength: 1, reg: regInfo{inputs: []regMask{fp32}, outputs: []regMask{gp}}, typ: "Int64"}, // truncates the float arg0 to an unsigned integer (saturating) + {name: "I64TruncSatF64S", asm: "I64TruncSatF64S", argLength: 1, reg: regInfo{inputs: []regMask{fp64}, outputs: []regMask{gp}}, typ: "Int64"}, // truncates the float arg0 to a signed integer (saturating) + {name: "I64TruncSatF64U", asm: "I64TruncSatF64U", argLength: 1, reg: regInfo{inputs: []regMask{fp64}, outputs: []regMask{gp}}, typ: "Uint64"}, // truncates the float arg0 to an unsigned integer (saturating) + {name: "I64TruncSatF32S", asm: "I64TruncSatF32S", argLength: 1, reg: regInfo{inputs: []regMask{fp32}, outputs: []regMask{gp}}, typ: "Int64"}, // truncates the float arg0 to a signed integer (saturating) + {name: "I64TruncSatF32U", asm: "I64TruncSatF32U", argLength: 1, reg: regInfo{inputs: []regMask{fp32}, outputs: []regMask{gp}}, typ: "Uint64"}, // truncates the float arg0 to an unsigned integer (saturating) + + // It appears to be wasm convention that everything lands in a 64-bit register; + // the WASM instructions for these operations produce 32-bit width results, but + // wasm/ssa.go widens them appropriately to 64-bit results. 
+ {name: "I32TruncSatF64S", asm: "I32TruncSatF64S", argLength: 1, reg: regInfo{inputs: []regMask{fp64}, outputs: []regMask{gp}}, typ: "Int64"}, // truncates the float arg0 to a signed integer (saturating) + {name: "I32TruncSatF64U", asm: "I32TruncSatF64U", argLength: 1, reg: regInfo{inputs: []regMask{fp64}, outputs: []regMask{gp}}, typ: "Uint64"}, // truncates the float arg0 to an unsigned integer (saturating) + {name: "I32TruncSatF32S", asm: "I32TruncSatF32S", argLength: 1, reg: regInfo{inputs: []regMask{fp32}, outputs: []regMask{gp}}, typ: "Int64"}, // truncates the float arg0 to a signed integer (saturating) + {name: "I32TruncSatF32U", asm: "I32TruncSatF32U", argLength: 1, reg: regInfo{inputs: []regMask{fp32}, outputs: []regMask{gp}}, typ: "Uint64"}, // truncates the float arg0 to an unsigned integer (saturating) + {name: "F32ConvertI64S", asm: "F32ConvertI64S", argLength: 1, reg: regInfo{inputs: []regMask{gp}, outputs: []regMask{fp32}}, typ: "Float32"}, // converts the signed integer arg0 to a float {name: "F32ConvertI64U", asm: "F32ConvertI64U", argLength: 1, reg: regInfo{inputs: []regMask{gp}, outputs: []regMask{fp32}}, typ: "Float32"}, // converts the unsigned integer arg0 to a float {name: "F64ConvertI64S", asm: "F64ConvertI64S", argLength: 1, reg: regInfo{inputs: []regMask{gp}, outputs: []regMask{fp64}}, typ: "Float64"}, // converts the signed integer arg0 to a float diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 2a60398bc68711..32ba3a89856ed4 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -2980,6 +2980,10 @@ const ( OpWasmI64TruncSatF64U OpWasmI64TruncSatF32S OpWasmI64TruncSatF32U + OpWasmI32TruncSatF64S + OpWasmI32TruncSatF64U + OpWasmI32TruncSatF32S + OpWasmI32TruncSatF32U OpWasmF32ConvertI64S OpWasmF32ConvertI64U OpWasmF64ConvertI64S @@ -40296,6 +40300,58 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "I32TruncSatF64S", + argLen: 1, + asm: wasm.AI32TruncSatF64S, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I32TruncSatF64U", + argLen: 1, + asm: wasm.AI32TruncSatF64U, + reg: regInfo{ + inputs: []inputInfo{ + {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I32TruncSatF32S", + argLen: 1, + asm: wasm.AI32TruncSatF32S, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "I32TruncSatF32U", + argLen: 1, + asm: wasm.AI32TruncSatF32U, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, { name: "F32ConvertI64S", argLen: 1, diff --git a/src/cmd/compile/internal/ssa/rewriteWasm.go b/src/cmd/compile/internal/ssa/rewriteWasm.go index a164a6eee555b9..dd73d3a5e705ed 100644 --- a/src/cmd/compile/internal/ssa/rewriteWasm.go +++ b/src/cmd/compile/internal/ssa/rewriteWasm.go @@ -120,10 +120,10 @@ func rewriteValueWasm(v *Value) bool { v.Op = 
OpWasmI64Ctz return true case OpCvt32Fto32: - v.Op = OpWasmI64TruncSatF32S + v.Op = OpWasmI32TruncSatF32S return true case OpCvt32Fto32U: - v.Op = OpWasmI64TruncSatF32U + v.Op = OpWasmI32TruncSatF32U return true case OpCvt32Fto64: v.Op = OpWasmI64TruncSatF32S @@ -143,13 +143,13 @@ func rewriteValueWasm(v *Value) bool { case OpCvt32to64F: return rewriteValueWasm_OpCvt32to64F(v) case OpCvt64Fto32: - v.Op = OpWasmI64TruncSatF64S + v.Op = OpWasmI32TruncSatF64S return true case OpCvt64Fto32F: v.Op = OpWasmF32DemoteF64 return true case OpCvt64Fto32U: - v.Op = OpWasmI64TruncSatF64U + v.Op = OpWasmI32TruncSatF64U return true case OpCvt64Fto64: v.Op = OpWasmI64TruncSatF64S diff --git a/src/cmd/compile/internal/wasm/ssa.go b/src/cmd/compile/internal/wasm/ssa.go index 1e3b318e8c9fe0..8ebc90288572a3 100644 --- a/src/cmd/compile/internal/wasm/ssa.go +++ b/src/cmd/compile/internal/wasm/ssa.go @@ -430,6 +430,17 @@ func ssaGenValueOnStack(s *ssagen.State, v *ssa.Value, extend bool) { getValue64(s, v.Args[0]) s.Prog(v.Op.Asm()) + // 32-bit integer conversion results + case ssa.OpWasmI32TruncSatF32S, ssa.OpWasmI32TruncSatF64S: + getValue64(s, v.Args[0]) + s.Prog(v.Op.Asm()) + s.Prog(wasm.AI64ExtendI32S) + + case ssa.OpWasmI32TruncSatF32U, ssa.OpWasmI32TruncSatF64U: + getValue64(s, v.Args[0]) + s.Prog(v.Op.Asm()) + s.Prog(wasm.AI64ExtendI32U) + case ssa.OpWasmF32DemoteF64: getValue64(s, v.Args[0]) s.Prog(v.Op.Asm()) diff --git a/test/convert5.go b/test/convert5.go index 27aa7867f42824..4688fae85587d4 100644 --- a/test/convert5.go +++ b/test/convert5.go @@ -4,11 +4,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !wasm - -// TODO fix this to work for wasm -// Doing more than this, however, expands the change. - package main import ( From 9db7e30bb42eed9912f5e7e9e3959f3b38879d5b Mon Sep 17 00:00:00 2001 From: Roland Shoemaker Date: Wed, 8 Oct 2025 17:13:12 -0700 Subject: [PATCH 110/152] net/url: allow IP-literals with IPv4-mapped IPv6 addresses The security fix we applied in CL709857 was overly broad. It applied rules from RFC 2732, which disallowed IPv4-mapped IPv6 addresses, but these were later allowed in RFC 3986, which is the canonical URI syntax RFC. Revert the portion of CL709857 which restricted IPv4-mapped addresses, and update the related tests. Fixes #75815 Change-Id: I3192f2275ad5c386f5c15006a6716bdb5282919d Reviewed-on: https://go-review.googlesource.com/c/go/+/710375 LUCI-TryBot-Result: Go LUCI Reviewed-by: Ethan Lee Auto-Submit: Roland Shoemaker --- src/net/url/url.go | 6 +++--- src/net/url/url_test.go | 23 ++++++++++++----------- 2 files changed, 15 insertions(+), 14 deletions(-) diff --git a/src/net/url/url.go b/src/net/url/url.go index 292bc6bb12187a..6afa30f162bd25 100644 --- a/src/net/url/url.go +++ b/src/net/url/url.go @@ -689,13 +689,13 @@ func parseHost(host string) (string, error) { // Per RFC 3986, only a host identified by a valid // IPv6 address can be enclosed by square brackets. - // This excludes any IPv4 or IPv4-mapped addresses. + // This excludes any IPv4, but notably not IPv4-mapped addresses. 
addr, err := netip.ParseAddr(unescapedHostname) if err != nil { return "", fmt.Errorf("invalid host: %w", err) } - if addr.Is4() || addr.Is4In6() { - return "", errors.New("invalid IPv6 host") + if addr.Is4() { + return "", errors.New("invalid IP-literal") } return "[" + unescapedHostname + "]" + unescapedColonPort, nil } else if i := strings.LastIndex(host, ":"); i != -1 { diff --git a/src/net/url/url_test.go b/src/net/url/url_test.go index 32065583f27dd7..6084facacc0519 100644 --- a/src/net/url/url_test.go +++ b/src/net/url/url_test.go @@ -726,7 +726,7 @@ var parseRequestURLTests = []struct { {"https://[2001:db8::1]/path", true}, // compressed IPv6 address with path {"https://[fe80::1%25eth0]/path?query=1", true}, // link-local with zone, path, and query - {"https://[::ffff:192.0.2.1]", false}, + {"https://[::ffff:192.0.2.1]", true}, {"https://[:1] ", false}, {"https://[1:2:3:4:5:6:7:8:9]", false}, {"https://[1::1::1]", false}, @@ -1672,16 +1672,17 @@ func TestParseErrors(t *testing.T) { {"cache_object:foo/bar", true}, {"cache_object/:foo/bar", false}, - {"http://[192.168.0.1]/", true}, // IPv4 in brackets - {"http://[192.168.0.1]:8080/", true}, // IPv4 in brackets with port - {"http://[::ffff:192.168.0.1]/", true}, // IPv4-mapped IPv6 in brackets - {"http://[::ffff:192.168.0.1]:8080/", true}, // IPv4-mapped IPv6 in brackets with port - {"http://[::ffff:c0a8:1]/", true}, // IPv4-mapped IPv6 in brackets (hex) - {"http://[not-an-ip]/", true}, // invalid IP string in brackets - {"http://[fe80::1%foo]/", true}, // invalid zone format in brackets - {"http://[fe80::1", true}, // missing closing bracket - {"http://fe80::1]/", true}, // missing opening bracket - {"http://[test.com]/", true}, // domain name in brackets + {"http://[192.168.0.1]/", true}, // IPv4 in brackets + {"http://[192.168.0.1]:8080/", true}, // IPv4 in brackets with port + {"http://[::ffff:192.168.0.1]/", false}, // IPv4-mapped IPv6 in brackets + {"http://[::ffff:192.168.0.1000]/", true}, // Out of range IPv4-mapped IPv6 in brackets + {"http://[::ffff:192.168.0.1]:8080/", false}, // IPv4-mapped IPv6 in brackets with port + {"http://[::ffff:c0a8:1]/", false}, // IPv4-mapped IPv6 in brackets (hex) + {"http://[not-an-ip]/", true}, // invalid IP string in brackets + {"http://[fe80::1%foo]/", true}, // invalid zone format in brackets + {"http://[fe80::1", true}, // missing closing bracket + {"http://fe80::1]/", true}, // missing opening bracket + {"http://[test.com]/", true}, // domain name in brackets } for _, tt := range tests { u, err := Parse(tt.in) From 80f3bb5516bb12233a15167bc8ba9d39cca5b535 Mon Sep 17 00:00:00 2001 From: Michael Anthony Knyszek Date: Thu, 9 Oct 2025 21:13:59 +0000 Subject: [PATCH 111/152] reflect: remove timeout in TestChanOfGC This test has an arbitrary 5 second timeout, and this seems to fire on Darwin with mayMoreStackMove enabled (which is slow). Just rely on the regular test timeout instead of this arbitrary shorter timeout to eliminate the possibility that the test is just too slow. On my Linux VM, I can get this test to take up to 2 seconds with mayMoreStackMove set on all the same packages dist does, so this failure mode is actually plausible. Fixes #75742. 
Change-Id: Iebcc859cab26e9205b57b869690162a9a424dfce
Reviewed-on: https://go-review.googlesource.com/c/go/+/710618
Reviewed-by: Cherry Mui
LUCI-TryBot-Result: Go LUCI
Auto-Submit: Michael Knyszek
---
 src/reflect/all_test.go | 13 -------------
 1 file changed, 13 deletions(-)

diff --git a/src/reflect/all_test.go b/src/reflect/all_test.go
index 2a8c5206624146..c6610b1968a834 100644
--- a/src/reflect/all_test.go
+++ b/src/reflect/all_test.go
@@ -6198,19 +6198,6 @@ func TestChanOfDir(t *testing.T) {
 }
 
 func TestChanOfGC(t *testing.T) {
-	done := make(chan bool, 1)
-	go func() {
-		select {
-		case <-done:
-		case <-time.After(5 * time.Second):
-			panic("deadlock in TestChanOfGC")
-		}
-	}()
-
-	defer func() {
-		done <- true
-	}()
-
 	type T *uintptr
 	tt := TypeOf(T(nil))
 	ct := ChanOf(BothDir, tt)

From 19a30ea3f250f8d6258f3e08aa3561f1193fdd38 Mon Sep 17 00:00:00 2001
From: Michael Matloob
Date: Mon, 29 Sep 2025 17:26:49 -0400
Subject: [PATCH 112/152] cmd/compile: call generated size-specialized malloc functions directly

This change creates calls to size-specialized malloc functions instead
of calls to newObject when we know the size of the allocation at
compilation time.

Most of it is a matter of calling the newObject function (which will
create calls to the size-specialized functions) rather than the
newObjectNonSpecialized function (which won't). In the newHeapaddr,
small, non-pointer case, we'll create a non-specialized newObject and
transform that into the appropriate size-specialized function when we
produce the mallocgc in flushPendingHeapAllocations.

We have to update some of the rewrites in generic.rules to also apply
to the size-specialized functions when they apply to newObject.

The messiest thing is that we have to adjust the offset we use to save
the memory profiler stack, because the depth of the call to
profilealloc is two frames fewer in the size-specialized malloc
functions compared to when newObject calls mallocgc. A bunch of tests
have been adjusted to account for that.

Change-Id: I6a6a6964c9037fb6719e392c4a498ed700b617d7 Reviewed-on: https://go-review.googlesource.com/c/go/+/707856 Reviewed-by: Michael Knyszek Reviewed-by: Michael Matloob LUCI-TryBot-Result: Go LUCI Reviewed-by: Keith Randall --- src/cmd/compile/internal/ir/symtab.go | 85 ++++++++++--------- .../compile/internal/ssa/_gen/generic.rules | 24 +++--- src/cmd/compile/internal/ssa/rewrite.go | 20 +++++ .../compile/internal/ssa/rewritegeneric.go | 42 +++++---- src/cmd/compile/internal/ssa/writebarrier.go | 13 ++- src/cmd/compile/internal/ssagen/ssa.go | 81 ++++++++++++++++-- src/cmd/dist/buildtool.go | 1 + src/runtime/mprof.go | 4 +- src/runtime/pprof/mprof_test.go | 8 +- src/runtime/pprof/pprof_test.go | 4 +- test/codegen/strings.go | 2 +- test/fixedbugs/issue15747.go | 4 +- test/heapsampling.go | 24 +++--- test/live.go | 10 +-- test/live_regabi.go | 10 +-- test/uintptrescapes2.go | 12 +-- 16 files changed, 228 insertions(+), 116 deletions(-) diff --git a/src/cmd/compile/internal/ir/symtab.go b/src/cmd/compile/internal/ir/symtab.go index ee0f52fbf3f3b8..f8eb4578809312 100644 --- a/src/cmd/compile/internal/ir/symtab.go +++ b/src/cmd/compile/internal/ir/symtab.go @@ -13,47 +13,50 @@ import ( var Syms symsStruct type symsStruct struct { - AssertE2I *obj.LSym - AssertE2I2 *obj.LSym - Asanread *obj.LSym - Asanwrite *obj.LSym - CgoCheckMemmove *obj.LSym - CgoCheckPtrWrite *obj.LSym - CheckPtrAlignment *obj.LSym - Deferproc *obj.LSym - Deferprocat *obj.LSym - DeferprocStack *obj.LSym - Deferreturn *obj.LSym - Duffcopy *obj.LSym - Duffzero *obj.LSym - GCWriteBarrier [8]*obj.LSym - Goschedguarded *obj.LSym - Growslice *obj.LSym - InterfaceSwitch *obj.LSym - MallocGC *obj.LSym - Memmove *obj.LSym - Msanread *obj.LSym - Msanwrite *obj.LSym - Msanmove *obj.LSym - Newobject *obj.LSym - Newproc *obj.LSym - PanicBounds *obj.LSym - PanicExtend *obj.LSym - Panicdivide *obj.LSym - Panicshift *obj.LSym - PanicdottypeE *obj.LSym - PanicdottypeI *obj.LSym - Panicnildottype *obj.LSym - Panicoverflow *obj.LSym - Racefuncenter *obj.LSym - Racefuncexit *obj.LSym - Raceread *obj.LSym - Racereadrange *obj.LSym - Racewrite *obj.LSym - Racewriterange *obj.LSym - TypeAssert *obj.LSym - WBZero *obj.LSym - WBMove *obj.LSym + AssertE2I *obj.LSym + AssertE2I2 *obj.LSym + Asanread *obj.LSym + Asanwrite *obj.LSym + CgoCheckMemmove *obj.LSym + CgoCheckPtrWrite *obj.LSym + CheckPtrAlignment *obj.LSym + Deferproc *obj.LSym + Deferprocat *obj.LSym + DeferprocStack *obj.LSym + Deferreturn *obj.LSym + Duffcopy *obj.LSym + Duffzero *obj.LSym + GCWriteBarrier [8]*obj.LSym + Goschedguarded *obj.LSym + Growslice *obj.LSym + InterfaceSwitch *obj.LSym + MallocGC *obj.LSym + MallocGCSmallNoScan [27]*obj.LSym + MallocGCSmallScanNoHeader [27]*obj.LSym + MallocGCTiny [16]*obj.LSym + Memmove *obj.LSym + Msanread *obj.LSym + Msanwrite *obj.LSym + Msanmove *obj.LSym + Newobject *obj.LSym + Newproc *obj.LSym + PanicBounds *obj.LSym + PanicExtend *obj.LSym + Panicdivide *obj.LSym + Panicshift *obj.LSym + PanicdottypeE *obj.LSym + PanicdottypeI *obj.LSym + Panicnildottype *obj.LSym + Panicoverflow *obj.LSym + Racefuncenter *obj.LSym + Racefuncexit *obj.LSym + Raceread *obj.LSym + Racereadrange *obj.LSym + Racewrite *obj.LSym + Racewriterange *obj.LSym + TypeAssert *obj.LSym + WBZero *obj.LSym + WBMove *obj.LSym // Wasm SigPanic *obj.LSym Staticuint64s *obj.LSym diff --git a/src/cmd/compile/internal/ssa/_gen/generic.rules b/src/cmd/compile/internal/ssa/_gen/generic.rules index 048d9958dc7440..23ce21a8b2cc28 100644 --- 
a/src/cmd/compile/internal/ssa/_gen/generic.rules +++ b/src/cmd/compile/internal/ssa/_gen/generic.rules @@ -2065,28 +2065,32 @@ // for rewriting results of some late-expanded rewrites (below) (SelectN [n] m:(MakeResult ___)) => m.Args[n] +// TODO(matloob): Try out having non-zeroing mallocs for prointerless +// memory, and leaving the zeroing here. Then the compiler can remove +// the zeroing if the user has explicit writes to the whole object. + // for late-expanded calls, recognize newobject and remove zeroing and nilchecks -(Zero (SelectN [0] call:(StaticLECall _ _)) mem:(SelectN [1] call)) - && isSameCall(call.Aux, "runtime.newobject") +(Zero (SelectN [0] call:(StaticLECall ___)) mem:(SelectN [1] call)) + && isMalloc(call.Aux) => mem -(Store (SelectN [0] call:(StaticLECall _ _)) x mem:(SelectN [1] call)) +(Store (SelectN [0] call:(StaticLECall ___)) x mem:(SelectN [1] call)) && isConstZero(x) - && isSameCall(call.Aux, "runtime.newobject") + && isMalloc(call.Aux) => mem -(Store (OffPtr (SelectN [0] call:(StaticLECall _ _))) x mem:(SelectN [1] call)) +(Store (OffPtr (SelectN [0] call:(StaticLECall ___))) x mem:(SelectN [1] call)) && isConstZero(x) - && isSameCall(call.Aux, "runtime.newobject") + && isMalloc(call.Aux) => mem -(NilCheck ptr:(SelectN [0] call:(StaticLECall _ _)) _) - && isSameCall(call.Aux, "runtime.newobject") +(NilCheck ptr:(SelectN [0] call:(StaticLECall ___)) _) + && isMalloc(call.Aux) && warnRule(fe.Debug_checknil(), v, "removed nil check") => ptr -(NilCheck ptr:(OffPtr (SelectN [0] call:(StaticLECall _ _))) _) - && isSameCall(call.Aux, "runtime.newobject") +(NilCheck ptr:(OffPtr (SelectN [0] call:(StaticLECall ___))) _) + && isMalloc(call.Aux) && warnRule(fe.Debug_checknil(), v, "removed nil check") => ptr diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go index 47f225c7aeb7c0..f02019df384f5f 100644 --- a/src/cmd/compile/internal/ssa/rewrite.go +++ b/src/cmd/compile/internal/ssa/rewrite.go @@ -456,6 +456,26 @@ func isSameCall(aux Aux, name string) bool { return fn != nil && fn.String() == name } +func isMalloc(aux Aux) bool { + return isNewObject(aux) || isSpecializedMalloc(aux) +} + +func isNewObject(aux Aux) bool { + fn := aux.(*AuxCall).Fn + return fn != nil && fn.String() == "runtime.newobject" +} + +func isSpecializedMalloc(aux Aux) bool { + fn := aux.(*AuxCall).Fn + if fn == nil { + return false + } + name := fn.String() + return strings.HasPrefix(name, "runtime.mallocgcSmallNoScanSC") || + strings.HasPrefix(name, "runtime.mallocgcSmallScanNoHeaderSC") || + strings.HasPrefix(name, "runtime.mallocTiny") +} + // canLoadUnaligned reports if the architecture supports unaligned load operations. 
func canLoadUnaligned(c *Config) bool { return c.ctxt.Arch.Alignment == 1 diff --git a/src/cmd/compile/internal/ssa/rewritegeneric.go b/src/cmd/compile/internal/ssa/rewritegeneric.go index 37ba324d86cec3..c36ecc1cc60fcc 100644 --- a/src/cmd/compile/internal/ssa/rewritegeneric.go +++ b/src/cmd/compile/internal/ssa/rewritegeneric.go @@ -21318,8 +21318,8 @@ func rewriteValuegeneric_OpNilCheck(v *Value) bool { v.copyOf(ptr) return true } - // match: (NilCheck ptr:(SelectN [0] call:(StaticLECall _ _)) _) - // cond: isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check") + // match: (NilCheck ptr:(SelectN [0] call:(StaticLECall ___)) _) + // cond: isMalloc(call.Aux) && warnRule(fe.Debug_checknil(), v, "removed nil check") // result: ptr for { ptr := v_0 @@ -21327,14 +21327,17 @@ func rewriteValuegeneric_OpNilCheck(v *Value) bool { break } call := ptr.Args[0] - if call.Op != OpStaticLECall || len(call.Args) != 2 || !(isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check")) { + if call.Op != OpStaticLECall { + break + } + if !(isMalloc(call.Aux) && warnRule(fe.Debug_checknil(), v, "removed nil check")) { break } v.copyOf(ptr) return true } - // match: (NilCheck ptr:(OffPtr (SelectN [0] call:(StaticLECall _ _))) _) - // cond: isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check") + // match: (NilCheck ptr:(OffPtr (SelectN [0] call:(StaticLECall ___))) _) + // cond: isMalloc(call.Aux) && warnRule(fe.Debug_checknil(), v, "removed nil check") // result: ptr for { ptr := v_0 @@ -21346,7 +21349,10 @@ func rewriteValuegeneric_OpNilCheck(v *Value) bool { break } call := ptr_0.Args[0] - if call.Op != OpStaticLECall || len(call.Args) != 2 || !(isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check")) { + if call.Op != OpStaticLECall { + break + } + if !(isMalloc(call.Aux) && warnRule(fe.Debug_checknil(), v, "removed nil check")) { break } v.copyOf(ptr) @@ -32463,27 +32469,27 @@ func rewriteValuegeneric_OpStore(v *Value) bool { v.AddArg3(dst, e, mem) return true } - // match: (Store (SelectN [0] call:(StaticLECall _ _)) x mem:(SelectN [1] call)) - // cond: isConstZero(x) && isSameCall(call.Aux, "runtime.newobject") + // match: (Store (SelectN [0] call:(StaticLECall ___)) x mem:(SelectN [1] call)) + // cond: isConstZero(x) && isMalloc(call.Aux) // result: mem for { if v_0.Op != OpSelectN || auxIntToInt64(v_0.AuxInt) != 0 { break } call := v_0.Args[0] - if call.Op != OpStaticLECall || len(call.Args) != 2 { + if call.Op != OpStaticLECall { break } x := v_1 mem := v_2 - if mem.Op != OpSelectN || auxIntToInt64(mem.AuxInt) != 1 || call != mem.Args[0] || !(isConstZero(x) && isSameCall(call.Aux, "runtime.newobject")) { + if mem.Op != OpSelectN || auxIntToInt64(mem.AuxInt) != 1 || call != mem.Args[0] || !(isConstZero(x) && isMalloc(call.Aux)) { break } v.copyOf(mem) return true } - // match: (Store (OffPtr (SelectN [0] call:(StaticLECall _ _))) x mem:(SelectN [1] call)) - // cond: isConstZero(x) && isSameCall(call.Aux, "runtime.newobject") + // match: (Store (OffPtr (SelectN [0] call:(StaticLECall ___))) x mem:(SelectN [1] call)) + // cond: isConstZero(x) && isMalloc(call.Aux) // result: mem for { if v_0.Op != OpOffPtr { @@ -32494,12 +32500,12 @@ func rewriteValuegeneric_OpStore(v *Value) bool { break } call := v_0_0.Args[0] - if call.Op != OpStaticLECall || len(call.Args) != 2 { + if call.Op != OpStaticLECall { break } x := v_1 mem := v_2 - if 
mem.Op != OpSelectN || auxIntToInt64(mem.AuxInt) != 1 || call != mem.Args[0] || !(isConstZero(x) && isSameCall(call.Aux, "runtime.newobject")) { + if mem.Op != OpSelectN || auxIntToInt64(mem.AuxInt) != 1 || call != mem.Args[0] || !(isConstZero(x) && isMalloc(call.Aux)) { break } v.copyOf(mem) @@ -36842,19 +36848,19 @@ func rewriteValuegeneric_OpZero(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Zero (SelectN [0] call:(StaticLECall _ _)) mem:(SelectN [1] call)) - // cond: isSameCall(call.Aux, "runtime.newobject") + // match: (Zero (SelectN [0] call:(StaticLECall ___)) mem:(SelectN [1] call)) + // cond: isMalloc(call.Aux) // result: mem for { if v_0.Op != OpSelectN || auxIntToInt64(v_0.AuxInt) != 0 { break } call := v_0.Args[0] - if call.Op != OpStaticLECall || len(call.Args) != 2 { + if call.Op != OpStaticLECall { break } mem := v_1 - if mem.Op != OpSelectN || auxIntToInt64(mem.AuxInt) != 1 || call != mem.Args[0] || !(isSameCall(call.Aux, "runtime.newobject")) { + if mem.Op != OpSelectN || auxIntToInt64(mem.AuxInt) != 1 || call != mem.Args[0] || !(isMalloc(call.Aux)) { break } v.copyOf(mem) diff --git a/src/cmd/compile/internal/ssa/writebarrier.go b/src/cmd/compile/internal/ssa/writebarrier.go index ec6901f13ec1d2..ec5a0fed29d791 100644 --- a/src/cmd/compile/internal/ssa/writebarrier.go +++ b/src/cmd/compile/internal/ssa/writebarrier.go @@ -798,7 +798,16 @@ func IsNewObject(v *Value, select1 []*Value) (mem *Value, ok bool) { if call.Op != OpStaticCall { return nil, false } - if !isSameCall(call.Aux, "runtime.newobject") { + // Check for new object, or for new object calls that have been transformed into size-specialized malloc calls. + // Calls that have return type unsafe pointer may have originally been produced by flushPendingHeapAllocations + // in the ssa generator, so may have not originally been newObject calls. 
+ var numParameters int64 + switch { + case isNewObject(call.Aux): + numParameters = 1 + case isSpecializedMalloc(call.Aux) && !v.Type.IsUnsafePtr(): + numParameters = 3 + default: return nil, false } if f.ABIDefault == f.ABI1 && len(c.intParamRegs) >= 1 { @@ -813,7 +822,7 @@ func IsNewObject(v *Value, select1 []*Value) (mem *Value, ok bool) { if v.Args[0].Args[0].Op != OpSP { return nil, false } - if v.Args[0].AuxInt != c.ctxt.Arch.FixedFrameSize+c.RegSize { // offset of return value + if v.Args[0].AuxInt != c.ctxt.Arch.FixedFrameSize+numParameters*c.RegSize { // offset of return value return nil, false } return mem, true diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go index 2705195bd55791..57cd9084821237 100644 --- a/src/cmd/compile/internal/ssagen/ssa.go +++ b/src/cmd/compile/internal/ssagen/ssa.go @@ -12,6 +12,7 @@ import ( "go/constant" "html" "internal/buildcfg" + "internal/runtime/gc" "os" "path/filepath" "slices" @@ -124,6 +125,15 @@ func InitConfig() { ir.Syms.Goschedguarded = typecheck.LookupRuntimeFunc("goschedguarded") ir.Syms.Growslice = typecheck.LookupRuntimeFunc("growslice") ir.Syms.InterfaceSwitch = typecheck.LookupRuntimeFunc("interfaceSwitch") + for i := 1; i < len(ir.Syms.MallocGCSmallNoScan); i++ { + ir.Syms.MallocGCSmallNoScan[i] = typecheck.LookupRuntimeFunc(fmt.Sprintf("mallocgcSmallNoScanSC%d", i)) + } + for i := 1; i < len(ir.Syms.MallocGCSmallScanNoHeader); i++ { + ir.Syms.MallocGCSmallScanNoHeader[i] = typecheck.LookupRuntimeFunc(fmt.Sprintf("mallocgcSmallScanNoHeaderSC%d", i)) + } + for i := 1; i < len(ir.Syms.MallocGCTiny); i++ { + ir.Syms.MallocGCTiny[i] = typecheck.LookupRuntimeFunc(fmt.Sprintf("mallocTiny%d", i)) + } ir.Syms.MallocGC = typecheck.LookupRuntimeFunc("mallocgc") ir.Syms.Memmove = typecheck.LookupRuntimeFunc("memmove") ir.Syms.Msanread = typecheck.LookupRuntimeFunc("msanread") @@ -690,7 +700,7 @@ func allocAlign(t *types.Type) int64 { func (s *state) newHeapaddr(n *ir.Name) { size := allocSize(n.Type()) if n.Type().HasPointers() || size >= maxAggregatedHeapAllocation || size == 0 { - s.setHeapaddr(n.Pos(), n, s.newObject(n.Type(), nil)) + s.setHeapaddr(n.Pos(), n, s.newObject(n.Type())) return } @@ -709,7 +719,7 @@ func (s *state) newHeapaddr(n *ir.Name) { // Make an allocation, but the type being allocated is just // the first pending object. We will come back and update it // later if needed. - allocCall = s.newObject(n.Type(), nil) + allocCall = s.newObjectNonSpecialized(n.Type(), nil) } else { allocCall = s.pendingHeapAllocations[0].Args[0] } @@ -762,7 +772,11 @@ func (s *state) flushPendingHeapAllocations() { s.constBool(true), // needZero TODO: false is ok? 
call.Args[1], // memory } - call.Aux = ssa.StaticAuxCall(ir.Syms.MallocGC, s.f.ABIDefault.ABIAnalyzeTypes( + mallocSym := ir.Syms.MallocGC + if specialMallocSym := s.specializedMallocSym(size, false); specialMallocSym != nil { + mallocSym = specialMallocSym + } + call.Aux = ssa.StaticAuxCall(mallocSym, s.f.ABIDefault.ABIAnalyzeTypes( []*types.Type{args[0].Type, args[1].Type, args[2].Type}, []*types.Type{types.Types[types.TUNSAFEPTR]}, )) @@ -774,6 +788,43 @@ func (s *state) flushPendingHeapAllocations() { ptr.Type = types.Types[types.TUNSAFEPTR] } +func (s *state) specializedMallocSym(size int64, hasPointers bool) *obj.LSym { + if !s.sizeSpecializedMallocEnabled() { + return nil + } + ptrSize := s.config.PtrSize + ptrBits := ptrSize * 8 + minSizeForMallocHeader := ptrSize * ptrBits + heapBitsInSpan := size <= minSizeForMallocHeader + if !heapBitsInSpan { + return nil + } + divRoundUp := func(n, a uintptr) uintptr { return (n + a - 1) / a } + sizeClass := gc.SizeToSizeClass8[divRoundUp(uintptr(size), gc.SmallSizeDiv)] + if hasPointers { + return ir.Syms.MallocGCSmallScanNoHeader[sizeClass] + } + if size < gc.TinySize { + return ir.Syms.MallocGCTiny[size] + } + return ir.Syms.MallocGCSmallNoScan[sizeClass] +} + +func (s *state) sizeSpecializedMallocEnabled() bool { + if base.Flag.CompilingRuntime { + // The compiler forces the values of the asan, msan, and race flags to false if + // we're compiling the runtime, so we lose the information about whether we're + // building in asan, msan, or race mode. Because the specialized functions don't + // work in that mode, just turn if off in that case. + // TODO(matloob): Save the information about whether the flags were passed in + // originally so we can turn off size specialized malloc in that case instead + // using Instrumenting below. Then we can remove this condition. + return false + } + + return buildcfg.Experiment.SizeSpecializedMalloc && !base.Flag.Cfg.Instrumenting +} + // setHeapaddr allocates a new PAUTO variable to store ptr (which must be non-nil) // and then sets it as n's heap address. func (s *state) setHeapaddr(pos src.XPos, n *ir.Name, ptr *ssa.Value) { @@ -796,7 +847,24 @@ func (s *state) setHeapaddr(pos src.XPos, n *ir.Name, ptr *ssa.Value) { } // newObject returns an SSA value denoting new(typ). -func (s *state) newObject(typ *types.Type, rtype *ssa.Value) *ssa.Value { +func (s *state) newObject(typ *types.Type) *ssa.Value { + if typ.Size() == 0 { + return s.newValue1A(ssa.OpAddr, types.NewPtr(typ), ir.Syms.Zerobase, s.sb) + } + rtype := s.reflectType(typ) + if specialMallocSym := s.specializedMallocSym(typ.Size(), typ.HasPointers()); specialMallocSym != nil { + return s.rtcall(specialMallocSym, true, []*types.Type{types.NewPtr(typ)}, + s.constInt(types.Types[types.TUINTPTR], typ.Size()), + rtype, + s.constBool(true), + )[0] + } + return s.rtcall(ir.Syms.Newobject, true, []*types.Type{types.NewPtr(typ)}, rtype)[0] +} + +// newObjectNonSpecialized returns an SSA value denoting new(typ). It does +// not produce size-specialized malloc functions. 
+func (s *state) newObjectNonSpecialized(typ *types.Type, rtype *ssa.Value) *ssa.Value { if typ.Size() == 0 { return s.newValue1A(ssa.OpAddr, types.NewPtr(typ), ir.Syms.Zerobase, s.sb) } @@ -3594,11 +3662,10 @@ func (s *state) exprCheckPtr(n ir.Node, checkPtrOK bool) *ssa.Value { case ir.ONEW: n := n.(*ir.UnaryExpr) - var rtype *ssa.Value if x, ok := n.X.(*ir.DynamicType); ok && x.Op() == ir.ODYNAMICTYPE { - rtype = s.expr(x.RType) + return s.newObjectNonSpecialized(n.Type().Elem(), s.expr(x.RType)) } - return s.newObject(n.Type().Elem(), rtype) + return s.newObject(n.Type().Elem()) case ir.OUNSAFEADD: n := n.(*ir.BinaryExpr) diff --git a/src/cmd/dist/buildtool.go b/src/cmd/dist/buildtool.go index 080de832b2ad96..62cd9376927e0d 100644 --- a/src/cmd/dist/buildtool.go +++ b/src/cmd/dist/buildtool.go @@ -90,6 +90,7 @@ var bootstrapDirs = []string{ "internal/platform", "internal/profile", "internal/race", + "internal/runtime/gc", "internal/saferio", "internal/syscall/unix", "internal/types/errors", diff --git a/src/runtime/mprof.go b/src/runtime/mprof.go index 0957e67b50fa2c..743fa5a3fe848b 100644 --- a/src/runtime/mprof.go +++ b/src/runtime/mprof.go @@ -49,7 +49,7 @@ const ( // desired maximum number of frames after expansion. // This should be at least as large as the largest skip value // used for profiling; otherwise stacks may be truncated inconsistently - maxSkip = 6 + maxSkip = 8 // maxProfStackDepth is the highest valid value for debug.profstackdepth. // It's used for the bucket.stk func. @@ -444,7 +444,7 @@ func mProf_Malloc(mp *m, p unsafe.Pointer, size uintptr) { } // Only use the part of mp.profStack we need and ignore the extra space // reserved for delayed inline expansion with frame pointer unwinding. - nstk := callers(5, mp.profStack[:debug.profstackdepth]) + nstk := callers(3, mp.profStack[:debug.profstackdepth+2]) index := (mProfCycle.read() + 2) % uint32(len(memRecord{}.future)) b := stkbucket(memProfile, size, mp.profStack[:nstk], true) diff --git a/src/runtime/pprof/mprof_test.go b/src/runtime/pprof/mprof_test.go index 7c4a37e3c996fb..456ba9044498af 100644 --- a/src/runtime/pprof/mprof_test.go +++ b/src/runtime/pprof/mprof_test.go @@ -97,25 +97,25 @@ func TestMemoryProfiler(t *testing.T) { legacy string }{{ stk: []string{"runtime/pprof.allocatePersistent1K", "runtime/pprof.TestMemoryProfiler"}, - legacy: fmt.Sprintf(`%v: %v \[%v: %v\] @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ + legacy: fmt.Sprintf(`%v: %v \[%v: %v\] @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+( 0x[0-9,a-f]+ 0x[0-9,a-f]+)? 
# 0x[0-9,a-f]+ runtime/pprof\.allocatePersistent1K\+0x[0-9,a-f]+ .*runtime/pprof/mprof_test\.go:48 # 0x[0-9,a-f]+ runtime/pprof\.TestMemoryProfiler\+0x[0-9,a-f]+ .*runtime/pprof/mprof_test\.go:87 `, 32*memoryProfilerRun, 1024*memoryProfilerRun, 32*memoryProfilerRun, 1024*memoryProfilerRun), }, { stk: []string{"runtime/pprof.allocateTransient1M", "runtime/pprof.TestMemoryProfiler"}, - legacy: fmt.Sprintf(`0: 0 \[%v: %v\] @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ + legacy: fmt.Sprintf(`0: 0 \[%v: %v\] @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ # 0x[0-9,a-f]+ runtime/pprof\.allocateTransient1M\+0x[0-9,a-f]+ .*runtime/pprof/mprof_test.go:25 # 0x[0-9,a-f]+ runtime/pprof\.TestMemoryProfiler\+0x[0-9,a-f]+ .*runtime/pprof/mprof_test.go:84 `, (1<<10)*memoryProfilerRun, (1<<20)*memoryProfilerRun), }, { stk: []string{"runtime/pprof.allocateTransient2M", "runtime/pprof.TestMemoryProfiler"}, - legacy: fmt.Sprintf(`0: 0 \[%v: %v\] @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ + legacy: fmt.Sprintf(`0: 0 \[%v: %v\] @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ # 0x[0-9,a-f]+ runtime/pprof\.allocateTransient2M\+0x[0-9,a-f]+ .*runtime/pprof/mprof_test.go:31 # 0x[0-9,a-f]+ runtime/pprof\.TestMemoryProfiler\+0x[0-9,a-f]+ .*runtime/pprof/mprof_test.go:85 `, memoryProfilerRun, (2<<20)*memoryProfilerRun), }, { stk: []string{"runtime/pprof.allocateTransient2MInline", "runtime/pprof.TestMemoryProfiler"}, - legacy: fmt.Sprintf(`0: 0 \[%v: %v\] @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ + legacy: fmt.Sprintf(`0: 0 \[%v: %v\] @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ # 0x[0-9,a-f]+ runtime/pprof\.allocateTransient2MInline\+0x[0-9,a-f]+ .*runtime/pprof/mprof_test.go:35 # 0x[0-9,a-f]+ runtime/pprof\.TestMemoryProfiler\+0x[0-9,a-f]+ .*runtime/pprof/mprof_test.go:86 `, memoryProfilerRun, (2<<20)*memoryProfilerRun), diff --git a/src/runtime/pprof/pprof_test.go b/src/runtime/pprof/pprof_test.go index a538627099034e..6e6f4313a83dfa 100644 --- a/src/runtime/pprof/pprof_test.go +++ b/src/runtime/pprof/pprof_test.go @@ -2585,7 +2585,7 @@ func TestProfilerStackDepth(t *testing.T) { t.Errorf("want stack depth = %d, got %d", depth, len(stk)) } - if rootFn, wantFn := stk[depth-1], "runtime/pprof.produceProfileEvents"; rootFn != wantFn { + if rootFn, wantFn := stk[depth-1], "runtime/pprof.allocDeep"; rootFn != wantFn { t.Errorf("want stack stack root %s, got %v", wantFn, rootFn) } } @@ -2660,7 +2660,7 @@ func goroutineDeep(t *testing.T, n int) { // guaranteed to have exactly the desired depth with produceProfileEvents as // their root frame which is expected by TestProfilerStackDepth. 
func produceProfileEvents(t *testing.T, depth int) { - allocDeep(depth - 1) // -1 for produceProfileEvents, ** + allocDeep(depth + 1) // +1 for produceProfileEvents, ** blockChanDeep(t, depth-2) // -2 for produceProfileEvents, **, chanrecv1 blockMutexDeep(t, depth-2) // -2 for produceProfileEvents, **, Unlock memSink = nil diff --git a/test/codegen/strings.go b/test/codegen/strings.go index 498c3d398f8bc6..0b3ca7016ffc58 100644 --- a/test/codegen/strings.go +++ b/test/codegen/strings.go @@ -23,7 +23,7 @@ func CountBytes(s []byte) int { func ToByteSlice() []byte { // Issue #24698 // amd64:`LEAQ\ttype:\[3\]uint8` - // amd64:`CALL\truntime\.newobject` + // amd64:`CALL\truntime\.mallocTiny3` // amd64:-`.*runtime.stringtoslicebyte` return []byte("foo") } diff --git a/test/fixedbugs/issue15747.go b/test/fixedbugs/issue15747.go index 92e762c4e923ac..743adb6a8ffb1f 100644 --- a/test/fixedbugs/issue15747.go +++ b/test/fixedbugs/issue15747.go @@ -19,7 +19,7 @@ type T struct{ M string } var b bool -func f1(q *Q, xx []byte) interface{} { // ERROR "live at call to newobject: xx$" "live at entry to f1: xx$" +func f1(q *Q, xx []byte) interface{} { // ERROR "live at call to mallocgcSmallScanNoHeaderSC[0-9]+: xx$" "live at entry to f1: xx$" // xx was copied from the stack to the heap on the previous line: // xx was live for the first two prints but then it switched to &xx // being live. We should not see plain xx again. @@ -36,7 +36,7 @@ func f1(q *Q, xx []byte) interface{} { // ERROR "live at call to newobject: xx$" //go:noinline func f2(d []byte, n int) (odata, res []byte, e interface{}) { // ERROR "live at entry to f2: d$" if n > len(d) { - return d, nil, &T{M: "hello"} // ERROR "live at call to newobject: d" + return d, nil, &T{M: "hello"} // ERROR "live at call to mallocgcSmallScanNoHeaderSC[0-9]+: d" } res = d[:n] odata = d[n:] diff --git a/test/heapsampling.go b/test/heapsampling.go index 741db74f894d45..db93b253c90bab 100644 --- a/test/heapsampling.go +++ b/test/heapsampling.go @@ -48,22 +48,23 @@ func testInterleavedAllocations() error { const iters = 50000 // Sizes of the allocations performed by each experiment. frames := []string{"main.allocInterleaved1", "main.allocInterleaved2", "main.allocInterleaved3"} + leafFrame := "main.allocInterleaved" // Pass if at least one of three experiments has no errors. Use a separate // function for each experiment to identify each experiment in the profile. allocInterleaved1(iters) - if checkAllocations(getMemProfileRecords(), frames[0:1], iters, allocInterleavedSizes) == nil { + if checkAllocations(getMemProfileRecords(), leafFrame, frames[0:1], iters, allocInterleavedSizes) == nil { // Passed on first try, report no error. return nil } allocInterleaved2(iters) - if checkAllocations(getMemProfileRecords(), frames[0:2], iters, allocInterleavedSizes) == nil { + if checkAllocations(getMemProfileRecords(), leafFrame, frames[0:2], iters, allocInterleavedSizes) == nil { // Passed on second try, report no error. return nil } allocInterleaved3(iters) // If it fails a third time, we may be onto something. - return checkAllocations(getMemProfileRecords(), frames[0:3], iters, allocInterleavedSizes) + return checkAllocations(getMemProfileRecords(), leafFrame, frames[0:3], iters, allocInterleavedSizes) } var allocInterleavedSizes = []int64{17 * 1024, 1024, 18 * 1024, 512, 16 * 1024, 256} @@ -108,22 +109,23 @@ func testSmallAllocations() error { // Sizes of the allocations performed by each experiment. 
sizes := []int64{1024, 512, 256} frames := []string{"main.allocSmall1", "main.allocSmall2", "main.allocSmall3"} + leafFrame := "main.allocSmall" // Pass if at least one of three experiments has no errors. Use a separate // function for each experiment to identify each experiment in the profile. allocSmall1(iters) - if checkAllocations(getMemProfileRecords(), frames[0:1], iters, sizes) == nil { + if checkAllocations(getMemProfileRecords(), leafFrame, frames[0:1], iters, sizes) == nil { // Passed on first try, report no error. return nil } allocSmall2(iters) - if checkAllocations(getMemProfileRecords(), frames[0:2], iters, sizes) == nil { + if checkAllocations(getMemProfileRecords(), leafFrame, frames[0:2], iters, sizes) == nil { // Passed on second try, report no error. return nil } allocSmall3(iters) // If it fails a third time, we may be onto something. - return checkAllocations(getMemProfileRecords(), frames[0:3], iters, sizes) + return checkAllocations(getMemProfileRecords(), leafFrame, frames[0:3], iters, sizes) } // allocSmall performs only small allocations for sanity testing. @@ -161,21 +163,21 @@ func allocSmall3(n int) { // Look only at samples that include the named frames, and group the // allocations by their line number. All these allocations are done from // the same leaf function, so their line numbers are the same. -func checkAllocations(records []runtime.MemProfileRecord, frames []string, count int64, size []int64) error { +func checkAllocations(records []runtime.MemProfileRecord, leafFrame string, frames []string, count int64, size []int64) error { objectsPerLine := map[int][]int64{} bytesPerLine := map[int][]int64{} totalCount := []int64{} // Compute the line number of the first allocation. All the // allocations are from the same leaf, so pick the first one. var firstLine int - for ln := range allocObjects(records, frames[0]) { + for ln := range allocObjects(records, leafFrame, frames[0]) { if firstLine == 0 || firstLine > ln { firstLine = ln } } for _, frame := range frames { var objectCount int64 - a := allocObjects(records, frame) + a := allocObjects(records, leafFrame, frame) for s := range size { // Allocations of size size[s] should be on line firstLine + s. ln := firstLine + s @@ -258,7 +260,7 @@ type allocStat struct { // allocObjects examines the profile records for samples including the // named function and returns the allocation stats aggregated by // source line number of the allocation (at the leaf frame). 
-func allocObjects(records []runtime.MemProfileRecord, function string) map[int]allocStat { +func allocObjects(records []runtime.MemProfileRecord, leafFrame, function string) map[int]allocStat { a := make(map[int]allocStat) for _, r := range records { var pcs []uintptr @@ -273,7 +275,7 @@ func allocObjects(records []runtime.MemProfileRecord, function string) map[int]a for { frame, more := frames.Next() name := frame.Function - if line == 0 { + if name == leafFrame && line == 0 { line = frame.Line } if name == function { diff --git a/test/live.go b/test/live.go index 6e1a60557c8ad4..56b78ccf8b4af2 100644 --- a/test/live.go +++ b/test/live.go @@ -467,9 +467,9 @@ func f27defer(b bool) { func f27go(b bool) { x := 0 if b { - go call27(func() { x++ }) // ERROR "live at call to newobject: &x$" "live at call to newobject: &x .autotmp_[0-9]+$" "live at call to newproc: &x$" // allocate two closures, the func literal, and the wrapper for go + go call27(func() { x++ }) // ERROR "live at call to mallocgcSmallScanNoHeaderSC[0-9]+: &x$" "live at call to mallocgcSmallScanNoHeaderSC[0-9]+: &x .autotmp_[0-9]+$" "live at call to newproc: &x$" // allocate two closures, the func literal, and the wrapper for go } - go call27(func() { x++ }) // ERROR "live at call to newobject: &x$" "live at call to newobject: .autotmp_[0-9]+$" // allocate two closures, the func literal, and the wrapper for go + go call27(func() { x++ }) // ERROR "live at call to mallocgcSmallScanNoHeaderSC[0-9]+: &x$" "live at call to mallocgcSmallScanNoHeaderSC[0-9]+: .autotmp_[0-9]+$" // allocate two closures, the func literal, and the wrapper for go printnl() } @@ -538,7 +538,7 @@ func f31(b1, b2, b3 bool) { g31(g18()) // ERROR "stack object .autotmp_[0-9]+ \[2\]string$" } if b2 { - h31(g18()) // ERROR "live at call to convT: .autotmp_[0-9]+$" "live at call to newobject: .autotmp_[0-9]+$" + h31(g18()) // ERROR "live at call to convT: .autotmp_[0-9]+$" "live at call to mallocgcSmallScanNoHeaderSC[0-9]+: .autotmp_[0-9]+$" } if b3 { panic(g18()) @@ -665,14 +665,14 @@ func f39a() (x []int) { func f39b() (x [10]*int) { x = [10]*int{} - x[0] = new(int) // ERROR "live at call to newobject: x$" + x[0] = new(int) // ERROR "live at call to mallocTiny[48]: x$" printnl() // ERROR "live at call to printnl: x$" return x } func f39c() (x [10]*int) { x = [10]*int{} - x[0] = new(int) // ERROR "live at call to newobject: x$" + x[0] = new(int) // ERROR "live at call to mallocTiny[48]: x$" printnl() // ERROR "live at call to printnl: x$" return } diff --git a/test/live_regabi.go b/test/live_regabi.go index d2565ba9130cd0..838cbdefad7c5b 100644 --- a/test/live_regabi.go +++ b/test/live_regabi.go @@ -465,9 +465,9 @@ func f27defer(b bool) { func f27go(b bool) { x := 0 if b { - go call27(func() { x++ }) // ERROR "live at call to newobject: &x$" "live at call to newobject: &x .autotmp_[0-9]+$" "live at call to newproc: &x$" // allocate two closures, the func literal, and the wrapper for go + go call27(func() { x++ }) // ERROR "live at call to mallocgcSmallScanNoHeaderSC[0-9]+: &x$" "live at call to mallocgcSmallScanNoHeaderSC[0-9]+: &x .autotmp_[0-9]+$" "live at call to newproc: &x$" // allocate two closures, the func literal, and the wrapper for go } - go call27(func() { x++ }) // ERROR "live at call to newobject: &x$" "live at call to newobject: .autotmp_[0-9]+$" // allocate two closures, the func literal, and the wrapper for go + go call27(func() { x++ }) // ERROR "live at call to mallocgcSmallScanNoHeaderSC[0-9]+: &x$" "live at call to 
mallocgcSmallScanNoHeaderSC[0-9]+: .autotmp_[0-9]+$" // allocate two closures, the func literal, and the wrapper for go printnl() } @@ -536,7 +536,7 @@ func f31(b1, b2, b3 bool) { g31(g18()) // ERROR "stack object .autotmp_[0-9]+ \[2\]string$" } if b2 { - h31(g18()) // ERROR "live at call to convT: .autotmp_[0-9]+$" "live at call to newobject: .autotmp_[0-9]+$" + h31(g18()) // ERROR "live at call to convT: .autotmp_[0-9]+$" "live at call to mallocgcSmallScanNoHeaderSC[0-9]+: .autotmp_[0-9]+$" } if b3 { panic(g18()) @@ -663,14 +663,14 @@ func f39a() (x []int) { func f39b() (x [10]*int) { x = [10]*int{} - x[0] = new(int) // ERROR "live at call to newobject: x$" + x[0] = new(int) // ERROR "live at call to mallocTiny[48]: x$" printnl() // ERROR "live at call to printnl: x$" return x } func f39c() (x [10]*int) { x = [10]*int{} - x[0] = new(int) // ERROR "live at call to newobject: x$" + x[0] = new(int) // ERROR "live at call to mallocTiny[48]: x$" printnl() // ERROR "live at call to printnl: x$" return } diff --git a/test/uintptrescapes2.go b/test/uintptrescapes2.go index 656286c0ff2bd7..e111d47fab95c4 100644 --- a/test/uintptrescapes2.go +++ b/test/uintptrescapes2.go @@ -33,8 +33,8 @@ func (T) M1(a uintptr) {} // ERROR "escaping uintptr" func (T) M2(a ...uintptr) {} // ERROR "escaping ...uintptr" func TestF1() { - var t int // ERROR "moved to heap" - F1(uintptr(unsafe.Pointer(&t))) // ERROR "live at call to F1: .?autotmp" "stack object .autotmp_[0-9]+ unsafe.Pointer$" + var t int // ERROR "moved to heap" + F1(uintptr(unsafe.Pointer(&t))) // ERROR "live at call to F1: .?autotmp" "stack object .autotmp_[0-9]+ unsafe.Pointer$" } func TestF3() { @@ -49,17 +49,17 @@ func TestM1() { } func TestF2() { - var v int // ERROR "moved to heap" - F2(0, 1, uintptr(unsafe.Pointer(&v)), 2) // ERROR "live at call to newobject: .?autotmp" "live at call to F2: .?autotmp" "escapes to heap" "stack object .autotmp_[0-9]+ unsafe.Pointer$" + var v int // ERROR "moved to heap" + F2(0, 1, uintptr(unsafe.Pointer(&v)), 2) // ERROR "live at call to mallocgcSmallNoScanSC[0-9]+: .?autotmp" "live at call to F2: .?autotmp" "escapes to heap" "stack object .autotmp_[0-9]+ unsafe.Pointer$" } func TestF4() { var v2 int // ERROR "moved to heap" - F4(0, 1, uintptr(unsafe.Pointer(&v2)), 2) // ERROR "live at call to newobject: .?autotmp" "live at call to F4: .?autotmp" "escapes to heap" "stack object .autotmp_[0-9]+ unsafe.Pointer$" + F4(0, 1, uintptr(unsafe.Pointer(&v2)), 2) // ERROR "live at call to mallocgcSmallNoScanSC[0-9]+: .?autotmp" "live at call to F4: .?autotmp" "escapes to heap" "stack object .autotmp_[0-9]+ unsafe.Pointer$" } func TestM2() { var t T var v int // ERROR "moved to heap" - t.M2(0, 1, uintptr(unsafe.Pointer(&v)), 2) // ERROR "live at call to newobject: .?autotmp" "live at call to T.M2: .?autotmp" "escapes to heap" "stack object .autotmp_[0-9]+ unsafe.Pointer$" + t.M2(0, 1, uintptr(unsafe.Pointer(&v)), 2) // ERROR "live at call to mallocgcSmallNoScanSC[0-9]+: .?autotmp" "live at call to T.M2: .?autotmp" "escapes to heap" "stack object .autotmp_[0-9]+ unsafe.Pointer$" } From 954fdcc51a678a2b84edb29ef4c3743b318a6fd8 Mon Sep 17 00:00:00 2001 From: WANG Xuerui Date: Thu, 9 Oct 2025 15:52:02 +0800 Subject: [PATCH 113/152] cmd/compile: declare no output register for loong64 LoweredAtomic{And,Or}32 ops The ICE seen on loong64 while compiling the `(*gcWork).tryStealSpan` function was due to an `LoweredAtomicAnd32` op (inlined from the `(pMask).clear` implementation) being incorrectly assigned an output register while it 
shouldn't have been. Because the op is of mem type, it has
needRegister() == false; hence in the shuffle phase of regalloc, its
bogus output register has no associated `orig` value recorded.

The bug was introduced in CL 482756, but only recently exposed by
CL 696035.

Since the old-style atomic ops need no return value (and this is even
documented beside the loong64 ssa op definition), just fix the
register info for both.

While at it, add a note in the ssa op definition file about the
architectural necessity of resultNotInArgs for loong64 atomic ops,
because the practice is not seen in several other arches I have
checked.

Updates #75776

Change-Id: I087f51b8a2825d7b00fc3965b0afcc8b02cad277
Reviewed-on: https://go-review.googlesource.com/c/go/+/710475
LUCI-TryBot-Result: Go LUCI
Reviewed-by: Michael Pratt
Reviewed-by: abner chenc
Reviewed-by: Cherry Mui
---
 src/cmd/compile/internal/ssa/_gen/LOONG64Ops.go | 11 +++++++++--
 src/cmd/compile/internal/ssa/opGen.go           |  6 ------
 2 files changed, 9 insertions(+), 8 deletions(-)

diff --git a/src/cmd/compile/internal/ssa/_gen/LOONG64Ops.go b/src/cmd/compile/internal/ssa/_gen/LOONG64Ops.go
index a85a566660eee6..7e8b8bf497b8ff 100644
--- a/src/cmd/compile/internal/ssa/_gen/LOONG64Ops.go
+++ b/src/cmd/compile/internal/ssa/_gen/LOONG64Ops.go
@@ -143,6 +143,7 @@ func init() {
 		gp2load = regInfo{inputs: []regMask{gpspsbg, gpg}, outputs: []regMask{gp}}
 		gpstore = regInfo{inputs: []regMask{gpspsbg, gpg}}
 		gpstore2 = regInfo{inputs: []regMask{gpspsbg, gpg, gpg | rz}}
+		gpoldatom = regInfo{inputs: []regMask{gpspsbg, gpg}}
 		gpxchg = regInfo{inputs: []regMask{gpspsbg, gpg}, outputs: []regMask{gp}}
 		gpcas = regInfo{inputs: []regMask{gpspsbg, gpg, gpg}, outputs: []regMask{gp}}
 		preldreg = regInfo{inputs: []regMask{gpspg}}
@@ -431,6 +432,12 @@ func init() {
 			faultOnNilArg1: true,
 		},
 
+		// Atomic operations.
+		//
+		// resultNotInArgs is needed by all ops lowering to LoongArch
+		// atomic memory access instructions, because these instructions
+		// are defined to require rd != rj && rd != rk per the ISA spec.
+
 		// atomic loads.
 		// load from arg0. arg1=mem.
 		// returns so they can be properly ordered with other loads.
@@ -500,8 +507,8 @@ func init() {
 
 		// Atomic 32 bit AND/OR.
 		// *arg0 &= (|=) arg1. arg2=mem. returns nil.
-		{name: "LoweredAtomicAnd32", argLength: 3, reg: gpxchg, asm: "AMANDDBW", resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true},
-		{name: "LoweredAtomicOr32", argLength: 3, reg: gpxchg, asm: "AMORDBW", resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true},
+		{name: "LoweredAtomicAnd32", argLength: 3, reg: gpoldatom, asm: "AMANDDBW", resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true},
+		{name: "LoweredAtomicOr32", argLength: 3, reg: gpoldatom, asm: "AMORDBW", resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true},
 
 		// Atomic 32,64 bit AND/OR.
 		// *arg0 &= (|=) arg1. arg2=mem. returns . auxint must be zero.
diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 32ba3a89856ed4..3ed1619e4a6ab9 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -26392,9 +26392,6 @@ var opcodeTable = [...]opInfo{ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, - outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 - }, }, }, { @@ -26409,9 +26406,6 @@ var opcodeTable = [...]opInfo{ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, - outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 - }, }, }, { From c53cb642deea152e28281133bc0053f5ec0ce358 Mon Sep 17 00:00:00 2001 From: WANG Xuerui Date: Thu, 9 Oct 2025 16:05:35 +0800 Subject: [PATCH 114/152] internal/buildcfg: enable greenteagc experiment for loong64 The loong64 compiler bug has been resolved, so we can now unconditionally enable the experiment on the architecture. Updates #73581 Fixes #75776 Change-Id: I390f8a125d43ca64798ea5b6a408aaf7220fadbf Reviewed-on: https://go-review.googlesource.com/c/go/+/710476 Reviewed-by: Michael Knyszek Auto-Submit: abner chenc LUCI-TryBot-Result: Go LUCI Reviewed-by: abner chenc Reviewed-by: Cherry Mui --- src/internal/buildcfg/exp.go | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/src/internal/buildcfg/exp.go b/src/internal/buildcfg/exp.go index fb6b5859e31a1e..f1a1d8632ef507 100644 --- a/src/internal/buildcfg/exp.go +++ b/src/internal/buildcfg/exp.go @@ -78,18 +78,13 @@ func ParseGOEXPERIMENT(goos, goarch, goexp string) (*ExperimentFlags, error) { // things like .debug_addr (needed for DWARF 5). dwarf5Supported := (goos != "darwin" && goos != "ios" && goos != "aix") - // The compiler crashes while compiling some of the Green Tea code. - // The Green Tea code is pretty normal, so this is likely a compiler - // bug in the loong64 port. - greenTeaGCSupported := goarch != "loong64" - baseline := goexperiment.Flags{ RegabiWrappers: regabiSupported, RegabiArgs: regabiSupported, Dwarf5: dwarf5Supported, RandomizedHeapBase64: true, SizeSpecializedMalloc: true, - GreenTeaGC: greenTeaGCSupported, + GreenTeaGC: true, } // Start with the statically enabled set of experiments. From 5368e7742971c8dbcb75a405eb2319e71fb1d0c7 Mon Sep 17 00:00:00 2001 From: Damien Neil Date: Thu, 9 Oct 2025 15:25:30 -0700 Subject: [PATCH 115/152] net/http: run TestRequestWriteTransport with fake time to avoid flakes This test verifies whether or not we use the chunked encoding when sending a request with a body like io.NopCloser(strings.NewReader("")). This depends on whether the transport can read a single byte from the request body within 200ms, which is flaky on very slow builders. Use fake time to avoid flakes. 
Fixes #52575 Change-Id: Ie11a58ac6bc18d43af1423827887e804242dee30 Reviewed-on: https://go-review.googlesource.com/c/go/+/710737 Auto-Submit: Nicholas Husin Reviewed-by: Nicholas Husin Reviewed-by: Nicholas Husin LUCI-TryBot-Result: Go LUCI --- src/net/http/requestwrite_test.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/net/http/requestwrite_test.go b/src/net/http/requestwrite_test.go index 380ae9dec3244d..8b097cd5e15d1f 100644 --- a/src/net/http/requestwrite_test.go +++ b/src/net/http/requestwrite_test.go @@ -15,6 +15,7 @@ import ( "strings" "testing" "testing/iotest" + "testing/synctest" "time" ) @@ -667,6 +668,13 @@ func TestRequestWrite(t *testing.T) { func TestRequestWriteTransport(t *testing.T) { t.Parallel() + // Run this test in a synctest bubble, since it relies on the transport + // successfully probing the request body within 200ms + // (see transferWriter.probeRequestBody). + // This occasionally flakes on slow builders (#52575) if we don't use a fake clock. + synctest.Test(t, testRequestWriteTransport) +} +func testRequestWriteTransport(t *testing.T) { matchSubstr := func(substr string) func(string) error { return func(written string) error { if !strings.Contains(written, substr) { From 955a5a0dc5dd68ed89200a08f17590c0a94c1e09 Mon Sep 17 00:00:00 2001 From: Julia Lapenko Date: Wed, 13 Aug 2025 22:23:14 +0300 Subject: [PATCH 116/152] runtime: support arm64 Neon in async preemption This is a port of CL 669195 adjusted to save arm64 Neon registers off stack. Change-Id: Ia014778a8c9f0c1d05977b04184f51e791ae8495 Reviewed-on: https://go-review.googlesource.com/c/go/+/695916 LUCI-TryBot-Result: Go LUCI Reviewed-by: Mark Freeman Reviewed-by: Cherry Mui --- src/runtime/mkpreempt.go | 204 +++++++++++++++++++++++++++------- src/runtime/preempt_arm64.go | 38 +++++++ src/runtime/preempt_arm64.s | 66 +++++------ src/runtime/preempt_noxreg.go | 2 +- src/runtime/preempt_xreg.go | 2 +- 5 files changed, 236 insertions(+), 76 deletions(-) create mode 100644 src/runtime/preempt_arm64.go diff --git a/src/runtime/mkpreempt.go b/src/runtime/mkpreempt.go index 769c4ffc5c9eeb..7b84ba0a6fbaaf 100644 --- a/src/runtime/mkpreempt.go +++ b/src/runtime/mkpreempt.go @@ -163,19 +163,21 @@ package runtime type xRegs struct { `) pos := 0 - for _, reg := range l.regs { - if reg.pos != pos { - log.Fatalf("padding not implemented") - } - typ := fmt.Sprintf("[%d]byte", reg.size) - switch { - case reg.size == 4 && reg.pos%4 == 0: - typ = "uint32" - case reg.size == 8 && reg.pos%8 == 0: - typ = "uint64" + for _, seq := range l.regs { + for _, r := range seq.regs { + if r.pos != pos && !seq.fixedOffset { + log.Fatalf("padding not implemented") + } + typ := fmt.Sprintf("[%d]byte", r.size) + switch { + case r.size == 4 && r.pos%4 == 0: + typ = "uint32" + case r.size == 8 && r.pos%8 == 0: + typ = "uint64" + } + fmt.Fprintf(g.w, "\t%s %s\n", r.name, typ) + pos += r.size } - fmt.Fprintf(g.w, "\t%s %s\n", reg.reg, typ) - pos += reg.size } fmt.Fprintf(g.w, "}\n") @@ -191,16 +193,61 @@ type xRegs struct { type layout struct { stack int - regs []regPos + regs []regSeq sp string // stack pointer register } -type regPos struct { - pos, size int +type regInfo struct { + size int // register size in bytes + name string // register name + + // Some register names may require a specific suffix. + // In ARM64, a suffix called an "arrangement specifier" can be added to + // a register name. For example: + // + // V0.B16 + // + // In this case, "V0" is the register name, and ".B16" is the suffix. 
+ suffix string + pos int // position on stack +} + +// Some save/restore operations can involve multiple registers in a single +// instruction. For example, the LDP/STP instructions in ARM64: +// +// LDP 8(RSP), (R0, R1) +// STP (R0, R1), 8(RSP) +// +// In these cases, a pair of registers (R0, R1) is used as a single argument. +type regSeq struct { saveOp string restoreOp string - reg string + regs []regInfo + + // By default, all registers are saved on the stack, and the stack pointer offset + // is calculated based on the size of each register. For example (ARM64): + // + // STP (R0, R1), 8(RSP) + // STP (R2, R3), 24(RSP) + // + // However, automatic offset calculation may not always be desirable. + // In some cases, the offset must remain fixed: + // + // VST1.P [V0.B16, V1.B16, V2.B16, V3.B16], 64(R0) + // VST1.P [V4.B16, V5.B16, V6.B16, V7.B16], 64(R0) + // + // In this example, R0 is post-incremented after each instruction, + // so the offset should not be recalculated. For such cases, + // `fixedOffset` is set to true. + fixedOffset bool + + // After conversion to a string, register names are separated by commas + // and may be wrapped in a custom pair of brackets. For example (ARM64): + // + // (R0, R1) // wrapped in parentheses + // [V0.B16, V1.B16, V2.B16, V3.B16] // wrapped in square brackets + brackets [2]string // If this register requires special save and restore, these // give those operations with a %d placeholder for the stack @@ -208,42 +255,97 @@ type regPos struct { save, restore string } -func (l *layout) add(op, reg string, size int) { - l.regs = append(l.regs, regPos{saveOp: op, restoreOp: op, reg: reg, pos: l.stack, size: size}) +func (l *layout) add(op, regname string, size int) { + l.regs = append(l.regs, regSeq{saveOp: op, restoreOp: op, regs: []regInfo{{size, regname, "", l.stack}}}) l.stack += size } -func (l *layout) add2(sop, rop, reg string, size int) { - l.regs = append(l.regs, regPos{saveOp: sop, restoreOp: rop, reg: reg, pos: l.stack, size: size}) - l.stack += size +func (l *layout) add2(sop, rop string, regs []regInfo, brackets [2]string, fixedOffset bool) { + l.regs = append(l.regs, regSeq{saveOp: sop, restoreOp: rop, regs: regs, brackets: brackets, fixedOffset: fixedOffset}) + if !fixedOffset { + for i := range regs { + regs[i].pos = l.stack + l.stack += regs[i].size + } + } } func (l *layout) addSpecial(save, restore string, size int) { - l.regs = append(l.regs, regPos{save: save, restore: restore, pos: l.stack, size: size}) + l.regs = append(l.regs, regSeq{save: save, restore: restore, regs: []regInfo{{size, "", "", l.stack}}}) l.stack += size } +func (rs *regSeq) String() string { + switch len(rs.regs) { + case 0: + log.Fatal("Register sequence must not be empty!") + case 1: + return rs.regs[0].name + default: + names := make([]string, 0) + for _, r := range rs.regs { + name := r.name + r.suffix + names = append(names, name) + } + return rs.brackets[0] + strings.Join(names, ", ") + rs.brackets[1] + } + return "" +} + func (l *layout) save(g *gen) { - for _, reg := range l.regs { - if reg.save != "" { - g.p(reg.save, reg.pos) + for _, seq := range l.regs { + if len(seq.regs) < 1 { + log.Fatal("Register sequence must not be empty!") + } + // When dealing with a sequence of registers, we assume that only the position + // of the first register is relevant. For example: + // + // STP (R0, R1), 8(RSP) + // STP (R2, R3), 24(RSP) + // + // Here, R0.pos is 8. 
While we can infer that R1.pos is 16, it doesn't need to + // be explicitly specified, as the STP instruction calculates it automatically. + pos := seq.regs[0].pos + if seq.save != "" { + g.p(seq.save, pos) } else { - g.p("%s %s, %d(%s)", reg.saveOp, reg.reg, reg.pos, l.sp) + name := seq.String() + g.p("%s %s, %d(%s)", seq.saveOp, name, pos, l.sp) } } } -func (l *layout) restore(g *gen) { - for i := len(l.regs) - 1; i >= 0; i-- { - reg := l.regs[i] +func (l *layout) restoreInOrder(g *gen, reverse bool) { + var seq []regSeq + if reverse { + seq = make([]regSeq, 0) + for i := len(l.regs) - 1; i >= 0; i-- { + seq = append(seq, l.regs[i]) + } + } else { + seq = l.regs + } + for _, reg := range seq { + if len(reg.regs) < 1 { + log.Fatal("Register sequence must not be empty!") + } + pos := reg.regs[0].pos if reg.restore != "" { - g.p(reg.restore, reg.pos) + g.p(reg.restore, pos) } else { - g.p("%s %d(%s), %s", reg.restoreOp, reg.pos, l.sp, reg.reg) + g.p("%s %d(%s), %s", reg.restoreOp, pos, l.sp, reg.String()) } } } +func (l *layout) restore(g *gen) { + l.restoreInOrder(g, true) +} + +func (l *layout) restoreDirect(g *gen) { + l.restoreInOrder(g, false) +} + func gen386(g *gen) { p := g.p @@ -320,8 +422,11 @@ func genAMD64(g *gen) { // We don't have to do this, but it results in a nice Go type. If we split // this into multiple types, we probably should stop doing this. for i := range lXRegs.regs { - lXRegs.regs[i].pos = lZRegs.regs[i].pos - lYRegs.regs[i].pos = lZRegs.regs[i].pos + for j := range lXRegs.regs[i].regs { + lXRegs.regs[i].regs[j].pos = lZRegs.regs[i].regs[j].pos + lYRegs.regs[i].regs[j].pos = lZRegs.regs[i].regs[j].pos + } + } writeXRegs(g.goarch, &lZRegs) @@ -456,6 +561,7 @@ func genARM(g *gen) { } func genARM64(g *gen) { + const vReg = "R0" // *xRegState p := g.p // Add integer registers R0-R26 // R27 (REGTMP), R28 (g), R29 (FP), R30 (LR), R31 (SP) are special @@ -466,8 +572,11 @@ func genARM64(g *gen) { i-- continue // R18 is not used, skip } - reg := fmt.Sprintf("(R%d, R%d)", i, i+1) - l.add2("STP", "LDP", reg, 16) + regs := []regInfo{ + {name: fmt.Sprintf("R%d", i), size: 8}, + {name: fmt.Sprintf("R%d", i+1), size: 8}, + } + l.add2("STP", "LDP", regs, [2]string{"(", ")"}, false) } // Add flag registers. l.addSpecial( @@ -480,10 +589,17 @@ func genARM64(g *gen) { 8) // TODO: FPCR? I don't think we'll change it, so no need to save. // Add floating point registers F0-F31. 
- for i := 0; i < 31; i += 2 { - reg := fmt.Sprintf("(F%d, F%d)", i, i+1) - l.add2("FSTPD", "FLDPD", reg, 16) + lVRegs := layout{sp: vReg} // Non-GP registers + for i := 0; i < 31; i += 4 { + regs := []regInfo{ + {name: fmt.Sprintf("V%d", i), suffix: ".B16", size: 16, pos: 64}, + {name: fmt.Sprintf("V%d", i+1), suffix: ".B16", size: 16, pos: 64}, + {name: fmt.Sprintf("V%d", i+2), suffix: ".B16", size: 16, pos: 64}, + {name: fmt.Sprintf("V%d", i+3), suffix: ".B16", size: 16, pos: 64}, + } + lVRegs.add2("VST1.P", "VLD1.P", regs, [2]string{"[", "]"}, true) } + writeXRegs(g.goarch, &lVRegs) if l.stack%16 != 0 { l.stack += 8 // SP needs 16-byte alignment } @@ -500,8 +616,20 @@ func genARM64(g *gen) { p("MOVD R30, (RSP)") p("#endif") + p("// Save GPs") l.save(g) + p("// Save extended register state to p.xRegs.scratch") + p("MOVD g_m(g), %s", vReg) + p("MOVD m_p(%s), %s", vReg, vReg) + p("ADD $(p_xRegs+xRegPerP_scratch), %s, %s", vReg, vReg) + lVRegs.save(g) p("CALL ·asyncPreempt2(SB)") + p("// Restore non-GPs from *p.xRegs.cache") + p("MOVD g_m(g), %s", vReg) + p("MOVD m_p(%s), %s", vReg, vReg) + p("MOVD (p_xRegs+xRegPerP_cache)(%s), %s", vReg, vReg) + lVRegs.restoreDirect(g) + p("// Restore GPs") l.restore(g) p("MOVD %d(RSP), R30", l.stack) // sigctxt.pushCall has pushed LR (at interrupt) on stack, restore it diff --git a/src/runtime/preempt_arm64.go b/src/runtime/preempt_arm64.go new file mode 100644 index 00000000000000..1b71d2713ea5d0 --- /dev/null +++ b/src/runtime/preempt_arm64.go @@ -0,0 +1,38 @@ +// Code generated by mkpreempt.go; DO NOT EDIT. + +package runtime + +type xRegs struct { + V0 [16]byte + V1 [16]byte + V2 [16]byte + V3 [16]byte + V4 [16]byte + V5 [16]byte + V6 [16]byte + V7 [16]byte + V8 [16]byte + V9 [16]byte + V10 [16]byte + V11 [16]byte + V12 [16]byte + V13 [16]byte + V14 [16]byte + V15 [16]byte + V16 [16]byte + V17 [16]byte + V18 [16]byte + V19 [16]byte + V20 [16]byte + V21 [16]byte + V22 [16]byte + V23 [16]byte + V24 [16]byte + V25 [16]byte + V26 [16]byte + V27 [16]byte + V28 [16]byte + V29 [16]byte + V30 [16]byte + V31 [16]byte +} diff --git a/src/runtime/preempt_arm64.s b/src/runtime/preempt_arm64.s index 31ec9d940f76d4..9017d8815970b2 100644 --- a/src/runtime/preempt_arm64.s +++ b/src/runtime/preempt_arm64.s @@ -4,13 +4,14 @@ #include "textflag.h" TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0 - MOVD R30, -496(RSP) - SUB $496, RSP + MOVD R30, -240(RSP) + SUB $240, RSP MOVD R29, -8(RSP) SUB $8, RSP, R29 #ifdef GOOS_ios MOVD R30, (RSP) #endif + // Save GPs STP (R0, R1), 8(RSP) STP (R2, R3), 24(RSP) STP (R4, R5), 40(RSP) @@ -28,39 +29,32 @@ TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0 MOVD R0, 216(RSP) MOVD FPSR, R0 MOVD R0, 224(RSP) - FSTPD (F0, F1), 232(RSP) - FSTPD (F2, F3), 248(RSP) - FSTPD (F4, F5), 264(RSP) - FSTPD (F6, F7), 280(RSP) - FSTPD (F8, F9), 296(RSP) - FSTPD (F10, F11), 312(RSP) - FSTPD (F12, F13), 328(RSP) - FSTPD (F14, F15), 344(RSP) - FSTPD (F16, F17), 360(RSP) - FSTPD (F18, F19), 376(RSP) - FSTPD (F20, F21), 392(RSP) - FSTPD (F22, F23), 408(RSP) - FSTPD (F24, F25), 424(RSP) - FSTPD (F26, F27), 440(RSP) - FSTPD (F28, F29), 456(RSP) - FSTPD (F30, F31), 472(RSP) + // Save extended register state to p.xRegs.scratch + MOVD g_m(g), R0 + MOVD m_p(R0), R0 + ADD $(p_xRegs+xRegPerP_scratch), R0, R0 + VST1.P [V0.B16, V1.B16, V2.B16, V3.B16], 64(R0) + VST1.P [V4.B16, V5.B16, V6.B16, V7.B16], 64(R0) + VST1.P [V8.B16, V9.B16, V10.B16, V11.B16], 64(R0) + VST1.P [V12.B16, V13.B16, V14.B16, V15.B16], 64(R0) + VST1.P [V16.B16, V17.B16, V18.B16, V19.B16], 64(R0) + VST1.P 
[V20.B16, V21.B16, V22.B16, V23.B16], 64(R0) + VST1.P [V24.B16, V25.B16, V26.B16, V27.B16], 64(R0) + VST1.P [V28.B16, V29.B16, V30.B16, V31.B16], 64(R0) CALL ·asyncPreempt2(SB) - FLDPD 472(RSP), (F30, F31) - FLDPD 456(RSP), (F28, F29) - FLDPD 440(RSP), (F26, F27) - FLDPD 424(RSP), (F24, F25) - FLDPD 408(RSP), (F22, F23) - FLDPD 392(RSP), (F20, F21) - FLDPD 376(RSP), (F18, F19) - FLDPD 360(RSP), (F16, F17) - FLDPD 344(RSP), (F14, F15) - FLDPD 328(RSP), (F12, F13) - FLDPD 312(RSP), (F10, F11) - FLDPD 296(RSP), (F8, F9) - FLDPD 280(RSP), (F6, F7) - FLDPD 264(RSP), (F4, F5) - FLDPD 248(RSP), (F2, F3) - FLDPD 232(RSP), (F0, F1) + // Restore non-GPs from *p.xRegs.cache + MOVD g_m(g), R0 + MOVD m_p(R0), R0 + MOVD (p_xRegs+xRegPerP_cache)(R0), R0 + VLD1.P 64(R0), [V0.B16, V1.B16, V2.B16, V3.B16] + VLD1.P 64(R0), [V4.B16, V5.B16, V6.B16, V7.B16] + VLD1.P 64(R0), [V8.B16, V9.B16, V10.B16, V11.B16] + VLD1.P 64(R0), [V12.B16, V13.B16, V14.B16, V15.B16] + VLD1.P 64(R0), [V16.B16, V17.B16, V18.B16, V19.B16] + VLD1.P 64(R0), [V20.B16, V21.B16, V22.B16, V23.B16] + VLD1.P 64(R0), [V24.B16, V25.B16, V26.B16, V27.B16] + VLD1.P 64(R0), [V28.B16, V29.B16, V30.B16, V31.B16] + // Restore GPs MOVD 224(RSP), R0 MOVD R0, FPSR MOVD 216(RSP), R0 @@ -78,8 +72,8 @@ TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0 LDP 40(RSP), (R4, R5) LDP 24(RSP), (R2, R3) LDP 8(RSP), (R0, R1) - MOVD 496(RSP), R30 + MOVD 240(RSP), R30 MOVD -8(RSP), R29 MOVD (RSP), R27 - ADD $512, RSP + ADD $256, RSP RET (R27) diff --git a/src/runtime/preempt_noxreg.go b/src/runtime/preempt_noxreg.go index dfe46559b5b723..9f03b2b333420e 100644 --- a/src/runtime/preempt_noxreg.go +++ b/src/runtime/preempt_noxreg.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !amd64 +//go:build !amd64 && !arm64 // This provides common support for architectures that DO NOT use extended // register state in asynchronous preemption. diff --git a/src/runtime/preempt_xreg.go b/src/runtime/preempt_xreg.go index 9e05455ddbb747..f4578a4d76d889 100644 --- a/src/runtime/preempt_xreg.go +++ b/src/runtime/preempt_xreg.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build amd64 +//go:build amd64 || arm64 // This provides common support for architectures that use extended register // state in asynchronous preemption. From 6f4c63ba63fdec6e4a42e2e19ac71937973dedef Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Fri, 26 Sep 2025 13:33:09 -0400 Subject: [PATCH 117/152] cmd/go: unify "go fix" and "go vet" This change unifies the fix and vet subcommands; they use the same run function, action graph, and external tool (-vettool for go vet and -fixtool for go fix). go fix runs the tool with the -fix flag, whereas although go vet also supports -fix, it is not the default. The two tools have different (overlapping) suites of analyzers. The high-level parts are fully parameterized over the vet/fix distinction; the lower-level parts (the action graph) continue to use only the "vet" terminology. The cmd/{vet,fix} executable is referred to as the "tool". The tool is generally invoked in -json mode, regardless of whether -json was requested, so that the tool produces a cacheable JSON blob on stdout. When the go user did not request -json, this blob is parsed and printed to stderr by logic in the go vet command. (Formerly the tool would print diagnostics to stderr, but this interacts poorly with the build cache.) 
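For illustration only (this sketch is not part of the change; the package ID, analyzer output, and type names below are invented for the example), the JSON framing that the go command now relies on maps package ID to analyzer name to a list of diagnostics, matching the jsonTree type added later in this patch. A minimal decoder looks roughly like this:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    type diagnostic struct {
        Posn    string `json:"posn"` // "file.go:line:column"
        Message string `json:"message"`
    }

    func main() {
        // Sample blob in the unitchecker's -json shape:
        // package ID -> analyzer name -> (error | []diagnostic).
        blob := []byte(`{"example.com/x": {"hostport": [
            {"posn": "x.go:15:28", "message": "address format does not work with IPv6"}]}}`)

        var tree map[string]map[string]json.RawMessage
        if err := json.Unmarshal(blob, &tree); err != nil {
            panic(err)
        }
        for _, analyzers := range tree {
            for name, raw := range analyzers {
                var diags []diagnostic
                if json.Unmarshal(raw, &diags) != nil {
                    continue // an {"error": ...} value rather than a diagnostic list
                }
                for _, d := range diags {
                    // Reprint as "file:line:col: message", as go vet now does on stderr.
                    fmt.Printf("%s: %s (%s)\n", d.Posn, d.Message, name)
                }
            }
        }
    }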
go fix's legacy -fix=fixer,... flag is now a no-op that prints a warning that the flag is obsolete. The unitchecker's -c=n flag (to display n lines of context around each diagnostic) is reimplemented in go vet based on the JSON information, to avoid reliance on the stderr output of the tool. cmd/fix is added to dist's prebuilt set of tools since go fix cannot build it dynamically (though ideally it would). Updates #71859 For #75432 Change-Id: I0a84746720b59d05d662ed57826747c5598dca44 Reviewed-on: https://go-review.googlesource.com/c/go/+/700795 Reviewed-by: Michael Matloob Auto-Submit: Alan Donovan Reviewed-by: Michael Matloob TryBot-Bypass: Alan Donovan --- doc/next/3-tools.md | 10 + src/cmd/dist/build.go | 2 +- src/cmd/fix/doc.go | 20 -- src/cmd/fix/main.go | 66 ++-- src/cmd/go/alldocs.go | 60 ++-- src/cmd/go/internal/fix/fix.go | 86 ------ src/cmd/go/internal/test/test.go | 1 + src/cmd/go/internal/vet/vet.go | 371 +++++++++++++++++++++-- src/cmd/go/internal/vet/vetflag.go | 170 ++++++----- src/cmd/go/internal/work/buildid.go | 7 +- src/cmd/go/internal/work/exec.go | 81 +++-- src/cmd/go/main.go | 3 +- src/cmd/go/testdata/script/chdir.txt | 4 + src/cmd/go/testdata/script/vet_asm.txt | 8 +- src/cmd/go/testdata/script/vet_basic.txt | 125 ++++++++ src/cmd/go/testdata/script/vet_cache.txt | 24 ++ src/cmd/vet/doc.go | 2 + src/cmd/vet/main.go | 91 +++--- src/cmd/vet/testdata/print/print.go | 2 +- src/cmd/vet/vet_test.go | 16 +- 20 files changed, 818 insertions(+), 331 deletions(-) delete mode 100644 src/cmd/fix/doc.go delete mode 100644 src/cmd/go/internal/fix/fix.go create mode 100644 src/cmd/go/testdata/script/vet_basic.txt create mode 100644 src/cmd/go/testdata/script/vet_cache.txt diff --git a/doc/next/3-tools.md b/doc/next/3-tools.md index 9459a5490e7904..c0a4601c0b9e74 100644 --- a/doc/next/3-tools.md +++ b/doc/next/3-tools.md @@ -7,5 +7,15 @@ a replacement for `go tool doc`: it takes the same flags and arguments and has the same behavior. + +The `go fix` command, following the pattern of `go vet` in Go 1.10, +now uses the Go analysis framework (`golang.org/x/tools/go/analysis`). +This means the same analyzers that provide diagnostics in `go vet` +can be used to suggest and apply fixes in `go fix`. +The `go fix` command's historical fixers, all of which were obsolete, +have been removed and replaced by a suite of new analyzers that +offer fixes to use newer features of the language and library. + + ### Cgo {#cgo} diff --git a/src/cmd/dist/build.go b/src/cmd/dist/build.go index 9a7951726f6f04..2fcdb2d3915b9b 100644 --- a/src/cmd/dist/build.go +++ b/src/cmd/dist/build.go @@ -1397,7 +1397,7 @@ var ( binExesIncludedInDistpack = []string{"cmd/go", "cmd/gofmt"} // Keep in sync with the filter in cmd/distpack/pack.go. - toolsIncludedInDistpack = []string{"cmd/asm", "cmd/cgo", "cmd/compile", "cmd/cover", "cmd/link", "cmd/preprofile", "cmd/vet"} + toolsIncludedInDistpack = []string{"cmd/asm", "cmd/cgo", "cmd/compile", "cmd/cover", "cmd/fix", "cmd/link", "cmd/preprofile", "cmd/vet"} // We could install all tools in "cmd", but is unnecessary because we will // remove them in distpack, so instead install the tools that will actually diff --git a/src/cmd/fix/doc.go b/src/cmd/fix/doc.go deleted file mode 100644 index b3d69144717172..00000000000000 --- a/src/cmd/fix/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -/* -Fix finds Go programs that use old APIs and rewrites them to use -newer ones. After you update to a new Go release, fix helps make -the necessary changes to your programs. - -Usage: - - go tool fix [ignored...] - -This tool is currently in transition. All its historical fixers were -long obsolete and have been removed, so it is currently a no-op. In -due course the tool will integrate with the Go analysis framework -(golang.org/x/tools/go/analysis) and run a modern suite of fix -algorithms; see https://go.dev/issue/71859. -*/ -package main diff --git a/src/cmd/fix/main.go b/src/cmd/fix/main.go index 87cc0d6414601b..422fa827459900 100644 --- a/src/cmd/fix/main.go +++ b/src/cmd/fix/main.go @@ -1,31 +1,59 @@ -// Copyright 2011 The Go Authors. All rights reserved. +// Copyright 2025 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +/* +Fix is a tool executed by "go fix" to update Go programs that use old +features of the language and library and rewrite them to use newer +ones. After you update to a new Go release, fix helps make the +necessary changes to your programs. + +See the documentation for "go fix" for how to run this command. +You can provide an alternative tool using "go fix -fixtool=..." + +Run "go tool fix help" to see the list of analyzers supported by this +program. + +See [golang.org/x/tools/go/analysis] for information on how to write +an analyzer that can suggest fixes. +*/ package main import ( - "flag" - "fmt" - "os" -) + "cmd/internal/objabi" + "cmd/internal/telemetry/counter" -var ( - _ = flag.Bool("diff", false, "obsolete, no effect") - _ = flag.String("go", "", "obsolete, no effect") - _ = flag.String("r", "", "obsolete, no effect") - _ = flag.String("force", "", "obsolete, no effect") + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/buildtag" + "golang.org/x/tools/go/analysis/passes/hostport" + "golang.org/x/tools/go/analysis/unitchecker" ) -func usage() { - fmt.Fprintf(os.Stderr, "usage: go tool fix [-diff] [-r ignored] [-force ignored] ...\n") - flag.PrintDefaults() - os.Exit(2) -} - func main() { - flag.Usage = usage - flag.Parse() + // Keep consistent with cmd/vet/main.go! + counter.Open() + objabi.AddVersionFlag() + counter.Inc("fix/invocations") + + unitchecker.Main(suite...) // (never returns) +} - os.Exit(0) +// The fix suite analyzers produce fixes that are safe to apply. +// (Diagnostics may not describe actual problems, +// but their fixes must be unambiguously safe to apply.) +var suite = []*analysis.Analyzer{ + buildtag.Analyzer, + hostport.Analyzer, + // TODO(adonovan): now the modernize (proposal #75266) and + // inline (proposal #75267) analyzers are published, revendor + // x/tools and add them here. + // + // TODO(adonovan):add any other vet analyzers whose fixes are always safe. + // Candidates to audit: sigchanyzer, printf, assign, unreachable. + // Rejected: + // - composites: some types (e.g. PointXY{1,2}) don't want field names. + // - timeformat: flipping MM/DD is a behavior change, but the code + // could potentially be a workaround for another bug. + // - stringintconv: offers two fixes, user input required to choose. + // - fieldalignment: poor signal/noise; fix could be a regression. 
} diff --git a/src/cmd/go/alldocs.go b/src/cmd/go/alldocs.go index 51f2223283b175..67c0ecbe8b2fcf 100644 --- a/src/cmd/go/alldocs.go +++ b/src/cmd/go/alldocs.go @@ -18,7 +18,7 @@ // clean remove object files and cached files // doc show documentation for package or symbol // env print Go environment information -// fix update packages to use new APIs +// fix apply fixes suggested by static checkers // fmt gofmt (reformat) package sources // generate generate Go files by processing source // get add dependencies to current module and install them @@ -495,22 +495,34 @@ // // For more about environment variables, see 'go help environment'. // -// # Update packages to use new APIs +// # Apply fixes suggested by static checkers // // Usage: // -// go fix [-fix list] [packages] +// go fix [build flags] [-fixtool prog] [fix flags] [packages] // -// Fix runs the Go fix command on the packages named by the import paths. +// Fix runs the Go fix tool (cmd/vet) on the named packages +// and applies suggested fixes. // -// The -fix flag sets a comma-separated list of fixes to run. -// The default is all known fixes. -// (Its value is passed to 'go tool fix -r'.) +// It supports these flags: +// +// -diff +// instead of applying each fix, print the patch as a unified diff +// +// The -fixtool=prog flag selects a different analysis tool with +// alternative or additional fixes; see the documentation for go vet's +// -vettool flag for details. // -// For more about fix, see 'go doc cmd/fix'. // For more about specifying packages, see 'go help packages'. // -// To run fix with other options, run 'go tool fix'. +// For a list of fixers and their flags, see 'go tool fix help'. +// +// For details of a specific fixer such as 'hostport', +// see 'go tool fix help hostport'. +// +// The build flags supported by go fix are those that control package resolution +// and execution, such as -C, -n, -x, -v, -tags, and -toolexec. +// For more about these flags, see 'go help build'. // // See also: go fmt, go vet. // @@ -2014,20 +2026,34 @@ // // go vet [build flags] [-vettool prog] [vet flags] [packages] // -// Vet runs the Go vet command on the packages named by the import paths. +// Vet runs the Go vet tool (cmd/vet) on the named packages +// and reports diagnostics. // -// For more about vet and its flags, see 'go doc cmd/vet'. -// For more about specifying packages, see 'go help packages'. -// For a list of checkers and their flags, see 'go tool vet help'. -// For details of a specific checker such as 'printf', see 'go tool vet help printf'. +// It supports these flags: +// +// -c int +// display offending line with this many lines of context (default -1) +// -json +// emit JSON output +// -fix +// instead of printing each diagnostic, apply its first fix (if any) +// -diff +// instead of applying each fix, print the patch as a unified diff // -// The -vettool=prog flag selects a different analysis tool with alternative -// or additional checks. -// For example, the 'shadow' analyzer can be built and run using these commands: +// The -vettool=prog flag selects a different analysis tool with +// alternative or additional checks. For example, the 'shadow' analyzer +// can be built and run using these commands: // // go install golang.org/x/tools/go/analysis/passes/shadow/cmd/shadow@latest // go vet -vettool=$(which shadow) // +// Alternative vet tools should be built atop golang.org/x/tools/go/analysis/unitchecker, +// which handles the interaction with go vet. 
+// +// For more about specifying packages, see 'go help packages'. +// For a list of checkers and their flags, see 'go tool vet help'. +// For details of a specific checker such as 'printf', see 'go tool vet help printf'. +// // The build flags supported by go vet are those that control package resolution // and execution, such as -C, -n, -x, -v, -tags, and -toolexec. // For more about these flags, see 'go help build'. diff --git a/src/cmd/go/internal/fix/fix.go b/src/cmd/go/internal/fix/fix.go deleted file mode 100644 index 8947da05c3ee63..00000000000000 --- a/src/cmd/go/internal/fix/fix.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package fix implements the “go fix” command. -package fix - -import ( - "cmd/go/internal/base" - "cmd/go/internal/cfg" - "cmd/go/internal/load" - "cmd/go/internal/modload" - "cmd/go/internal/str" - "cmd/go/internal/work" - "context" - "fmt" - "go/build" - "os" - "path/filepath" -) - -var CmdFix = &base.Command{ - UsageLine: "go fix [-fix list] [packages]", - Short: "update packages to use new APIs", - Long: ` -Fix runs the Go fix command on the packages named by the import paths. - -The -fix flag sets a comma-separated list of fixes to run. -The default is all known fixes. -(Its value is passed to 'go tool fix -r'.) - -For more about fix, see 'go doc cmd/fix'. -For more about specifying packages, see 'go help packages'. - -To run fix with other options, run 'go tool fix'. - -See also: go fmt, go vet. - `, -} - -var fixes = CmdFix.Flag.String("fix", "", "comma-separated list of fixes to apply") - -func init() { - work.AddBuildFlags(CmdFix, work.OmitBuildOnlyFlags) - CmdFix.Run = runFix // fix cycle -} - -func runFix(ctx context.Context, cmd *base.Command, args []string) { - pkgs := load.PackagesAndErrors(ctx, load.PackageOpts{}, args) - w := 0 - for _, pkg := range pkgs { - if pkg.Error != nil { - base.Errorf("%v", pkg.Error) - continue - } - pkgs[w] = pkg - w++ - } - pkgs = pkgs[:w] - - printed := false - for _, pkg := range pkgs { - if modload.Enabled() && pkg.Module != nil && !pkg.Module.Main { - if !printed { - fmt.Fprintf(os.Stderr, "go: not fixing packages in dependency modules\n") - printed = true - } - continue - } - // Use pkg.gofiles instead of pkg.Dir so that - // the command only applies to this package, - // not to packages in subdirectories. 
- files := base.RelPaths(pkg.InternalAllGoFiles()) - goVersion := "" - if pkg.Module != nil { - goVersion = "go" + pkg.Module.GoVersion - } else if pkg.Standard { - goVersion = build.Default.ReleaseTags[len(build.Default.ReleaseTags)-1] - } - var fixArg []string - if *fixes != "" { - fixArg = []string{"-r=" + *fixes} - } - base.Run(str.StringList(cfg.BuildToolexec, filepath.Join(cfg.GOROOTbin, "go"), "tool", "fix", "-go="+goVersion, fixArg, files)) - } -} diff --git a/src/cmd/go/internal/test/test.go b/src/cmd/go/internal/test/test.go index 15ffc618c65dab..e667bd64f7997f 100644 --- a/src/cmd/go/internal/test/test.go +++ b/src/cmd/go/internal/test/test.go @@ -707,6 +707,7 @@ func runTest(ctx context.Context, cmd *base.Command, args []string) { work.BuildInit() work.VetFlags = testVet.flags work.VetExplicit = testVet.explicit + work.VetTool = base.Tool("vet") pkgOpts := load.PackageOpts{ModResolveTests: true} pkgs = load.PackagesAndErrors(ctx, pkgOpts, pkgArgs) diff --git a/src/cmd/go/internal/vet/vet.go b/src/cmd/go/internal/vet/vet.go index 3514be80feb8cc..a2625b21188cf4 100644 --- a/src/cmd/go/internal/vet/vet.go +++ b/src/cmd/go/internal/vet/vet.go @@ -2,13 +2,20 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package vet implements the “go vet” command. +// Package vet implements the “go vet” and “go fix” commands. package vet import ( "context" + "encoding/json" + "errors" "fmt" - "path/filepath" + "io" + "os" + "slices" + "strconv" + "strings" + "sync" "cmd/go/internal/base" "cmd/go/internal/cfg" @@ -18,30 +25,39 @@ import ( "cmd/go/internal/work" ) -// Break init loop. -func init() { - CmdVet.Run = runVet -} - var CmdVet = &base.Command{ CustomFlags: true, UsageLine: "go vet [build flags] [-vettool prog] [vet flags] [packages]", Short: "report likely mistakes in packages", Long: ` -Vet runs the Go vet command on the packages named by the import paths. +Vet runs the Go vet tool (cmd/vet) on the named packages +and reports diagnostics. -For more about vet and its flags, see 'go doc cmd/vet'. -For more about specifying packages, see 'go help packages'. -For a list of checkers and their flags, see 'go tool vet help'. -For details of a specific checker such as 'printf', see 'go tool vet help printf'. +It supports these flags: -The -vettool=prog flag selects a different analysis tool with alternative -or additional checks. -For example, the 'shadow' analyzer can be built and run using these commands: + -c int + display offending line with this many lines of context (default -1) + -json + emit JSON output + -fix + instead of printing each diagnostic, apply its first fix (if any) + -diff + instead of applying each fix, print the patch as a unified diff + +The -vettool=prog flag selects a different analysis tool with +alternative or additional checks. For example, the 'shadow' analyzer +can be built and run using these commands: go install golang.org/x/tools/go/analysis/passes/shadow/cmd/shadow@latest go vet -vettool=$(which shadow) +Alternative vet tools should be built atop golang.org/x/tools/go/analysis/unitchecker, +which handles the interaction with go vet. + +For more about specifying packages, see 'go help packages'. +For a list of checkers and their flags, see 'go tool vet help'. +For details of a specific checker such as 'printf', see 'go tool vet help printf'. + The build flags supported by go vet are those that control package resolution and execution, such as -C, -n, -x, -v, -tags, and -toolexec. 
For more about these flags, see 'go help build'. @@ -50,9 +66,64 @@ See also: go fmt, go fix. `, } -func runVet(ctx context.Context, cmd *base.Command, args []string) { - vetFlags, pkgArgs := vetFlags(args) - modload.InitWorkfile() // The vet command does custom flag processing; initialize workspaces after that. +var CmdFix = &base.Command{ + CustomFlags: true, + UsageLine: "go fix [build flags] [-fixtool prog] [fix flags] [packages]", + Short: "apply fixes suggested by static checkers", + Long: ` +Fix runs the Go fix tool (cmd/vet) on the named packages +and applies suggested fixes. + +It supports these flags: + + -diff + instead of applying each fix, print the patch as a unified diff + +The -fixtool=prog flag selects a different analysis tool with +alternative or additional fixes; see the documentation for go vet's +-vettool flag for details. + +For more about specifying packages, see 'go help packages'. + +For a list of fixers and their flags, see 'go tool fix help'. + +For details of a specific fixer such as 'hostport', +see 'go tool fix help hostport'. + +The build flags supported by go fix are those that control package resolution +and execution, such as -C, -n, -x, -v, -tags, and -toolexec. +For more about these flags, see 'go help build'. + +See also: go fmt, go vet. + `, +} + +func init() { + // avoid initialization cycle + CmdVet.Run = run + CmdFix.Run = run + + addFlags(CmdVet) + addFlags(CmdFix) +} + +var ( + // "go vet -fix" causes fixes to be applied. + vetFixFlag = CmdVet.Flag.Bool("fix", false, "apply the first fix (if any) for each diagnostic") + + // The "go fix -fix=name,..." flag is an obsolete flag formerly + // used to pass a list of names to the old "cmd/fix -r". + fixFixFlag = CmdFix.Flag.String("fix", "", "obsolete; no effect") +) + +// run implements both "go vet" and "go fix". +func run(ctx context.Context, cmd *base.Command, args []string) { + // Compute flags for the vet/fix tool (e.g. cmd/{vet,fix}). + toolFlags, pkgArgs := toolFlags(cmd, args) + + // The vet/fix commands do custom flag processing; + // initialize workspaces after that. + modload.InitWorkfile() if cfg.DebugTrace != "" { var close func() error @@ -72,23 +143,84 @@ func runVet(ctx context.Context, cmd *base.Command, args []string) { defer span.Done() work.BuildInit() - work.VetFlags = vetFlags - if len(vetFlags) > 0 { - work.VetExplicit = true + + // Flag theory: + // + // All flags supported by unitchecker are accepted by go {vet,fix}. + // Some arise from each analyzer in the tool (both to enable it + // and to configure it), whereas others [-V -c -diff -fix -flags -json] + // are core to unitchecker itself. + // + // Most are passed through to toolFlags, but not all: + // * -V and -flags are used by the handshake in the [toolFlags] function; + // * these old flags have no effect: [-all -source -tags -v]; and + // * the [-c -fix -diff -json] flags are handled specially + // as described below: + // + // command args tool args + // go vet => cmd/vet -json Parse stdout, print diagnostics to stderr. + // go vet -json => cmd/vet -json Pass stdout through. + // go vet -fix [-diff] => cmd/vet -fix [-diff] Pass stdout through. + // go fix [-diff] => cmd/fix -fix [-diff] Pass stdout through. + // go fix -json => cmd/fix -json Pass stdout through. + // + // Notes: + // * -diff requires "go vet -fix" or "go fix", and no -json. + // * -json output is the same in "vet" and "fix" modes, + // and describes both diagnostics and fixes (but does not apply them). 
+ // * -c=n is supported by the unitchecker, but we reimplement it + // here (see printDiagnostics), and do not pass the flag through. + + work.VetExplicit = len(toolFlags) > 0 + + if cmd.Name() == "fix" || *vetFixFlag { + // fix mode: 'go fix' or 'go vet -fix' + if jsonFlag { + if diffFlag { + base.Fatalf("-json and -diff cannot be used together") + } + } else { + toolFlags = append(toolFlags, "-fix") + if diffFlag { + toolFlags = append(toolFlags, "-diff") + } + } + if contextFlag != -1 { + base.Fatalf("-c flag cannot be used when applying fixes") + } + } else { + // vet mode: 'go vet' without -fix + if !jsonFlag { + // Post-process the JSON diagnostics on stdout and format + // it as "file:line: message" diagnostics on stderr. + // (JSON reliably frames diagnostics, fixes, and errors so + // that we don't have to parse stderr or interpret non-zero + // exit codes, and interacts better with the action cache.) + toolFlags = append(toolFlags, "-json") + work.VetHandleStdout = printJSONDiagnostics + } + if diffFlag { + base.Fatalf("go vet -diff flag requires -fix") + } } - if vetTool != "" { - var err error - work.VetTool, err = filepath.Abs(vetTool) - if err != nil { - base.Fatalf("%v", err) + + // Implement legacy "go fix -fix=name,..." flag. + if *fixFixFlag != "" { + fmt.Fprintf(os.Stderr, "go %s: the -fix=%s flag is obsolete and has no effect", cmd.Name(), *fixFixFlag) + + // The buildtag fixer is now implemented by cmd/fix. + if slices.Contains(strings.Split(*fixFixFlag, ","), "buildtag") { + fmt.Fprintf(os.Stderr, "go %s: to enable the buildtag check, use -buildtag", cmd.Name()) } } + work.VetFlags = toolFlags + pkgOpts := load.PackageOpts{ModResolveTests: true} pkgs := load.PackagesAndErrors(ctx, pkgOpts, pkgArgs) load.CheckPackageErrors(pkgs) if len(pkgs) == 0 { - base.Fatalf("no packages to vet") + base.Fatalf("no packages to %s", cmd.Name()) } b := work.NewBuilder("") @@ -98,7 +230,23 @@ func runVet(ctx context.Context, cmd *base.Command, args []string) { } }() - root := &work.Action{Mode: "go vet"} + // To avoid file corruption from duplicate application of + // fixes (in fix mode), and duplicate reporting of diagnostics + // (in vet mode), we must run the tool only once for each + // source file. We achieve that by running on ptest (below) + // instead of p. + // + // As a side benefit, this also allows analyzers to make + // "closed world" assumptions and report diagnostics (such as + // "this symbol is unused") that might be false if computed + // from just the primary package p, falsified by the + // additional declarations in test files. + // + // We needn't worry about intermediate test variants, as they + // will only be executed in VetxOnly mode, for facts but not + // diagnostics. + + root := &work.Action{Mode: "go " + cmd.Name()} for _, p := range pkgs { _, ptest, pxtest, perr := load.TestPackagesFor(ctx, pkgOpts, p, nil) if perr != nil { @@ -106,10 +254,11 @@ func runVet(ctx context.Context, cmd *base.Command, args []string) { continue } if len(ptest.GoFiles) == 0 && len(ptest.CgoFiles) == 0 && pxtest == nil { - base.Errorf("go: can't vet %s: no Go files in %s", p.ImportPath, p.Dir) + base.Errorf("go: can't %s %s: no Go files in %s", cmd.Name(), p.ImportPath, p.Dir) continue } if len(ptest.GoFiles) > 0 || len(ptest.CgoFiles) > 0 { + // The test package includes all the files of primary package. 
root.Deps = append(root.Deps, b.VetAction(work.ModeBuild, work.ModeBuild, ptest)) } if pxtest != nil { @@ -118,3 +267,167 @@ func runVet(ctx context.Context, cmd *base.Command, args []string) { } b.Do(ctx, root) } + +// printJSONDiagnostics parses JSON (from the tool's stdout) and +// prints it (to stderr) in "file:line: message" form. +// It also ensures that we exit nonzero if there were diagnostics. +func printJSONDiagnostics(r io.Reader) error { + stdout, err := io.ReadAll(r) + if err != nil { + return err + } + if len(stdout) > 0 { + // unitchecker emits a JSON map of the form: + // output maps Package ID -> Analyzer.Name -> (error | []Diagnostic); + var tree jsonTree + if err := json.Unmarshal([]byte(stdout), &tree); err != nil { + return fmt.Errorf("parsing JSON: %v", err) + } + for _, units := range tree { + for analyzer, msg := range units { + if msg[0] == '[' { + // []Diagnostic + var diags []jsonDiagnostic + if err := json.Unmarshal([]byte(msg), &diags); err != nil { + return fmt.Errorf("parsing JSON diagnostics: %v", err) + } + for _, diag := range diags { + base.SetExitStatus(1) + printJSONDiagnostic(analyzer, diag) + } + } else { + // error + var e jsonError + if err := json.Unmarshal([]byte(msg), &e); err != nil { + return fmt.Errorf("parsing JSON error: %v", err) + } + + base.SetExitStatus(1) + return errors.New(e.Err) + } + } + } + } + return nil +} + +var stderrMu sync.Mutex // serializes concurrent writes to stdout + +func printJSONDiagnostic(analyzer string, diag jsonDiagnostic) { + stderrMu.Lock() + defer stderrMu.Unlock() + + type posn struct { + file string + line, col int + } + parsePosn := func(s string) (_ posn, _ bool) { + colon2 := strings.LastIndexByte(s, ':') + if colon2 < 0 { + return + } + colon1 := strings.LastIndexByte(s[:colon2], ':') + if colon1 < 0 { + return + } + line, err := strconv.Atoi(s[colon1+len(":") : colon2]) + if err != nil { + return + } + col, err := strconv.Atoi(s[colon2+len(":"):]) + if err != nil { + return + } + return posn{s[:colon1], line, col}, true + } + + print := func(start, end, message string) { + if posn, ok := parsePosn(start); ok { + // The (*work.Shell).reportCmd method relativizes the + // prefix of each line of the subprocess's stdout; + // but filenames in JSON aren't at the start of the line, + // so we need to apply ShortPath here too. + fmt.Fprintf(os.Stderr, "%s:%d:%d: %v\n", base.ShortPath(posn.file), posn.line, posn.col, message) + } else { + fmt.Fprintf(os.Stderr, "%s: %v\n", start, message) + } + + // -c=n: show offending line plus N lines of context. + // (Duplicates logic in unitchecker; see analysisflags.PrintPlain.) + if contextFlag >= 0 { + if end == "" { + end = start + } + var ( + startPosn, ok1 = parsePosn(start) + endPosn, ok2 = parsePosn(end) + ) + if ok1 && ok2 { + // TODO(adonovan): respect overlays (like unitchecker does). + data, _ := os.ReadFile(startPosn.file) + lines := strings.Split(string(data), "\n") + for i := startPosn.line - contextFlag; i <= endPosn.line+contextFlag; i++ { + if 1 <= i && i <= len(lines) { + fmt.Fprintf(os.Stderr, "%d\t%s\n", i, lines[i-1]) + } + } + } + } + } + + // TODO(adonovan): append " [analyzer]" to message. But we must first relax + // x/tools/go/analysis/internal/versiontest.TestVettool and revendor; sigh. 
+ _ = analyzer + print(diag.Posn, diag.End, diag.Message) + for _, rel := range diag.Related { + print(rel.Posn, rel.End, "\t"+rel.Message) + } +} + +// -- JSON schema -- + +// (populated by golang.org/x/tools/go/analysis/internal/analysisflags/flags.go) + +// A jsonTree is a mapping from package ID to analysis name to result. +// Each result is either a jsonError or a list of jsonDiagnostic. +type jsonTree map[string]map[string]json.RawMessage + +type jsonError struct { + Err string `json:"error"` +} + +// A TextEdit describes the replacement of a portion of a file. +// Start and End are zero-based half-open indices into the original byte +// sequence of the file, and New is the new text. +type jsonTextEdit struct { + Filename string `json:"filename"` + Start int `json:"start"` + End int `json:"end"` + New string `json:"new"` +} + +// A jsonSuggestedFix describes an edit that should be applied as a whole or not +// at all. It might contain multiple TextEdits/text_edits if the SuggestedFix +// consists of multiple non-contiguous edits. +type jsonSuggestedFix struct { + Message string `json:"message"` + Edits []jsonTextEdit `json:"edits"` +} + +// A jsonDiagnostic describes the json schema of an analysis.Diagnostic. +type jsonDiagnostic struct { + Category string `json:"category,omitempty"` + Posn string `json:"posn"` // e.g. "file.go:line:column" + End string `json:"end"` + Message string `json:"message"` + SuggestedFixes []jsonSuggestedFix `json:"suggested_fixes,omitempty"` + Related []jsonRelatedInformation `json:"related,omitempty"` +} + +// A jsonRelated describes a secondary position and message related to +// a primary diagnostic. +type jsonRelatedInformation struct { + Posn string `json:"posn"` // e.g. "file.go:line:column" + End string `json:"end"` + Message string `json:"message"` +} diff --git a/src/cmd/go/internal/vet/vetflag.go b/src/cmd/go/internal/vet/vetflag.go index d0bdb58a504ae7..7ebd8c9bfd19eb 100644 --- a/src/cmd/go/internal/vet/vetflag.go +++ b/src/cmd/go/internal/vet/vetflag.go @@ -21,70 +21,83 @@ import ( "cmd/go/internal/work" ) -// go vet flag processing -// -// We query the flags of the tool specified by -vettool and accept any -// of those flags plus any flag valid for 'go build'. The tool must -// support -flags, which prints a description of its flags in JSON to -// stdout. - -// vetTool specifies the vet command to run. -// Any tool that supports the (still unpublished) vet -// command-line protocol may be supplied; see -// golang.org/x/tools/go/analysis/unitchecker for one -// implementation. It is also used by tests. -// -// The default behavior (vetTool=="") runs 'go tool vet'. -var vetTool string // -vettool - -func init() { - // For now, we omit the -json flag for vet because we could plausibly - // support -json specific to the vet command in the future (perhaps using - // the same format as build -json). - work.AddBuildFlags(CmdVet, work.OmitJSONFlag) - CmdVet.Flag.StringVar(&vetTool, "vettool", "", "") +// go vet/fix flag processing +var ( + // We query the flags of the tool specified by -{vet,fix}tool + // and accept any of those flags plus any flag valid for 'go + // build'. The tool must support -flags, which prints a + // description of its flags in JSON to stdout. + + // toolFlag specifies the vet/fix command to run. + // Any toolFlag that supports the (unpublished) vet + // command-line protocol may be supplied; see + // golang.org/x/tools/go/analysis/unitchecker for the + // sole implementation. It is also used by tests. 
+ // + // The default behavior ("") runs 'go tool {vet,fix}'. + // + // Do not access this flag directly; use [parseToolFlag]. + toolFlag string // -{vet,fix}tool + diffFlag bool // -diff + jsonFlag bool // -json + contextFlag = -1 // -c=n +) + +func addFlags(cmd *base.Command) { + // We run the compiler for export data. + // Suppress the build -json flag; we define our own. + work.AddBuildFlags(cmd, work.OmitJSONFlag) + + cmd.Flag.StringVar(&toolFlag, cmd.Name()+"tool", "", "") // -vettool or -fixtool + cmd.Flag.BoolVar(&diffFlag, "diff", false, "print diff instead of applying it") + cmd.Flag.BoolVar(&jsonFlag, "json", false, "print diagnostics and fixes as JSON") + cmd.Flag.IntVar(&contextFlag, "c", -1, "display offending line with this many lines of context") } -func parseVettoolFlag(args []string) { - // Extract -vettool by ad hoc flag processing: +// parseToolFlag scans args for -{vet,fix}tool and returns the effective tool filename. +func parseToolFlag(cmd *base.Command, args []string) string { + toolFlagName := cmd.Name() + "tool" // vettool or fixtool + + // Extract -{vet,fix}tool by ad hoc flag processing: // its value is needed even before we can declare // the flags available during main flag processing. for i, arg := range args { - if arg == "-vettool" || arg == "--vettool" { + if arg == "-"+toolFlagName || arg == "--"+toolFlagName { if i+1 >= len(args) { log.Fatalf("%s requires a filename", arg) } - vetTool = args[i+1] - return - } else if strings.HasPrefix(arg, "-vettool=") || - strings.HasPrefix(arg, "--vettool=") { - vetTool = arg[strings.IndexByte(arg, '=')+1:] - return + toolFlag = args[i+1] + break + } else if strings.HasPrefix(arg, "-"+toolFlagName+"=") || + strings.HasPrefix(arg, "--"+toolFlagName+"=") { + toolFlag = arg[strings.IndexByte(arg, '=')+1:] + break } } -} -// vetFlags processes the command line, splitting it at the first non-flag -// into the list of flags and list of packages. -func vetFlags(args []string) (passToVet, packageNames []string) { - parseVettoolFlag(args) - - // Query the vet command for its flags. - var tool string - if vetTool == "" { - tool = base.Tool("vet") - } else { - var err error - tool, err = filepath.Abs(vetTool) + if toolFlag != "" { + tool, err := filepath.Abs(toolFlag) if err != nil { log.Fatal(err) } + return tool } + + return base.Tool(cmd.Name()) // default to 'go tool vet|fix' +} + +// toolFlags processes the command line, splitting it at the first non-flag +// into the list of flags and list of packages. +func toolFlags(cmd *base.Command, args []string) (passToTool, packageNames []string) { + tool := parseToolFlag(cmd, args) + work.VetTool = tool + + // Query the tool for its flags. out := new(bytes.Buffer) - vetcmd := exec.Command(tool, "-flags") - vetcmd.Stdout = out - if err := vetcmd.Run(); err != nil { - fmt.Fprintf(os.Stderr, "go: can't execute %s -flags: %v\n", tool, err) + toolcmd := exec.Command(tool, "-flags") + toolcmd.Stdout = out + if err := toolcmd.Run(); err != nil { + fmt.Fprintf(os.Stderr, "go: %s -flags failed: %v\n", tool, err) base.SetExitStatus(2) base.Exit() } @@ -99,15 +112,20 @@ func vetFlags(args []string) (passToVet, packageNames []string) { base.Exit() } - // Add vet's flags to CmdVet.Flag. + // Add tool's flags to cmd.Flag. // - // Some flags, in particular -tags and -v, are known to vet but + // Some flags, in particular -tags and -v, are known to the tool but // also defined as build flags. This works fine, so we omit duplicates here. 
- // However some, like -x, are known to the build but not to vet. - isVetFlag := make(map[string]bool, len(analysisFlags)) - cf := CmdVet.Flag + // However some, like -x, are known to the build but not to the tool. + isToolFlag := make(map[string]bool, len(analysisFlags)) + cf := cmd.Flag for _, f := range analysisFlags { - isVetFlag[f.Name] = true + // We reimplement the unitchecker's -c=n flag. + // Don't allow it to be passed through. + if f.Name == "c" { + continue + } + isToolFlag[f.Name] = true if cf.Lookup(f.Name) == nil { if f.Bool { cf.Bool(f.Name, false, "") @@ -117,22 +135,22 @@ func vetFlags(args []string) (passToVet, packageNames []string) { } } - // Record the set of vet tool flags set by GOFLAGS. We want to pass them to - // the vet tool, but only if they aren't overridden by an explicit argument. - base.SetFromGOFLAGS(&CmdVet.Flag) + // Record the set of tool flags set by GOFLAGS. We want to pass them to + // the tool, but only if they aren't overridden by an explicit argument. + base.SetFromGOFLAGS(&cmd.Flag) addFromGOFLAGS := map[string]bool{} - CmdVet.Flag.Visit(func(f *flag.Flag) { - if isVetFlag[f.Name] { + cmd.Flag.Visit(func(f *flag.Flag) { + if isToolFlag[f.Name] { addFromGOFLAGS[f.Name] = true } }) explicitFlags := make([]string, 0, len(args)) for len(args) > 0 { - f, remainingArgs, err := cmdflag.ParseOne(&CmdVet.Flag, args) + f, remainingArgs, err := cmdflag.ParseOne(&cmd.Flag, args) if errors.Is(err, flag.ErrHelp) { - exitWithUsage() + exitWithUsage(cmd) } if errors.Is(err, cmdflag.ErrFlagTerminator) { @@ -151,12 +169,12 @@ func vetFlags(args []string) (passToVet, packageNames []string) { if err != nil { fmt.Fprintln(os.Stderr, err) - exitWithUsage() + exitWithUsage(cmd) } - if isVetFlag[f.Name] { + if isToolFlag[f.Name] { // Forward the raw arguments rather than cleaned equivalents, just in - // case the vet tool parses them idiosyncratically. + // case the tool parses them idiosyncratically. explicitFlags = append(explicitFlags, args[:len(args)-len(remainingArgs)]...) // This flag has been overridden explicitly, so don't forward its implicit @@ -168,26 +186,26 @@ func vetFlags(args []string) (passToVet, packageNames []string) { } // Prepend arguments from GOFLAGS before other arguments. - CmdVet.Flag.Visit(func(f *flag.Flag) { + cmd.Flag.Visit(func(f *flag.Flag) { if addFromGOFLAGS[f.Name] { - passToVet = append(passToVet, fmt.Sprintf("-%s=%s", f.Name, f.Value)) + passToTool = append(passToTool, fmt.Sprintf("-%s=%s", f.Name, f.Value)) } }) - passToVet = append(passToVet, explicitFlags...) - return passToVet, packageNames + passToTool = append(passToTool, explicitFlags...) 
+ return passToTool, packageNames } -func exitWithUsage() { - fmt.Fprintf(os.Stderr, "usage: %s\n", CmdVet.UsageLine) - fmt.Fprintf(os.Stderr, "Run 'go help %s' for details.\n", CmdVet.LongName()) +func exitWithUsage(cmd *base.Command) { + fmt.Fprintf(os.Stderr, "usage: %s\n", cmd.UsageLine) + fmt.Fprintf(os.Stderr, "Run 'go help %s' for details.\n", cmd.LongName()) // This part is additional to what (*Command).Usage does: - cmd := "go tool vet" - if vetTool != "" { - cmd = vetTool + tool := toolFlag + if tool == "" { + tool = "go tool " + cmd.Name() } - fmt.Fprintf(os.Stderr, "Run '%s help' for a full list of flags and analyzers.\n", cmd) - fmt.Fprintf(os.Stderr, "Run '%s -help' for an overview.\n", cmd) + fmt.Fprintf(os.Stderr, "Run '%s help' for a full list of flags and analyzers.\n", tool) + fmt.Fprintf(os.Stderr, "Run '%s -help' for an overview.\n", tool) base.SetExitStatus(2) base.Exit() diff --git a/src/cmd/go/internal/work/buildid.go b/src/cmd/go/internal/work/buildid.go index 88c24b11acc172..584c1ac6f41d23 100644 --- a/src/cmd/go/internal/work/buildid.go +++ b/src/cmd/go/internal/work/buildid.go @@ -148,9 +148,10 @@ func (b *Builder) toolID(name string) string { path := base.Tool(name) desc := "go tool " + name - // Special case: undocumented -vettool overrides usual vet, - // for testing vet or supplying an alternative analysis tool. - if name == "vet" && VetTool != "" { + // Special case: -{vet,fix}tool overrides usual cmd/{vet,fix} + // for testing or supplying an alternative analysis tool. + // (We use only "vet" terminology in the action graph.) + if name == "vet" { path = VetTool desc = VetTool } diff --git a/src/cmd/go/internal/work/exec.go b/src/cmd/go/internal/work/exec.go index 72b9177c9dbbeb..fa6ddce24bed3c 100644 --- a/src/cmd/go/internal/work/exec.go +++ b/src/cmd/go/internal/work/exec.go @@ -1265,7 +1265,8 @@ func buildVetConfig(a *Action, srcfiles []string) { } } -// VetTool is the path to an alternate vet tool binary. +// VetTool is the path to the effective vet or fix tool binary. +// The user may specify a non-default value using -{vet,fix}tool. // The caller is expected to set it (if needed) before executing any vet actions. var VetTool string @@ -1273,7 +1274,13 @@ var VetTool string // The caller is expected to set them before executing any vet actions. var VetFlags []string -// VetExplicit records whether the vet flags were set explicitly on the command line. +// VetHandleStdout determines how the stdout output of each vet tool +// invocation should be handled. The default behavior is to copy it to +// the go command's stdout, atomically. +var VetHandleStdout = copyToStdout + +// VetExplicit records whether the vet flags (which may include +// -{vet,fix}tool) were set explicitly on the command line. var VetExplicit bool func (b *Builder) vet(ctx context.Context, a *Action) error { @@ -1296,6 +1303,7 @@ func (b *Builder) vet(ctx context.Context, a *Action) error { sh := b.Shell(a) + // We use "vet" terminology even when building action graphs for go fix. vcfg.VetxOnly = a.VetxOnly vcfg.VetxOutput = a.Objdir + "vet.out" vcfg.Stdout = a.Objdir + "vet.stdout" @@ -1322,7 +1330,7 @@ func (b *Builder) vet(ctx context.Context, a *Action) error { // dependency tree turn on *more* analysis, as here. // (The unsafeptr check does not write any facts for use by // later vet runs, nor does unreachable.) - if a.Package.Goroot && !VetExplicit && VetTool == "" { + if a.Package.Goroot && !VetExplicit && VetTool == base.Tool("vet") { // Turn off -unsafeptr checks. 
// There's too much unsafe.Pointer code // that vet doesn't like in low-level packages @@ -1359,13 +1367,29 @@ func (b *Builder) vet(ctx context.Context, a *Action) error { vcfg.PackageVetx[a1.Package.ImportPath] = a1.built } } - key := cache.ActionID(h.Sum()) + vetxKey := cache.ActionID(h.Sum()) // for .vetx file + + fmt.Fprintf(h, "stdout\n") + stdoutKey := cache.ActionID(h.Sum()) // for .stdout file - if vcfg.VetxOnly && !cfg.BuildA { + // Check the cache; -a forces a rebuild. + if !cfg.BuildA { c := cache.Default() - if file, _, err := cache.GetFile(c, key); err == nil { - a.built = file - return nil + if vcfg.VetxOnly { + if file, _, err := cache.GetFile(c, vetxKey); err == nil { + a.built = file + return nil + } + } else { + // Copy cached vet.std files to stdout. + if file, _, err := cache.GetFile(c, stdoutKey); err == nil { + f, err := os.Open(file) + if err != nil { + return err + } + defer f.Close() // ignore error (can't fail) + return VetHandleStdout(f) + } } } @@ -1387,31 +1411,46 @@ func (b *Builder) vet(ctx context.Context, a *Action) error { p := a.Package tool := VetTool if tool == "" { - tool = base.Tool("vet") + panic("VetTool unset") + } + + if err := sh.run(p.Dir, p.ImportPath, env, cfg.BuildToolexec, tool, vetFlags, a.Objdir+"vet.cfg"); err != nil { + return err } - runErr := sh.run(p.Dir, p.ImportPath, env, cfg.BuildToolexec, tool, vetFlags, a.Objdir+"vet.cfg") - // If vet wrote export data, save it for input to future vets. + // Vet tool succeeded, possibly with facts and JSON stdout. Save both in cache. + + // Save facts if f, err := os.Open(vcfg.VetxOutput); err == nil { + defer f.Close() // ignore error a.built = vcfg.VetxOutput - cache.Default().Put(key, f) // ignore error - f.Close() // ignore error + cache.Default().Put(vetxKey, f) // ignore error } - // If vet wrote to stdout, copy it to go's stdout, atomically. + // Save stdout. if f, err := os.Open(vcfg.Stdout); err == nil { - stdoutMu.Lock() - if _, err := io.Copy(os.Stdout, f); err != nil && runErr == nil { - runErr = fmt.Errorf("copying vet tool stdout: %w", err) + defer f.Close() // ignore error + if err := VetHandleStdout(f); err != nil { + return err } - f.Close() // ignore error - stdoutMu.Unlock() + f.Seek(0, io.SeekStart) // ignore error + cache.Default().Put(stdoutKey, f) // ignore error } - return runErr + return nil } -var stdoutMu sync.Mutex // serializes concurrent writes (e.g. JSON values) to stdout +var stdoutMu sync.Mutex // serializes concurrent writes (of e.g. JSON values) to stdout + +// copyToStdout copies the stream to stdout while holding the lock. +func copyToStdout(r io.Reader) error { + stdoutMu.Lock() + defer stdoutMu.Unlock() + if _, err := io.Copy(os.Stdout, r); err != nil { + return fmt.Errorf("copying vet tool stdout: %w", err) + } + return nil +} // linkActionID computes the action ID for a link action. 
func (b *Builder) linkActionID(a *Action) cache.ActionID { diff --git a/src/cmd/go/main.go b/src/cmd/go/main.go index e81969ca4a3144..8cdfd9196e4cb1 100644 --- a/src/cmd/go/main.go +++ b/src/cmd/go/main.go @@ -24,7 +24,6 @@ import ( "cmd/go/internal/clean" "cmd/go/internal/doc" "cmd/go/internal/envcmd" - "cmd/go/internal/fix" "cmd/go/internal/fmtcmd" "cmd/go/internal/generate" "cmd/go/internal/help" @@ -55,7 +54,7 @@ func init() { clean.CmdClean, doc.CmdDoc, envcmd.CmdEnv, - fix.CmdFix, + vet.CmdFix, fmtcmd.CmdFmt, generate.CmdGenerate, modget.CmdGet, diff --git a/src/cmd/go/testdata/script/chdir.txt b/src/cmd/go/testdata/script/chdir.txt index a6feed6b45fce0..41def410d5fa37 100644 --- a/src/cmd/go/testdata/script/chdir.txt +++ b/src/cmd/go/testdata/script/chdir.txt @@ -27,6 +27,10 @@ stderr 'strings\.test' go vet -C ../strings -n stderr strings_test +# go fix +go fix -C ../strings -n +stderr strings_test + # -C must be first on command line (as of Go 1.21) ! go test -n -C ../strings stderr '^invalid value "../strings" for flag -C: -C flag must be first flag on command line$' diff --git a/src/cmd/go/testdata/script/vet_asm.txt b/src/cmd/go/testdata/script/vet_asm.txt index 8aa69ce1a3c999..c046773a06c0dc 100644 --- a/src/cmd/go/testdata/script/vet_asm.txt +++ b/src/cmd/go/testdata/script/vet_asm.txt @@ -1,12 +1,12 @@ -env GO111MODULE=off - # Issue 27665. Verify that "go vet" analyzes non-Go files. -[!GOARCH:amd64] skip +env GO111MODULE=off +env GOARCH=amd64 + ! go vet -asmdecl a stderr 'f: invalid MOVW of x' -# -c flag shows context +# -c=n flag shows n lines of context ! go vet -c=2 -asmdecl a stderr '...invalid MOVW...' stderr '1 .*TEXT' diff --git a/src/cmd/go/testdata/script/vet_basic.txt b/src/cmd/go/testdata/script/vet_basic.txt new file mode 100644 index 00000000000000..5ae66438ea3d81 --- /dev/null +++ b/src/cmd/go/testdata/script/vet_basic.txt @@ -0,0 +1,125 @@ +# Test basic features of "go vet"/"go fix" CLI. +# +# The example relies on two analyzers: +# - hostport (which is included in both the fix and vet suites), and +# - printf (which is only in the vet suite). +# Each reports one diagnostic with a fix. + +# vet default flags print diagnostics to stderr. Diagnostic => nonzero exit. +! go vet example.com/x +stderr 'does not work with IPv6' +stderr 'non-constant format string in call to fmt.Sprintf' + +# -hostport runs only one analyzer. Diagnostic => failure. +! go vet -hostport example.com/x +stderr 'does not work with IPv6' +! stderr 'non-constant format string' + +# -timeformat runs only one analyzer. No diagnostics => success. +go vet -timeformat example.com/x +! stderr . + +# JSON output includes diagnostics and fixes. Always success. +go vet -json example.com/x +! stderr . +stdout '"example.com/x": {' +stdout '"hostport":' +stdout '"message": "address format .* does not work with IPv6",' +stdout '"suggested_fixes":' +stdout '"message": "Replace fmt.Sprintf with net.JoinHostPort",' + +# vet -fix -diff displays a diff. +go vet -fix -diff example.com/x +stdout '\-var _ = fmt.Sprintf\(s\)' +stdout '\+var _ = fmt.Sprintf\("%s", s\)' +stdout '\-var _, _ = net.Dial\("tcp", fmt.Sprintf\("%s:%d", s, 80\)\)' +stdout '\+var _, _ = net.Dial\("tcp", net.JoinHostPort\(s, "80"\)\)' + +# vet -fix quietly applies the vet suite fixes. +cp x.go x.go.bak +go vet -fix example.com/x +grep 'fmt.Sprintf\("%s", s\)' x.go +grep 'net.JoinHostPort' x.go +! stderr . +cp x.go.bak x.go + +! go vet -diff example.com/x +stderr 'go vet -diff flag requires -fix' + +# go fix applies the fix suite fixes. 
+go fix example.com/x +grep 'net.JoinHostPort' x.go +! grep 'fmt.Sprintf\("%s", s\)' x.go +! stderr . +cp x.go.bak x.go + +# Show diff of fixes from the fix suite. +go fix -diff example.com/x +! stdout '\-var _ = fmt.Sprintf\(s\)' +stdout '\-var _, _ = net.Dial\("tcp", fmt.Sprintf\("%s:%d", s, 80\)\)' +stdout '\+var _, _ = net.Dial\("tcp", net.JoinHostPort\(s, "80"\)\)' + +# Show fix-suite fixes in JSON form. +go fix -json example.com/x +! stderr . +stdout '"example.com/x": {' +stdout '"hostport":' +stdout '"message": "address format .* does not work with IPv6",' +stdout '"suggested_fixes":' +stdout '"message": "Replace fmt.Sprintf with net.JoinHostPort",' +! stdout '"printf":' +! stdout '"message": "non-constant format string.*",' +! stdout '"message": "Insert.*%s.*format.string",' + +# Show vet-suite fixes in JSON form. +go vet -fix -json example.com/x +! stderr . +stdout '"example.com/x": {' +stdout '"hostport":' +stdout '"message": "address format .* does not work with IPv6",' +stdout '"suggested_fixes":' +stdout '"message": "Replace fmt.Sprintf with net.JoinHostPort",' +stdout '"printf":' +stdout '"message": "non-constant format string.*",' +stdout '"suggested_fixes":' +stdout '"message": "Insert.*%s.*format.string",' + +# Reject -diff + -json. +! go fix -diff -json example.com/x +stderr '-json and -diff cannot be used together' + +# Legacy way of selecting fixers is a no-op. +go fix -fix=old1,old2 example.com/x +stderr 'go fix: the -fix=old1,old2 flag is obsolete and has no effect' + +# -c=n flag shows n lines of context. +! go vet -c=2 -printf example.com/x +stderr 'x.go:12:21: non-constant format string in call to fmt.Sprintf' +! stderr '9 ' +stderr '10 ' +stderr '11 // This call...' +stderr '12 var _ = fmt.Sprintf\(s\)' +stderr '13 ' +stderr '14 ' +! stderr '15 ' + +-- go.mod -- +module example.com/x +go 1.25 + +-- x.go -- +package x + + +import ( + "fmt" + "net" +) + +var s string + +// This call yields a "non-constant format string" diagnostic, with a fix (go vet only). +var _ = fmt.Sprintf(s) + +// This call yields a hostport diagnostic, with a fix (go vet and go fix). +var _, _ = net.Dial("tcp", fmt.Sprintf("%s:%d", s, 80)) diff --git a/src/cmd/go/testdata/script/vet_cache.txt b/src/cmd/go/testdata/script/vet_cache.txt new file mode 100644 index 00000000000000..c84844000a43d0 --- /dev/null +++ b/src/cmd/go/testdata/script/vet_cache.txt @@ -0,0 +1,24 @@ +# Test that go vet's caching of vet tool actions replays +# the recorded stderr output even after a cache hit. + +# Set up fresh GOCACHE. +env GOCACHE=$WORK/gocache + +# First time is a cache miss. +! go vet example.com/a +stderr 'fmt.Sprint call has possible Printf formatting directive' + +# Second time is assumed to be a cache hit for the stdout JSON, +# but we don't bother to assert it. Same diagnostics again. +! go vet example.com/a +stderr 'fmt.Sprint call has possible Printf formatting directive' + +-- go.mod -- +module example.com + +-- a/a.go -- +package a + +import "fmt" + +var _ = fmt.Sprint("%s") // oops! 
diff --git a/src/cmd/vet/doc.go b/src/cmd/vet/doc.go index 8e72c252ed9984..ca208845615c8b 100644 --- a/src/cmd/vet/doc.go +++ b/src/cmd/vet/doc.go @@ -40,6 +40,7 @@ To list the available checks, run "go tool vet help": directive check Go toolchain directives such as //go:debug errorsas report passing non-pointer or non-error values to errors.As framepointer report assembly that clobbers the frame pointer before saving it + hostport check format of addresses passed to net.Dial httpresponse check for mistakes using HTTP responses ifaceassert detect impossible interface-to-interface type assertions loopclosure check references to loop variables from within nested functions @@ -50,6 +51,7 @@ To list the available checks, run "go tool vet help": sigchanyzer check for unbuffered channel of os.Signal slog check for invalid structured logging calls stdmethods check signature of methods of well-known interfaces + stdversion report uses of too-new standard library symbols stringintconv check for string(int) conversions structtag check that struct field tags conform to reflect.StructTag.Get testinggoroutine report calls to (*testing.T).Fatal from goroutines started by a test diff --git a/src/cmd/vet/main.go b/src/cmd/vet/main.go index 49f4e2f3425694..e7164a46b0a323 100644 --- a/src/cmd/vet/main.go +++ b/src/cmd/vet/main.go @@ -7,10 +7,8 @@ package main import ( "cmd/internal/objabi" "cmd/internal/telemetry/counter" - "flag" - - "golang.org/x/tools/go/analysis/unitchecker" + "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/appends" "golang.org/x/tools/go/analysis/passes/asmdecl" "golang.org/x/tools/go/analysis/passes/assign" @@ -46,52 +44,57 @@ import ( "golang.org/x/tools/go/analysis/passes/unsafeptr" "golang.org/x/tools/go/analysis/passes/unusedresult" "golang.org/x/tools/go/analysis/passes/waitgroup" + "golang.org/x/tools/go/analysis/unitchecker" ) func main() { + // Keep consistent with cmd/fix/main.go! counter.Open() objabi.AddVersionFlag() - counter.Inc("vet/invocations") - unitchecker.Main( - appends.Analyzer, - asmdecl.Analyzer, - assign.Analyzer, - atomic.Analyzer, - bools.Analyzer, - buildtag.Analyzer, - cgocall.Analyzer, - composite.Analyzer, - copylock.Analyzer, - defers.Analyzer, - directive.Analyzer, - errorsas.Analyzer, - framepointer.Analyzer, - httpresponse.Analyzer, - hostport.Analyzer, - ifaceassert.Analyzer, - loopclosure.Analyzer, - lostcancel.Analyzer, - nilfunc.Analyzer, - printf.Analyzer, - shift.Analyzer, - sigchanyzer.Analyzer, - slog.Analyzer, - stdmethods.Analyzer, - stdversion.Analyzer, - stringintconv.Analyzer, - structtag.Analyzer, - tests.Analyzer, - testinggoroutine.Analyzer, - timeformat.Analyzer, - unmarshal.Analyzer, - unreachable.Analyzer, - unsafeptr.Analyzer, - unusedresult.Analyzer, - waitgroup.Analyzer, - ) - // It's possible that unitchecker will exit early. In - // those cases the flags won't be counted. - counter.CountFlags("vet/flag:", *flag.CommandLine) + unitchecker.Main(suite...) // (never returns) +} + +// The vet suite analyzers report diagnostics. +// (Diagnostics must describe real problems, but need not +// suggest fixes, and fixes are not necessarily safe to apply.) 
+var suite = []*analysis.Analyzer{ + appends.Analyzer, + asmdecl.Analyzer, + assign.Analyzer, + atomic.Analyzer, + bools.Analyzer, + buildtag.Analyzer, + cgocall.Analyzer, + composite.Analyzer, + copylock.Analyzer, + defers.Analyzer, + directive.Analyzer, + errorsas.Analyzer, + // fieldalignment.Analyzer omitted: too noisy + framepointer.Analyzer, + httpresponse.Analyzer, + hostport.Analyzer, + ifaceassert.Analyzer, + loopclosure.Analyzer, + lostcancel.Analyzer, + nilfunc.Analyzer, + printf.Analyzer, + // shadow.Analyzer omitted: too noisy + shift.Analyzer, + sigchanyzer.Analyzer, + slog.Analyzer, + stdmethods.Analyzer, + stdversion.Analyzer, + stringintconv.Analyzer, + structtag.Analyzer, + tests.Analyzer, + testinggoroutine.Analyzer, + timeformat.Analyzer, + unmarshal.Analyzer, + unreachable.Analyzer, + unsafeptr.Analyzer, + unusedresult.Analyzer, + waitgroup.Analyzer, } diff --git a/src/cmd/vet/testdata/print/print.go b/src/cmd/vet/testdata/print/print.go index e00222c42b5aef..3761da420bea3c 100644 --- a/src/cmd/vet/testdata/print/print.go +++ b/src/cmd/vet/testdata/print/print.go @@ -162,7 +162,7 @@ func PrintfTests() { Printf("hi") // ok const format = "%s %s\n" Printf(format, "hi", "there") - Printf(format, "hi") // ERROR "Printf format %s reads arg #2, but call has 1 arg$" + Printf(format, "hi") // ERROR "Printf format %s reads arg #2, but call has 1 arg" Printf("%s %d %.3v %q", "str", 4) // ERROR "Printf format %.3v reads arg #3, but call has 2 args" f := new(ptrStringer) f.Warn(0, "%s", "hello", 3) // ERROR "Warn call has possible Printf formatting directive %s" diff --git a/src/cmd/vet/vet_test.go b/src/cmd/vet/vet_test.go index 54eabca938c3a9..0d509de528f247 100644 --- a/src/cmd/vet/vet_test.go +++ b/src/cmd/vet/vet_test.go @@ -28,7 +28,8 @@ func TestMain(m *testing.M) { os.Exit(0) } - os.Setenv("GO_VETTEST_IS_VET", "1") // Set for subprocesses to inherit. + // Set for subprocesses to inherit. 
+ os.Setenv("GO_VETTEST_IS_VET", "1") // ignore error os.Exit(m.Run()) } @@ -115,7 +116,7 @@ func TestVet(t *testing.T) { cmd.Env = append(os.Environ(), "GOWORK=off") cmd.Dir = "testdata/rangeloop" cmd.Stderr = new(strings.Builder) // all vet output goes to stderr - cmd.Run() + cmd.Run() // ignore error stderr := cmd.Stderr.(fmt.Stringer).String() filename := filepath.FromSlash("testdata/rangeloop/rangeloop.go") @@ -134,7 +135,7 @@ func TestVet(t *testing.T) { if err := errorCheck(stderr, false, filename, filepath.Base(filename)); err != nil { t.Errorf("error check failed: %s", err) - t.Log("vet stderr:\n", cmd.Stderr) + t.Logf("vet stderr:\n<<%s>>", cmd.Stderr) } }) @@ -146,7 +147,7 @@ func TestVet(t *testing.T) { cmd.Env = append(os.Environ(), "GOWORK=off") cmd.Dir = "testdata/stdversion" cmd.Stderr = new(strings.Builder) // all vet output goes to stderr - cmd.Run() + cmd.Run() // ignore error stderr := cmd.Stderr.(fmt.Stringer).String() filename := filepath.FromSlash("testdata/stdversion/stdversion.go") @@ -165,7 +166,7 @@ func TestVet(t *testing.T) { if err := errorCheck(stderr, false, filename, filepath.Base(filename)); err != nil { t.Errorf("error check failed: %s", err) - t.Log("vet stderr:\n", cmd.Stderr) + t.Logf("vet stderr:\n<<%s>>", cmd.Stderr) } }) } @@ -184,7 +185,7 @@ func cgoEnabled(t *testing.T) bool { func errchk(c *exec.Cmd, files []string, t *testing.T) { output, err := c.CombinedOutput() if _, ok := err.(*exec.ExitError); !ok { - t.Logf("vet output:\n%s", output) + t.Logf("vet output:\n<<%s>>", output) t.Fatal(err) } fullshort := make([]string, 0, len(files)*2) @@ -205,7 +206,6 @@ func TestTags(t *testing.T) { "x testtag y": 1, "othertag": 2, } { - tag, wantFile := tag, wantFile t.Run(tag, func(t *testing.T) { t.Parallel() t.Logf("-tags=%s", tag) @@ -266,7 +266,7 @@ func errorCheck(outStr string, wantAuto bool, fullshort ...string) (err error) { errmsgs, out = partitionStrings(we.prefix, out) } if len(errmsgs) == 0 { - errs = append(errs, fmt.Errorf("%s:%d: missing error %q", we.file, we.lineNum, we.reStr)) + errs = append(errs, fmt.Errorf("%s:%d: missing error %q (prefix: %s)", we.file, we.lineNum, we.reStr, we.prefix)) continue } matched := false From 69e82796322bbff60b522534a8a6eacb2bf1ebba Mon Sep 17 00:00:00 2001 From: Sean Liao Date: Wed, 8 Oct 2025 22:33:24 +0100 Subject: [PATCH 118/152] net/http: set cookie host to Request.Host when available When both Request.URL and Request.Host are set, the host in URL is used for connecting at the transport level, while Host is used for the request host line. Cookies should be set for the request, not the underlying connection destination. Fixes #38988 Change-Id: I09053b87ccac67081f6038d205837d9763701526 Reviewed-on: https://go-review.googlesource.com/c/go/+/710335 Reviewed-by: Damien Neil Auto-Submit: Damien Neil Reviewed-by: Michael Pratt LUCI-TryBot-Result: Go LUCI --- src/net/http/client.go | 9 +++++++-- src/net/http/client_test.go | 30 ++++++++++++++++++++++++++++++ 2 files changed, 37 insertions(+), 2 deletions(-) diff --git a/src/net/http/client.go b/src/net/http/client.go index ba095ea1e34e5c..8faab2b17af01a 100644 --- a/src/net/http/client.go +++ b/src/net/http/client.go @@ -172,8 +172,13 @@ func refererForURL(lastReq, newReq *url.URL, explicitRef string) string { // didTimeout is non-nil only if err != nil. 
func (c *Client) send(req *Request, deadline time.Time) (resp *Response, didTimeout func() bool, err error) { + cookieURL := req.URL + if req.Host != "" { + cookieURL = cloneURL(cookieURL) + cookieURL.Host = req.Host + } if c.Jar != nil { - for _, cookie := range c.Jar.Cookies(req.URL) { + for _, cookie := range c.Jar.Cookies(cookieURL) { req.AddCookie(cookie) } } @@ -183,7 +188,7 @@ func (c *Client) send(req *Request, deadline time.Time) (resp *Response, didTime } if c.Jar != nil { if rc := resp.Cookies(); len(rc) > 0 { - c.Jar.SetCookies(req.URL, rc) + c.Jar.SetCookies(cookieURL, rc) } } return resp, nil, nil diff --git a/src/net/http/client_test.go b/src/net/http/client_test.go index 94fddb508e0e38..2a3ee385f3c810 100644 --- a/src/net/http/client_test.go +++ b/src/net/http/client_test.go @@ -585,6 +585,36 @@ var echoCookiesRedirectHandler = HandlerFunc(func(w ResponseWriter, r *Request) } }) +func TestHostMismatchCookies(t *testing.T) { run(t, testHostMismatchCookies) } +func testHostMismatchCookies(t *testing.T, mode testMode) { + ts := newClientServerTest(t, mode, HandlerFunc(func(w ResponseWriter, r *Request) { + for _, c := range r.Cookies() { + c.Value = "SetOnServer" + SetCookie(w, c) + } + })).ts + + reqURL, _ := url.Parse(ts.URL) + hostURL := *reqURL + hostURL.Host = "cookies.example.com" + + c := ts.Client() + c.Jar = new(TestJar) + c.Jar.SetCookies(reqURL, []*Cookie{{Name: "First", Value: "SetOnClient"}}) + c.Jar.SetCookies(&hostURL, []*Cookie{{Name: "Second", Value: "SetOnClient"}}) + + req, _ := NewRequest("GET", ts.URL, NoBody) + req.Host = hostURL.Host + resp, err := c.Do(req) + if err != nil { + t.Fatalf("Get: %v", err) + } + resp.Body.Close() + + matchReturnedCookies(t, []*Cookie{{Name: "First", Value: "SetOnClient"}}, c.Jar.Cookies(reqURL)) + matchReturnedCookies(t, []*Cookie{{Name: "Second", Value: "SetOnServer"}}, c.Jar.Cookies(&hostURL)) +} + func TestClientSendsCookieFromJar(t *testing.T) { defer afterTest(t) tr := &recordingTransport{} From 584a89fe7455470b09643b30bdc3cc55bb75a552 Mon Sep 17 00:00:00 2001 From: cuishuang Date: Fri, 10 Oct 2025 11:47:13 +0800 Subject: [PATCH 119/152] all: omit unnecessary reassignment The new version of Go has been optimized, and variables do not need to be reassigned. 
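For background (a sketch, not part of this CL): Go 1.22 gave each loop
iteration its own copy of the loop variable, so the per-iteration copies
removed below are redundant. The goroutine body here is illustrative:

	package main

	import (
		"fmt"
		"sync"
	)

	func main() {
		var wg sync.WaitGroup
		for _, v := range []int{1, 2, 3} {
			// Before Go 1.22 a per-iteration copy (v := v) was needed here,
			// because every closure would otherwise observe the same final
			// value of v. With per-iteration loop variables the copy is
			// redundant, which is what this cleanup removes.
			wg.Add(1)
			go func() {
				defer wg.Done()
				fmt.Println(v) // prints 1, 2 and 3 in some order
			}()
		}
		wg.Wait()
	}
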
Change-Id: I0374b049271e53510f2b162f6821fb3595f2c8ff Reviewed-on: https://go-review.googlesource.com/c/go/+/710835 Auto-Submit: Michael Pratt LUCI-TryBot-Result: Go LUCI Reviewed-by: Alan Donovan Reviewed-by: Michael Pratt --- src/encoding/base32/base32_test.go | 1 - src/encoding/xml/marshal_test.go | 1 - src/io/fs/readdir_test.go | 1 - src/testing/iotest/reader_test.go | 1 - 4 files changed, 4 deletions(-) diff --git a/src/encoding/base32/base32_test.go b/src/encoding/base32/base32_test.go index f5d3c49e38fbef..6f8d564def3c86 100644 --- a/src/encoding/base32/base32_test.go +++ b/src/encoding/base32/base32_test.go @@ -709,7 +709,6 @@ func TestBufferedDecodingPadding(t *testing.T) { } for _, testcase := range testcases { - testcase := testcase pr, pw := io.Pipe() go func() { for _, chunk := range testcase.chunks { diff --git a/src/encoding/xml/marshal_test.go b/src/encoding/xml/marshal_test.go index b8bce7170a60b6..6c7e711aac0566 100644 --- a/src/encoding/xml/marshal_test.go +++ b/src/encoding/xml/marshal_test.go @@ -2561,7 +2561,6 @@ var closeTests = []struct { func TestClose(t *testing.T) { for _, tt := range closeTests { - tt := tt t.Run(tt.desc, func(t *testing.T) { var out strings.Builder enc := NewEncoder(&out) diff --git a/src/io/fs/readdir_test.go b/src/io/fs/readdir_test.go index 4c409ae7a010e2..b89706b893afd2 100644 --- a/src/io/fs/readdir_test.go +++ b/src/io/fs/readdir_test.go @@ -72,7 +72,6 @@ func TestFileInfoToDirEntry(t *testing.T) { } for _, test := range tests { - test := test t.Run(test.path, func(t *testing.T) { fi, err := Stat(testFs, test.path) if err != nil { diff --git a/src/testing/iotest/reader_test.go b/src/testing/iotest/reader_test.go index 1d222372caf384..cecfbfce4920c4 100644 --- a/src/testing/iotest/reader_test.go +++ b/src/testing/iotest/reader_test.go @@ -238,7 +238,6 @@ func TestErrReader(t *testing.T) { } for _, tt := range cases { - tt := tt t.Run(tt.name, func(t *testing.T) { n, err := ErrReader(tt.err).Read(nil) if err != tt.err { From aced4c79a2b2c60e464410cec1e5378d1011fa18 Mon Sep 17 00:00:00 2001 From: Sean Liao Date: Thu, 9 Oct 2025 02:26:02 +0100 Subject: [PATCH 120/152] net/http: strip request body headers on POST to GET redirects According to WHATWG Fetch, when the body is dropped in a redirect, headers that describe the body should also be dropped. https://fetch.spec.whatwg.org/#http-redirect-fetch Fixes #57273 Change-Id: I84598f69608e95c1b556ea0ce5953ed43bf2d824 Reviewed-on: https://go-review.googlesource.com/c/go/+/710395 Auto-Submit: Damien Neil Reviewed-by: Michael Pratt Reviewed-by: Damien Neil LUCI-TryBot-Result: Go LUCI --- src/net/http/client.go | 18 +++++++++++++----- src/net/http/client_test.go | 33 +++++++++++++++++++++++++++++++++ 2 files changed, 46 insertions(+), 5 deletions(-) diff --git a/src/net/http/client.go b/src/net/http/client.go index 8faab2b17af01a..d6a801073553f7 100644 --- a/src/net/http/client.go +++ b/src/net/http/client.go @@ -690,8 +690,7 @@ func (c *Client) do(req *Request) (retres *Response, reterr error) { stripSensitiveHeaders = true } } - copyHeaders(req, stripSensitiveHeaders) - + copyHeaders(req, stripSensitiveHeaders, !includeBody) // Add the Referer header from the most recent // request URL to the new one, if it's not https->http: if ref := refererForURL(reqs[len(reqs)-1].URL, req.URL, req.Header.Get("Referer")); ref != "" { @@ -758,7 +757,7 @@ func (c *Client) do(req *Request) (retres *Response, reterr error) { // makeHeadersCopier makes a function that copies headers from the // initial Request, ireq. 
For every redirect, this function must be called // so that it can copy headers into the upcoming Request. -func (c *Client) makeHeadersCopier(ireq *Request) func(req *Request, stripSensitiveHeaders bool) { +func (c *Client) makeHeadersCopier(ireq *Request) func(req *Request, stripSensitiveHeaders, stripBodyHeaders bool) { // The headers to copy are from the very initial request. // We use a closured callback to keep a reference to these original headers. var ( @@ -772,7 +771,7 @@ func (c *Client) makeHeadersCopier(ireq *Request) func(req *Request, stripSensit } } - return func(req *Request, stripSensitiveHeaders bool) { + return func(req *Request, stripSensitiveHeaders, stripBodyHeaders bool) { // If Jar is present and there was some initial cookies provided // via the request header, then we may need to alter the initial // cookies as we follow redirects since each redirect may end up @@ -810,12 +809,21 @@ func (c *Client) makeHeadersCopier(ireq *Request) func(req *Request, stripSensit // (at least the safe ones). for k, vv := range ireqhdr { sensitive := false + body := false switch CanonicalHeaderKey(k) { case "Authorization", "Www-Authenticate", "Cookie", "Cookie2", "Proxy-Authorization", "Proxy-Authenticate": sensitive = true + + case "Content-Encoding", "Content-Language", "Content-Location", + "Content-Type": + // Headers relating to the body which is removed for + // POST to GET redirects + // https://fetch.spec.whatwg.org/#http-redirect-fetch + body = true + } - if !(sensitive && stripSensitiveHeaders) { + if !(sensitive && stripSensitiveHeaders) && !(body && stripBodyHeaders) { req.Header[k] = vv } } diff --git a/src/net/http/client_test.go b/src/net/http/client_test.go index 2a3ee385f3c810..d184f720319ce4 100644 --- a/src/net/http/client_test.go +++ b/src/net/http/client_test.go @@ -1621,6 +1621,39 @@ func testClientStripHeadersOnRepeatedRedirect(t *testing.T, mode testMode) { } } +func TestClientStripHeadersOnPostToGetRedirect(t *testing.T) { + run(t, testClientStripHeadersOnPostToGetRedirect) +} +func testClientStripHeadersOnPostToGetRedirect(t *testing.T, mode testMode) { + ts := newClientServerTest(t, mode, HandlerFunc(func(w ResponseWriter, r *Request) { + if r.Method == "POST" { + Redirect(w, r, "/redirected", StatusFound) + return + } else if r.Method != "GET" { + t.Errorf("unexpected request method: %v", r.Method) + return + } + for key, val := range r.Header { + if strings.HasPrefix(key, "Content-") { + t.Errorf("unexpected request body header after redirect: %v: %v", key, val) + } + } + })).ts + + c := ts.Client() + + req, _ := NewRequest("POST", ts.URL, strings.NewReader("hello world")) + req.Header.Set("Content-Encoding", "a") + req.Header.Set("Content-Language", "b") + req.Header.Set("Content-Length", "c") + req.Header.Set("Content-Type", "d") + res, err := c.Do(req) + if err != nil { + t.Fatal(err) + } + defer res.Body.Close() +} + // Issue 22233: copy host when Client follows a relative redirect. func TestClientCopyHostOnRedirect(t *testing.T) { run(t, testClientCopyHostOnRedirect) } func testClientCopyHostOnRedirect(t *testing.T, mode testMode) { From e3be2d1b2b68d960398a343805f77052d5decb22 Mon Sep 17 00:00:00 2001 From: Sean Liao Date: Thu, 9 Oct 2025 01:56:09 +0100 Subject: [PATCH 121/152] net/url: disallow raw IPv6 addresses in host RFC 3986 requires square brackets around IPv6 addresses. Parse's acceptance of raw IPv6 addresses is non compliant, and complicates splitting out a port. 
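To illustrate the intended behavior (a sketch; the addresses are made up):
the bracketed form required by RFC 3986 splits the port cleanly, while a
raw IPv6 host now fails to parse, as the updated tests below show.

	package main

	import (
		"fmt"
		"net/url"
	)

	func main() {
		// RFC 3986 form: brackets make the final ":8080" unambiguously a port.
		u, err := url.Parse("https://[2001:db8::1]:8080/foo")
		fmt.Println(u.Hostname(), u.Port(), err) // 2001:db8::1 8080 <nil>

		// Raw IPv6 host: with this change Parse reports an invalid port,
		// since the text after the first colon is not a valid port.
		_, err = url.Parse("https://2001:db8::1:8080/foo")
		fmt.Println(err != nil) // true
	}
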
Fixes #31024 Fixes #75223 Change-Id: I477dc420a7441cb33156627dbd5e46d88c677f1e Reviewed-on: https://go-review.googlesource.com/c/go/+/710176 LUCI-TryBot-Result: Go LUCI Reviewed-by: Damien Neil Reviewed-by: Michael Pratt --- src/net/url/url.go | 4 +++- src/net/url/url_test.go | 23 +++-------------------- 2 files changed, 6 insertions(+), 21 deletions(-) diff --git a/src/net/url/url.go b/src/net/url/url.go index 6afa30f162bd25..a69754880149a9 100644 --- a/src/net/url/url.go +++ b/src/net/url/url.go @@ -698,7 +698,9 @@ func parseHost(host string) (string, error) { return "", errors.New("invalid IP-literal") } return "[" + unescapedHostname + "]" + unescapedColonPort, nil - } else if i := strings.LastIndex(host, ":"); i != -1 { + } else if i := strings.Index(host, ":"); i != -1 { + // IPv4address / reg-name + // E.g. 1.2.3.4, 1.2.3.4:80, example.com, example.com:80 colonPort := host[i:] if !validOptionalPort(colonPort) { return "", fmt.Errorf("invalid port %q after host", colonPort) diff --git a/src/net/url/url_test.go b/src/net/url/url_test.go index 6084facacc0519..a7543d6fd40b1e 100644 --- a/src/net/url/url_test.go +++ b/src/net/url/url_test.go @@ -506,26 +506,6 @@ var urltests = []URLTest{ }, "", }, - { - // Malformed IPv6 but still accepted. - "http://2b01:e34:ef40:7730:8e70:5aff:fefe:edac:8080/foo", - &URL{ - Scheme: "http", - Host: "2b01:e34:ef40:7730:8e70:5aff:fefe:edac:8080", - Path: "/foo", - }, - "", - }, - { - // Malformed IPv6 but still accepted. - "http://2b01:e34:ef40:7730:8e70:5aff:fefe:edac:/foo", - &URL{ - Scheme: "http", - Host: "2b01:e34:ef40:7730:8e70:5aff:fefe:edac:", - Path: "/foo", - }, - "", - }, { "http://[2b01:e34:ef40:7730:8e70:5aff:fefe:edac]:8080/foo", &URL{ @@ -735,6 +715,9 @@ var parseRequestURLTests = []struct { {"https://[0:0::test.com]:80", false}, {"https://[2001:db8::test.com]", false}, {"https://[test.com]", false}, + {"https://1:2:3:4:5:6:7:8", false}, + {"https://1:2:3:4:5:6:7:8:80", false}, + {"https://example.com:80:", false}, } func TestParseRequestURI(t *testing.T) { From e8a53538b473f1a7a92602675eda2d34f3887611 Mon Sep 17 00:00:00 2001 From: Michael Anthony Knyszek Date: Thu, 9 Oct 2025 20:58:34 +0000 Subject: [PATCH 122/152] runtime: fail TestGoroutineLeakProfile on data race Some of the programs in testdata/testgoroutineleakprofile have data races because they were taken from a corpus that showcases general Go concurrency bugs, not just leaked goroutines. This causes some flakiness as tests might fail due to, for example, a concurrent map access, even outside of race mode. Let's just call data races a failure and fix them in the examples. As far as I can tell, there are only two that show up consistently. Fixes #75732. Change-Id: I160b3a1cdce4c2de3f2320b68b4083292e02b557 Reviewed-on: https://go-review.googlesource.com/c/go/+/710756 LUCI-TryBot-Result: Go LUCI Reviewed-by: Carlos Amedee Auto-Submit: Michael Knyszek Reviewed-by: Cherry Mui --- src/runtime/goroutineleakprofile_test.go | 10 +++---- .../testgoroutineleakprofile/goker/README.md | 30 +++++++++++-------- .../goker/cockroach1055.go | 4 +-- .../goker/moby27782.go | 7 +++++ 4 files changed, 31 insertions(+), 20 deletions(-) diff --git a/src/runtime/goroutineleakprofile_test.go b/src/runtime/goroutineleakprofile_test.go index 6e26bcab132831..f5d2dd6372e54e 100644 --- a/src/runtime/goroutineleakprofile_test.go +++ b/src/runtime/goroutineleakprofile_test.go @@ -487,7 +487,7 @@ func TestGoroutineLeakProfile(t *testing.T) { testCases = append(testCases, patternTestCases...) 
// Test cases must not panic or cause fatal exceptions. - failStates := regexp.MustCompile(`fatal|panic`) + failStates := regexp.MustCompile(`fatal|panic|DATA RACE`) testApp := func(exepath string, testCases []testCase) { @@ -520,9 +520,9 @@ func TestGoroutineLeakProfile(t *testing.T) { t.Errorf("Test %s produced no output. Is the goroutine leak profile collected?", tcase.name) } - // Zero tolerance policy for fatal exceptions or panics. + // Zero tolerance policy for fatal exceptions, panics, or data races. if failStates.MatchString(runOutput) { - t.Errorf("unexpected fatal exception or panic!\noutput:\n%s\n\n", runOutput) + t.Errorf("unexpected fatal exception or panic\noutput:\n%s\n\n", runOutput) } output += runOutput + "\n\n" @@ -540,7 +540,7 @@ func TestGoroutineLeakProfile(t *testing.T) { unexpectedLeaks := make([]string, 0, len(foundLeaks)) // Parse every leak and check if it is expected (maybe as a flaky leak). - LEAKS: + leaks: for _, leak := range foundLeaks { // Check if the leak is expected. // If it is, check whether it has been encountered before. @@ -569,7 +569,7 @@ func TestGoroutineLeakProfile(t *testing.T) { for flakyLeak := range tcase.flakyLeaks { if flakyLeak.MatchString(leak) { // The leak is flaky. Carry on to the next line. - continue LEAKS + continue leaks } } diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/README.md b/src/runtime/testdata/testgoroutineleakprofile/goker/README.md index 88c50e1e480a08..e6f8fe23f26c02 100644 --- a/src/runtime/testdata/testgoroutineleakprofile/goker/README.md +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/README.md @@ -24,18 +24,22 @@ Jingling Xue (jingling@cse.unsw.edu.au): White paper: https://lujie.ac.cn/files/papers/GoBench.pdf -The examples have been modified in order to run the goroutine leak -profiler. Buggy snippets are moved from within a unit test to separate -applications. Each is then independently executed, possibly as multiple -copies within the same application in order to exercise more interleavings. -Concurrently, the main program sets up a waiting period (typically 1ms), followed -by a goroutine leak profile request. Other modifications may involve injecting calls -to `runtime.Gosched()`, to more reliably exercise buggy interleavings, or reductions -in waiting periods when calling `time.Sleep`, in order to reduce overall testing time. - -The resulting goroutine leak profile is analyzed to ensure that no unexpected leaks occurred, -and that the expected leaks did occur. If the leak is flaky, the only purpose of the expected -leak list is to protect against unexpected leaks. +The examples have been modified in order to run the goroutine leak profiler. +Buggy snippets are moved from within a unit test to separate applications. +Each is then independently executed, possibly as multiple copies within the +same application in order to exercise more interleavings. Concurrently, the +main program sets up a waiting period (typically 1ms), followed by a goroutine +leak profile request. Other modifications may involve injecting calls to +`runtime.Gosched()`, to more reliably exercise buggy interleavings, or reductions +in waiting periods when calling `time.Sleep`, in order to reduce overall testing +time. + +The resulting goroutine leak profile is analyzed to ensure that no unexpecte +leaks occurred, and that the expected leaks did occur. If the leak is flaky, the +only purpose of the expected leak list is to protect against unexpected leaks. 
+ +The examples have also been modified to remove data races, since those create flaky +test failures, when really all we care about are leaked goroutines. The entries below document each of the corresponding leaks. @@ -1844,4 +1848,4 @@ c.inbox <- <================> [<-c.inbox] . close(c.closed) . <-c.dispatcherLoopStopped ---------------------G1,G2 leak------------------------------- -``` \ No newline at end of file +``` diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach1055.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach1055.go index 687baed25a2a44..87cf157996800e 100644 --- a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach1055.go +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach1055.go @@ -44,9 +44,9 @@ func (s *Stopper_cockroach1055) SetStopped() { func (s *Stopper_cockroach1055) Quiesce() { s.mu.Lock() defer s.mu.Unlock() - s.draining = 1 + atomic.StoreInt32(&s.draining, 1) s.drain.Wait() - s.draining = 0 + atomic.StoreInt32(&s.draining, 0) } func (s *Stopper_cockroach1055) Stop() { diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/moby27782.go b/src/runtime/testdata/testgoroutineleakprofile/goker/moby27782.go index 7b3398fd381210..9b53d9035ca1e1 100644 --- a/src/runtime/testdata/testgoroutineleakprofile/goker/moby27782.go +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/moby27782.go @@ -208,6 +208,7 @@ func (container *Container_moby27782) Reset() { } type JSONFileLogger_moby27782 struct { + mu sync.Mutex readers map[*LogWatcher_moby27782]struct{} } @@ -218,11 +219,17 @@ func (l *JSONFileLogger_moby27782) ReadLogs() *LogWatcher_moby27782 { } func (l *JSONFileLogger_moby27782) readLogs(logWatcher *LogWatcher_moby27782) { + l.mu.Lock() + defer l.mu.Unlock() + l.readers[logWatcher] = struct{}{} followLogs_moby27782(logWatcher) } func (l *JSONFileLogger_moby27782) Close() { + l.mu.Lock() + defer l.mu.Unlock() + for r := range l.readers { r.Close() delete(l.readers, r) From 48bb7a61147c397d0f45c10bc21ba12fa9cec0ad Mon Sep 17 00:00:00 2001 From: David Chase Date: Fri, 10 Oct 2025 17:08:20 -0400 Subject: [PATCH 123/152] cmd/compile: repair bisection behavior for float-to-unsigned conversion My stab at a bisect-reproducer failed, but I verified that it fixed the problem described in the issue. Updates #75834 Change-Id: I9e0dfacd2bbd22cbc557e144920ee3417a48088c Reviewed-on: https://go-review.googlesource.com/c/go/+/710997 LUCI-TryBot-Result: Go LUCI Reviewed-by: Cherry Mui --- src/cmd/compile/internal/ssagen/ssa.go | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go index 57cd9084821237..e2ef33274577cc 100644 --- a/src/cmd/compile/internal/ssagen/ssa.go +++ b/src/cmd/compile/internal/ssagen/ssa.go @@ -2865,7 +2865,19 @@ func (s *state) conv(n ir.Node, v *ssa.Value, ft, tt *types.Type) *ssa.Value { } if ft.IsFloat() || tt.IsFloat() { - conv, ok := fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}] + cft, ctt := s.concreteEtype(ft), s.concreteEtype(tt) + conv, ok := fpConvOpToSSA[twoTypes{cft, ctt}] + // there's a change to a conversion-op table, this restores the old behavior if ConvertHash is false. 
+ // use salted hash to distinguish unsigned convert at a Pos from signed convert at a Pos + if ctt == types.TUINT32 && ft.IsFloat() && !base.ConvertHash.MatchPosWithInfo(n.Pos(), "U", nil) { + // revert to old behavior + conv.op1 = ssa.OpCvt64Fto64 + if cft == types.TFLOAT32 { + conv.op1 = ssa.OpCvt32Fto64 + } + conv.op2 = ssa.OpTrunc64to32 + + } if s.config.RegSize == 4 && Arch.LinkArch.Family != sys.MIPS && !s.softFloat { if conv1, ok1 := fpConvOpToSSA32[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 { conv = conv1 @@ -5862,6 +5874,7 @@ func (s *state) floatToUint(cvttab *f2uCvtTab, n ir.Node, x *ssa.Value, ft, tt * // cutoff:=1<<(intY_Size-1) // if x < floatX(cutoff) { // result = uintY(x) // bThen + // // gated by ConvertHash, clamp negative inputs to zero // if x < 0 { // unlikely // result = 0 // bZero // } @@ -5879,7 +5892,8 @@ func (s *state) floatToUint(cvttab *f2uCvtTab, n ir.Node, x *ssa.Value, ft, tt * b.Likely = ssa.BranchLikely var bThen, bZero *ssa.Block - newConversion := base.ConvertHash.MatchPos(n.Pos(), nil) + // use salted hash to distinguish unsigned convert at a Pos from signed convert at a Pos + newConversion := base.ConvertHash.MatchPosWithInfo(n.Pos(), "U", nil) if newConversion { bZero = s.f.NewBlock(ssa.BlockPlain) bThen = s.f.NewBlock(ssa.BlockIf) From b497a29d25b0f6f29bedaa92ac1d40a1ee5c0956 Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Mon, 6 Oct 2025 12:56:29 -0700 Subject: [PATCH 124/152] encoding/json: fix regression in quoted numbers under goexperiment.jsonv2 The legacy parsing of quoted numbers in v1 was according to the Go grammar for a number, rather than the JSON grammar for a number. The former is a superset of the latter. This is a historical mistake, but usages exist that depend on it. We already have branches for StringifyWithLegacySemantics to handle quoted nulls, so we can expand it to handle this. 
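A sketch of the legacy behavior this change preserves under the experiment;
the field names are illustrative and the inputs mirror the new test cases
below:

	package main

	import (
		"encoding/json"
		"fmt"
	)

	func main() {
		var v struct {
			X int64   `json:",string"`
			Y float64 `json:",string"`
		}
		// "-0000123" and "0x1_4p-2" are valid Go numbers but not valid JSON
		// numbers; v1 has historically accepted them inside quoted strings.
		err := json.Unmarshal([]byte(`{"X": "-0000123", "Y": "0x1_4p-2"}`), &v)
		fmt.Println(v.X, v.Y, err) // -123 5 <nil>
	}
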
Fixes #75619 Change-Id: Ic07802539b7cbe0e1f53bd0f7e9bb344a8447203 Reviewed-on: https://go-review.googlesource.com/c/go/+/709615 Reviewed-by: Damien Neil Auto-Submit: Joseph Tsai LUCI-TryBot-Result: Go LUCI Reviewed-by: Michael Pratt --- src/encoding/json/decode_test.go | 56 ++++++++++++++++++++++++++ src/encoding/json/v2/arshal_default.go | 51 ++++++++++++++++++----- src/encoding/json/v2_decode_test.go | 56 ++++++++++++++++++++++++++ src/encoding/json/v2_options.go | 4 +- 4 files changed, 157 insertions(+), 10 deletions(-) diff --git a/src/encoding/json/decode_test.go b/src/encoding/json/decode_test.go index d12495f90b7141..0b26b8eb91813b 100644 --- a/src/encoding/json/decode_test.go +++ b/src/encoding/json/decode_test.go @@ -1237,6 +1237,62 @@ var unmarshalTests = []struct { out: (chan int)(nil), err: &UnmarshalTypeError{Value: "number", Type: reflect.TypeFor[chan int](), Offset: 1}, }, + + // #75619 + { + CaseName: Name("QuotedInt/GoSyntax"), + in: `{"X": "-0000123"}`, + ptr: new(struct { + X int64 `json:",string"` + }), + out: struct { + X int64 `json:",string"` + }{-123}, + }, + { + CaseName: Name("QuotedInt/Invalid"), + in: `{"X": "123 "}`, + ptr: new(struct { + X int64 `json:",string"` + }), + err: &UnmarshalTypeError{Value: "number 123 ", Type: reflect.TypeFor[int64](), Field: "X", Offset: int64(len(`{"X": "123 "`))}, + }, + { + CaseName: Name("QuotedUint/GoSyntax"), + in: `{"X": "0000123"}`, + ptr: new(struct { + X uint64 `json:",string"` + }), + out: struct { + X uint64 `json:",string"` + }{123}, + }, + { + CaseName: Name("QuotedUint/Invalid"), + in: `{"X": "0x123"}`, + ptr: new(struct { + X uint64 `json:",string"` + }), + err: &UnmarshalTypeError{Value: "number 0x123", Type: reflect.TypeFor[uint64](), Field: "X", Offset: int64(len(`{"X": "0x123"`))}, + }, + { + CaseName: Name("QuotedFloat/GoSyntax"), + in: `{"X": "0x1_4p-2"}`, + ptr: new(struct { + X float64 `json:",string"` + }), + out: struct { + X float64 `json:",string"` + }{0x1_4p-2}, + }, + { + CaseName: Name("QuotedFloat/Invalid"), + in: `{"X": "1.5e1_"}`, + ptr: new(struct { + X float64 `json:",string"` + }), + err: &UnmarshalTypeError{Value: "number 1.5e1_", Type: reflect.TypeFor[float64](), Field: "X", Offset: int64(len(`{"X": "1.5e1_"`))}, + }, } func TestMarshal(t *testing.T) { diff --git a/src/encoding/json/v2/arshal_default.go b/src/encoding/json/v2/arshal_default.go index c2307fa31d7fcc..078d345e1439c2 100644 --- a/src/encoding/json/v2/arshal_default.go +++ b/src/encoding/json/v2/arshal_default.go @@ -474,10 +474,21 @@ func makeIntArshaler(t reflect.Type) *arshaler { break } val = jsonwire.UnquoteMayCopy(val, flags.IsVerbatim()) - if uo.Flags.Get(jsonflags.StringifyWithLegacySemantics) && string(val) == "null" { - if !uo.Flags.Get(jsonflags.MergeWithLegacySemantics) { - va.SetInt(0) + if uo.Flags.Get(jsonflags.StringifyWithLegacySemantics) { + // For historical reasons, v1 parsed a quoted number + // according to the Go syntax and permitted a quoted null. 
+ // See https://go.dev/issue/75619 + n, err := strconv.ParseInt(string(val), 10, bits) + if err != nil { + if string(val) == "null" { + if !uo.Flags.Get(jsonflags.MergeWithLegacySemantics) { + va.SetInt(0) + } + return nil + } + return newUnmarshalErrorAfterWithValue(dec, t, errors.Unwrap(err)) } + va.SetInt(n) return nil } fallthrough @@ -561,10 +572,21 @@ func makeUintArshaler(t reflect.Type) *arshaler { break } val = jsonwire.UnquoteMayCopy(val, flags.IsVerbatim()) - if uo.Flags.Get(jsonflags.StringifyWithLegacySemantics) && string(val) == "null" { - if !uo.Flags.Get(jsonflags.MergeWithLegacySemantics) { - va.SetUint(0) + if uo.Flags.Get(jsonflags.StringifyWithLegacySemantics) { + // For historical reasons, v1 parsed a quoted number + // according to the Go syntax and permitted a quoted null. + // See https://go.dev/issue/75619 + n, err := strconv.ParseUint(string(val), 10, bits) + if err != nil { + if string(val) == "null" { + if !uo.Flags.Get(jsonflags.MergeWithLegacySemantics) { + va.SetUint(0) + } + return nil + } + return newUnmarshalErrorAfterWithValue(dec, t, errors.Unwrap(err)) } + va.SetUint(n) return nil } fallthrough @@ -671,10 +693,21 @@ func makeFloatArshaler(t reflect.Type) *arshaler { if !stringify { break } - if uo.Flags.Get(jsonflags.StringifyWithLegacySemantics) && string(val) == "null" { - if !uo.Flags.Get(jsonflags.MergeWithLegacySemantics) { - va.SetFloat(0) + if uo.Flags.Get(jsonflags.StringifyWithLegacySemantics) { + // For historical reasons, v1 parsed a quoted number + // according to the Go syntax and permitted a quoted null. + // See https://go.dev/issue/75619 + n, err := strconv.ParseFloat(string(val), bits) + if err != nil { + if string(val) == "null" { + if !uo.Flags.Get(jsonflags.MergeWithLegacySemantics) { + va.SetFloat(0) + } + return nil + } + return newUnmarshalErrorAfterWithValue(dec, t, errors.Unwrap(err)) } + va.SetFloat(n) return nil } if n, err := jsonwire.ConsumeNumber(val); n != len(val) || err != nil { diff --git a/src/encoding/json/v2_decode_test.go b/src/encoding/json/v2_decode_test.go index f9b0a60f47cfd5..28c57ec8bf5c73 100644 --- a/src/encoding/json/v2_decode_test.go +++ b/src/encoding/json/v2_decode_test.go @@ -1243,6 +1243,62 @@ var unmarshalTests = []struct { out: (chan int)(nil), err: &UnmarshalTypeError{Value: "number", Type: reflect.TypeFor[chan int]()}, }, + + // #75619 + { + CaseName: Name("QuotedInt/GoSyntax"), + in: `{"X": "-0000123"}`, + ptr: new(struct { + X int64 `json:",string"` + }), + out: struct { + X int64 `json:",string"` + }{-123}, + }, + { + CaseName: Name("QuotedInt/Invalid"), + in: `{"X": "123 "}`, + ptr: new(struct { + X int64 `json:",string"` + }), + err: &UnmarshalTypeError{Value: "number 123 ", Type: reflect.TypeFor[int64](), Field: "X", Offset: int64(len(`{"X": `))}, + }, + { + CaseName: Name("QuotedUint/GoSyntax"), + in: `{"X": "0000123"}`, + ptr: new(struct { + X uint64 `json:",string"` + }), + out: struct { + X uint64 `json:",string"` + }{123}, + }, + { + CaseName: Name("QuotedUint/Invalid"), + in: `{"X": "0x123"}`, + ptr: new(struct { + X uint64 `json:",string"` + }), + err: &UnmarshalTypeError{Value: "number 0x123", Type: reflect.TypeFor[uint64](), Field: "X", Offset: int64(len(`{"X": `))}, + }, + { + CaseName: Name("QuotedFloat/GoSyntax"), + in: `{"X": "0x1_4p-2"}`, + ptr: new(struct { + X float64 `json:",string"` + }), + out: struct { + X float64 `json:",string"` + }{0x1_4p-2}, + }, + { + CaseName: Name("QuotedFloat/Invalid"), + in: `{"X": "1.5e1_"}`, + ptr: new(struct { + X float64 `json:",string"` + 
}), + err: &UnmarshalTypeError{Value: "number 1.5e1_", Type: reflect.TypeFor[float64](), Field: "X", Offset: int64(len(`{"X": `))}, + }, } func TestMarshal(t *testing.T) { diff --git a/src/encoding/json/v2_options.go b/src/encoding/json/v2_options.go index 4dea88ad7edaf6..819fe59f412c68 100644 --- a/src/encoding/json/v2_options.go +++ b/src/encoding/json/v2_options.go @@ -506,7 +506,9 @@ func ReportErrorsWithLegacySemantics(v bool) Options { // When marshaling, such Go values are serialized as their usual // JSON representation, but quoted within a JSON string. // When unmarshaling, such Go values must be deserialized from -// a JSON string containing their usual JSON representation. +// a JSON string containing their usual JSON representation or +// Go number representation for that numeric kind. +// Note that the Go number grammar is a superset of the JSON number grammar. // A JSON null quoted in a JSON string is a valid substitute for JSON null // while unmarshaling into a Go value that `string` takes effect on. // From 8aa1efa223d7bd39faaabdfbf85882ed3942a6f4 Mon Sep 17 00:00:00 2001 From: Cherry Mui Date: Sat, 11 Oct 2025 10:36:49 -0400 Subject: [PATCH 125/152] cmd/link: in TestFallocate, only check number of blocks on Darwin The number-of-blocks check was introduced when fixing a Darwin- specific bug. On Darwin, the file allocation syscall is a bit tricky. On Linux and BSDs, it is more straightforward and unlikely to go wrong. The test itself, on the other hand, is less reliable on Linux (and perhaps BSDs), as it is considered less portable and is an implementation detail of the file system. Given these two reasons, only check it on Darwin. Fixes #75795. Change-Id: I3da891fd60a141c3eca5d0f5ec20c2cad65b8862 Reviewed-on: https://go-review.googlesource.com/c/go/+/711095 LUCI-TryBot-Result: Go LUCI Reviewed-by: Keith Randall Reviewed-by: Keith Randall --- src/cmd/link/internal/ld/fallocate_test.go | 25 ++++++++++++++++------ 1 file changed, 19 insertions(+), 6 deletions(-) diff --git a/src/cmd/link/internal/ld/fallocate_test.go b/src/cmd/link/internal/ld/fallocate_test.go index 3c6b7ef752edd3..f463b5b63b3b69 100644 --- a/src/cmd/link/internal/ld/fallocate_test.go +++ b/src/cmd/link/internal/ld/fallocate_test.go @@ -10,6 +10,7 @@ import ( "errors" "os" "path/filepath" + "runtime" "syscall" "testing" ) @@ -53,12 +54,24 @@ func TestFallocate(t *testing.T) { if got := stat.Size(); got != sz { t.Errorf("unexpected file size: got %d, want %d", got, sz) } - // The number of blocks must be enough for the requested size. - // We used to require an exact match, but it appears that - // some file systems allocate a few extra blocks in some cases. - // See issue #41127. - if got, want := stat.Sys().(*syscall.Stat_t).Blocks, (sz+511)/512; got < want { - t.Errorf("unexpected disk usage: got %d blocks, want at least %d", got, want) + if runtime.GOOS == "darwin" { + // Check the number of allocated blocks on Darwin. On Linux (and + // perhaps BSDs), stat's Blocks field may not be portable as it + // is an implementation detail of the file system. On Darwin, it + // is documented as "the actual number of blocks allocated for + // the file in 512-byte units". + // The check is introduced when fixing a Darwin-specific bug. On + // Darwin, the file allocation syscall is a bit tricky. On Linux + // and BSDs, it is more straightforward and unlikely to go wrong. + // Given these two reasons, only check it on Darwin. + // + // The number of blocks must be enough for the requested size. 
+ // We used to require an exact match, but it appears that + // some file systems allocate a few extra blocks in some cases. + // See issue #41127. + if got, want := stat.Sys().(*syscall.Stat_t).Blocks, (sz+511)/512; got < want { + t.Errorf("unexpected disk usage: got %d blocks, want at least %d", got, want) + } } out.munmap() } From 1cd71689f2ed8f07031a0cc58fc3586ca501839f Mon Sep 17 00:00:00 2001 From: Roland Shoemaker Date: Thu, 9 Oct 2025 13:35:24 -0700 Subject: [PATCH 126/152] crypto/x509: rework fix for CVE-2025-58187 In CL 709854 we enabled strict validation for a number of properties of domain names (and their constraints). This caused significant breakage, since we didn't previously disallow the creation of certificates which contained these malformed domains. Rollback a number of the properties we enforced, making domainNameValid only enforce the same properties that domainToReverseLabels does. Since this also undoes some of the DoS protections our initial fix enabled, this change also adds caching of constraints in isValid (which perhaps is the fix we should've initially chosen). Updates #75835 Fixes #75828 Change-Id: Ie6ca6b4f30e9b8a143692b64757f7bbf4671ed0e Reviewed-on: https://go-review.googlesource.com/c/go/+/710735 LUCI-TryBot-Result: Go LUCI Reviewed-by: Damien Neil --- src/crypto/x509/name_constraints_test.go | 75 +++++++++++++++++++-- src/crypto/x509/parser.go | 57 +++++++++++----- src/crypto/x509/parser_test.go | 84 +++++++++++++++++++++--- src/crypto/x509/verify.go | 53 ++++++++++----- src/crypto/x509/verify_test.go | 2 +- 5 files changed, 221 insertions(+), 50 deletions(-) diff --git a/src/crypto/x509/name_constraints_test.go b/src/crypto/x509/name_constraints_test.go index 831fcbc8d2eb82..a5851845164d10 100644 --- a/src/crypto/x509/name_constraints_test.go +++ b/src/crypto/x509/name_constraints_test.go @@ -1456,7 +1456,63 @@ var nameConstraintsTests = []nameConstraintsTest{ expectedError: "incompatible key usage", }, - // #77: if several EKUs are requested, satisfying any of them is sufficient. + // An invalid DNS SAN should be detected only at validation time so + // that we can process CA certificates in the wild that have invalid SANs. + // See https://github.com/golang/go/issues/23995 + + // #77: an invalid DNS or mail SAN will not be detected if name constraint + // checking is not triggered. + { + roots: make([]constraintsSpec, 1), + intermediates: [][]constraintsSpec{ + { + {}, + }, + }, + leaf: leafSpec{ + sans: []string{"dns:this is invalid", "email:this @ is invalid"}, + }, + }, + + // #78: an invalid DNS SAN will be detected if any name constraint checking + // is triggered. + { + roots: []constraintsSpec{ + { + bad: []string{"uri:"}, + }, + }, + intermediates: [][]constraintsSpec{ + { + {}, + }, + }, + leaf: leafSpec{ + sans: []string{"dns:this is invalid"}, + }, + expectedError: "cannot parse dnsName", + }, + + // #79: an invalid email SAN will be detected if any name constraint + // checking is triggered. + { + roots: []constraintsSpec{ + { + bad: []string{"uri:"}, + }, + }, + intermediates: [][]constraintsSpec{ + { + {}, + }, + }, + leaf: leafSpec{ + sans: []string{"email:this @ is invalid"}, + }, + expectedError: "cannot parse rfc822Name", + }, + + // #80: if several EKUs are requested, satisfying any of them is sufficient. 
{ roots: make([]constraintsSpec, 1), intermediates: [][]constraintsSpec{ @@ -1471,7 +1527,7 @@ var nameConstraintsTests = []nameConstraintsTest{ requestedEKUs: []ExtKeyUsage{ExtKeyUsageClientAuth, ExtKeyUsageEmailProtection}, }, - // #78: EKUs that are not asserted in VerifyOpts are not required to be + // #81: EKUs that are not asserted in VerifyOpts are not required to be // nested. { roots: make([]constraintsSpec, 1), @@ -1490,7 +1546,7 @@ var nameConstraintsTests = []nameConstraintsTest{ }, }, - // #79: a certificate without SANs and CN is accepted in a constrained chain. + // #82: a certificate without SANs and CN is accepted in a constrained chain. { roots: []constraintsSpec{ { @@ -1507,7 +1563,7 @@ var nameConstraintsTests = []nameConstraintsTest{ }, }, - // #80: a certificate without SANs and with a CN that does not parse as a + // #83: a certificate without SANs and with a CN that does not parse as a // hostname is accepted in a constrained chain. { roots: []constraintsSpec{ @@ -1526,7 +1582,7 @@ var nameConstraintsTests = []nameConstraintsTest{ }, }, - // #81: a certificate with SANs and CN is accepted in a constrained chain. + // #84: a certificate with SANs and CN is accepted in a constrained chain. { roots: []constraintsSpec{ { @@ -1544,7 +1600,14 @@ var nameConstraintsTests = []nameConstraintsTest{ }, }, - // #82: URIs with IPv6 addresses with zones and ports are rejected + // #85: .example.com is an invalid DNS name, it should not match the + // constraint example.com. + { + roots: []constraintsSpec{{ok: []string{"dns:example.com"}}}, + leaf: leafSpec{sans: []string{"dns:.example.com"}}, + expectedError: "cannot parse dnsName \".example.com\"", + }, + // #86: URIs with IPv6 addresses with zones and ports are rejected { roots: []constraintsSpec{ { diff --git a/src/crypto/x509/parser.go b/src/crypto/x509/parser.go index 9d6bfd6e95f949..680dcee203a828 100644 --- a/src/crypto/x509/parser.go +++ b/src/crypto/x509/parser.go @@ -413,14 +413,10 @@ func parseSANExtension(der cryptobyte.String) (dnsNames, emailAddresses []string if err := isIA5String(email); err != nil { return errors.New("x509: SAN rfc822Name is malformed") } - parsed, ok := parseRFC2821Mailbox(email) - if !ok || (ok && !domainNameValid(parsed.domain, false)) { - return errors.New("x509: SAN rfc822Name is malformed") - } emailAddresses = append(emailAddresses, email) case nameTypeDNS: name := string(data) - if err := isIA5String(name); err != nil || (err == nil && !domainNameValid(name, false)) { + if err := isIA5String(name); err != nil { return errors.New("x509: SAN dNSName is malformed") } dnsNames = append(dnsNames, string(name)) @@ -430,9 +426,12 @@ func parseSANExtension(der cryptobyte.String) (dnsNames, emailAddresses []string return errors.New("x509: SAN uniformResourceIdentifier is malformed") } uri, err := url.Parse(uriStr) - if err != nil || (err == nil && uri.Host != "" && !domainNameValid(uri.Host, false)) { + if err != nil { return fmt.Errorf("x509: cannot parse URI %q: %s", uriStr, err) } + if len(uri.Host) > 0 && !domainNameValid(uri.Host, false) { + return fmt.Errorf("x509: cannot parse URI %q: invalid domain", uriStr) + } uris = append(uris, uri) case nameTypeIP: switch len(data) { @@ -1296,36 +1295,58 @@ func ParseRevocationList(der []byte) (*RevocationList, error) { return rl, nil } -// domainNameValid does minimal domain name validity checking. 
In particular it -// enforces the following properties: -// - names cannot have the trailing period -// - names can only have a leading period if constraint is true -// - names must be <= 253 characters -// - names cannot have empty labels -// - names cannot labels that are longer than 63 characters -// -// Note that this does not enforce the LDH requirements for domain names. +// domainNameValid is an alloc-less version of the checks that +// domainToReverseLabels does. func domainNameValid(s string, constraint bool) bool { - if len(s) == 0 && constraint { + // TODO(#75835): This function omits a number of checks which we + // really should be doing to enforce that domain names are valid names per + // RFC 1034. We previously enabled these checks, but this broke a + // significant number of certificates we previously considered valid, and we + // happily create via CreateCertificate (et al). We should enable these + // checks, but will need to gate them behind a GODEBUG. + // + // I have left the checks we previously enabled, noted with "TODO(#75835)" so + // that we can easily re-enable them once we unbreak everyone. + + // TODO(#75835): this should only be true for constraints. + if len(s) == 0 { return true } - if len(s) == 0 || (!constraint && s[0] == '.') || s[len(s)-1] == '.' || len(s) > 253 { + + // Do not allow trailing period (FQDN format is not allowed in SANs or + // constraints). + if s[len(s)-1] == '.' { return false } + + // TODO(#75835): domains must have at least one label, cannot have + // a leading empty label, and cannot be longer than 253 characters. + // if len(s) == 0 || (!constraint && s[0] == '.') || len(s) > 253 { + // return false + // } + lastDot := -1 if constraint && s[0] == '.' { s = s[1:] } for i := 0; i <= len(s); i++ { + if i < len(s) && (s[i] < 33 || s[i] > 126) { + // Invalid character. + return false + } if i == len(s) || s[i] == '.' { labelLen := i if lastDot >= 0 { labelLen -= lastDot + 1 } - if labelLen == 0 || labelLen > 63 { + if labelLen == 0 { return false } + // TODO(#75835): labels cannot be longer than 63 characters. + // if labelLen > 63 { + // return false + // } lastDot = i } } diff --git a/src/crypto/x509/parser_test.go b/src/crypto/x509/parser_test.go index 1b553e362e48a0..d53b805b786990 100644 --- a/src/crypto/x509/parser_test.go +++ b/src/crypto/x509/parser_test.go @@ -5,6 +5,9 @@ package x509 import ( + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" "encoding/asn1" "encoding/pem" "os" @@ -260,7 +263,31 @@ func TestDomainNameValid(t *testing.T) { constraint bool valid bool }{ - {"empty name, name", "", false, false}, + // TODO(#75835): these tests are for stricter name validation, which we + // had to disable. Once we reenable these strict checks, behind a + // GODEBUG, we should add them back in. + // {"empty name, name", "", false, false}, + // {"254 char label, name", strings.Repeat("a.a", 84) + "aaa", false, false}, + // {"254 char label, constraint", strings.Repeat("a.a", 84) + "aaa", true, false}, + // {"253 char label, name", strings.Repeat("a.a", 84) + "aa", false, false}, + // {"253 char label, constraint", strings.Repeat("a.a", 84) + "aa", true, false}, + // {"64 char single label, name", strings.Repeat("a", 64), false, false}, + // {"64 char single label, constraint", strings.Repeat("a", 64), true, false}, + // {"64 char label, name", "a." + strings.Repeat("a", 64), false, false}, + // {"64 char label, constraint", "a." 
+ strings.Repeat("a", 64), true, false}, + + // TODO(#75835): these are the inverse of the tests above, they should be removed + // once the strict checking is enabled. + {"254 char label, name", strings.Repeat("a.a", 84) + "aaa", false, true}, + {"254 char label, constraint", strings.Repeat("a.a", 84) + "aaa", true, true}, + {"253 char label, name", strings.Repeat("a.a", 84) + "aa", false, true}, + {"253 char label, constraint", strings.Repeat("a.a", 84) + "aa", true, true}, + {"64 char single label, name", strings.Repeat("a", 64), false, true}, + {"64 char single label, constraint", strings.Repeat("a", 64), true, true}, + {"64 char label, name", "a." + strings.Repeat("a", 64), false, true}, + {"64 char label, constraint", "a." + strings.Repeat("a", 64), true, true}, + + // Check we properly enforce properties of domain names. {"empty name, constraint", "", true, true}, {"empty label, name", "a..a", false, false}, {"empty label, constraint", "a..a", true, false}, @@ -274,23 +301,60 @@ func TestDomainNameValid(t *testing.T) { {"trailing period, constraint", "a.", true, false}, {"bare label, name", "a", false, true}, {"bare label, constraint", "a", true, true}, - {"254 char label, name", strings.Repeat("a.a", 84) + "aaa", false, false}, - {"254 char label, constraint", strings.Repeat("a.a", 84) + "aaa", true, false}, - {"253 char label, name", strings.Repeat("a.a", 84) + "aa", false, false}, - {"253 char label, constraint", strings.Repeat("a.a", 84) + "aa", true, false}, - {"64 char single label, name", strings.Repeat("a", 64), false, false}, - {"64 char single label, constraint", strings.Repeat("a", 64), true, false}, {"63 char single label, name", strings.Repeat("a", 63), false, true}, {"63 char single label, constraint", strings.Repeat("a", 63), true, true}, - {"64 char label, name", "a." + strings.Repeat("a", 64), false, false}, - {"64 char label, constraint", "a." + strings.Repeat("a", 64), true, false}, {"63 char label, name", "a." + strings.Repeat("a", 63), false, true}, {"63 char label, constraint", "a." + strings.Repeat("a", 63), true, true}, } { t.Run(tc.name, func(t *testing.T) { - if tc.valid != domainNameValid(tc.dnsName, tc.constraint) { + valid := domainNameValid(tc.dnsName, tc.constraint) + if tc.valid != valid { t.Errorf("domainNameValid(%q, %t) = %v; want %v", tc.dnsName, tc.constraint, !tc.valid, tc.valid) } + // Also check that we enforce the same properties as domainToReverseLabels + trimmedName := tc.dnsName + if tc.constraint && len(trimmedName) > 1 && trimmedName[0] == '.' { + trimmedName = trimmedName[1:] + } + _, revValid := domainToReverseLabels(trimmedName) + if valid != revValid { + t.Errorf("domainNameValid(%q, %t) = %t != domainToReverseLabels(%q) = %t", tc.dnsName, tc.constraint, valid, trimmedName, revValid) + } }) } } + +func TestRoundtripWeirdSANs(t *testing.T) { + // TODO(#75835): check that certificates we create with CreateCertificate that have malformed SAN values + // can be parsed by ParseCertificate. We should eventually restrict this, but for now we have to maintain + // this property as people have been relying on it. 
+ k, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + t.Fatal(err) + } + badNames := []string{ + "baredomain", + "baredomain.", + strings.Repeat("a", 255), + strings.Repeat("a", 65) + ".com", + } + tmpl := &Certificate{ + EmailAddresses: badNames, + DNSNames: badNames, + } + b, err := CreateCertificate(rand.Reader, tmpl, tmpl, &k.PublicKey, k) + if err != nil { + t.Fatal(err) + } + _, err = ParseCertificate(b) + if err != nil { + t.Fatalf("Couldn't roundtrip certificate: %v", err) + } +} + +func FuzzDomainNameValid(f *testing.F) { + f.Fuzz(func(t *testing.T, data string) { + domainNameValid(data, false) + domainNameValid(data, true) + }) +} diff --git a/src/crypto/x509/verify.go b/src/crypto/x509/verify.go index 058153fbe73461..bf7e7ec058db2b 100644 --- a/src/crypto/x509/verify.go +++ b/src/crypto/x509/verify.go @@ -429,7 +429,7 @@ func domainToReverseLabels(domain string) (reverseLabels []string, ok bool) { return reverseLabels, true } -func matchEmailConstraint(mailbox rfc2821Mailbox, constraint string) (bool, error) { +func matchEmailConstraint(mailbox rfc2821Mailbox, constraint string, reversedDomainsCache map[string][]string, reversedConstraintsCache map[string][]string) (bool, error) { // If the constraint contains an @, then it specifies an exact mailbox // name. if strings.Contains(constraint, "@") { @@ -442,10 +442,10 @@ func matchEmailConstraint(mailbox rfc2821Mailbox, constraint string) (bool, erro // Otherwise the constraint is like a DNS constraint of the domain part // of the mailbox. - return matchDomainConstraint(mailbox.domain, constraint) + return matchDomainConstraint(mailbox.domain, constraint, reversedDomainsCache, reversedConstraintsCache) } -func matchURIConstraint(uri *url.URL, constraint string) (bool, error) { +func matchURIConstraint(uri *url.URL, constraint string, reversedDomainsCache map[string][]string, reversedConstraintsCache map[string][]string) (bool, error) { // From RFC 5280, Section 4.2.1.10: // “a uniformResourceIdentifier that does not include an authority // component with a host name specified as a fully qualified domain @@ -474,7 +474,7 @@ func matchURIConstraint(uri *url.URL, constraint string) (bool, error) { return false, fmt.Errorf("URI with IP (%q) cannot be matched against constraints", uri.String()) } - return matchDomainConstraint(host, constraint) + return matchDomainConstraint(host, constraint, reversedDomainsCache, reversedConstraintsCache) } func matchIPConstraint(ip net.IP, constraint *net.IPNet) (bool, error) { @@ -491,16 +491,21 @@ func matchIPConstraint(ip net.IP, constraint *net.IPNet) (bool, error) { return true, nil } -func matchDomainConstraint(domain, constraint string) (bool, error) { +func matchDomainConstraint(domain, constraint string, reversedDomainsCache map[string][]string, reversedConstraintsCache map[string][]string) (bool, error) { // The meaning of zero length constraints is not specified, but this // code follows NSS and accepts them as matching everything. 
if len(constraint) == 0 { return true, nil } - domainLabels, ok := domainToReverseLabels(domain) - if !ok { - return false, fmt.Errorf("x509: internal error: cannot parse domain %q", domain) + domainLabels, found := reversedDomainsCache[domain] + if !found { + var ok bool + domainLabels, ok = domainToReverseLabels(domain) + if !ok { + return false, fmt.Errorf("x509: internal error: cannot parse domain %q", domain) + } + reversedDomainsCache[domain] = domainLabels } // RFC 5280 says that a leading period in a domain name means that at @@ -514,9 +519,14 @@ func matchDomainConstraint(domain, constraint string) (bool, error) { constraint = constraint[1:] } - constraintLabels, ok := domainToReverseLabels(constraint) - if !ok { - return false, fmt.Errorf("x509: internal error: cannot parse domain %q", constraint) + constraintLabels, found := reversedConstraintsCache[constraint] + if !found { + var ok bool + constraintLabels, ok = domainToReverseLabels(constraint) + if !ok { + return false, fmt.Errorf("x509: internal error: cannot parse domain %q", constraint) + } + reversedConstraintsCache[constraint] = constraintLabels } if len(domainLabels) < len(constraintLabels) || @@ -637,6 +647,19 @@ func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *V } } + // Each time we do constraint checking, we need to check the constraints in + // the current certificate against all of the names that preceded it. We + // reverse these names using domainToReverseLabels, which is a relatively + // expensive operation. Since we check each name against each constraint, + // this requires us to do N*C calls to domainToReverseLabels (where N is the + // total number of names that preceed the certificate, and C is the total + // number of constraints in the certificate). By caching the results of + // calling domainToReverseLabels, we can reduce that to N+C calls at the + // cost of keeping all of the parsed names and constraints in memory until + // we return from isValid. 
+ reversedDomainsCache := map[string][]string{} + reversedConstraintsCache := map[string][]string{} + if (certType == intermediateCertificate || certType == rootCertificate) && c.hasNameConstraints() { toCheck := []*Certificate{} @@ -657,20 +680,20 @@ func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *V if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "email address", name, mailbox, func(parsedName, constraint any) (bool, error) { - return matchEmailConstraint(parsedName.(rfc2821Mailbox), constraint.(string)) + return matchEmailConstraint(parsedName.(rfc2821Mailbox), constraint.(string), reversedDomainsCache, reversedConstraintsCache) }, c.PermittedEmailAddresses, c.ExcludedEmailAddresses); err != nil { return err } case nameTypeDNS: name := string(data) - if _, ok := domainToReverseLabels(name); !ok { + if !domainNameValid(name, false) { return fmt.Errorf("x509: cannot parse dnsName %q", name) } if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "DNS name", name, name, func(parsedName, constraint any) (bool, error) { - return matchDomainConstraint(parsedName.(string), constraint.(string)) + return matchDomainConstraint(parsedName.(string), constraint.(string), reversedDomainsCache, reversedConstraintsCache) }, c.PermittedDNSDomains, c.ExcludedDNSDomains); err != nil { return err } @@ -684,7 +707,7 @@ func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *V if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "URI", name, uri, func(parsedName, constraint any) (bool, error) { - return matchURIConstraint(parsedName.(*url.URL), constraint.(string)) + return matchURIConstraint(parsedName.(*url.URL), constraint.(string), reversedDomainsCache, reversedConstraintsCache) }, c.PermittedURIDomains, c.ExcludedURIDomains); err != nil { return err } diff --git a/src/crypto/x509/verify_test.go b/src/crypto/x509/verify_test.go index 5595f99ea5e43a..60a4cea9146adf 100644 --- a/src/crypto/x509/verify_test.go +++ b/src/crypto/x509/verify_test.go @@ -1352,7 +1352,7 @@ var nameConstraintTests = []struct { func TestNameConstraints(t *testing.T) { for i, test := range nameConstraintTests { - result, err := matchDomainConstraint(test.domain, test.constraint) + result, err := matchDomainConstraint(test.domain, test.constraint, map[string][]string{}, map[string][]string{}) if err != nil && !test.expectError { t.Errorf("unexpected error for test #%d: domain=%s, constraint=%s, err=%s", i, test.domain, test.constraint, err) From 6bcd97d9f4386528aa85eb3cc27da0ed902de870 Mon Sep 17 00:00:00 2001 From: Julien Cretel Date: Wed, 1 Oct 2025 20:08:18 +0000 Subject: [PATCH 127/152] all: replace calls to errors.As with errors.AsType This change replaces most occurrences (in code as well as in comments) of errors.As with errors.AsType. It leaves the errors package and vendored code untouched. 
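The shape of the rewrite, sketched with *fs.PathError as an illustrative
target type (matching one of the call sites updated below):

	package main

	import (
		"errors"
		"fmt"
		"io/fs"
		"os"
	)

	func main() {
		_, err := os.Open("does-not-exist")

		// Old pattern: declare a target and pass its address to errors.As.
		var pe *fs.PathError
		if errors.As(err, &pe) {
			fmt.Println("As:", pe.Path)
		}

		// New pattern: the target type is a type parameter and the typed
		// value is returned directly, together with an ok bool.
		if pe, ok := errors.AsType[*fs.PathError](err); ok {
			fmt.Println("AsType:", pe.Path)
		}
	}
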
Change-Id: I3bde73f318a0b408bdb8f5a251494af15a13118a GitHub-Last-Rev: 8aaaa36a5a12d2a6a90c6d51680464e1a3115139 GitHub-Pull-Request: golang/go#75698 Reviewed-on: https://go-review.googlesource.com/c/go/+/708495 Auto-Submit: Michael Pratt LUCI-TryBot-Result: Go LUCI Reviewed-by: Alan Donovan Reviewed-by: Michael Pratt --- src/cmd/compile/internal/types2/api_test.go | 8 ++-- src/cmd/go/internal/base/path.go | 3 +- src/cmd/go/internal/doc/pkgsite.go | 3 +- src/cmd/go/internal/fmtcmd/fmt.go | 9 ++--- src/cmd/go/internal/load/pkg.go | 4 +- src/cmd/go/internal/modget/get.go | 38 ++++++++++--------- src/cmd/go/internal/modget/query.go | 2 +- src/cmd/go/internal/modload/build.go | 22 +++++------ src/cmd/go/internal/modload/buildlist.go | 2 +- src/cmd/go/internal/modload/edit.go | 6 ++- src/cmd/go/internal/modload/import.go | 4 +- src/cmd/go/internal/modload/list.go | 6 +-- src/cmd/go/internal/modload/load.go | 20 +++++----- src/cmd/go/internal/modload/modfile.go | 10 +++-- src/cmd/go/internal/modload/query.go | 2 +- src/cmd/go/internal/test/test.go | 3 +- src/cmd/go/internal/test/testflag.go | 4 +- .../go/internal/vcweb/vcstest/vcstest_test.go | 4 +- src/cmd/go/internal/vcweb/vcweb.go | 4 +- src/cmd/go/internal/version/version.go | 2 +- src/cmd/go/internal/vet/vetflag.go | 2 +- src/cmd/go/internal/work/build.go | 2 +- src/cmd/go/internal/work/exec.go | 7 ++-- .../bootstrap_test/experiment_toolid_test.go | 2 +- src/cmd/internal/robustio/robustio_darwin.go | 7 +--- src/cmd/internal/robustio/robustio_flaky.go | 3 +- src/cmd/internal/robustio/robustio_windows.go | 3 +- src/cmd/internal/script/engine.go | 8 ++-- .../internal/script/scripttest/scripttest.go | 2 +- src/cmd/link/link_test.go | 12 +++--- src/crypto/tls/conn.go | 4 +- src/crypto/tls/handshake_server.go | 5 +-- src/crypto/tls/handshake_server_test.go | 3 +- src/crypto/tls/quic.go | 7 ++-- src/crypto/tls/quic_test.go | 3 +- src/debug/buildinfo/buildinfo.go | 2 +- src/encoding/json/jsontext/coder_test.go | 2 +- src/encoding/json/jsontext/fuzz_test.go | 7 ++-- src/encoding/json/jsontext/state.go | 3 +- src/encoding/json/v2/errors.go | 3 +- src/encoding/json/v2/example_test.go | 3 +- src/go/types/api_test.go | 8 ++-- .../runtime/wasitest/testdata/tcpecho.go | 3 +- src/internal/testenv/testenv_unix.go | 3 +- src/io/fs/readdir_test.go | 4 +- src/log/log_test.go | 3 +- src/log/slog/logger_test.go | 3 +- src/net/dnsclient_unix.go | 3 +- src/net/dnsclient_unix_test.go | 9 ++--- src/net/http/h2_error_test.go | 17 ++++----- src/net/lookup_test.go | 10 ++--- src/os/exec/exec_test.go | 10 ++--- src/os/os_test.go | 3 +- src/runtime/pprof/proto_windows.go | 3 +- src/testing/fstest/testfs.go | 2 +- src/testing/fstest/testfs_test.go | 8 +++- src/text/template/exec_test.go | 3 +- src/text/template/funcs.go | 2 +- 58 files changed, 160 insertions(+), 180 deletions(-) diff --git a/src/cmd/compile/internal/types2/api_test.go b/src/cmd/compile/internal/types2/api_test.go index 0d3c8b8e3e5e0f..4b7012e6c45e9f 100644 --- a/src/cmd/compile/internal/types2/api_test.go +++ b/src/cmd/compile/internal/types2/api_test.go @@ -2468,8 +2468,8 @@ func TestInstantiateErrors(t *testing.T) { t.Fatalf("Instantiate(%v, %v) returned nil error, want non-nil", T, test.targs) } - var argErr *ArgumentError - if !errors.As(err, &argErr) { + argErr, ok := errors.AsType[*ArgumentError](err) + if !ok { t.Fatalf("Instantiate(%v, %v): error is not an *ArgumentError", T, test.targs) } @@ -2484,8 +2484,8 @@ func TestArgumentErrorUnwrapping(t *testing.T) { Index: 1, Err: Error{Msg: "test"}, } - 
var e Error - if !errors.As(err, &e) { + e, ok := errors.AsType[Error](err) + if !ok { t.Fatalf("error %v does not wrap types.Error", err) } if e.Msg != "test" { diff --git a/src/cmd/go/internal/base/path.go b/src/cmd/go/internal/base/path.go index 5bb7bc3bde63e2..a7577f62e76898 100644 --- a/src/cmd/go/internal/base/path.go +++ b/src/cmd/go/internal/base/path.go @@ -55,8 +55,7 @@ func sameFile(path1, path2 string) bool { // ShortPathError rewrites the path in err using base.ShortPath, if err is a wrapped PathError. func ShortPathError(err error) error { - var pe *fs.PathError - if errors.As(err, &pe) { + if pe, ok := errors.AsType[*fs.PathError](err); ok { pe.Path = ShortPath(pe.Path) } return err diff --git a/src/cmd/go/internal/doc/pkgsite.go b/src/cmd/go/internal/doc/pkgsite.go index 06289ac4fc9a8a..c173167b6329a4 100644 --- a/src/cmd/go/internal/doc/pkgsite.go +++ b/src/cmd/go/internal/doc/pkgsite.go @@ -81,8 +81,7 @@ func doPkgsite(urlPath, fragment string) error { cmd.Stderr = os.Stderr if err := cmd.Run(); err != nil { - var ee *exec.ExitError - if errors.As(err, &ee) { + if ee, ok := errors.AsType[*exec.ExitError](err); ok { // Exit with the same exit status as pkgsite to avoid // printing of "exit status" error messages. // Any relevant messages have already been printed diff --git a/src/cmd/go/internal/fmtcmd/fmt.go b/src/cmd/go/internal/fmtcmd/fmt.go index 62b22f6bcfa407..83fba9661a66fc 100644 --- a/src/cmd/go/internal/fmtcmd/fmt.go +++ b/src/cmd/go/internal/fmtcmd/fmt.go @@ -68,11 +68,10 @@ func runFmt(ctx context.Context, cmd *base.Command, args []string) { continue } if pkg.Error != nil { - var nogo *load.NoGoError - var embed *load.EmbedError - if (errors.As(pkg.Error, &nogo) || errors.As(pkg.Error, &embed)) && len(pkg.InternalAllGoFiles()) > 0 { - // Skip this error, as we will format - // all files regardless. + if _, ok := errors.AsType[*load.NoGoError](pkg.Error); ok { + // Skip this error, as we will format all files regardless. + } else if _, ok := errors.AsType[*load.EmbedError](pkg.Error); ok && len(pkg.InternalAllGoFiles()) > 0 { + // Skip this error, as we will format all files regardless. } else { base.Errorf("%v", pkg.Error) continue diff --git a/src/cmd/go/internal/load/pkg.go b/src/cmd/go/internal/load/pkg.go index 135d7579d65dff..a894affc844066 100644 --- a/src/cmd/go/internal/load/pkg.go +++ b/src/cmd/go/internal/load/pkg.go @@ -290,8 +290,8 @@ func (p *Package) setLoadPackageDataError(err error, path string, stk *ImportSta // Replace (possibly wrapped) *build.NoGoError with *load.NoGoError. // The latter is more specific about the cause. - var nogoErr *build.NoGoError - if errors.As(err, &nogoErr) { + nogoErr, ok := errors.AsType[*build.NoGoError](err) + if ok { if p.Dir == "" && nogoErr.Dir != "" { p.Dir = nogoErr.Dir } diff --git a/src/cmd/go/internal/modget/get.go b/src/cmd/go/internal/modget/get.go index d8b1f83bf1d132..5017f878ed3647 100644 --- a/src/cmd/go/internal/modget/get.go +++ b/src/cmd/go/internal/modget/get.go @@ -1294,15 +1294,13 @@ func (r *resolver) loadPackages(ctx context.Context, patterns []string, findPack continue } - var ( - importMissing *modload.ImportMissingError - ambiguous *modload.AmbiguousImportError - ) - if !errors.As(err, &importMissing) && !errors.As(err, &ambiguous) { - // The package, which is a dependency of something we care about, has some - // problem that we can't resolve with a version change. - // Leave the error for the final LoadPackages call. 
- continue + if _, ok := errors.AsType[*modload.ImportMissingError](err); !ok { + if _, ok := errors.AsType[*modload.AmbiguousImportError](err); !ok { + // The package, which is a dependency of something we care about, has some + // problem that we can't resolve with a version change. + // Leave the error for the final LoadPackages call. + continue + } } path := pkgPath @@ -1674,7 +1672,7 @@ func (r *resolver) checkPackageProblems(ctx context.Context, pkgPatterns []strin } base.SetExitStatus(1) - if ambiguousErr := (*modload.AmbiguousImportError)(nil); errors.As(err, &ambiguousErr) { + if ambiguousErr, ok := errors.AsType[*modload.AmbiguousImportError](err); ok { for _, m := range ambiguousErr.Modules { relevantMods[m] |= hasPkg } @@ -1717,7 +1715,7 @@ func (r *resolver) checkPackageProblems(ctx context.Context, pkgPatterns []strin i := i r.work.Add(func() { err := modload.CheckRetractions(ctx, retractions[i].m) - if retractErr := (*modload.ModuleRetractedError)(nil); errors.As(err, &retractErr) { + if _, ok := errors.AsType[*modload.ModuleRetractedError](err); ok { retractions[i].message = err.Error() } }) @@ -1994,8 +1992,8 @@ func (r *resolver) updateBuildList(ctx context.Context, additions []module.Versi toolchain.SwitchOrFatal(ctx, err) } - var constraint *modload.ConstraintError - if !errors.As(err, &constraint) { + constraint, ok := errors.AsType[*modload.ConstraintError](err) + if !ok { base.Fatal(err) } @@ -2066,8 +2064,11 @@ func reqsFromGoMod(f *modfile.File) []module.Version { // does not exist at the requested version, either because the module does not // exist at all or because it does not include that specific version. func isNoSuchModuleVersion(err error) bool { - var noMatch *modload.NoMatchingVersionError - return errors.Is(err, os.ErrNotExist) || errors.As(err, &noMatch) + if errors.Is(err, os.ErrNotExist) { + return true + } + _, ok := errors.AsType[*modload.NoMatchingVersionError](err) + return ok } // isNoSuchPackageVersion reports whether err indicates that the requested @@ -2075,8 +2076,11 @@ func isNoSuchModuleVersion(err error) bool { // that could contain it exists at that version, or because every such module // that does exist does not actually contain the package. func isNoSuchPackageVersion(err error) bool { - var noPackage *modload.PackageNotInModuleError - return isNoSuchModuleVersion(err) || errors.As(err, &noPackage) + if isNoSuchModuleVersion(err) { + return true + } + _, ok := errors.AsType[*modload.PackageNotInModuleError](err) + return ok } // workspace represents the set of modules in a workspace. diff --git a/src/cmd/go/internal/modget/query.go b/src/cmd/go/internal/modget/query.go index db09947293c6dc..59f3023ffc4f41 100644 --- a/src/cmd/go/internal/modget/query.go +++ b/src/cmd/go/internal/modget/query.go @@ -283,7 +283,7 @@ func reportError(q *query, err error) { // If err already mentions all of the relevant parts of q, just log err to // reduce stutter. Otherwise, log both q and err. // - // TODO(bcmills): Use errors.As to unpack these errors instead of parsing + // TODO(bcmills): Use errors.AsType to unpack these errors instead of parsing // strings with regular expressions. 
if !utf8.ValidString(q.pattern) || !utf8.ValidString(q.version) { diff --git a/src/cmd/go/internal/modload/build.go b/src/cmd/go/internal/modload/build.go index cb168d58a227bc..a8ab82d1ecb246 100644 --- a/src/cmd/go/internal/modload/build.go +++ b/src/cmd/go/internal/modload/build.go @@ -129,11 +129,10 @@ func addUpdate(ctx context.Context, m *modinfo.ModulePublic) { } info, err := Query(ctx, m.Path, "upgrade", m.Version, CheckAllowed) - var noVersionErr *NoMatchingVersionError - if errors.Is(err, ErrDisallowed) || + if _, ok := errors.AsType[*NoMatchingVersionError](err); ok || errors.Is(err, fs.ErrNotExist) || - errors.As(err, &noVersionErr) { - // Ignore "not found" and "no matching version" errors. + errors.Is(err, ErrDisallowed) { + // Ignore "no matching version" and "not found" errors. // This means the proxy has no matching version or no versions at all. // // Ignore "disallowed" errors. This means the current version is @@ -238,10 +237,10 @@ func addRetraction(ctx context.Context, m *modinfo.ModulePublic) { } err := CheckRetractions(ctx, module.Version{Path: m.Path, Version: m.Version}) - var noVersionErr *NoMatchingVersionError - var retractErr *ModuleRetractedError - if err == nil || errors.Is(err, fs.ErrNotExist) || errors.As(err, &noVersionErr) { - // Ignore "not found" and "no matching version" errors. + if err == nil { + return + } else if _, ok := errors.AsType[*NoMatchingVersionError](err); ok || errors.Is(err, fs.ErrNotExist) { + // Ignore "no matching version" and "not found" errors. // This means the proxy has no matching version or no versions at all. // // We should report other errors though. An attacker that controls the @@ -250,7 +249,7 @@ func addRetraction(ctx context.Context, m *modinfo.ModulePublic) { // hide versions, since the "list" and "latest" endpoints are not // authenticated. return - } else if errors.As(err, &retractErr) { + } else if retractErr, ok := errors.AsType[*ModuleRetractedError](err); ok { if len(retractErr.Rationale) == 0 { m.Retracted = []string{"retracted by module author"} } else { @@ -265,9 +264,8 @@ func addRetraction(ctx context.Context, m *modinfo.ModulePublic) { // author. m.Error is set if there's an error loading deprecation information. func addDeprecation(ctx context.Context, m *modinfo.ModulePublic) { deprecation, err := CheckDeprecation(ctx, module.Version{Path: m.Path, Version: m.Version}) - var noVersionErr *NoMatchingVersionError - if errors.Is(err, fs.ErrNotExist) || errors.As(err, &noVersionErr) { - // Ignore "not found" and "no matching version" errors. + if _, ok := errors.AsType[*NoMatchingVersionError](err); ok || errors.Is(err, fs.ErrNotExist) { + // Ignore "no matching version" and "not found" errors. // This means the proxy has no matching version or no versions at all. // // We should report other errors though. 
An attacker that controls the diff --git a/src/cmd/go/internal/modload/buildlist.go b/src/cmd/go/internal/modload/buildlist.go index 086626042c95b7..cf64ee1dc210f9 100644 --- a/src/cmd/go/internal/modload/buildlist.go +++ b/src/cmd/go/internal/modload/buildlist.go @@ -922,7 +922,7 @@ func tidyPrunedRoots(ctx context.Context, mainModule module.Version, old *Requir q.Add(func() { skipModFile := true _, _, _, _, err := importFromModules(ctx, pkg.path, tidy, nil, skipModFile) - if aie := (*AmbiguousImportError)(nil); errors.As(err, &aie) { + if _, ok := errors.AsType[*AmbiguousImportError](err); ok { disambiguateRoot.Store(pkg.mod, true) } }) diff --git a/src/cmd/go/internal/modload/edit.go b/src/cmd/go/internal/modload/edit.go index 153c21a90cc423..96d864545d042a 100644 --- a/src/cmd/go/internal/modload/edit.go +++ b/src/cmd/go/internal/modload/edit.go @@ -226,8 +226,10 @@ func editRequirements(ctx context.Context, rs *Requirements, tryUpgrade, mustSel // conflict we discover from one or more of the original roots. mg, upgradedRoots, err := extendGraph(ctx, rootPruning, roots, selectedRoot) if err != nil { - var tooNew *gover.TooNewError - if mg == nil || errors.As(err, &tooNew) { + if mg == nil { + return orig, false, err + } + if _, ok := errors.AsType[*gover.TooNewError](err); ok { return orig, false, err } // We're about to walk the entire extended module graph, so we will find diff --git a/src/cmd/go/internal/modload/import.go b/src/cmd/go/internal/modload/import.go index 83e7e037117a4d..392fe3edd77b52 100644 --- a/src/cmd/go/internal/modload/import.go +++ b/src/cmd/go/internal/modload/import.go @@ -410,7 +410,7 @@ func importFromModules(ctx context.Context, path string, rs *Requirements, mg *M root, isLocal, err := fetch(ctx, m) if err != nil { - if sumErr := (*sumMissingError)(nil); errors.As(err, &sumErr) { + if _, ok := errors.AsType[*sumMissingError](err); ok { // We are missing a sum needed to fetch a module in the build list. // We can't verify that the package is unique, and we may not find // the package at all. Keep checking other modules to decide which @@ -549,7 +549,7 @@ func queryImport(ctx context.Context, path string, rs *Requirements) (module.Ver for _, m := range mods { root, isLocal, err := fetch(ctx, m) if err != nil { - if sumErr := (*sumMissingError)(nil); errors.As(err, &sumErr) { + if _, ok := errors.AsType[*sumMissingError](err); ok { return module.Version{}, &ImportMissingSumError{importPath: path} } return module.Version{}, err diff --git a/src/cmd/go/internal/modload/list.go b/src/cmd/go/internal/modload/list.go index b66e73a112cedd..803bab49ae96bb 100644 --- a/src/cmd/go/internal/modload/list.go +++ b/src/cmd/go/internal/modload/list.go @@ -305,13 +305,11 @@ func listModules(ctx context.Context, rs *Requirements, args []string, mode List // modinfoError wraps an error to create an error message in // modinfo.ModuleError with minimal redundancy. func modinfoError(path, vers string, err error) *modinfo.ModuleError { - var nerr *NoMatchingVersionError - var merr *module.ModuleError - if errors.As(err, &nerr) { + if _, ok := errors.AsType[*NoMatchingVersionError](err); ok { // NoMatchingVersionError contains the query, so we don't mention the // query again in ModuleError. err = &module.ModuleError{Path: path, Err: err} - } else if !errors.As(err, &merr) { + } else if _, ok := errors.AsType[*module.ModuleError](err); !ok { // If the error does not contain path and version, wrap it in a // module.ModuleError. 
err = &module.ModuleError{Path: path, Version: vers, Err: err} diff --git a/src/cmd/go/internal/modload/load.go b/src/cmd/go/internal/modload/load.go index 413de8148fb79e..0d661eb2e7b0be 100644 --- a/src/cmd/go/internal/modload/load.go +++ b/src/cmd/go/internal/modload/load.go @@ -1304,7 +1304,7 @@ func loadFromRoots(ctx context.Context, params loaderParams) *loader { } // Add importer information to checksum errors. - if sumErr := (*ImportMissingSumError)(nil); errors.As(pkg.err, &sumErr) { + if sumErr, ok := errors.AsType[*ImportMissingSumError](pkg.err); ok { if importer := pkg.stack; importer != nil { sumErr.importer = importer.path sumErr.importerVersion = importer.mod.Version @@ -1312,7 +1312,7 @@ func loadFromRoots(ctx context.Context, params loaderParams) *loader { } } - if stdErr := (*ImportMissingError)(nil); errors.As(pkg.err, &stdErr) && stdErr.isStd { + if stdErr, ok := errors.AsType[*ImportMissingError](pkg.err); ok && stdErr.isStd { // Add importer go version information to import errors of standard // library packages arising from newer releases. if importer := pkg.stack; importer != nil { @@ -1384,7 +1384,7 @@ func (ld *loader) updateRequirements(ctx context.Context) (changed bool, err err var maxTooNew *gover.TooNewError for _, pkg := range ld.pkgs { if pkg.err != nil { - if tooNew := (*gover.TooNewError)(nil); errors.As(pkg.err, &tooNew) { + if tooNew, ok := errors.AsType[*gover.TooNewError](pkg.err); ok { if maxTooNew == nil || gover.Compare(tooNew.GoVersion, maxTooNew.GoVersion) > 0 { maxTooNew = tooNew } @@ -1573,7 +1573,7 @@ func (ld *loader) resolveMissingImports(ctx context.Context) (modAddedBy map[mod // we should only add the missing import once. continue } - if !errors.As(pkg.err, new(*ImportMissingError)) { + if _, ok := errors.AsType[*ImportMissingError](pkg.err); !ok { // Leave other errors for Import or load.Packages to report. continue } @@ -1584,8 +1584,7 @@ func (ld *loader) resolveMissingImports(ctx context.Context) (modAddedBy map[mod var err error mod, err = queryImport(ctx, pkg.path, ld.requirements) if err != nil { - var ime *ImportMissingError - if errors.As(err, &ime) { + if ime, ok := errors.AsType[*ImportMissingError](err); ok { for curstack := pkg.stack; curstack != nil; curstack = curstack.stack { if LoaderState.MainModules.Contains(curstack.mod.Path) { ime.ImportingMainModule = curstack.mod @@ -1625,7 +1624,7 @@ func (ld *loader) resolveMissingImports(ctx context.Context) (modAddedBy map[mod maxTooNewPkg *loadPkg ) for _, pm := range pkgMods { - if tooNew := (*gover.TooNewError)(nil); errors.As(pm.pkg.err, &tooNew) { + if tooNew, ok := errors.AsType[*gover.TooNewError](pm.pkg.err); ok { if maxTooNew == nil || gover.Compare(tooNew.GoVersion, maxTooNew.GoVersion) > 0 { maxTooNew = tooNew maxTooNewPkg = pm.pkg @@ -1771,8 +1770,7 @@ func (ld *loader) preloadRootModules(ctx context.Context, rootPkgs []string) (ch // full module graph. m, _, _, _, err := importFromModules(ctx, path, ld.requirements, nil, ld.skipImportModFiles) if err != nil { - var missing *ImportMissingError - if errors.As(err, &missing) && ld.ResolveMissingImports { + if _, ok := errors.AsType[*ImportMissingError](err); ok && ld.ResolveMissingImports { // This package isn't provided by any selected module. // If we can find it, it will be a new root dependency. 
m, err = queryImport(ctx, path, ld.requirements) @@ -2196,14 +2194,14 @@ func (ld *loader) checkTidyCompatibility(ctx context.Context, rs *Requirements, // module that previously provided the package to a version that no // longer does, or to a version for which the module source code (but // not the go.mod file in isolation) has a checksum error. - if missing := (*ImportMissingError)(nil); errors.As(mismatch.err, &missing) { + if _, ok := errors.AsType[*ImportMissingError](mismatch.err); ok { selected := module.Version{ Path: pkg.mod.Path, Version: mg.Selected(pkg.mod.Path), } ld.error(fmt.Errorf("%s loaded from %v,\n\tbut go %s would fail to locate it in %s", pkg.stackText(), pkg.mod, compatVersion, selected)) } else { - if ambiguous := (*AmbiguousImportError)(nil); errors.As(mismatch.err, &ambiguous) { + if _, ok := errors.AsType[*AmbiguousImportError](mismatch.err); ok { // TODO: Is this check needed? } ld.error(fmt.Errorf("%s loaded from %v,\n\tbut go %s would fail to locate it:\n\t%v", pkg.stackText(), pkg.mod, compatVersion, mismatch.err)) diff --git a/src/cmd/go/internal/modload/modfile.go b/src/cmd/go/internal/modload/modfile.go index fa2348d97baf25..3fdbdc7010b993 100644 --- a/src/cmd/go/internal/modload/modfile.go +++ b/src/cmd/go/internal/modload/modfile.go @@ -76,8 +76,7 @@ func ReadModFile(gomod string, fix modfile.VersionFixer) (data []byte, f *modfil } func shortPathErrorList(err error) error { - var el modfile.ErrorList - if errors.As(err, &el) { + if el, ok := errors.AsType[modfile.ErrorList](err); ok { for i := range el { el[i].Filename = base.ShortPath(el[i].Filename) } @@ -175,12 +174,15 @@ func (e *excludedError) Is(err error) bool { return err == ErrDisallowed } // its author. func CheckRetractions(ctx context.Context, m module.Version) (err error) { defer func() { - if retractErr := (*ModuleRetractedError)(nil); err == nil || errors.As(err, &retractErr) { + if err == nil { + return + } + if _, ok := errors.AsType[*ModuleRetractedError](err); ok { return } // Attribute the error to the version being checked, not the version from // which the retractions were to be loaded. 
- if mErr := (*module.ModuleError)(nil); errors.As(err, &mErr) { + if mErr, ok := errors.AsType[*module.ModuleError](err); ok { err = mErr.Err } err = &retractionLoadingError{m: m, err: err} diff --git a/src/cmd/go/internal/modload/query.go b/src/cmd/go/internal/modload/query.go index 94ee8bc955918c..b37a244fbbacdd 100644 --- a/src/cmd/go/internal/modload/query.go +++ b/src/cmd/go/internal/modload/query.go @@ -932,7 +932,7 @@ func queryPrefixModules(ctx context.Context, candidateModules []string, queryMod if notExistErr == nil { notExistErr = rErr } - } else if iv := (*module.InvalidVersionError)(nil); errors.As(rErr, &iv) { + } else if _, ok := errors.AsType[*module.InvalidVersionError](rErr); ok { if invalidVersion == nil { invalidVersion = rErr } diff --git a/src/cmd/go/internal/test/test.go b/src/cmd/go/internal/test/test.go index e667bd64f7997f..e225929add20c2 100644 --- a/src/cmd/go/internal/test/test.go +++ b/src/cmd/go/internal/test/test.go @@ -1741,8 +1741,7 @@ func (r *runTestActor) Act(b *work.Builder, ctx context.Context, a *work.Action) } else if errors.Is(err, exec.ErrWaitDelay) { fmt.Fprintf(cmd.Stdout, "*** Test I/O incomplete %v after exiting.\n", cmd.WaitDelay) } - var ee *exec.ExitError - if len(out) == 0 || !errors.As(err, &ee) || !ee.Exited() { + if ee, ok := errors.AsType[*exec.ExitError](err); !ok || !ee.Exited() || len(out) == 0 { // If there was no test output, print the exit status so that the reason // for failure is clear. fmt.Fprintf(cmd.Stdout, "%s\n", err) diff --git a/src/cmd/go/internal/test/testflag.go b/src/cmd/go/internal/test/testflag.go index fc2b22cb56a9ee..d6891a1d0b955b 100644 --- a/src/cmd/go/internal/test/testflag.go +++ b/src/cmd/go/internal/test/testflag.go @@ -261,7 +261,7 @@ func testFlags(args []string) (packageNames, passToTest []string) { break } - if nf := (cmdflag.NonFlagError{}); errors.As(err, &nf) { + if nf, ok := errors.AsType[cmdflag.NonFlagError](err); ok { if !inPkgList && packageNames != nil { // We already saw the package list previously, and this argument is not // a flag, so it — and everything after it — must be either a value for @@ -296,7 +296,7 @@ func testFlags(args []string) (packageNames, passToTest []string) { inPkgList = false } - if nd := (cmdflag.FlagNotDefinedError{}); errors.As(err, &nd) { + if nd, ok := errors.AsType[cmdflag.FlagNotDefinedError](err); ok { // This is a flag we do not know. We must assume that any args we see // after this might be flag arguments, not package names, so make // packageNames non-nil to indicate that the package list is complete. 
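
Illustrative sketch (not part of the applied diff): every hunk in this change performs the same mechanical rewrite, replacing a declared target variable plus errors.As with the generic errors.AsType, which returns the typed error and an ok flag. The *fs.PathError target below is only a stand-in; any of the error types in the hunks above follows the same shape.

package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

func main() {
	_, err := os.Open("no-such-file")

	// Old idiom: declare a target and pass its address to errors.As.
	var perr *fs.PathError
	if errors.As(err, &perr) {
		fmt.Println("errors.As found path:", perr.Path)
	}

	// New idiom used throughout this patch: errors.AsType returns the
	// typed error and an ok flag, so no target declaration is needed.
	if perr, ok := errors.AsType[*fs.PathError](err); ok {
		fmt.Println("errors.AsType found path:", perr.Path)
	}
}
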
diff --git a/src/cmd/go/internal/vcweb/vcstest/vcstest_test.go b/src/cmd/go/internal/vcweb/vcstest/vcstest_test.go index 67234ac20d4628..6a6a0eee57ce85 100644 --- a/src/cmd/go/internal/vcweb/vcstest/vcstest_test.go +++ b/src/cmd/go/internal/vcweb/vcstest/vcstest_test.go @@ -155,10 +155,10 @@ func TestScripts(t *testing.T) { t.Log(buf) } if err != nil { - if notInstalled := (vcweb.ServerNotInstalledError{}); errors.As(err, ¬Installed) || errors.Is(err, exec.ErrNotFound) { + if _, ok := errors.AsType[vcweb.ServerNotInstalledError](err); ok || errors.Is(err, exec.ErrNotFound) { t.Skip(err) } - if skip := (vcweb.SkipError{}); errors.As(err, &skip) { + if skip, ok := errors.AsType[vcweb.SkipError](err); ok { if skip.Msg == "" { t.Skip("SKIP") } else { diff --git a/src/cmd/go/internal/vcweb/vcweb.go b/src/cmd/go/internal/vcweb/vcweb.go index b81ff5e63de72a..4b4e127bb042e0 100644 --- a/src/cmd/go/internal/vcweb/vcweb.go +++ b/src/cmd/go/internal/vcweb/vcweb.go @@ -244,9 +244,9 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, req *http.Request) { }) if err != nil { s.logger.Print(err) - if notFound := (ScriptNotFoundError{}); errors.As(err, ¬Found) { + if _, ok := errors.AsType[ScriptNotFoundError](err); ok { http.NotFound(w, req) - } else if notInstalled := (ServerNotInstalledError{}); errors.As(err, ¬Installed) || errors.Is(err, exec.ErrNotFound) { + } else if _, ok := errors.AsType[ServerNotInstalledError](err); ok || errors.Is(err, exec.ErrNotFound) { http.Error(w, err.Error(), http.StatusNotImplemented) } else { http.Error(w, err.Error(), http.StatusInternalServerError) diff --git a/src/cmd/go/internal/version/version.go b/src/cmd/go/internal/version/version.go index c26dd42b4e1a08..781bc080e89fe4 100644 --- a/src/cmd/go/internal/version/version.go +++ b/src/cmd/go/internal/version/version.go @@ -168,7 +168,7 @@ func scanFile(file string, info fs.FileInfo, mustPrint bool) bool { bi, err := buildinfo.ReadFile(file) if err != nil { if mustPrint { - if pathErr := (*os.PathError)(nil); errors.As(err, &pathErr) && filepath.Clean(pathErr.Path) == filepath.Clean(file) { + if pathErr, ok := errors.AsType[*os.PathError](err); ok && filepath.Clean(pathErr.Path) == filepath.Clean(file) { fmt.Fprintf(os.Stderr, "%v\n", file) } else { // Skip errors for non-Go binaries. diff --git a/src/cmd/go/internal/vet/vetflag.go b/src/cmd/go/internal/vet/vetflag.go index 7ebd8c9bfd19eb..7342b99d6e36bb 100644 --- a/src/cmd/go/internal/vet/vetflag.go +++ b/src/cmd/go/internal/vet/vetflag.go @@ -160,7 +160,7 @@ func toolFlags(cmd *base.Command, args []string) (passToTool, packageNames []str break } - if nf := (cmdflag.NonFlagError{}); errors.As(err, &nf) { + if _, ok := errors.AsType[cmdflag.NonFlagError](err); ok { // Everything from here on out — including the argument we just consumed — // must be a package name. 
packageNames = args diff --git a/src/cmd/go/internal/work/build.go b/src/cmd/go/internal/work/build.go index adc98f93138007..fdb483d46f21d9 100644 --- a/src/cmd/go/internal/work/build.go +++ b/src/cmd/go/internal/work/build.go @@ -705,7 +705,7 @@ func runInstall(ctx context.Context, cmd *base.Command, args []string) { continue } haveErrors = true - if missingErr := (*modload.ImportMissingError)(nil); !errors.As(pkg.Error, &missingErr) { + if _, ok := errors.AsType[*modload.ImportMissingError](pkg.Error); !ok { allMissingErrors = false break } diff --git a/src/cmd/go/internal/work/exec.go b/src/cmd/go/internal/work/exec.go index fa6ddce24bed3c..703b4367db31ef 100644 --- a/src/cmd/go/internal/work/exec.go +++ b/src/cmd/go/internal/work/exec.go @@ -169,9 +169,10 @@ func (b *Builder) Do(ctx context.Context, root *Action) { a.Package.Incomplete = true } } else { - var ipe load.ImportPathError - if a.Package != nil && (!errors.As(err, &ipe) || ipe.ImportPath() != a.Package.ImportPath) { - err = fmt.Errorf("%s: %v", a.Package.ImportPath, err) + if a.Package != nil { + if ipe, ok := errors.AsType[load.ImportPathError](err); !ok || ipe.ImportPath() != a.Package.ImportPath { + err = fmt.Errorf("%s: %v", a.Package.ImportPath, err) + } } sh := b.Shell(a) sh.Errorf("%s", err) diff --git a/src/cmd/internal/bootstrap_test/experiment_toolid_test.go b/src/cmd/internal/bootstrap_test/experiment_toolid_test.go index ff2379c8998c76..ca292b700861a9 100644 --- a/src/cmd/internal/bootstrap_test/experiment_toolid_test.go +++ b/src/cmd/internal/bootstrap_test/experiment_toolid_test.go @@ -97,7 +97,7 @@ func runCmd(t *testing.T, dir string, env []string, path string, args ...string) cmd.Env = env out, err := cmd.Output() if err != nil { - if ee := (*exec.ExitError)(nil); errors.As(err, &ee) { + if ee, ok := errors.AsType[*exec.ExitError](err); ok { out = append(out, ee.Stderr...) } t.Fatalf("%s failed:\n%s\n%s", cmd, out, err) diff --git a/src/cmd/internal/robustio/robustio_darwin.go b/src/cmd/internal/robustio/robustio_darwin.go index 99fd8ebc2fff18..69ea2479308dea 100644 --- a/src/cmd/internal/robustio/robustio_darwin.go +++ b/src/cmd/internal/robustio/robustio_darwin.go @@ -13,9 +13,6 @@ const errFileNotFound = syscall.ENOENT // isEphemeralError returns true if err may be resolved by waiting. func isEphemeralError(err error) bool { - var errno syscall.Errno - if errors.As(err, &errno) { - return errno == errFileNotFound - } - return false + errno, ok := errors.AsType[syscall.Errno](err) + return ok && errno == errFileNotFound } diff --git a/src/cmd/internal/robustio/robustio_flaky.go b/src/cmd/internal/robustio/robustio_flaky.go index c56e36ca62412a..ec1a2daea65852 100644 --- a/src/cmd/internal/robustio/robustio_flaky.go +++ b/src/cmd/internal/robustio/robustio_flaky.go @@ -31,8 +31,7 @@ func retry(f func() (err error, mayRetry bool)) error { return err } - var errno syscall.Errno - if errors.As(err, &errno) && (lowestErrno == 0 || errno < lowestErrno) { + if errno, ok := errors.AsType[syscall.Errno](err); ok && (lowestErrno == 0 || errno < lowestErrno) { bestErr = err lowestErrno = errno } else if bestErr == nil { diff --git a/src/cmd/internal/robustio/robustio_windows.go b/src/cmd/internal/robustio/robustio_windows.go index 687dcb66f83d15..ad46ec5cfeb601 100644 --- a/src/cmd/internal/robustio/robustio_windows.go +++ b/src/cmd/internal/robustio/robustio_windows.go @@ -14,8 +14,7 @@ const errFileNotFound = syscall.ERROR_FILE_NOT_FOUND // isEphemeralError returns true if err may be resolved by waiting. 
func isEphemeralError(err error) bool { - var errno syscall.Errno - if errors.As(err, &errno) { + if errno, ok := errors.AsType[syscall.Errno](err); ok { switch errno { case syscall.ERROR_ACCESS_DENIED, syscall.ERROR_FILE_NOT_FOUND, diff --git a/src/cmd/internal/script/engine.go b/src/cmd/internal/script/engine.go index eb9344f6e2a1eb..4607868379488a 100644 --- a/src/cmd/internal/script/engine.go +++ b/src/cmd/internal/script/engine.go @@ -185,7 +185,7 @@ func (e *Engine) Execute(s *State, file string, script *bufio.Reader, log io.Wri var lineno int lineErr := func(err error) error { - if errors.As(err, new(*CommandError)) { + if _, ok := errors.AsType[*CommandError](err); ok { return err } return fmt.Errorf("%s:%d: %w", file, lineno, err) @@ -283,7 +283,7 @@ func (e *Engine) Execute(s *State, file string, script *bufio.Reader, log io.Wri // Run the command. err = e.runCommand(s, cmd, impl) if err != nil { - if stop := (stopError{}); errors.As(err, &stop) { + if stop, ok := errors.AsType[stopError](err); ok { // Since the 'stop' command halts execution of the entire script, // log its message separately from the section in which it appears. err = endSection(true) @@ -607,13 +607,13 @@ func checkStatus(cmd *command, err error) error { return nil } - if s := (stopError{}); errors.As(err, &s) { + if _, ok := errors.AsType[stopError](err); ok { // This error originated in the Stop command. // Propagate it as-is. return cmdError(cmd, err) } - if w := (waitError{}); errors.As(err, &w) { + if _, ok := errors.AsType[waitError](err); ok { // This error was surfaced from a background process by a call to Wait. // Add a call frame for Wait itself, but ignore its "want" field. // (Wait itself cannot fail to wait on commands or else it would leak diff --git a/src/cmd/internal/script/scripttest/scripttest.go b/src/cmd/internal/script/scripttest/scripttest.go index bace662a6722fd..349201fd188c1b 100644 --- a/src/cmd/internal/script/scripttest/scripttest.go +++ b/src/cmd/internal/script/scripttest/scripttest.go @@ -89,7 +89,7 @@ func Run(t testing.TB, e *script.Engine, s *script.State, filename string, testS return e.Execute(s, filename, bufio.NewReader(testScript), log) }() - if skip := (skipError{}); errors.As(err, &skip) { + if skip, ok := errors.AsType[skipError](err); ok { if skip.msg == "" { t.Skip("SKIP") } else { diff --git a/src/cmd/link/link_test.go b/src/cmd/link/link_test.go index 0125ba8e0f56be..31822d21f39d31 100644 --- a/src/cmd/link/link_test.go +++ b/src/cmd/link/link_test.go @@ -1532,11 +1532,13 @@ func TestFlagS(t *testing.T) { } cmd = testenv.Command(t, testenv.GoToolPath(t), "tool", "nm", exe) out, err = cmd.CombinedOutput() - if err != nil && !errors.As(err, new(*exec.ExitError)) { - // Error exit is fine as it may have no symbols. - // On darwin we need to emit dynamic symbol references so it - // actually has some symbols, and nm succeeds. - t.Errorf("(mode=%s) go tool nm failed: %v\n%s", mode, err, out) + if err != nil { + if _, ok := errors.AsType[*exec.ExitError](err); !ok { + // Error exit is fine as it may have no symbols. + // On darwin we need to emit dynamic symbol references so it + // actually has some symbols, and nm succeeds. 
+ t.Errorf("(mode=%s) go tool nm failed: %v\n%s", mode, err, out) + } } for _, s := range syms { if bytes.Contains(out, []byte(s)) { diff --git a/src/crypto/tls/conn.go b/src/crypto/tls/conn.go index 09dc9ea94c939f..2de120a1329f87 100644 --- a/src/crypto/tls/conn.go +++ b/src/crypto/tls/conn.go @@ -1578,9 +1578,9 @@ func (c *Conn) handshakeContext(ctx context.Context) (ret error) { // the handshake (RFC 9001, Section 5.7). c.quicSetReadSecret(QUICEncryptionLevelApplication, c.cipherSuite, c.in.trafficSecret) } else { - var a alert c.out.Lock() - if !errors.As(c.out.err, &a) { + a, ok := errors.AsType[alert](c.out.err) + if !ok { a = alertInternalError } c.out.Unlock() diff --git a/src/crypto/tls/handshake_server.go b/src/crypto/tls/handshake_server.go index 088c66fadb2a44..a2cf176a86c0d8 100644 --- a/src/crypto/tls/handshake_server.go +++ b/src/crypto/tls/handshake_server.go @@ -965,10 +965,9 @@ func (c *Conn) processCertsFromClient(certificate Certificate) error { chains, err := certs[0].Verify(opts) if err != nil { - var errCertificateInvalid x509.CertificateInvalidError - if errors.As(err, &x509.UnknownAuthorityError{}) { + if _, ok := errors.AsType[x509.UnknownAuthorityError](err); ok { c.sendAlert(alertUnknownCA) - } else if errors.As(err, &errCertificateInvalid) && errCertificateInvalid.Reason == x509.Expired { + } else if errCertificateInvalid, ok := errors.AsType[x509.CertificateInvalidError](err); ok && errCertificateInvalid.Reason == x509.Expired { c.sendAlert(alertCertificateExpired) } else { c.sendAlert(alertBadCertificate) diff --git a/src/crypto/tls/handshake_server_test.go b/src/crypto/tls/handshake_server_test.go index 941f2a3373feb9..43183db2a19770 100644 --- a/src/crypto/tls/handshake_server_test.go +++ b/src/crypto/tls/handshake_server_test.go @@ -403,8 +403,7 @@ func TestAlertForwarding(t *testing.T) { err := Server(s, testConfig).Handshake() s.Close() - var opErr *net.OpError - if !errors.As(err, &opErr) || opErr.Err != error(alertUnknownCA) { + if opErr, ok := errors.AsType[*net.OpError](err); !ok || opErr.Err != error(alertUnknownCA) { t.Errorf("Got error: %s; expected: %s", err, error(alertUnknownCA)) } } diff --git a/src/crypto/tls/quic.go b/src/crypto/tls/quic.go index 3be479eb12f085..2ba2242b2d93d2 100644 --- a/src/crypto/tls/quic.go +++ b/src/crypto/tls/quic.go @@ -362,12 +362,11 @@ func quicError(err error) error { if err == nil { return nil } - var ae AlertError - if errors.As(err, &ae) { + if _, ok := errors.AsType[AlertError](err); ok { return err } - var a alert - if !errors.As(err, &a) { + a, ok := errors.AsType[alert](err) + if !ok { a = alertInternalError } // Return an error wrapping the original error and an AlertError. 
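
Illustrative sketch (not part of the applied diff) of the fallback shape used in the conn.go and quic.go hunks above, where a default is substituted when the error chain does not contain the target type. myCode is a hypothetical value-type error standing in for the unexported alert type; errors.AsType handles value and pointer error types alike.

package main

import (
	"errors"
	"fmt"
)

// myCode is a hypothetical value-type error used only for this sketch.
type myCode int

func (c myCode) Error() string { return fmt.Sprintf("code %d", c) }

const defaultCode myCode = 0

// classify extracts a myCode from err, falling back to a default when the
// chain does not contain one, mirroring the alertInternalError fallback in
// the TLS hunks above.
func classify(err error) myCode {
	c, ok := errors.AsType[myCode](err)
	if !ok {
		c = defaultCode
	}
	return c
}

func main() {
	fmt.Println(classify(fmt.Errorf("handshake: %w", myCode(42)))) // code 42
	fmt.Println(classify(errors.New("unrelated failure")))         // code 0
}
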
diff --git a/src/crypto/tls/quic_test.go b/src/crypto/tls/quic_test.go index f6e8c55d9d63e4..5f4b2b7707d01e 100644 --- a/src/crypto/tls/quic_test.go +++ b/src/crypto/tls/quic_test.go @@ -368,8 +368,7 @@ func TestQUICHandshakeError(t *testing.T) { if !errors.Is(err, AlertError(alertBadCertificate)) { t.Errorf("connection handshake terminated with error %q, want alertBadCertificate", err) } - var e *CertificateVerificationError - if !errors.As(err, &e) { + if _, ok := errors.AsType[*CertificateVerificationError](err); !ok { t.Errorf("connection handshake terminated with error %q, want CertificateVerificationError", err) } } diff --git a/src/debug/buildinfo/buildinfo.go b/src/debug/buildinfo/buildinfo.go index 12e3b750d233a7..d202d5050a2786 100644 --- a/src/debug/buildinfo/buildinfo.go +++ b/src/debug/buildinfo/buildinfo.go @@ -67,7 +67,7 @@ const ( // with module support. func ReadFile(name string) (info *BuildInfo, err error) { defer func() { - if pathErr := (*fs.PathError)(nil); errors.As(err, &pathErr) { + if _, ok := errors.AsType[*fs.PathError](err); ok { err = fmt.Errorf("could not read Go build info: %w", err) } else if err != nil { err = fmt.Errorf("could not read Go build info from %s: %w", name, err) diff --git a/src/encoding/json/jsontext/coder_test.go b/src/encoding/json/jsontext/coder_test.go index 4a9efb3b8f97a5..8602e3e7fff286 100644 --- a/src/encoding/json/jsontext/coder_test.go +++ b/src/encoding/json/jsontext/coder_test.go @@ -486,7 +486,7 @@ func testCoderInterleaved(t *testing.T, where jsontest.CasePos, modeName string, // Retry as a ReadToken call. expectError := dec.PeekKind() == '}' || dec.PeekKind() == ']' if expectError { - if !errors.As(err, new(*SyntacticError)) { + if _, ok := errors.AsType[*SyntacticError](err); !ok { t.Fatalf("%s: Decoder.ReadToken error is %T, want %T", where, err, new(SyntacticError)) } tickTock = !tickTock diff --git a/src/encoding/json/jsontext/fuzz_test.go b/src/encoding/json/jsontext/fuzz_test.go index 60d16b9e27805c..3ad181d43416b8 100644 --- a/src/encoding/json/jsontext/fuzz_test.go +++ b/src/encoding/json/jsontext/fuzz_test.go @@ -53,9 +53,10 @@ func FuzzCoder(f *testing.F) { } else { val, err := dec.ReadValue() if err != nil { - expectError := dec.PeekKind() == '}' || dec.PeekKind() == ']' - if expectError && errors.As(err, new(*SyntacticError)) { - continue + if expectError := dec.PeekKind() == '}' || dec.PeekKind() == ']'; expectError { + if _, ok := errors.AsType[*SyntacticError](err); ok { + continue + } } if err == io.EOF { break diff --git a/src/encoding/json/jsontext/state.go b/src/encoding/json/jsontext/state.go index d214fd5190325e..538dfe32bfa9d2 100644 --- a/src/encoding/json/jsontext/state.go +++ b/src/encoding/json/jsontext/state.go @@ -24,8 +24,7 @@ import ( // The name of a duplicate JSON object member can be extracted as: // // err := ... -// var serr jsontext.SyntacticError -// if errors.As(err, &serr) && serr.Err == jsontext.ErrDuplicateName { +// if serr, ok := errors.AsType[jsontext.SyntacticError](err); ok && serr.Err == jsontext.ErrDuplicateName { // ptr := serr.JSONPointer // JSON pointer to duplicate name // name := ptr.LastToken() // duplicate name itself // ... diff --git a/src/encoding/json/v2/errors.go b/src/encoding/json/v2/errors.go index 9485d7b527793c..0f50d608c9ed27 100644 --- a/src/encoding/json/v2/errors.go +++ b/src/encoding/json/v2/errors.go @@ -28,8 +28,7 @@ import ( // The name of an unknown JSON object member can be extracted as: // // err := ... 
-// var serr json.SemanticError -// if errors.As(err, &serr) && serr.Err == json.ErrUnknownName { +// if serr, ok := errors.AsType[json.SemanticError](err); ok && serr.Err == json.ErrUnknownName { // ptr := serr.JSONPointer // JSON pointer to unknown name // name := ptr.LastToken() // unknown name itself // ... diff --git a/src/encoding/json/v2/example_test.go b/src/encoding/json/v2/example_test.go index c6bf0a864d8385..6d539bbd36b529 100644 --- a/src/encoding/json/v2/example_test.go +++ b/src/encoding/json/v2/example_test.go @@ -371,8 +371,7 @@ func Example_unknownMembers() { // Specifying RejectUnknownMembers causes Unmarshal // to reject the presence of any unknown members. err = json.Unmarshal([]byte(input), new(Color), json.RejectUnknownMembers(true)) - var serr *json.SemanticError - if errors.As(err, &serr) && serr.Err == json.ErrUnknownName { + if serr, ok := errors.AsType[*json.SemanticError](err); ok && serr.Err == json.ErrUnknownName { fmt.Println("Unmarshal error:", serr.Err, strconv.Quote(serr.JSONPointer.LastToken())) } diff --git a/src/go/types/api_test.go b/src/go/types/api_test.go index f7a98ae28061bb..2798b9e0c4eb47 100644 --- a/src/go/types/api_test.go +++ b/src/go/types/api_test.go @@ -2480,8 +2480,8 @@ func TestInstantiateErrors(t *testing.T) { t.Fatalf("Instantiate(%v, %v) returned nil error, want non-nil", T, test.targs) } - var argErr *ArgumentError - if !errors.As(err, &argErr) { + argErr, ok := errors.AsType[*ArgumentError](err) + if !ok { t.Fatalf("Instantiate(%v, %v): error is not an *ArgumentError", T, test.targs) } @@ -2496,8 +2496,8 @@ func TestArgumentErrorUnwrapping(t *testing.T) { Index: 1, Err: Error{Msg: "test"}, } - var e Error - if !errors.As(err, &e) { + e, ok := errors.AsType[Error](err) + if !ok { t.Fatalf("error %v does not wrap types.Error", err) } if e.Msg != "test" { diff --git a/src/internal/runtime/wasitest/testdata/tcpecho.go b/src/internal/runtime/wasitest/testdata/tcpecho.go index 819e3526885642..6da56acba10a1c 100644 --- a/src/internal/runtime/wasitest/testdata/tcpecho.go +++ b/src/internal/runtime/wasitest/testdata/tcpecho.go @@ -62,8 +62,7 @@ func findListener() (net.Listener, error) { l, err := net.FileListener(f) f.Close() - var se syscall.Errno - switch errors.As(err, &se); se { + switch se, _ := errors.AsType[syscall.Errno](err); se { case syscall.ENOTSOCK: continue case syscall.EBADF: diff --git a/src/internal/testenv/testenv_unix.go b/src/internal/testenv/testenv_unix.go index a629078842eadc..22eeca220da017 100644 --- a/src/internal/testenv/testenv_unix.go +++ b/src/internal/testenv/testenv_unix.go @@ -21,8 +21,7 @@ func syscallIsNotSupported(err error) bool { return false } - var errno syscall.Errno - if errors.As(err, &errno) { + if errno, ok := errors.AsType[syscall.Errno](err); ok { switch errno { case syscall.EPERM, syscall.EROFS: // User lacks permission: either the call requires root permission and the diff --git a/src/io/fs/readdir_test.go b/src/io/fs/readdir_test.go index b89706b893afd2..b729bf27ac40ac 100644 --- a/src/io/fs/readdir_test.go +++ b/src/io/fs/readdir_test.go @@ -93,8 +93,8 @@ func TestFileInfoToDirEntry(t *testing.T) { } func errorPath(err error) string { - var perr *PathError - if !errors.As(err, &perr) { + perr, ok := errors.AsType[*PathError](err) + if !ok { return "" } return perr.Path diff --git a/src/log/log_test.go b/src/log/log_test.go index 8cc05c5e647f95..5d5d38cc10dbae 100644 --- a/src/log/log_test.go +++ b/src/log/log_test.go @@ -272,8 +272,7 @@ func TestCallDepth(t *testing.T) { cmd.Env = 
append(cmd.Environ(), envVar+"=1") out, err := cmd.CombinedOutput() - var exitErr *exec.ExitError - if !errors.As(err, &exitErr) { + if _, ok := errors.AsType[*exec.ExitError](err); !ok { t.Fatalf("expected exec.ExitError: %v", err) } diff --git a/src/log/slog/logger_test.go b/src/log/slog/logger_test.go index bf645d9c4c6310..edacef13a4d518 100644 --- a/src/log/slog/logger_test.go +++ b/src/log/slog/logger_test.go @@ -303,8 +303,7 @@ func TestCallDepthConnection(t *testing.T) { cmd.Env = append(cmd.Environ(), envVar+"=1") out, err := cmd.CombinedOutput() - var exitErr *exec.ExitError - if !errors.As(err, &exitErr) { + if _, ok := errors.AsType[*exec.ExitError](err); !ok { t.Fatalf("expected exec.ExitError: %v", err) } diff --git a/src/net/dnsclient_unix.go b/src/net/dnsclient_unix.go index 5e060a6b489bf9..940fcccf7f2139 100644 --- a/src/net/dnsclient_unix.go +++ b/src/net/dnsclient_unix.go @@ -842,8 +842,7 @@ func (r *Resolver) goLookupPTR(ctx context.Context, addr string, order hostLooku } p, server, err := r.lookup(ctx, arpa, dnsmessage.TypePTR, conf) if err != nil { - var dnsErr *DNSError - if errors.As(err, &dnsErr) && dnsErr.IsNotFound { + if dnsErr, ok := errors.AsType[*DNSError](err); ok && dnsErr.IsNotFound { if order == hostLookupDNSFiles { names := lookupStaticAddr(addr) if len(names) > 0 { diff --git a/src/net/dnsclient_unix_test.go b/src/net/dnsclient_unix_test.go index 826b4daba1e7fd..fc1d40f18b6f9a 100644 --- a/src/net/dnsclient_unix_test.go +++ b/src/net/dnsclient_unix_test.go @@ -2627,8 +2627,7 @@ func TestLongDNSNames(t *testing.T) { } expectedErr := DNSError{Err: errNoSuchHost.Error(), Name: v.req, IsNotFound: true} - var dnsErr *DNSError - errors.As(err, &dnsErr) + dnsErr, _ := errors.AsType[*DNSError](err) if dnsErr == nil || *dnsErr != expectedErr { t.Errorf("%v: Lookup%v: unexpected error: %v", i, testName, err) } @@ -2820,8 +2819,7 @@ func TestLookupOrderFilesNoSuchHost(t *testing.T) { } expectedErr := DNSError{Err: errNoSuchHost.Error(), Name: testName, IsNotFound: true} - var dnsErr *DNSError - errors.As(err, &dnsErr) + dnsErr, _ := errors.AsType[*DNSError](err) if dnsErr == nil || *dnsErr != expectedErr { t.Errorf("Lookup%v: unexpected error: %v", v.name, err) } @@ -2853,8 +2851,7 @@ func TestExtendedRCode(t *testing.T) { r := &Resolver{PreferGo: true, Dial: fake.DialContext} _, _, err := r.tryOneName(context.Background(), getSystemDNSConfig(), "go.dev.", dnsmessage.TypeA) - var dnsErr *DNSError - if !(errors.As(err, &dnsErr) && dnsErr.Err == errServerMisbehaving.Error()) { + if dnsErr, ok := errors.AsType[*DNSError](err); !ok || dnsErr.Err != errServerMisbehaving.Error() { t.Fatalf("r.tryOneName(): unexpected error: %v", err) } } diff --git a/src/net/http/h2_error_test.go b/src/net/http/h2_error_test.go index 5e400683b415e7..e71825451a8e32 100644 --- a/src/net/http/h2_error_test.go +++ b/src/net/http/h2_error_test.go @@ -25,19 +25,18 @@ func (e externalStreamError) Error() string { } func TestStreamError(t *testing.T) { - var target externalStreamError streamErr := http2streamError(42, http2ErrCodeProtocol) - ok := errors.As(streamErr, &target) + extStreamErr, ok := errors.AsType[externalStreamError](streamErr) if !ok { - t.Fatalf("errors.As failed") + t.Fatalf("errors.AsType failed") } - if target.StreamID != streamErr.StreamID { - t.Errorf("got StreamID %v, expected %v", target.StreamID, streamErr.StreamID) + if extStreamErr.StreamID != streamErr.StreamID { + t.Errorf("got StreamID %v, expected %v", extStreamErr.StreamID, streamErr.StreamID) } - if target.Cause 
!= streamErr.Cause { - t.Errorf("got Cause %v, expected %v", target.Cause, streamErr.Cause) + if extStreamErr.Cause != streamErr.Cause { + t.Errorf("got Cause %v, expected %v", extStreamErr.Cause, streamErr.Cause) } - if uint32(target.Code) != uint32(streamErr.Code) { - t.Errorf("got Code %v, expected %v", target.Code, streamErr.Code) + if uint32(extStreamErr.Code) != uint32(streamErr.Code) { + t.Errorf("got Code %v, expected %v", extStreamErr.Code, streamErr.Code) } } diff --git a/src/net/lookup_test.go b/src/net/lookup_test.go index 514cbd098ae772..2a774100a8ec67 100644 --- a/src/net/lookup_test.go +++ b/src/net/lookup_test.go @@ -1420,8 +1420,8 @@ func testLookupNoData(t *testing.T, prefix string) { return } - var dnsErr *DNSError - if errors.As(err, &dnsErr) { + dnsErr, ok := errors.AsType[*DNSError](err) + if ok { succeeded := true if !dnsErr.IsNotFound { succeeded = false @@ -1455,8 +1455,7 @@ func testLookupNoData(t *testing.T, prefix string) { func TestLookupPortNotFound(t *testing.T) { allResolvers(t, func(t *testing.T) { _, err := LookupPort("udp", "_-unknown-service-") - var dnsErr *DNSError - if !errors.As(err, &dnsErr) || !dnsErr.IsNotFound { + if dnsErr, ok := errors.AsType[*DNSError](err); !ok || !dnsErr.IsNotFound { t.Fatalf("unexpected error: %v", err) } }) @@ -1475,8 +1474,7 @@ var tcpOnlyService = func() string { func TestLookupPortDifferentNetwork(t *testing.T) { allResolvers(t, func(t *testing.T) { _, err := LookupPort("udp", tcpOnlyService) - var dnsErr *DNSError - if !errors.As(err, &dnsErr) || !dnsErr.IsNotFound { + if dnsErr, ok := errors.AsType[*DNSError](err); !ok || !dnsErr.IsNotFound { t.Fatalf("unexpected error: %v", err) } }) diff --git a/src/os/exec/exec_test.go b/src/os/exec/exec_test.go index 3bded3dea604fb..1decebdc222d23 100644 --- a/src/os/exec/exec_test.go +++ b/src/os/exec/exec_test.go @@ -1378,8 +1378,8 @@ func TestWaitInterrupt(t *testing.T) { // The child process should be reported as failed, // and the grandchild will exit (or die by SIGPIPE) once the // stderr pipe is closed. - if ee := new(*exec.ExitError); !errors.As(err, ee) { - t.Errorf("Wait error = %v; want %T", err, *ee) + if ee, ok := errors.AsType[*exec.ExitError](err); !ok { + t.Errorf("Wait error = %v; want %T", err, ee) } }) @@ -1423,8 +1423,8 @@ func TestWaitInterrupt(t *testing.T) { // This command ignores SIGINT, sleeping until it is killed. // Wait should return the usual error for a killed process. 
- if ee := new(*exec.ExitError); !errors.As(err, ee) { - t.Errorf("Wait error = %v; want %T", err, *ee) + if ee, ok := errors.AsType[*exec.ExitError](err); !ok { + t.Errorf("Wait error = %v; want %T", err, ee) } }) @@ -1471,7 +1471,7 @@ func TestWaitInterrupt(t *testing.T) { t.Logf("stderr:\n%s", cmd.Stderr) t.Logf("[%d] %v", cmd.Process.Pid, err) - if ee := new(*exec.ExitError); !errors.As(err, ee) { + if _, ok := errors.AsType[*exec.ExitError](err); !ok { t.Errorf("Wait error = %v; want %v", err, ctx.Err()) } diff --git a/src/os/os_test.go b/src/os/os_test.go index 9f6eb13e1f96a9..536734901baff6 100644 --- a/src/os/os_test.go +++ b/src/os/os_test.go @@ -840,8 +840,7 @@ func TestReaddirOfFile(t *testing.T) { if err == nil { t.Error("Readdirnames succeeded; want non-nil error") } - var pe *PathError - if !errors.As(err, &pe) || pe.Path != f.Name() { + if pe, ok := errors.AsType[*PathError](err); !ok || pe.Path != f.Name() { t.Errorf("Readdirnames returned %q; want a PathError with path %q", err, f.Name()) } if len(names) > 0 { diff --git a/src/runtime/pprof/proto_windows.go b/src/runtime/pprof/proto_windows.go index f4dc44bd078eac..3118e8911e28ec 100644 --- a/src/runtime/pprof/proto_windows.go +++ b/src/runtime/pprof/proto_windows.go @@ -67,8 +67,7 @@ func readMainModuleMapping() (start, end uint64, exe, buildID string, err error) func createModuleSnapshot() (syscall.Handle, error) { for { snap, err := syscall.CreateToolhelp32Snapshot(windows.TH32CS_SNAPMODULE|windows.TH32CS_SNAPMODULE32, uint32(syscall.Getpid())) - var errno syscall.Errno - if err != nil && errors.As(err, &errno) && errno == windows.ERROR_BAD_LENGTH { + if errno, ok := errors.AsType[syscall.Errno](err); ok && errno == windows.ERROR_BAD_LENGTH { // When CreateToolhelp32Snapshot(SNAPMODULE|SNAPMODULE32, ...) fails // with ERROR_BAD_LENGTH then it should be retried until it succeeds. continue diff --git a/src/testing/fstest/testfs.go b/src/testing/fstest/testfs.go index 1fb84b892842cb..72830a09a727bd 100644 --- a/src/testing/fstest/testfs.go +++ b/src/testing/fstest/testfs.go @@ -29,7 +29,7 @@ import ( // The contents of fsys must not change concurrently with TestFS. // // If TestFS finds any misbehaviors, it returns either the first error or a -// list of errors. Use [errors.Is] or [errors.As] to inspect. +// list of errors. Use [errors.Is] or [errors.AsType] to inspect. // // Typical usage inside a test is: // diff --git a/src/testing/fstest/testfs_test.go b/src/testing/fstest/testfs_test.go index d6d6d89b89fdaa..e3d7f1ab44d7b7 100644 --- a/src/testing/fstest/testfs_test.go +++ b/src/testing/fstest/testfs_test.go @@ -105,8 +105,12 @@ func TestTestFSWrappedErrors(t *testing.T) { // TestFS is expected to return a list of errors. // Enforce that the list can be extracted for browsing. 
- var errs interface{ Unwrap() []error } - if !errors.As(err, &errs) { + type wrapper interface{ + error + Unwrap() []error + } + errs, ok := errors.AsType[wrapper](err) + if !ok { t.Errorf("caller should be able to extract the errors as a list: %#v", err) } else { for _, err := range errs.Unwrap() { diff --git a/src/text/template/exec_test.go b/src/text/template/exec_test.go index 65440901a0b4ec..8665f3ad4987c2 100644 --- a/src/text/template/exec_test.go +++ b/src/text/template/exec_test.go @@ -1015,8 +1015,7 @@ func TestExecError_CustomError(t *testing.T) { var b bytes.Buffer err := tmpl.Execute(&b, nil) - var e *CustomError - if !errors.As(err, &e) { + if _, ok := errors.AsType[*CustomError](err); !ok { t.Fatalf("expected custom error; got %s", err) } } diff --git a/src/text/template/funcs.go b/src/text/template/funcs.go index c28c3ea2002d21..30b3243a5a8416 100644 --- a/src/text/template/funcs.go +++ b/src/text/template/funcs.go @@ -22,7 +22,7 @@ import ( // return value evaluates to non-nil during execution, execution terminates and // Execute returns that error. // -// Errors returned by Execute wrap the underlying error; call [errors.As] to +// Errors returned by Execute wrap the underlying error; call [errors.AsType] to // unwrap them. // // When template execution invokes a function with an argument list, that list From 0e64ee1286c092eca95b3ffcc5917d34f43d4c0f Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Fri, 10 Oct 2025 17:56:04 -0700 Subject: [PATCH 128/152] encoding/json/v2: report EOF for top-level values in UnmarshalDecode The fully streaming UnmarshalJSONFrom method and UnmarshalFromFunc introduce an edge case where they can encounter EOF in the stream, where it should be reported upstream as EOF rather than ErrUnexpectedEOF or be wrapped within a SemanticError. This is not possible with other unmarshal methods since the "json" package would read the appropriate JSON value before calling the custom method or function. To avoid custom unmarshal methods from encountering EOF, check whether the stream is already at EOF for top-level values before calling the custom method. Also, when wrapping EOF within a SemanticError, convert it to ErrUnexpectedEOF to better indicate that this is unexpected. Fixes #75802 Change-Id: I001396734b7e95b5337f77b71326284974ee730a Reviewed-on: https://go-review.googlesource.com/c/go/+/710877 LUCI-TryBot-Result: Go LUCI Reviewed-by: Johan Brandhorst-Satzkorn Reviewed-by: Michael Pratt Reviewed-by: Carlos Amedee --- src/encoding/json/jsontext/decode.go | 6 ++++ src/encoding/json/v2/arshal.go | 3 +- src/encoding/json/v2/arshal_funcs.go | 4 +++ src/encoding/json/v2/arshal_methods.go | 4 +++ src/encoding/json/v2/arshal_test.go | 40 +++++++++++++++++++++++++- src/encoding/json/v2/errors.go | 16 +++++++++-- 6 files changed, 68 insertions(+), 5 deletions(-) diff --git a/src/encoding/json/jsontext/decode.go b/src/encoding/json/jsontext/decode.go index f505de44684a51..511832f2ae0879 100644 --- a/src/encoding/json/jsontext/decode.go +++ b/src/encoding/json/jsontext/decode.go @@ -792,6 +792,12 @@ func (d *decoderState) CheckNextValue(last bool) error { return nil } +// AtEOF reports whether the decoder is at EOF. +func (d *decoderState) AtEOF() bool { + _, err := d.consumeWhitespace(d.prevEnd) + return err == io.ErrUnexpectedEOF +} + // CheckEOF verifies that the input has no more data. 
func (d *decoderState) CheckEOF() error { return d.checkEOF(d.prevEnd) diff --git a/src/encoding/json/v2/arshal.go b/src/encoding/json/v2/arshal.go index 6b4bcb0c74cf7c..573d26567f3368 100644 --- a/src/encoding/json/v2/arshal.go +++ b/src/encoding/json/v2/arshal.go @@ -440,8 +440,9 @@ func UnmarshalRead(in io.Reader, out any, opts ...Options) (err error) { // Unlike [Unmarshal] and [UnmarshalRead], decode options are ignored because // they must have already been specified on the provided [jsontext.Decoder]. // -// The input may be a stream of one or more JSON values, +// The input may be a stream of zero or more JSON values, // where this only unmarshals the next JSON value in the stream. +// If there are no more top-level JSON values, it reports [io.EOF]. // The output must be a non-nil pointer. // See [Unmarshal] for details about the conversion of JSON into a Go value. func UnmarshalDecode(in *jsontext.Decoder, out any, opts ...Options) (err error) { diff --git a/src/encoding/json/v2/arshal_funcs.go b/src/encoding/json/v2/arshal_funcs.go index 673caf3c37693d..28916af948db6e 100644 --- a/src/encoding/json/v2/arshal_funcs.go +++ b/src/encoding/json/v2/arshal_funcs.go @@ -9,6 +9,7 @@ package json import ( "errors" "fmt" + "io" "reflect" "sync" @@ -306,6 +307,9 @@ func UnmarshalFromFunc[T any](fn func(*jsontext.Decoder, T) error) *Unmarshalers fnc: func(dec *jsontext.Decoder, va addressableValue, uo *jsonopts.Struct) error { xd := export.Decoder(dec) prevDepth, prevLength := xd.Tokens.DepthLength() + if prevDepth == 1 && xd.AtEOF() { + return io.EOF // check EOF early to avoid fn reporting an EOF + } xd.Flags.Set(jsonflags.WithinArshalCall | 1) v, _ := reflect.TypeAssert[T](va.castTo(t)) err := fn(dec, v) diff --git a/src/encoding/json/v2/arshal_methods.go b/src/encoding/json/v2/arshal_methods.go index 2decd144dbeaf9..1621eadc080763 100644 --- a/src/encoding/json/v2/arshal_methods.go +++ b/src/encoding/json/v2/arshal_methods.go @@ -9,6 +9,7 @@ package json import ( "encoding" "errors" + "io" "reflect" "encoding/json/internal" @@ -302,6 +303,9 @@ func makeMethodArshaler(fncs *arshaler, t reflect.Type) *arshaler { } xd := export.Decoder(dec) prevDepth, prevLength := xd.Tokens.DepthLength() + if prevDepth == 1 && xd.AtEOF() { + return io.EOF // check EOF early to avoid fn reporting an EOF + } xd.Flags.Set(jsonflags.WithinArshalCall | 1) unmarshaler, _ := reflect.TypeAssert[UnmarshalerFrom](va.Addr()) err := unmarshaler.UnmarshalJSONFrom(dec) diff --git a/src/encoding/json/v2/arshal_test.go b/src/encoding/json/v2/arshal_test.go index 75093345a3b93e..dc15c5a5f531b5 100644 --- a/src/encoding/json/v2/arshal_test.go +++ b/src/encoding/json/v2/arshal_test.go @@ -7833,7 +7833,8 @@ func TestUnmarshal(t *testing.T) { })), wantErr: EU(errSomeError).withType(0, T[unmarshalJSONv2Func]()), }, { - name: jsontest.Name("Methods/Invalid/JSONv2/TooFew"), + name: jsontest.Name("Methods/Invalid/JSONv2/TooFew"), + inBuf: `{}`, inVal: addr(unmarshalJSONv2Func(func(*jsontext.Decoder) error { return nil // do nothing })), @@ -9234,6 +9235,43 @@ func TestUnmarshalReuse(t *testing.T) { }) } +type unmarshalerEOF struct{} + +func (unmarshalerEOF) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + return io.EOF // should be wrapped and converted by Unmarshal to io.ErrUnexpectedEOF +} + +// TestUnmarshalEOF verifies that io.EOF is only ever returned by +// UnmarshalDecode for a top-level value. 
+func TestUnmarshalEOF(t *testing.T) { + opts := WithUnmarshalers(UnmarshalFromFunc(func(dec *jsontext.Decoder, _ *struct{}) error { + return io.EOF // should be wrapped and converted by Unmarshal to io.ErrUnexpectedEOF + })) + + for _, in := range []string{"", "[", "[null", "[null]"} { + for _, newOut := range []func() any{ + func() any { return new(unmarshalerEOF) }, + func() any { return new([]unmarshalerEOF) }, + func() any { return new(struct{}) }, + func() any { return new([]struct{}) }, + } { + wantErr := io.ErrUnexpectedEOF + if gotErr := Unmarshal([]byte(in), newOut(), opts); !errors.Is(gotErr, wantErr) { + t.Errorf("Unmarshal = %v, want %v", gotErr, wantErr) + } + if gotErr := UnmarshalRead(strings.NewReader(in), newOut(), opts); !errors.Is(gotErr, wantErr) { + t.Errorf("Unmarshal = %v, want %v", gotErr, wantErr) + } + switch gotErr := UnmarshalDecode(jsontext.NewDecoder(strings.NewReader(in)), newOut(), opts); { + case in != "" && !errors.Is(gotErr, wantErr): + t.Errorf("Unmarshal = %v, want %v", gotErr, wantErr) + case in == "" && gotErr != io.EOF: + t.Errorf("Unmarshal = %v, want %v", gotErr, io.EOF) + } + } + } +} + type ReaderFunc func([]byte) (int, error) func (f ReaderFunc) Read(b []byte) (int, error) { return f(b) } diff --git a/src/encoding/json/v2/errors.go b/src/encoding/json/v2/errors.go index 0f50d608c9ed27..4421f8b821cb5c 100644 --- a/src/encoding/json/v2/errors.go +++ b/src/encoding/json/v2/errors.go @@ -10,6 +10,7 @@ import ( "cmp" "errors" "fmt" + "io" "reflect" "strconv" "strings" @@ -118,7 +119,7 @@ func newInvalidFormatError(c coder, t reflect.Type) error { // newMarshalErrorBefore wraps err in a SemanticError assuming that e // is positioned right before the next token or value, which causes an error. func newMarshalErrorBefore(e *jsontext.Encoder, t reflect.Type, err error) error { - return &SemanticError{action: "marshal", GoType: t, Err: err, + return &SemanticError{action: "marshal", GoType: t, Err: toUnexpectedEOF(err), ByteOffset: e.OutputOffset() + int64(export.Encoder(e).CountNextDelimWhitespace()), JSONPointer: jsontext.Pointer(export.Encoder(e).AppendStackPointer(nil, +1))} } @@ -134,7 +135,7 @@ func newUnmarshalErrorBefore(d *jsontext.Decoder, t reflect.Type, err error) err if export.Decoder(d).Flags.Get(jsonflags.ReportErrorsWithLegacySemantics) { k = d.PeekKind() } - return &SemanticError{action: "unmarshal", GoType: t, Err: err, + return &SemanticError{action: "unmarshal", GoType: t, Err: toUnexpectedEOF(err), ByteOffset: d.InputOffset() + int64(export.Decoder(d).CountNextDelimWhitespace()), JSONPointer: jsontext.Pointer(export.Decoder(d).AppendStackPointer(nil, +1)), JSONKind: k} @@ -157,7 +158,7 @@ func newUnmarshalErrorBeforeWithSkipping(d *jsontext.Decoder, t reflect.Type, er // is positioned right after the previous token or value, which caused an error. 
func newUnmarshalErrorAfter(d *jsontext.Decoder, t reflect.Type, err error) error { tokOrVal := export.Decoder(d).PreviousTokenOrValue() - return &SemanticError{action: "unmarshal", GoType: t, Err: err, + return &SemanticError{action: "unmarshal", GoType: t, Err: toUnexpectedEOF(err), ByteOffset: d.InputOffset() - int64(len(tokOrVal)), JSONPointer: jsontext.Pointer(export.Decoder(d).AppendStackPointer(nil, -1)), JSONKind: jsontext.Value(tokOrVal).Kind()} @@ -206,6 +207,7 @@ func newSemanticErrorWithPosition(c coder, t reflect.Type, prevDepth int, prevLe if serr == nil { serr = &SemanticError{Err: err} } + serr.Err = toUnexpectedEOF(serr.Err) var currDepth int var currLength int64 var coderState interface{ AppendStackPointer([]byte, int) []byte } @@ -432,3 +434,11 @@ func newDuplicateNameError(ptr jsontext.Pointer, quotedName []byte, offset int64 Err: jsontext.ErrDuplicateName, } } + +// toUnexpectedEOF converts [io.EOF] to [io.ErrUnexpectedEOF]. +func toUnexpectedEOF(err error) error { + if err == io.EOF { + return io.ErrUnexpectedEOF + } + return err +} From 9b8742f2e79438b9442afa4c0a0139d3937ea33f Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Mon, 13 Oct 2025 09:39:06 -0700 Subject: [PATCH 129/152] cmd/compile: don't depend on arch-dependent conversions in the compiler Leave those constant foldings for runtime, similar to how we do it for NaN generation. These are the only instances I could find in cmd/compile/..., using objdump -d ../pkg/tool/darwin_arm64/compile| egrep "(fcvtz|>:)" | grep -B1 fcvt (There are instances in other places, like runtime and reflect, but I don't think those places would affect compiler output.) Change-Id: I4113fe4570115e4765825cf442cb1fde97cf2f27 Reviewed-on: https://go-review.googlesource.com/c/go/+/711281 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase Reviewed-by: Keith Randall --- .../compile/internal/ssa/_gen/generic.rules | 8 ++-- .../compile/internal/ssa/rewritegeneric.go | 16 +++++++ test/codegen/math.go | 45 +++++++++++++++++++ 3 files changed, 65 insertions(+), 4 deletions(-) diff --git a/src/cmd/compile/internal/ssa/_gen/generic.rules b/src/cmd/compile/internal/ssa/_gen/generic.rules index 23ce21a8b2cc28..af9c24f53fd245 100644 --- a/src/cmd/compile/internal/ssa/_gen/generic.rules +++ b/src/cmd/compile/internal/ssa/_gen/generic.rules @@ -50,10 +50,10 @@ (Cvt32to64F (Const32 [c])) => (Const64F [float64(c)]) (Cvt64to32F (Const64 [c])) => (Const32F [float32(c)]) (Cvt64to64F (Const64 [c])) => (Const64F [float64(c)]) -(Cvt32Fto32 (Const32F [c])) => (Const32 [int32(c)]) -(Cvt32Fto64 (Const32F [c])) => (Const64 [int64(c)]) -(Cvt64Fto32 (Const64F [c])) => (Const32 [int32(c)]) -(Cvt64Fto64 (Const64F [c])) => (Const64 [int64(c)]) +(Cvt32Fto32 (Const32F [c])) && c >= -1<<31 && c < 1<<31 => (Const32 [int32(c)]) +(Cvt32Fto64 (Const32F [c])) && c >= -1<<63 && c < 1<<63 => (Const64 [int64(c)]) +(Cvt64Fto32 (Const64F [c])) && c >= -1<<31 && c < 1<<31 => (Const32 [int32(c)]) +(Cvt64Fto64 (Const64F [c])) && c >= -1<<63 && c < 1<<63 => (Const64 [int64(c)]) (Round32F x:(Const32F)) => x (Round64F x:(Const64F)) => x (CvtBoolToUint8 (ConstBool [false])) => (Const8 [0]) diff --git a/src/cmd/compile/internal/ssa/rewritegeneric.go b/src/cmd/compile/internal/ssa/rewritegeneric.go index c36ecc1cc60fcc..79c444a86b29df 100644 --- a/src/cmd/compile/internal/ssa/rewritegeneric.go +++ b/src/cmd/compile/internal/ssa/rewritegeneric.go @@ -6607,12 +6607,16 @@ func rewriteValuegeneric_OpCtz8(v *Value) bool { func rewriteValuegeneric_OpCvt32Fto32(v *Value) bool { v_0 := 
v.Args[0] // match: (Cvt32Fto32 (Const32F [c])) + // cond: c >= -1<<31 && c < 1<<31 // result: (Const32 [int32(c)]) for { if v_0.Op != OpConst32F { break } c := auxIntToFloat32(v_0.AuxInt) + if !(c >= -1<<31 && c < 1<<31) { + break + } v.reset(OpConst32) v.AuxInt = int32ToAuxInt(int32(c)) return true @@ -6622,12 +6626,16 @@ func rewriteValuegeneric_OpCvt32Fto32(v *Value) bool { func rewriteValuegeneric_OpCvt32Fto64(v *Value) bool { v_0 := v.Args[0] // match: (Cvt32Fto64 (Const32F [c])) + // cond: c >= -1<<63 && c < 1<<63 // result: (Const64 [int64(c)]) for { if v_0.Op != OpConst32F { break } c := auxIntToFloat32(v_0.AuxInt) + if !(c >= -1<<63 && c < 1<<63) { + break + } v.reset(OpConst64) v.AuxInt = int64ToAuxInt(int64(c)) return true @@ -6682,12 +6690,16 @@ func rewriteValuegeneric_OpCvt32to64F(v *Value) bool { func rewriteValuegeneric_OpCvt64Fto32(v *Value) bool { v_0 := v.Args[0] // match: (Cvt64Fto32 (Const64F [c])) + // cond: c >= -1<<31 && c < 1<<31 // result: (Const32 [int32(c)]) for { if v_0.Op != OpConst64F { break } c := auxIntToFloat64(v_0.AuxInt) + if !(c >= -1<<31 && c < 1<<31) { + break + } v.reset(OpConst32) v.AuxInt = int32ToAuxInt(int32(c)) return true @@ -6732,12 +6744,16 @@ func rewriteValuegeneric_OpCvt64Fto32F(v *Value) bool { func rewriteValuegeneric_OpCvt64Fto64(v *Value) bool { v_0 := v.Args[0] // match: (Cvt64Fto64 (Const64F [c])) + // cond: c >= -1<<63 && c < 1<<63 // result: (Const64 [int64(c)]) for { if v_0.Op != OpConst64F { break } c := auxIntToFloat64(v_0.AuxInt) + if !(c >= -1<<63 && c < 1<<63) { + break + } v.reset(OpConst64) v.AuxInt = int64ToAuxInt(int64(c)) return true diff --git a/test/codegen/math.go b/test/codegen/math.go index eadf9d7d0554db..5787657d2bc3df 100644 --- a/test/codegen/math.go +++ b/test/codegen/math.go @@ -330,3 +330,48 @@ func nanGenerate32() float32 { // amd64/v3:"VFMADD231SS" return z0 + z1 } + +func outOfBoundsConv(i32 *[2]int32, u32 *[2]uint32, i64 *[2]int64, u64 *[2]uint64) { + // arm64: "FCVTZSDW" + // amd64: "CVTTSD2SL", "CVTSD2SS" + i32[0] = int32(two40()) + // arm64: "FCVTZSDW" + // amd64: "CVTTSD2SL", "CVTSD2SS" + i32[1] = int32(-two40()) + // arm64: "FCVTZSDW" + // amd64: "CVTTSD2SL", "CVTSD2SS" + u32[0] = uint32(two41()) + // on arm64, this uses an explicit <0 comparison, so it constant folds. + // on amd64, this uses an explicit <0 comparison, so it constant folds. + // amd64: "MOVL\t[$]0," + u32[1] = uint32(minus1()) + // arm64: "FCVTZSD" + // amd64: "CVTTSD2SQ" + i64[0] = int64(two80()) + // arm64: "FCVTZSD" + // amd64: "CVTTSD2SQ" + i64[1] = int64(-two80()) + // arm64: "FCVTZUD" + // amd64: "CVTTSD2SQ" + u64[0] = uint64(two81()) + // arm64: "FCVTZUD" + // on amd64, this uses an explicit <0 comparison, so it constant folds. 
+ // amd64: "MOVQ\t[$]0," + u64[1] = uint64(minus1()) +} + +func two40() float64 { + return 1 << 40 +} +func two41() float64 { + return 1 << 41 +} +func two80() float64 { + return 1 << 80 +} +func two81() float64 { + return 1 << 81 +} +func minus1() float64 { + return -1 +} From 9fdd6904da3d6ef2ed457fada1fb26130213f359 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Thu, 9 Oct 2025 15:02:23 -0400 Subject: [PATCH 130/152] strconv: add tests that Java once mishandled Change-Id: I372233d8494665b3300f9a186c883a4254435e1c Reviewed-on: https://go-review.googlesource.com/c/go/+/710617 LUCI-TryBot-Result: Go LUCI Reviewed-by: David Chase --- src/strconv/ftoa_test.go | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/src/strconv/ftoa_test.go b/src/strconv/ftoa_test.go index 3512ccf58070ec..40faa433a64cf8 100644 --- a/src/strconv/ftoa_test.go +++ b/src/strconv/ftoa_test.go @@ -172,6 +172,11 @@ var ftoatests = []ftoaTest{ {3.999969482421875, 'x', 2, "0x1.00p+02"}, {3.999969482421875, 'x', 1, "0x1.0p+02"}, {3.999969482421875, 'x', 0, "0x1p+02"}, + + // Cases that Java once mishandled, from David Chase. + {1.801439850948199e+16, 'g', -1, "1.801439850948199e+16"}, + {5.960464477539063e-08, 'g', -1, "5.960464477539063e-08"}, + {1.012e-320, 'g', -1, "1.012e-320"}, } func TestFtoa(t *testing.T) { @@ -186,13 +191,20 @@ func TestFtoa(t *testing.T) { t.Error("AppendFloat testN=64", test.f, string(test.fmt), test.prec, "want", "abc"+test.s, "got", string(x)) } if float64(float32(test.f)) == test.f && test.fmt != 'b' { + test_s := test.s + if test.f == 5.960464477539063e-08 { + // This test is an exact float32 but asking for float64 precision in the string. + // (All our other float64-only tests fail to exactness check above.) + test_s = "5.9604645e-08" + continue + } s := FormatFloat(test.f, test.fmt, test.prec, 32) if s != test.s { - t.Error("testN=32", test.f, string(test.fmt), test.prec, "want", test.s, "got", s) + t.Error("testN=32", test.f, string(test.fmt), test.prec, "want", test_s, "got", s) } x := AppendFloat([]byte("abc"), test.f, test.fmt, test.prec, 32) - if string(x) != "abc"+test.s { - t.Error("AppendFloat testN=32", test.f, string(test.fmt), test.prec, "want", "abc"+test.s, "got", string(x)) + if string(x) != "abc"+test_s { + t.Error("AppendFloat testN=32", test.f, string(test.fmt), test.prec, "want", "abc"+test_s, "got", string(x)) } } } From 1abc6b0204ed231311c9bbc53cfab36dc546aa8e Mon Sep 17 00:00:00 2001 From: Robert Griesemer Date: Mon, 13 Oct 2025 12:47:42 -0700 Subject: [PATCH 131/152] go/types, types2: permit type cycles through type parameter lists Issue #49439 was about a deadlock during type inference inside a type parameter list of a recursive constraint. As a remedy we disallowed recursive type parameter lists. In the meantime we have removed support for type inference for type arguments to generic types; the Go 1.18 generic release didn't support it. As a consequence, the fix for #49439, CL 361922, is probably not needed anymore: cycles through type parameter lists are ok. Fixes #68162. For #49439. 
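For illustration, the kind of declaration this change accepts is a
constraint whose own type parameter list refers back to it. A minimal
sketch, adapted from the issue68162.go test added below (previously such
cycles were rejected as an "invalid recursive type"; see the ERROR
annotations removed from the testdata):

	package main

	type N[B N[B]] interface {
		Add(B) B
	}

	func Add[P N[P]](x, y P) P {
		return x.Add(y)
	}

	type MyInt int

	func (x MyInt) Add(y MyInt) MyInt {
		return x + y
	}

	func main() {
		var x, y MyInt = 2, 3
		// MyInt satisfies N[MyInt] even though N's type parameter list
		// mentions N itself; this now type-checks and prints 5.
		println(Add(x, y))
	}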
Change-Id: Ie9deb3274914d428e8e45071cee5e68abf8afe9c Reviewed-on: https://go-review.googlesource.com/c/go/+/711420 Commit-Queue: Robert Griesemer Auto-Submit: Robert Griesemer Reviewed-by: Mark Freeman Reviewed-by: Robert Griesemer TryBot-Bypass: Robert Griesemer --- src/cmd/compile/internal/types2/decl.go | 6 +++++ src/go/types/decl.go | 6 +++++ .../types/testdata/fixedbugs/issue45550.go | 2 +- .../types/testdata/fixedbugs/issue46461.go | 6 ++--- .../types/testdata/fixedbugs/issue46461a.go | 7 +++--- .../types/testdata/fixedbugs/issue47796.go | 14 +++++------ .../types/testdata/fixedbugs/issue48529.go | 2 +- .../types/testdata/fixedbugs/issue49439.go | 14 +++++------ .../types/testdata/fixedbugs/issue68162.go | 24 +++++++++++++++++++ test/typeparam/issue46461.go | 4 ++-- test/typeparam/issue46461b.dir/a.go | 2 +- test/typeparam/issue46461b.dir/b.go | 4 +--- test/typeparam/issue48280.dir/a.go | 2 +- test/typeparam/issue48306.dir/a.go | 2 +- 14 files changed, 64 insertions(+), 31 deletions(-) create mode 100644 src/internal/types/testdata/fixedbugs/issue68162.go diff --git a/src/cmd/compile/internal/types2/decl.go b/src/cmd/compile/internal/types2/decl.go index 34105816a65af3..3ff24269669a94 100644 --- a/src/cmd/compile/internal/types2/decl.go +++ b/src/cmd/compile/internal/types2/decl.go @@ -302,6 +302,12 @@ loop: } } + // Cycles through type parameter lists are ok (go.dev/issue/68162). + // TODO(gri) if we are happy with this this, remove this flag and simplify code. + if tparCycle { + return true + } + check.cycleError(cycle, firstInSrc(cycle)) return false } diff --git a/src/go/types/decl.go b/src/go/types/decl.go index 42423d291cee8d..fcae1f95295a67 100644 --- a/src/go/types/decl.go +++ b/src/go/types/decl.go @@ -303,6 +303,12 @@ loop: } } + // Cycles through type parameter lists are ok (go.dev/issue/68162). + // TODO(gri) if we are happy with this this, remove this flag and simplify code. 
+ if tparCycle { + return true + } + check.cycleError(cycle, firstInSrc(cycle)) return false } diff --git a/src/internal/types/testdata/fixedbugs/issue45550.go b/src/internal/types/testdata/fixedbugs/issue45550.go index 2ea4ffe3079540..32fdde6740c7a5 100644 --- a/src/internal/types/testdata/fixedbugs/issue45550.go +++ b/src/internal/types/testdata/fixedbugs/issue45550.go @@ -4,7 +4,7 @@ package p -type Builder /* ERROR "invalid recursive type" */ [T interface{ struct{ Builder[T] } }] struct{} +type Builder[T ~struct{ Builder[T] }] struct{} type myBuilder struct { Builder[myBuilder] } diff --git a/src/internal/types/testdata/fixedbugs/issue46461.go b/src/internal/types/testdata/fixedbugs/issue46461.go index e823013f995167..454f7e836537c6 100644 --- a/src/internal/types/testdata/fixedbugs/issue46461.go +++ b/src/internal/types/testdata/fixedbugs/issue46461.go @@ -7,16 +7,16 @@ package p // test case 1 -type T /* ERROR "invalid recursive type" */ [U interface{ M() T[U] }] int +type T[U interface{ M() T[U] }] int type X int func (X) M() T[X] { return 0 } // test case 2 -type A /* ERROR "invalid recursive type" */ [T interface{ A[T] }] interface{} +type A[T interface{ A[T] }] interface{} // test case 3 -type A2 /* ERROR "invalid recursive type" */ [U interface{ A2[U] }] interface{ M() A2[U] } +type A2[U interface{ A2[U] }] interface{ M() A2[U] } type I interface{ A2[I]; M() A2[I] } diff --git a/src/internal/types/testdata/fixedbugs/issue46461a.go b/src/internal/types/testdata/fixedbugs/issue46461a.go index e4b8e1a240a973..74ed6c4882719c 100644 --- a/src/internal/types/testdata/fixedbugs/issue46461a.go +++ b/src/internal/types/testdata/fixedbugs/issue46461a.go @@ -7,17 +7,16 @@ package p // test case 1 -type T /* ERROR "invalid recursive type" */ [U interface{ M() T[U] }] int +type T[U interface{ M() T[U] }] int type X int func (X) M() T[X] { return 0 } // test case 2 -type A /* ERROR "invalid recursive type" */ [T interface{ A[T] }] interface{} +type A[T interface{ A[T] }] interface{} // test case 3 -// TODO(gri) should report error only once -type A2 /* ERROR "invalid recursive type" */ /* ERROR "invalid recursive type" */ [U interface{ A2[U] }] interface{ M() A2[U] } +type A2[U interface{ A2[U] }] interface{ M() A2[U] } type I interface{ A2[I]; M() A2[I] } diff --git a/src/internal/types/testdata/fixedbugs/issue47796.go b/src/internal/types/testdata/fixedbugs/issue47796.go index 7f719ff6745eaa..b07cdddababf67 100644 --- a/src/internal/types/testdata/fixedbugs/issue47796.go +++ b/src/internal/types/testdata/fixedbugs/issue47796.go @@ -6,16 +6,16 @@ package p // parameterized types with self-recursive constraints type ( - T1 /* ERROR "invalid recursive type" */ [P T1[P]] interface{} - T2 /* ERROR "invalid recursive type" */ [P, Q T2[P, Q]] interface{} + T1[P T1[P]] interface{} + T2[P, Q T2[P, Q]] interface{} T3[P T2[P, Q], Q interface{ ~string }] interface{} - T4a /* ERROR "invalid recursive type" */ [P T4a[P]] interface{ ~int } - T4b /* ERROR "invalid recursive type" */ [P T4b[int]] interface{ ~int } - T4c /* ERROR "invalid recursive type" */ [P T4c[string]] interface{ ~int } + T4a[P T4a[P]] interface{ ~int } + T4b[P T4b[int]] interface{ ~int } + T4c[P T4c[string /* ERROR "string does not satisfy T4c[string]" */]] interface{ ~int } // mutually recursive constraints - T5 /* ERROR "invalid recursive type" */ [P T6[P]] interface{ int } + T5[P T6[P]] interface{ int } T6[P T5[P]] interface{ int } ) @@ -28,6 +28,6 @@ var ( // test case from issue -type Eq /* ERROR "invalid recursive type" */ [a 
Eq[a]] interface { +type Eq[a Eq[a]] interface { Equal(that a) bool } diff --git a/src/internal/types/testdata/fixedbugs/issue48529.go b/src/internal/types/testdata/fixedbugs/issue48529.go index bcc5e3536d3457..eca1da89232b08 100644 --- a/src/internal/types/testdata/fixedbugs/issue48529.go +++ b/src/internal/types/testdata/fixedbugs/issue48529.go @@ -4,7 +4,7 @@ package p -type T /* ERROR "invalid recursive type" */ [U interface{ M() T[U, int] }] int +type T[U interface{ M() T /* ERROR "too many type arguments for type T" */ [U, int] }] int type X int diff --git a/src/internal/types/testdata/fixedbugs/issue49439.go b/src/internal/types/testdata/fixedbugs/issue49439.go index 3852f160948913..63bedf61911914 100644 --- a/src/internal/types/testdata/fixedbugs/issue49439.go +++ b/src/internal/types/testdata/fixedbugs/issue49439.go @@ -6,21 +6,21 @@ package p import "unsafe" -type T0 /* ERROR "invalid recursive type" */ [P T0[P]] struct{} +type T0[P T0[P]] struct{} -type T1 /* ERROR "invalid recursive type" */ [P T2[P]] struct{} -type T2[P T1[P]] struct{} +type T1[P T2[P /* ERROR "P does not satisfy T1[P]" */]] struct{} +type T2[P T1[P /* ERROR "P does not satisfy T2[P]" */]] struct{} -type T3 /* ERROR "invalid recursive type" */ [P interface{ ~struct{ f T3[int] } }] struct{} +type T3[P interface{ ~struct{ f T3[int /* ERROR "int does not satisfy" */ ] } }] struct{} // valid cycle in M type N[P M[P]] struct{} -type M[Q any] struct { F *M[Q] } +type M[Q any] struct{ F *M[Q] } // "crazy" case type TC[P [unsafe.Sizeof(func() { - type T [P [unsafe.Sizeof(func(){})]byte] struct{} + type T[P [unsafe.Sizeof(func() {})]byte] struct{} })]byte] struct{} // test case from issue -type X /* ERROR "invalid recursive type" */ [T any, PT X[T]] interface{} +type X[T any, PT X /* ERROR "not enough type arguments for type X" */ [T]] interface{} diff --git a/src/internal/types/testdata/fixedbugs/issue68162.go b/src/internal/types/testdata/fixedbugs/issue68162.go new file mode 100644 index 00000000000000..8efd8a66dff4be --- /dev/null +++ b/src/internal/types/testdata/fixedbugs/issue68162.go @@ -0,0 +1,24 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +type N[B N[B]] interface { + Add(B) B +} + +func Add[P N[P]](x, y P) P { + return x.Add(y) +} + +type MyInt int + +func (x MyInt) Add(y MyInt) MyInt { + return x + y +} + +func main() { + var x, y MyInt = 2, 3 + println(Add(x, y)) +} diff --git a/test/typeparam/issue46461.go b/test/typeparam/issue46461.go index 363a87cfe08f10..7e35106c15b42e 100644 --- a/test/typeparam/issue46461.go +++ b/test/typeparam/issue46461.go @@ -1,4 +1,4 @@ -// errorcheck +// compile // Copyright 2021 The Go Authors. All rights reserved. 
 // Use of this source code is governed by a BSD-style
@@ -6,7 +6,7 @@
 
 package p
 
-type T[U interface{ M() T[U] }] int // ERROR "invalid recursive type: T refers to itself"
+type T[U interface{ M() T[U] }] int
 
 type X int
 
diff --git a/test/typeparam/issue46461b.dir/a.go b/test/typeparam/issue46461b.dir/a.go
index fcb414266d741c..0d53b3e20429cd 100644
--- a/test/typeparam/issue46461b.dir/a.go
+++ b/test/typeparam/issue46461b.dir/a.go
@@ -4,4 +4,4 @@
 
 package a
 
-type T[U interface{ M() int }] int
+type T[U interface{ M() T[U] }] int
diff --git a/test/typeparam/issue46461b.dir/b.go b/test/typeparam/issue46461b.dir/b.go
index a4583257ffd657..3393a375c20570 100644
--- a/test/typeparam/issue46461b.dir/b.go
+++ b/test/typeparam/issue46461b.dir/b.go
@@ -8,6 +8,4 @@ import "./a"
 
 type X int
 
-func (X) M() int { return 0 }
-
-type _ a.T[X]
+func (X) M() a.T[X] { return 0 }
diff --git a/test/typeparam/issue48280.dir/a.go b/test/typeparam/issue48280.dir/a.go
index f66fd30e34ee43..17859e6aa902f0 100644
--- a/test/typeparam/issue48280.dir/a.go
+++ b/test/typeparam/issue48280.dir/a.go
@@ -4,7 +4,7 @@
 
 package a
 
-type I[T any] interface {
+type I[T I[T]] interface {
 	F() T
 }
 
diff --git a/test/typeparam/issue48306.dir/a.go b/test/typeparam/issue48306.dir/a.go
index fdfd86cb6d4a5d..739750b20b35e6 100644
--- a/test/typeparam/issue48306.dir/a.go
+++ b/test/typeparam/issue48306.dir/a.go
@@ -4,6 +4,6 @@
 
 package a
 
-type I[T any] interface {
+type I[T I[T]] interface {
 	F() T
 }

From 6fd8e88d07b08531a5585aa4fbcc6043d556742f Mon Sep 17 00:00:00 2001
From: Joe Tsai
Date: Sat, 11 Oct 2025 11:37:58 -0700
Subject: [PATCH 132/152] encoding/json/v2: restrict presence of default options

Originally, DefaultOptionsV1 and DefaultOptionsV2 represented the full
set of all options with specific ones set to true or false. However,
there are certain options such as WithIndent or WithMarshalers that are
neither v1 nor v2 specific.

At some point we removed whitespace related options from the set:
https://github.com/go-json-experiment/json/pull/26
This prevents DefaultOptionsV1 or DefaultOptionsV2 from affecting any
previously set whitespace.

However, why are whitespace options special and thus excluded from the
set? What about Marshalers?

As a more principled way to address this, we restrict DefaultOptionsV1
and DefaultOptionsV2 to only be the options where the default setting
changes between v1 and v2. All other options are unpopulated.

This avoids a panic with GetOption(DefaultOptionsV2, WithMarshalers)
since DefaultOptionsV2 previously had the presence bit for Marshalers
set to true, but had no actual value. Now, the presence bit is set to
false, so the value is not consulted.
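A minimal sketch of the user-visible effect, mirroring the regression
test added below (assumes the v2 "encoding/json/v2" package, i.e. a
toolchain built with GOEXPERIMENT=jsonv2):

	package main

	import (
		"encoding/json/v2"
		"fmt"
	)

	func main() {
		// Before this change, DefaultOptionsV2 set the presence bit for
		// Marshalers without storing a value, so this call could panic.
		// Now the option is simply not present in the set.
		m, ok := json.GetOption(json.DefaultOptionsV2(), json.WithMarshalers)
		fmt.Println(m, ok) // <nil> false
	}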
Fixes #75149 Change-Id: I30b45abd35404578b4135cc3bad1a1a2993cb0cf Reviewed-on: https://go-review.googlesource.com/c/go/+/710878 Reviewed-by: Johan Brandhorst-Satzkorn Reviewed-by: Michael Pratt LUCI-TryBot-Result: Go LUCI Reviewed-by: Damien Neil --- src/encoding/json/internal/jsonopts/options.go | 8 ++++---- src/encoding/json/internal/jsonopts/options_test.go | 3 +++ src/encoding/json/v2/options.go | 5 ++--- src/encoding/json/v2_options.go | 4 +--- 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/src/encoding/json/internal/jsonopts/options.go b/src/encoding/json/internal/jsonopts/options.go index e4c3f47d36adc8..39da81b34549ec 100644 --- a/src/encoding/json/internal/jsonopts/options.go +++ b/src/encoding/json/internal/jsonopts/options.go @@ -48,16 +48,16 @@ type ArshalValues struct { // DefaultOptionsV2 is the set of all options that define default v2 behavior. var DefaultOptionsV2 = Struct{ Flags: jsonflags.Flags{ - Presence: uint64(jsonflags.AllFlags & ^jsonflags.WhitespaceFlags), - Values: uint64(0), + Presence: uint64(jsonflags.DefaultV1Flags), + Values: uint64(0), // all flags in DefaultV1Flags are false }, } // DefaultOptionsV1 is the set of all options that define default v1 behavior. var DefaultOptionsV1 = Struct{ Flags: jsonflags.Flags{ - Presence: uint64(jsonflags.AllFlags & ^jsonflags.WhitespaceFlags), - Values: uint64(jsonflags.DefaultV1Flags), + Presence: uint64(jsonflags.DefaultV1Flags), + Values: uint64(jsonflags.DefaultV1Flags), // all flags in DefaultV1Flags are true }, } diff --git a/src/encoding/json/internal/jsonopts/options_test.go b/src/encoding/json/internal/jsonopts/options_test.go index ebfaf05c833e05..caa686e4f0d579 100644 --- a/src/encoding/json/internal/jsonopts/options_test.go +++ b/src/encoding/json/internal/jsonopts/options_test.go @@ -200,6 +200,9 @@ func TestGet(t *testing.T) { if v, ok := json.GetOption(opts, json.WithUnmarshalers); v != nil || ok { t.Errorf(`GetOption(..., WithUnmarshalers) = (%v, %v), want (nil, false)`, v, ok) } + if v, ok := json.GetOption(json.DefaultOptionsV2(), json.WithMarshalers); v != nil || ok { + t.Errorf(`GetOption(..., WithMarshalers) = (%v, %v), want (nil, false)`, v, ok) + } } var sink struct { diff --git a/src/encoding/json/v2/options.go b/src/encoding/json/v2/options.go index 0942d2d30784f9..9685f20f9f805f 100644 --- a/src/encoding/json/v2/options.go +++ b/src/encoding/json/v2/options.go @@ -97,9 +97,8 @@ func GetOption[T any](opts Options, setter func(T) Options) (T, bool) { } // DefaultOptionsV2 is the full set of all options that define v2 semantics. -// It is equivalent to all options under [Options], [encoding/json.Options], -// and [encoding/json/jsontext.Options] being set to false or the zero value, -// except for the options related to whitespace formatting. +// It is equivalent to the set of options in [encoding/json.DefaultOptionsV1] +// all being set to false. All other options are not present. func DefaultOptionsV2() Options { return &jsonopts.DefaultOptionsV2 } diff --git a/src/encoding/json/v2_options.go b/src/encoding/json/v2_options.go index 819fe59f412c68..2bdec86fdeab5b 100644 --- a/src/encoding/json/v2_options.go +++ b/src/encoding/json/v2_options.go @@ -227,9 +227,7 @@ type Options = jsonopts.Options // - [jsontext.EscapeForJS] // - [jsontext.PreserveRawStrings] // -// All other boolean options are set to false. -// All non-boolean options are set to the zero value, -// except for [jsontext.WithIndent], which defaults to "\t". +// All other options are not present. 
// // The [Marshal] and [Unmarshal] functions in this package are // semantically identical to calling the v2 equivalents with this option: From 60f6d2f6230c5085ad25a9e3ebdaaae2aefdfe36 Mon Sep 17 00:00:00 2001 From: Filippo Valsorda Date: Wed, 8 Oct 2025 13:02:36 +0200 Subject: [PATCH 133/152] crypto/internal/fips140/entropy: support SHA-384 sizes for ACVP tests MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Change-Id: I6a6a6964decc662d753ee3eec357570bd3c95e2d Reviewed-on: https://go-review.googlesource.com/c/go/+/710056 Auto-Submit: Filippo Valsorda LUCI-TryBot-Result: Go LUCI Reviewed-by: Roland Shoemaker Reviewed-by: Nooras Saba‎ Reviewed-by: Daniel McCarney --- src/crypto/internal/fips140/entropy/sha384.go | 57 +++++++++++++++---- .../internal/fips140test/entropy_test.go | 13 ++++- 2 files changed, 58 insertions(+), 12 deletions(-) diff --git a/src/crypto/internal/fips140/entropy/sha384.go b/src/crypto/internal/fips140/entropy/sha384.go index ec23cfc9ad3661..c20f76b57979af 100644 --- a/src/crypto/internal/fips140/entropy/sha384.go +++ b/src/crypto/internal/fips140/entropy/sha384.go @@ -8,19 +8,22 @@ import "math/bits" // This file includes a SHA-384 implementation to insulate the entropy source // from any changes in the FIPS 140-3 module's crypto/internal/fips140/sha512 -// package. We only support 1024-byte inputs. +// package. We support 1024-byte inputs for the entropy source, and arbitrary +// length inputs for ACVP testing. + +var initState = [8]uint64{ + 0xcbbb9d5dc1059ed8, + 0x629a292a367cd507, + 0x9159015a3070dd17, + 0x152fecd8f70e5939, + 0x67332667ffc00b31, + 0x8eb44a8768581511, + 0xdb0c2e0d64f98fa7, + 0x47b5481dbefa4fa4, +} func SHA384(p *[1024]byte) [48]byte { - h := [8]uint64{ - 0xcbbb9d5dc1059ed8, - 0x629a292a367cd507, - 0x9159015a3070dd17, - 0x152fecd8f70e5939, - 0x67332667ffc00b31, - 0x8eb44a8768581511, - 0xdb0c2e0d64f98fa7, - 0x47b5481dbefa4fa4, - } + h := initState sha384Block(&h, (*[128]byte)(p[0:128])) sha384Block(&h, (*[128]byte)(p[128:256])) @@ -36,6 +39,38 @@ func SHA384(p *[1024]byte) [48]byte { bePutUint64(padlen[112+8:], 1024*8) sha384Block(&h, &padlen) + return digestBytes(&h) +} + +func TestingOnlySHA384(p []byte) [48]byte { + if len(p) == 1024 { + return SHA384((*[1024]byte)(p)) + } + + h := initState + bitLen := uint64(len(p)) * 8 + + // Process full 128-byte blocks. + for len(p) >= 128 { + sha384Block(&h, (*[128]byte)(p[:128])) + p = p[128:] + } + + // Process final block and padding. 
+ var finalBlock [128]byte + copy(finalBlock[:], p) + finalBlock[len(p)] = 0x80 + if len(p) >= 112 { + sha384Block(&h, &finalBlock) + finalBlock = [128]byte{} + } + bePutUint64(finalBlock[112+8:], bitLen) + sha384Block(&h, &finalBlock) + + return digestBytes(&h) +} + +func digestBytes(h *[8]uint64) [48]byte { var digest [48]byte bePutUint64(digest[0:], h[0]) bePutUint64(digest[8:], h[1]) diff --git a/src/crypto/internal/fips140test/entropy_test.go b/src/crypto/internal/fips140test/entropy_test.go index a84b50c62b22b6..ef6909efbac2f6 100644 --- a/src/crypto/internal/fips140test/entropy_test.go +++ b/src/crypto/internal/fips140test/entropy_test.go @@ -11,6 +11,7 @@ import ( "crypto/internal/cryptotest" "crypto/internal/fips140/drbg" "crypto/internal/fips140/entropy" + "crypto/rand" "crypto/sha256" "crypto/sha512" "encoding/hex" @@ -159,6 +160,16 @@ func TestEntropySHA384(t *testing.T) { if got != want { t.Errorf("SHA384() = %x, want %x", got, want) } + + for l := range 1024*3 + 1 { + input := make([]byte, l) + rand.Read(input) + want := sha512.Sum384(input) + got := entropy.TestingOnlySHA384(input) + if got != want { + t.Errorf("TestingOnlySHA384(%d bytes) = %x, want %x", l, got, want) + } + } } func TestEntropyRepetitionCountTest(t *testing.T) { @@ -230,7 +241,7 @@ func TestEntropyUnchanged(t *testing.T) { // entropy source through the Entropy Source Validation program, // independently of the FIPS 140-3 module. It must not change even across // FIPS 140-3 module versions, in order to reuse the ESV certificate. - exp := "35976eb8a11678c79777da07aaab5511d4325701f837777df205f6e7b20c6821" + exp := "1b68d4c091ef66c6006602e4ed3ac10f8a82ad193708ec99d63b145e3baa3e6c" if got := hex.EncodeToString(h.Sum(nil)); got != exp { t.Errorf("hash of crypto/internal/fips140/entropy = %s, want %s", got, exp) } From f6b9d56affb75103507f2b6ed4ffa98ca899b39d Mon Sep 17 00:00:00 2001 From: Filippo Valsorda Date: Wed, 8 Oct 2025 13:30:34 +0200 Subject: [PATCH 134/152] crypto/internal/fips140/entropy: fix benign race Fixes #75690 Fixes #75842 Change-Id: I6a6a696420f51f28f48535c34cf347e2cbd4add5 Reviewed-on: https://go-review.googlesource.com/c/go/+/710058 Auto-Submit: Filippo Valsorda Reviewed-by: David Chase Reviewed-by: Roland Shoemaker LUCI-TryBot-Result: Go LUCI --- src/crypto/internal/fips140/entropy/entropy.go | 4 +++- src/crypto/internal/fips140test/entropy_test.go | 6 +++--- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/src/crypto/internal/fips140/entropy/entropy.go b/src/crypto/internal/fips140/entropy/entropy.go index 273f05c817aff8..f5b2f53752a028 100644 --- a/src/crypto/internal/fips140/entropy/entropy.go +++ b/src/crypto/internal/fips140/entropy/entropy.go @@ -123,7 +123,9 @@ func (s *source) Sample() uint8 { // Perform a few memory accesses in an unpredictable pattern to expose the // next measurement to as much system noise as possible. memory, lcgState := s.memory, s.lcgState - _ = memory[0] // hoist the nil check out of touchMemory + if memory == nil { // remove the nil check from the inlined touchMemory calls + panic("entropy: nil memory buffer") + } for range 64 { lcgState = 1664525*lcgState + 1013904223 // Discard the lower bits, which tend to fall into short cycles. 
diff --git a/src/crypto/internal/fips140test/entropy_test.go b/src/crypto/internal/fips140test/entropy_test.go index ef6909efbac2f6..a33e2e7bbc907d 100644 --- a/src/crypto/internal/fips140test/entropy_test.go +++ b/src/crypto/internal/fips140test/entropy_test.go @@ -241,7 +241,7 @@ func TestEntropyUnchanged(t *testing.T) { // entropy source through the Entropy Source Validation program, // independently of the FIPS 140-3 module. It must not change even across // FIPS 140-3 module versions, in order to reuse the ESV certificate. - exp := "1b68d4c091ef66c6006602e4ed3ac10f8a82ad193708ec99d63b145e3baa3e6c" + exp := "2541273241ae8aafe55026328354ed3799df1e2fb308b2097833203a42911b53" if got := hex.EncodeToString(h.Sum(nil)); got != exp { t.Errorf("hash of crypto/internal/fips140/entropy = %s, want %s", got, exp) } @@ -249,12 +249,12 @@ func TestEntropyUnchanged(t *testing.T) { func TestEntropyRace(t *testing.T) { // Check that concurrent calls to Seed don't trigger the race detector. - for range 2 { + for range 16 { go func() { _, _ = entropy.Seed(&memory) }() } - // Same, with the higher-level DRBG. More concurrent calls to hit the Pool. + // Same, with the higher-level DRBG. for range 16 { go func() { var b [64]byte From 3765758b96746e202bb52312d4b026085a0f25a1 Mon Sep 17 00:00:00 2001 From: Robert Griesemer Date: Mon, 13 Oct 2025 15:55:04 -0700 Subject: [PATCH 135/152] go/types, types2: minor cleanup (remove TODO) Follow-up to CL 711420. Change-Id: If577e96f413e46b98dd86d11605de1004637851a Reviewed-on: https://go-review.googlesource.com/c/go/+/711540 Reviewed-by: Robert Griesemer Reviewed-by: Mark Freeman LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/types2/decl.go | 35 +++++++++++-------------- src/go/types/decl.go | 35 +++++++++++-------------- 2 files changed, 32 insertions(+), 38 deletions(-) diff --git a/src/cmd/compile/internal/types2/decl.go b/src/cmd/compile/internal/types2/decl.go index 3ff24269669a94..4ebd9454070757 100644 --- a/src/cmd/compile/internal/types2/decl.go +++ b/src/cmd/compile/internal/types2/decl.go @@ -225,8 +225,8 @@ func (check *Checker) validCycle(obj Object) (valid bool) { start := obj.color() - grey // index of obj in objPath cycle := check.objPath[start:] tparCycle := false // if set, the cycle is through a type parameter list - nval := 0 // number of (constant or variable) values in the cycle; valid if !generic - ndef := 0 // number of type definitions in the cycle; valid if !generic + nval := 0 // number of (constant or variable) values in the cycle + ndef := 0 // number of type definitions in the cycle loop: for _, obj := range cycle { switch obj := obj.(type) { @@ -235,7 +235,7 @@ loop: case *TypeName: // If we reach a generic type that is part of a cycle // and we are in a type parameter list, we have a cycle - // through a type parameter list, which is invalid. + // through a type parameter list. if check.inTParamList && isGeneric(obj.typ) { tparCycle = true break loop @@ -286,25 +286,22 @@ loop: }() } - if !tparCycle { - // A cycle involving only constants and variables is invalid but we - // ignore them here because they are reported via the initialization - // cycle check. - if nval == len(cycle) { - return true - } + // Cycles through type parameter lists are ok (go.dev/issue/68162). + if tparCycle { + return true + } - // A cycle involving only types (and possibly functions) must have at least - // one type definition to be permitted: If there is no type definition, we - // have a sequence of alias type names which will expand ad infinitum. 
- if nval == 0 && ndef > 0 { - return true - } + // A cycle involving only constants and variables is invalid but we + // ignore them here because they are reported via the initialization + // cycle check. + if nval == len(cycle) { + return true } - // Cycles through type parameter lists are ok (go.dev/issue/68162). - // TODO(gri) if we are happy with this this, remove this flag and simplify code. - if tparCycle { + // A cycle involving only types (and possibly functions) must have at least + // one type definition to be permitted: If there is no type definition, we + // have a sequence of alias type names which will expand ad infinitum. + if nval == 0 && ndef > 0 { return true } diff --git a/src/go/types/decl.go b/src/go/types/decl.go index fcae1f95295a67..aef7f6ad0b9099 100644 --- a/src/go/types/decl.go +++ b/src/go/types/decl.go @@ -226,8 +226,8 @@ func (check *Checker) validCycle(obj Object) (valid bool) { start := obj.color() - grey // index of obj in objPath cycle := check.objPath[start:] tparCycle := false // if set, the cycle is through a type parameter list - nval := 0 // number of (constant or variable) values in the cycle; valid if !generic - ndef := 0 // number of type definitions in the cycle; valid if !generic + nval := 0 // number of (constant or variable) values in the cycle + ndef := 0 // number of type definitions in the cycle loop: for _, obj := range cycle { switch obj := obj.(type) { @@ -236,7 +236,7 @@ loop: case *TypeName: // If we reach a generic type that is part of a cycle // and we are in a type parameter list, we have a cycle - // through a type parameter list, which is invalid. + // through a type parameter list. if check.inTParamList && isGeneric(obj.typ) { tparCycle = true break loop @@ -287,25 +287,22 @@ loop: }() } - if !tparCycle { - // A cycle involving only constants and variables is invalid but we - // ignore them here because they are reported via the initialization - // cycle check. - if nval == len(cycle) { - return true - } + // Cycles through type parameter lists are ok (go.dev/issue/68162). + if tparCycle { + return true + } - // A cycle involving only types (and possibly functions) must have at least - // one type definition to be permitted: If there is no type definition, we - // have a sequence of alias type names which will expand ad infinitum. - if nval == 0 && ndef > 0 { - return true - } + // A cycle involving only constants and variables is invalid but we + // ignore them here because they are reported via the initialization + // cycle check. + if nval == len(cycle) { + return true } - // Cycles through type parameter lists are ok (go.dev/issue/68162). - // TODO(gri) if we are happy with this this, remove this flag and simplify code. - if tparCycle { + // A cycle involving only types (and possibly functions) must have at least + // one type definition to be permitted: If there is no type definition, we + // have a sequence of alias type names which will expand ad infinitum. 
+ if nval == 0 && ndef > 0 { return true } From 5a9ef44bc05b937cff2394b5880ecca616cb878e Mon Sep 17 00:00:00 2001 From: Mateusz Poliwczak Date: Sun, 12 Oct 2025 11:15:11 +0200 Subject: [PATCH 136/152] cmd/compile/internal/devirtualize: fix OCONVNOP assertion Fixes #75863 Change-Id: I1e5a0f3880dcd5f820a5b6f4540c49b16a6a6964 Reviewed-on: https://go-review.googlesource.com/c/go/+/711141 Reviewed-by: Keith Randall Reviewed-by: Lasse Folger Auto-Submit: Keith Randall Reviewed-by: Keith Randall LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/devirtualize/devirtualize.go | 2 +- test/devirtualization.go | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/src/cmd/compile/internal/devirtualize/devirtualize.go b/src/cmd/compile/internal/devirtualize/devirtualize.go index 9d4160085edde9..363cd6f2e5d17a 100644 --- a/src/cmd/compile/internal/devirtualize/devirtualize.go +++ b/src/cmd/compile/internal/devirtualize/devirtualize.go @@ -223,7 +223,7 @@ func concreteType1(s *State, n ir.Node, seen map[*ir.Name]struct{}) (outT *types switch n1 := n.(type) { case *ir.ConvExpr: if n1.Op() == ir.OCONVNOP { - if !n1.Type().IsInterface() || !types.Identical(n1.Type(), n1.X.Type()) { + if !n1.Type().IsInterface() || !types.Identical(n1.Type().Underlying(), n1.X.Type().Underlying()) { // As we check (directly before this switch) whether n is an interface, thus we should only reach // here for iface conversions where both operands are the same. base.Fatalf("not identical/interface types found n1.Type = %v; n1.X.Type = %v", n1.Type(), n1.X.Type()) diff --git a/test/devirtualization.go b/test/devirtualization.go index e3319052945e00..edabb94108d02d 100644 --- a/test/devirtualization.go +++ b/test/devirtualization.go @@ -1139,6 +1139,12 @@ func devirtWrapperType() { var a A = (implWrapper)(i) // ERROR "implWrapper\(i\) does not escape$" a.A() // ERROR "devirtualizing a.A to implWrapper$" "inlining call to implWrapper.A" } + { + type anyWrapper any + var foo any = &Impl{} // ERROR "&Impl\{\} does not escape" + var bar anyWrapper = foo + bar.(M).M() // ERROR "devirtualizing bar\.\(M\).M to \*Impl" "inlining call to \(\*Impl\)\.M" + } } func selfAssigns() { From 0a239bcc9987b8a20a152feceb4884e810683a2c Mon Sep 17 00:00:00 2001 From: Damien Neil Date: Tue, 14 Oct 2025 09:34:11 -0700 Subject: [PATCH 137/152] Revert "net/url: disallow raw IPv6 addresses in host" This reverts commit e3be2d1b2b68d960398a343805f77052d5decb22. Reason for revert: Causes extensive failures in Google-internal testing. Change-Id: I232f547fc326dff7df959d25f3a89777ea33b201 Reviewed-on: https://go-review.googlesource.com/c/go/+/711800 Auto-Submit: Damien Neil Reviewed-by: Sean Liao LUCI-TryBot-Result: Go LUCI Reviewed-by: Michael Pratt --- src/net/url/url.go | 4 +--- src/net/url/url_test.go | 23 ++++++++++++++++++++--- 2 files changed, 21 insertions(+), 6 deletions(-) diff --git a/src/net/url/url.go b/src/net/url/url.go index a69754880149a9..6afa30f162bd25 100644 --- a/src/net/url/url.go +++ b/src/net/url/url.go @@ -698,9 +698,7 @@ func parseHost(host string) (string, error) { return "", errors.New("invalid IP-literal") } return "[" + unescapedHostname + "]" + unescapedColonPort, nil - } else if i := strings.Index(host, ":"); i != -1 { - // IPv4address / reg-name - // E.g. 
1.2.3.4, 1.2.3.4:80, example.com, example.com:80 + } else if i := strings.LastIndex(host, ":"); i != -1 { colonPort := host[i:] if !validOptionalPort(colonPort) { return "", fmt.Errorf("invalid port %q after host", colonPort) diff --git a/src/net/url/url_test.go b/src/net/url/url_test.go index a7543d6fd40b1e..6084facacc0519 100644 --- a/src/net/url/url_test.go +++ b/src/net/url/url_test.go @@ -506,6 +506,26 @@ var urltests = []URLTest{ }, "", }, + { + // Malformed IPv6 but still accepted. + "http://2b01:e34:ef40:7730:8e70:5aff:fefe:edac:8080/foo", + &URL{ + Scheme: "http", + Host: "2b01:e34:ef40:7730:8e70:5aff:fefe:edac:8080", + Path: "/foo", + }, + "", + }, + { + // Malformed IPv6 but still accepted. + "http://2b01:e34:ef40:7730:8e70:5aff:fefe:edac:/foo", + &URL{ + Scheme: "http", + Host: "2b01:e34:ef40:7730:8e70:5aff:fefe:edac:", + Path: "/foo", + }, + "", + }, { "http://[2b01:e34:ef40:7730:8e70:5aff:fefe:edac]:8080/foo", &URL{ @@ -715,9 +735,6 @@ var parseRequestURLTests = []struct { {"https://[0:0::test.com]:80", false}, {"https://[2001:db8::test.com]", false}, {"https://[test.com]", false}, - {"https://1:2:3:4:5:6:7:8", false}, - {"https://1:2:3:4:5:6:7:8:80", false}, - {"https://example.com:80:", false}, } func TestParseRequestURI(t *testing.T) { From 0ddb5ed4653a2cac6ecc7315fcbb1e9f6dcb6dda Mon Sep 17 00:00:00 2001 From: Mateusz Poliwczak Date: Sun, 12 Oct 2025 10:56:13 +0200 Subject: [PATCH 138/152] cmd/compile/internal/devirtualize: use FatalfAt instead of Fatalf where possible Change-Id: I5e9e9c89336446720c3c21347969e4126a6a6964 Reviewed-on: https://go-review.googlesource.com/c/go/+/711140 Reviewed-by: Keith Randall Reviewed-by: Keith Randall LUCI-TryBot-Result: Go LUCI Reviewed-by: Dmitri Shuralyov Auto-Submit: Keith Randall --- .../internal/devirtualize/devirtualize.go | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/src/cmd/compile/internal/devirtualize/devirtualize.go b/src/cmd/compile/internal/devirtualize/devirtualize.go index 363cd6f2e5d17a..dfcdd42236dcca 100644 --- a/src/cmd/compile/internal/devirtualize/devirtualize.go +++ b/src/cmd/compile/internal/devirtualize/devirtualize.go @@ -187,7 +187,7 @@ func concreteType(s *State, n ir.Node) (typ *types.Type) { return nil } if typ != nil && typ.IsInterface() { - base.Fatalf("typ.IsInterface() = true; want = false; typ = %v", typ) + base.FatalfAt(n.Pos(), "typ.IsInterface() = true; want = false; typ = %v", typ) } return typ } @@ -226,7 +226,7 @@ func concreteType1(s *State, n ir.Node, seen map[*ir.Name]struct{}) (outT *types if !n1.Type().IsInterface() || !types.Identical(n1.Type().Underlying(), n1.X.Type().Underlying()) { // As we check (directly before this switch) whether n is an interface, thus we should only reach // here for iface conversions where both operands are the same. - base.Fatalf("not identical/interface types found n1.Type = %v; n1.X.Type = %v", n1.Type(), n1.X.Type()) + base.FatalfAt(n1.Pos(), "not identical/interface types found n1.Type = %v; n1.X.Type = %v", n1.Type(), n1.X.Type()) } n = n1.X continue @@ -260,12 +260,12 @@ func concreteType1(s *State, n ir.Node, seen map[*ir.Name]struct{}) (outT *types } if name.Op() != ir.ONAME { - base.Fatalf("name.Op = %v; want = ONAME", n.Op()) + base.FatalfAt(name.Pos(), "name.Op = %v; want = ONAME", n.Op()) } // name.Curfn must be set, as we checked name.Class != ir.PAUTO before. 
if name.Curfn == nil { - base.Fatalf("name.Curfn = nil; want not nil") + base.FatalfAt(name.Pos(), "name.Curfn = nil; want not nil") } if name.Addrtaken() { @@ -385,11 +385,11 @@ func (s *State) InlinedCall(fun *ir.Func, origCall *ir.CallExpr, inlinedCall *ir func (s *State) assignments(n *ir.Name) []assignment { fun := n.Curfn if fun == nil { - base.Fatalf("n.Curfn = ") + base.FatalfAt(n.Pos(), "n.Curfn = ") } if !n.Type().IsInterface() { - base.Fatalf("name passed to assignments is not of an interface type: %v", n.Type()) + base.FatalfAt(n.Pos(), "name passed to assignments is not of an interface type: %v", n.Type()) } // Analyze assignments in func, if not analyzed before. @@ -430,7 +430,7 @@ func (s *State) analyze(nodes ir.Nodes) { n = n.Canonical() if n.Op() != ir.ONAME { - base.Fatalf("n.Op = %v; want = ONAME", n.Op()) + base.FatalfAt(n.Pos(), "n.Op = %v; want = ONAME", n.Op()) } switch a := assignment.(type) { @@ -492,14 +492,14 @@ func (s *State) analyze(nodes ir.Nodes) { case ir.OAS2DOTTYPE: n := n.(*ir.AssignListStmt) if n.Rhs[0] == nil { - base.Fatalf("n.Rhs[0] == nil; n = %v", n) + base.FatalfAt(n.Pos(), "n.Rhs[0] == nil; n = %v", n) } assign(n.Lhs[0], n.Rhs[0]) assign(n.Lhs[1], nil) // boolean does not have methods to devirtualize case ir.OAS2MAPR, ir.OAS2RECV, ir.OSELRECV2: n := n.(*ir.AssignListStmt) if n.Rhs[0] == nil { - base.Fatalf("n.Rhs[0] == nil; n = %v", n) + base.FatalfAt(n.Pos(), "n.Rhs[0] == nil; n = %v", n) } assign(n.Lhs[0], n.Rhs[0].Type()) assign(n.Lhs[1], nil) // boolean does not have methods to devirtualize @@ -529,7 +529,7 @@ func (s *State) analyze(nodes ir.Nodes) { assign(p, call.ReturnVars[i]) } } else { - base.Fatalf("unexpected type %T in OAS2FUNC Rhs[0]", call) + base.FatalfAt(n.Pos(), "unexpected type %T in OAS2FUNC Rhs[0]", call) } case ir.ORANGE: n := n.(*ir.RangeStmt) @@ -545,7 +545,7 @@ func (s *State) analyze(nodes ir.Nodes) { assign(n.Value, xTyp.Elem()) } else if xTyp.IsChan() { assign(n.Key, xTyp.Elem()) - base.Assertf(n.Value == nil, "n.Value != nil in range over chan") + base.AssertfAt(n.Value == nil, n.Pos(), "n.Value != nil in range over chan") } else if xTyp.IsMap() { assign(n.Key, xTyp.Key()) assign(n.Value, xTyp.Elem()) @@ -556,7 +556,7 @@ func (s *State) analyze(nodes ir.Nodes) { } else { // We will not reach here in case of an range-over-func, as it is // rewrtten to function calls in the noder package. - base.Fatalf("range over unexpected type %v", n.X.Type()) + base.FatalfAt(n.Pos(), "range over unexpected type %v", n.X.Type()) } case ir.OSWITCH: n := n.(*ir.SwitchStmt) From 4dbf1a5a4c8cdb4233d17cc4fa1ca8865add6ac5 Mon Sep 17 00:00:00 2001 From: Mateusz Poliwczak Date: Sun, 12 Oct 2025 11:24:32 +0200 Subject: [PATCH 139/152] cmd/compile/internal/devirtualize: do not track assignments to non-PAUTO We do not lookup/devirtualize such, so we can skip tracking them. 
Change-Id: I8bdb0b11c694e4b2326c236093508a356a6a6964
Reviewed-on: https://go-review.googlesource.com/c/go/+/711160
Reviewed-by: Keith Randall
Auto-Submit: Keith Randall
Reviewed-by: Dmitri Shuralyov
LUCI-TryBot-Result: Go LUCI
Reviewed-by: Keith Randall
---
 src/cmd/compile/internal/devirtualize/devirtualize.go | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/src/cmd/compile/internal/devirtualize/devirtualize.go b/src/cmd/compile/internal/devirtualize/devirtualize.go
index dfcdd42236dcca..cb4608a0246574 100644
--- a/src/cmd/compile/internal/devirtualize/devirtualize.go
+++ b/src/cmd/compile/internal/devirtualize/devirtualize.go
@@ -387,6 +387,9 @@ func (s *State) assignments(n *ir.Name) []assignment {
 	if fun == nil {
 		base.FatalfAt(n.Pos(), "n.Curfn = ")
 	}
+	if n.Class != ir.PAUTO {
+		base.FatalfAt(n.Pos(), "n.Class = %v; want = PAUTO", n.Class)
+	}
 
 	if !n.Type().IsInterface() {
 		base.FatalfAt(n.Pos(), "name passed to assignments is not of an interface type: %v", n.Type())
@@ -432,6 +435,9 @@ func (s *State) analyze(nodes ir.Nodes) {
 		if n.Op() != ir.ONAME {
 			base.FatalfAt(n.Pos(), "n.Op = %v; want = ONAME", n.Op())
 		}
+		if n.Class != ir.PAUTO {
+			return nil, -1
+		}
 
 		switch a := assignment.(type) {
 		case nil:

From 11d3d2f77d8293fe14638e74cbf52d1241b60e78 Mon Sep 17 00:00:00 2001
From: Bill Roberts
Date: Mon, 8 Sep 2025 11:31:22 -0500
Subject: [PATCH 140/152] cmd/internal/obj/arm64: add support for PAC instructions

Add support for the Pointer Authentication Code instructions required
for the ELF ABI when enabling PAC aware binaries. This allows for
assembly writers to add PAC instructions where needed to support this
ABI. Follow up work is to enable the compiler to emit these
instructions in the appropriate places.

The TL;DR for the Linux ABI is that the prologue of a function that
pushes the link register (LR) to the stack signs the LR with a key
managed by the operating system and hardware using a PAC instruction,
like "paciasp". The function epilog, when restoring the LR from the
stack, will verify the signature, using an instruction like "autiasp".
This helps prevent attackers from modifying the return address on the
stack, a common technique for ROP attacks.
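As a rough sketch of what this enables for hand-written assembly, the
new mnemonics are accepted like any other zero-operand instruction (the
function name here is hypothetical; this CL's testdata only checks the
encodings, and wiring the sign/authenticate pair into real prologues and
epilogues is the follow-up compiler work mentioned above):

	#include "textflag.h"

	// func pacDemo()
	TEXT ·pacDemo(SB), NOSPLIT, $0-0
		PACIASP   // sign LR with key A (hint #25)
		AUTIASP   // authenticate LR with key A (hint #29)
		PACIBSP   // sign LR with key B (hint #27)
		AUTIBSP   // authenticate LR with key B (hint #31)
		RET

Because these instructions live in the hint space, they execute as NOPs
on cores without pointer authentication, so code that emits them remains
backward compatible.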
Details on PAC can be found here: - https://community.arm.com/arm-community-blogs/b/architectures-and-processors-blog/posts/enabling-pac-and-bti-on-aarch64 - https://developer.arm.com/documentation/109576/0100/Pointer-Authentication-Code The ABI details can be found here: - https://github.com/ARM-software/abi-aa/blob/main/aaelf64/aaelf64.rst Change-Id: I4516ed1294d19f9ff9d278833d542821b6642aa9 Reviewed-on: https://go-review.googlesource.com/c/go/+/676675 Reviewed-by: Cherry Mui Reviewed-by: Joel Sing LUCI-TryBot-Result: Go LUCI Reviewed-by: Dmitri Shuralyov --- src/cmd/asm/internal/asm/testdata/arm64.s | 8 ++++++ .../asm/internal/asm/testdata/arm64error.s | 6 +++++ src/cmd/internal/obj/arm64/a.out.go | 6 +++++ src/cmd/internal/obj/arm64/anames.go | 6 +++++ src/cmd/internal/obj/arm64/asm7.go | 25 +++++++++++++++++++ 5 files changed, 51 insertions(+) diff --git a/src/cmd/asm/internal/asm/testdata/arm64.s b/src/cmd/asm/internal/asm/testdata/arm64.s index 236f1a66979099..109a3d8316678b 100644 --- a/src/cmd/asm/internal/asm/testdata/arm64.s +++ b/src/cmd/asm/internal/asm/testdata/arm64.s @@ -1894,4 +1894,12 @@ next: BTI J // 9f2403d5 BTI JC // df2403d5 +// Pointer Authentication Codes (PAC) + PACIASP // 3f2303d5 + AUTIASP // bf2303d5 + PACIBSP // 7f2303d5 + AUTIBSP // ff2303d5 + AUTIA1716 // 9f2103d5 + AUTIB1716 // df2103d5 + END diff --git a/src/cmd/asm/internal/asm/testdata/arm64error.s b/src/cmd/asm/internal/asm/testdata/arm64error.s index 55890ce3e631a4..ce88e3ca540f13 100644 --- a/src/cmd/asm/internal/asm/testdata/arm64error.s +++ b/src/cmd/asm/internal/asm/testdata/arm64error.s @@ -422,4 +422,10 @@ TEXT errors(SB),$0 SHA1H V1.B16, V2.B16 // ERROR "invalid operands" BTI // ERROR "missing operand" BTI PLDL1KEEP // ERROR "illegal argument" + PACIASP C // ERROR "illegal combination" + AUTIASP R2 // ERROR "illegal combination" + PACIBSP R0 // ERROR "illegal combination" + AUTIBSP C // ERROR "illegal combination" + AUTIA1716 $45 // ERROR "illegal combination" + AUTIB1716 R0 // ERROR "illegal combination" RET diff --git a/src/cmd/internal/obj/arm64/a.out.go b/src/cmd/internal/obj/arm64/a.out.go index 710dd64b304c12..814dba2c100b30 100644 --- a/src/cmd/internal/obj/arm64/a.out.go +++ b/src/cmd/internal/obj/arm64/a.out.go @@ -1020,6 +1020,12 @@ const ( AWORD AYIELD ABTI + APACIASP + AAUTIASP + APACIBSP + AAUTIBSP + AAUTIA1716 + AAUTIB1716 ALAST AB = obj.AJMP ABL = obj.ACALL diff --git a/src/cmd/internal/obj/arm64/anames.go b/src/cmd/internal/obj/arm64/anames.go index 379f53bab37cdb..497429d9985922 100644 --- a/src/cmd/internal/obj/arm64/anames.go +++ b/src/cmd/internal/obj/arm64/anames.go @@ -537,5 +537,11 @@ var Anames = []string{ "WORD", "YIELD", "BTI", + "PACIASP", + "AUTIASP", + "PACIBSP", + "AUTIBSP", + "AUTIA1716", + "AUTIB1716", "LAST", } diff --git a/src/cmd/internal/obj/arm64/asm7.go b/src/cmd/internal/obj/arm64/asm7.go index 172c2256d7eede..cfb70cf08a3fd8 100644 --- a/src/cmd/internal/obj/arm64/asm7.go +++ b/src/cmd/internal/obj/arm64/asm7.go @@ -3017,6 +3017,13 @@ func buildop(ctxt *obj.Link) { oprangeset(ANOOP, t) oprangeset(ADRPS, t) + oprangeset(APACIASP, t) + oprangeset(AAUTIASP, t) + oprangeset(APACIBSP, t) + oprangeset(AAUTIBSP, t) + oprangeset(AAUTIA1716, t) + oprangeset(AAUTIB1716, t) + case ACBZ: oprangeset(ACBZW, t) oprangeset(ACBNZ, t) @@ -7016,6 +7023,24 @@ func (c *ctxt7) op0(p *obj.Prog, a obj.As) uint32 { case ASEVL: return SYSHINT(5) + + case APACIASP: + return SYSHINT(25) + + case AAUTIASP: + return SYSHINT(29) + + case APACIBSP: + return SYSHINT(27) + + case AAUTIBSP: 
+ return SYSHINT(31) + + case AAUTIA1716: + return SYSHINT(12) + + case AAUTIB1716: + return SYSHINT(14) } c.ctxt.Diag("%v: bad op0 %v", p, a) From ee5af46172e64eceddb56018de8ea850fe0a6cae Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Sat, 11 Oct 2025 11:57:46 -0700 Subject: [PATCH 141/152] encoding/json: avoid misleading errors under goexperiment.jsonv2 The jsontext package represents the location of JSON errors using a JSON Pointer (RFC 6901). This uses the JSON type system. Unfortunately the v1 json.UnmarshalTypeError assumes a Go struct-based mechanism for reporting the location of errors (and has historically never been implemented correctly since it was a weird mix of both JSON and Go namespaces; see #43126). Trying to map a JSON Pointer into UnmarshalTypeError.{Struct,Field} is difficult to get right without teaching jsontext about the Go type system. To reduce the probability of misleading errors, check whether the last token looks like a JSON array index and if so, elide the phrase "into Go struct field". Fixes #74801 Change-Id: Id2088ffb9c339a9238ed38c90223d86a89422842 Reviewed-on: https://go-review.googlesource.com/c/go/+/710676 LUCI-TryBot-Result: Go LUCI Reviewed-by: Dmitri Shuralyov Reviewed-by: Damien Neil --- src/encoding/json/v2_decode.go | 16 +++++++++++++++- src/encoding/json/v2_decode_test.go | 28 ++++++++++++++++++++++++++++ 2 files changed, 43 insertions(+), 1 deletion(-) diff --git a/src/encoding/json/v2_decode.go b/src/encoding/json/v2_decode.go index 1041ec7ee402d4..f17d7ebccada0a 100644 --- a/src/encoding/json/v2_decode.go +++ b/src/encoding/json/v2_decode.go @@ -14,6 +14,7 @@ import ( "fmt" "reflect" "strconv" + "strings" "encoding/json/internal/jsonwire" "encoding/json/jsontext" @@ -119,7 +120,20 @@ type UnmarshalTypeError struct { func (e *UnmarshalTypeError) Error() string { var s string if e.Struct != "" || e.Field != "" { - s = "json: cannot unmarshal " + e.Value + " into Go struct field " + e.Struct + "." + e.Field + " of type " + e.Type.String() + // The design of UnmarshalTypeError overly assumes a struct-based + // Go representation for the JSON value. + // The logic in jsontext represents paths using a JSON Pointer, + // which is agnostic to the Go type system. + // Trying to convert a JSON Pointer into a UnmarshalTypeError.Field + // is difficult. As a heuristic, if the last path token looks like + // an index into a JSON array (e.g., ".foo.bar.0"), + // avoid the phrase "Go struct field ". + intoWhat := "Go struct field " + i := strings.LastIndexByte(e.Field, '.') + len(".") + if len(e.Field[i:]) > 0 && strings.TrimRight(e.Field[i:], "0123456789") == "" { + intoWhat = "" // likely a Go slice or array + } + s = "json: cannot unmarshal " + e.Value + " into " + intoWhat + e.Struct + "." + e.Field + " of type " + e.Type.String() } else { s = "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String() } diff --git a/src/encoding/json/v2_decode_test.go b/src/encoding/json/v2_decode_test.go index 28c57ec8bf5c73..26b4448721e4ec 100644 --- a/src/encoding/json/v2_decode_test.go +++ b/src/encoding/json/v2_decode_test.go @@ -2363,6 +2363,34 @@ func TestUnmarshalTypeError(t *testing.T) { } } +func TestUnmarshalTypeErrorMessage(t *testing.T) { + err := &UnmarshalTypeError{ + Value: "number 5", + Type: reflect.TypeFor[int](), + Offset: 1234, + Struct: "Root", + } + + for _, tt := range []struct { + field string + want string + }{ + {"", "json: cannot unmarshal number 5 into Go struct field Root. 
of type int"}, + {"1", "json: cannot unmarshal number 5 into Root.1 of type int"}, + {"foo", "json: cannot unmarshal number 5 into Go struct field Root.foo of type int"}, + {"foo.1", "json: cannot unmarshal number 5 into Root.foo.1 of type int"}, + {"foo.bar", "json: cannot unmarshal number 5 into Go struct field Root.foo.bar of type int"}, + {"foo.bar.1", "json: cannot unmarshal number 5 into Root.foo.bar.1 of type int"}, + {"foo.bar.baz", "json: cannot unmarshal number 5 into Go struct field Root.foo.bar.baz of type int"}, + } { + err.Field = tt.field + got := err.Error() + if got != tt.want { + t.Errorf("Error:\n\tgot: %v\n\twant: %v", got, tt.want) + } + } +} + func TestUnmarshalSyntax(t *testing.T) { var x any tests := []struct { From 3bc9d9fa8348064c33f6e69f376994af4380d3ab Mon Sep 17 00:00:00 2001 From: David Chase Date: Tue, 14 Oct 2025 12:11:10 -0700 Subject: [PATCH 142/152] Revert "cmd/compile: make wasm match other platforms for FP->int32/64 conversions" This reverts commit 8d810286b3121b601480426159c04d178fa29166. Reason for revert: we need to do this more carefully, at minimum gated by a module version Change-Id: Ia951e2e5ecdd455ea0f17567963c6fab0f4540dc Reviewed-on: https://go-review.googlesource.com/c/go/+/711840 LUCI-TryBot-Result: Go LUCI Reviewed-by: Keith Randall Reviewed-by: Keith Randall --- src/cmd/compile/internal/ssa/_gen/Wasm.rules | 9 ++-- src/cmd/compile/internal/ssa/_gen/WasmOps.go | 17 ++---- src/cmd/compile/internal/ssa/opGen.go | 56 -------------------- src/cmd/compile/internal/ssa/rewriteWasm.go | 8 +-- src/cmd/compile/internal/wasm/ssa.go | 11 ---- test/convert5.go | 5 ++ 6 files changed, 17 insertions(+), 89 deletions(-) diff --git a/src/cmd/compile/internal/ssa/_gen/Wasm.rules b/src/cmd/compile/internal/ssa/_gen/Wasm.rules index bb123bc3cc48e7..f632a01109f764 100644 --- a/src/cmd/compile/internal/ssa/_gen/Wasm.rules +++ b/src/cmd/compile/internal/ssa/_gen/Wasm.rules @@ -76,14 +76,13 @@ (Cvt32Uto(64|32)F x) => (F(64|32)ConvertI64U (ZeroExt32to64 x)) (Cvt64Uto(64|32)F ...) => (F(64|32)ConvertI64U ...) -(Cvt32Fto32 ...) => (I32TruncSatF32S ...) +(Cvt32Fto32 ...) => (I64TruncSatF32S ...) (Cvt32Fto64 ...) => (I64TruncSatF32S ...) -(Cvt64Fto32 ...) => (I32TruncSatF64S ...) +(Cvt64Fto32 ...) => (I64TruncSatF64S ...) (Cvt64Fto64 ...) => (I64TruncSatF64S ...) - -(Cvt32Fto32U ...) => (I32TruncSatF32U ...) +(Cvt32Fto32U ...) => (I64TruncSatF32U ...) (Cvt32Fto64U ...) => (I64TruncSatF32U ...) -(Cvt64Fto32U ...) => (I32TruncSatF64U ...) +(Cvt64Fto32U ...) => (I64TruncSatF64U ...) (Cvt64Fto64U ...) => (I64TruncSatF64U ...) (Cvt32Fto64F ...) => (F64PromoteF32 ...) 
diff --git a/src/cmd/compile/internal/ssa/_gen/WasmOps.go b/src/cmd/compile/internal/ssa/_gen/WasmOps.go index b63f28a2193a2f..45bbed5f520201 100644 --- a/src/cmd/compile/internal/ssa/_gen/WasmOps.go +++ b/src/cmd/compile/internal/ssa/_gen/WasmOps.go @@ -222,19 +222,10 @@ func init() { {name: "F64Mul", asm: "F64Mul", argLength: 2, reg: fp64_21, typ: "Float64"}, // arg0 * arg1 {name: "F64Div", asm: "F64Div", argLength: 2, reg: fp64_21, typ: "Float64"}, // arg0 / arg1 - {name: "I64TruncSatF64S", asm: "I64TruncSatF64S", argLength: 1, reg: regInfo{inputs: []regMask{fp64}, outputs: []regMask{gp}}, typ: "Int64"}, // truncates the float arg0 to a signed integer (saturating) - {name: "I64TruncSatF64U", asm: "I64TruncSatF64U", argLength: 1, reg: regInfo{inputs: []regMask{fp64}, outputs: []regMask{gp}}, typ: "Uint64"}, // truncates the float arg0 to an unsigned integer (saturating) - {name: "I64TruncSatF32S", asm: "I64TruncSatF32S", argLength: 1, reg: regInfo{inputs: []regMask{fp32}, outputs: []regMask{gp}}, typ: "Int64"}, // truncates the float arg0 to a signed integer (saturating) - {name: "I64TruncSatF32U", asm: "I64TruncSatF32U", argLength: 1, reg: regInfo{inputs: []regMask{fp32}, outputs: []regMask{gp}}, typ: "Uint64"}, // truncates the float arg0 to an unsigned integer (saturating) - - // It appears to be wasm convention that everything lands in a 64-bit register; - // the WASM instructions for these operations produce 32-bit width results, but - // wasm/ssa.go widens them appropriately to 64-bit results. - {name: "I32TruncSatF64S", asm: "I32TruncSatF64S", argLength: 1, reg: regInfo{inputs: []regMask{fp64}, outputs: []regMask{gp}}, typ: "Int64"}, // truncates the float arg0 to a signed integer (saturating) - {name: "I32TruncSatF64U", asm: "I32TruncSatF64U", argLength: 1, reg: regInfo{inputs: []regMask{fp64}, outputs: []regMask{gp}}, typ: "Uint64"}, // truncates the float arg0 to an unsigned integer (saturating) - {name: "I32TruncSatF32S", asm: "I32TruncSatF32S", argLength: 1, reg: regInfo{inputs: []regMask{fp32}, outputs: []regMask{gp}}, typ: "Int64"}, // truncates the float arg0 to a signed integer (saturating) - {name: "I32TruncSatF32U", asm: "I32TruncSatF32U", argLength: 1, reg: regInfo{inputs: []regMask{fp32}, outputs: []regMask{gp}}, typ: "Uint64"}, // truncates the float arg0 to an unsigned integer (saturating) - + {name: "I64TruncSatF64S", asm: "I64TruncSatF64S", argLength: 1, reg: regInfo{inputs: []regMask{fp64}, outputs: []regMask{gp}}, typ: "Int64"}, // truncates the float arg0 to a signed integer (saturating) + {name: "I64TruncSatF64U", asm: "I64TruncSatF64U", argLength: 1, reg: regInfo{inputs: []regMask{fp64}, outputs: []regMask{gp}}, typ: "Int64"}, // truncates the float arg0 to an unsigned integer (saturating) + {name: "I64TruncSatF32S", asm: "I64TruncSatF32S", argLength: 1, reg: regInfo{inputs: []regMask{fp32}, outputs: []regMask{gp}}, typ: "Int64"}, // truncates the float arg0 to a signed integer (saturating) + {name: "I64TruncSatF32U", asm: "I64TruncSatF32U", argLength: 1, reg: regInfo{inputs: []regMask{fp32}, outputs: []regMask{gp}}, typ: "Int64"}, // truncates the float arg0 to an unsigned integer (saturating) {name: "F32ConvertI64S", asm: "F32ConvertI64S", argLength: 1, reg: regInfo{inputs: []regMask{gp}, outputs: []regMask{fp32}}, typ: "Float32"}, // converts the signed integer arg0 to a float {name: "F32ConvertI64U", asm: "F32ConvertI64U", argLength: 1, reg: regInfo{inputs: []regMask{gp}, outputs: []regMask{fp32}}, typ: "Float32"}, // converts the unsigned integer arg0 to 
a float {name: "F64ConvertI64S", asm: "F64ConvertI64S", argLength: 1, reg: regInfo{inputs: []regMask{gp}, outputs: []regMask{fp64}}, typ: "Float64"}, // converts the signed integer arg0 to a float diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 3ed1619e4a6ab9..ee0eb657dcb14f 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -2980,10 +2980,6 @@ const ( OpWasmI64TruncSatF64U OpWasmI64TruncSatF32S OpWasmI64TruncSatF32U - OpWasmI32TruncSatF64S - OpWasmI32TruncSatF64U - OpWasmI32TruncSatF32S - OpWasmI32TruncSatF32U OpWasmF32ConvertI64S OpWasmF32ConvertI64U OpWasmF64ConvertI64S @@ -40294,58 +40290,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "I32TruncSatF64S", - argLen: 1, - asm: wasm.AI32TruncSatF64S, - reg: regInfo{ - inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, - { - name: "I32TruncSatF64U", - argLen: 1, - asm: wasm.AI32TruncSatF64U, - reg: regInfo{ - inputs: []inputInfo{ - {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 - }, - outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, - { - name: "I32TruncSatF32S", - argLen: 1, - asm: wasm.AI32TruncSatF32S, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, - { - name: "I32TruncSatF32U", - argLen: 1, - asm: wasm.AI32TruncSatF32U, - reg: regInfo{ - inputs: []inputInfo{ - {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 - }, - outputs: []outputInfo{ - {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, { name: "F32ConvertI64S", argLen: 1, diff --git a/src/cmd/compile/internal/ssa/rewriteWasm.go b/src/cmd/compile/internal/ssa/rewriteWasm.go index dd73d3a5e705ed..a164a6eee555b9 100644 --- a/src/cmd/compile/internal/ssa/rewriteWasm.go +++ b/src/cmd/compile/internal/ssa/rewriteWasm.go @@ -120,10 +120,10 @@ func rewriteValueWasm(v *Value) bool { v.Op = OpWasmI64Ctz return true case OpCvt32Fto32: - v.Op = OpWasmI32TruncSatF32S + v.Op = OpWasmI64TruncSatF32S return true case OpCvt32Fto32U: - v.Op = OpWasmI32TruncSatF32U + v.Op = OpWasmI64TruncSatF32U return true case OpCvt32Fto64: v.Op = OpWasmI64TruncSatF32S @@ -143,13 +143,13 @@ func rewriteValueWasm(v *Value) bool { case OpCvt32to64F: return rewriteValueWasm_OpCvt32to64F(v) case OpCvt64Fto32: - v.Op = OpWasmI32TruncSatF64S + v.Op = OpWasmI64TruncSatF64S return true case OpCvt64Fto32F: v.Op = OpWasmF32DemoteF64 return true case OpCvt64Fto32U: - v.Op = OpWasmI32TruncSatF64U + v.Op = OpWasmI64TruncSatF64U return true case OpCvt64Fto64: v.Op = OpWasmI64TruncSatF64S diff --git a/src/cmd/compile/internal/wasm/ssa.go b/src/cmd/compile/internal/wasm/ssa.go index 8ebc90288572a3..1e3b318e8c9fe0 100644 --- a/src/cmd/compile/internal/wasm/ssa.go +++ b/src/cmd/compile/internal/wasm/ssa.go @@ -430,17 +430,6 @@ func ssaGenValueOnStack(s *ssagen.State, v *ssa.Value, extend bool) { getValue64(s, v.Args[0]) s.Prog(v.Op.Asm()) - // 32-bit integer conversion results - case ssa.OpWasmI32TruncSatF32S, ssa.OpWasmI32TruncSatF64S: - getValue64(s, v.Args[0]) - s.Prog(v.Op.Asm()) - s.Prog(wasm.AI64ExtendI32S) - - 
case ssa.OpWasmI32TruncSatF32U, ssa.OpWasmI32TruncSatF64U: - getValue64(s, v.Args[0]) - s.Prog(v.Op.Asm()) - s.Prog(wasm.AI64ExtendI32U) - case ssa.OpWasmF32DemoteF64: getValue64(s, v.Args[0]) s.Prog(v.Op.Asm()) diff --git a/test/convert5.go b/test/convert5.go index 4688fae85587d4..27aa7867f42824 100644 --- a/test/convert5.go +++ b/test/convert5.go @@ -4,6 +4,11 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !wasm + +// TODO fix this to work for wasm +// Doing more than this, however, expands the change. + package main import ( From bb2a14252b989f89665d17b66417eff815200e3b Mon Sep 17 00:00:00 2001 From: David Chase Date: Tue, 14 Oct 2025 12:12:12 -0700 Subject: [PATCH 143/152] Revert "runtime: adjust softfloat corner cases to match amd64/arm64" This reverts commit b9f3accdcf973ca41069e22e6859b9436801aae5. Reason for revert: we need to do this more carefully, at minimum gated by a module version (This should follow the WASM FP conversion revert) Change-Id: Ib98ce7d243348f69c9944db8537397b225c2cc33 Reviewed-on: https://go-review.googlesource.com/c/go/+/711841 Reviewed-by: Keith Randall TryBot-Bypass: David Chase Reviewed-by: Keith Randall --- src/runtime/export_test.go | 1 - src/runtime/softfloat64.go | 93 ++++++++------------------------- src/runtime/softfloat64_test.go | 33 +----------- test/convert5.go | 5 -- 4 files changed, 25 insertions(+), 107 deletions(-) diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go index f61cac763cef3d..9f2fcacc30ee5c 100644 --- a/src/runtime/export_test.go +++ b/src/runtime/export_test.go @@ -25,7 +25,6 @@ var F32to64 = f32to64 var Fcmp64 = fcmp64 var Fintto64 = fintto64 var F64toint = f64toint -var F64touint = f64touint64 var Entersyscall = entersyscall var Exitsyscall = exitsyscall diff --git a/src/runtime/softfloat64.go b/src/runtime/softfloat64.go index 7b9409f75be380..42ef0092970b3e 100644 --- a/src/runtime/softfloat64.go +++ b/src/runtime/softfloat64.go @@ -26,11 +26,6 @@ const ( neg32 uint32 = 1 << (expbits32 + mantbits32) ) -// If F is not NaN and not Inf, then f == (-1)**sign * mantissa * 2**(exp-52) -// The mantissa and exp are adjusted from their stored representation so -// that the mantissa includes the formerly implicit 1, the exponent bias -// is removed, and denormalized floats to put a 1 in the expected -// (1<= 63: // |f| >= 2^63, including infinity + case fe > 63: // f >= 2^63 + if fs != 0 && fm == 0 { // f == -2^63 + return -1 << 63, true + } if fs != 0 { - return -0x8000_0000_0000_0000, true + return 0, false } - return 0x7fff_ffff_ffff_ffff, true + return 0, false } for fe > int(mantbits64) { @@ -406,51 +400,12 @@ func f64toint(f uint64) (val int64, isNan bool) { fm >>= 1 } val = int64(fm) - if val < 0 { - if fs != 0 { - return -0x8000_0000_0000_0000, true - } - return 0x7fff_ffff_ffff_ffff, true - } if fs != 0 { val = -val } return val, true } -// returns saturated-conversion uint64 value of f -// and whether the input was NaN (in which case it -// may not match the "hardware" conversion). 
-func f64touint(f uint64) (val uint64, isNan bool) { - fs, fm, fe, fi, fn := funpack64(f) - - switch { - - case fn: // NaN - return 0xffff_ffff_ffff_ffff, false - - case fs != 0: // all negative, including -Inf, are zero - return 0, true - - case fi || fe >= 64: // positive infinity or f >= 2^64 - return 0xffff_ffff_ffff_ffff, true - - case fe < -1: // f < 0.5 - return 0, true - } - - for fe > int(mantbits64) { - fe-- - fm <<= 1 - } - for fe < int(mantbits64) { - fe++ - fm >>= 1 - } - val = fm - return val, true -} - func fintto64(val int64) (f uint64) { fs := uint64(val) & (1 << 63) mant := uint64(val) @@ -609,12 +564,6 @@ func fint64to64(x int64) uint64 { func f32toint32(x uint32) int32 { val, _ := f64toint(f32to64(x)) - if val >= 0x7fffffff { - return 0x7fffffff - } - if val < -0x80000000 { - return -0x80000000 - } return int32(val) } @@ -625,12 +574,6 @@ func f32toint64(x uint32) int64 { func f64toint32(x uint64) int32 { val, _ := f64toint(x) - if val >= 0x7fffffff { - return 0x7fffffff - } - if val < -0x80000000 { - return -0x80000000 - } return int32(val) } @@ -640,13 +583,23 @@ func f64toint64(x uint64) int64 { } func f64touint64(x uint64) uint64 { - val, _ := f64touint(x) - return val + var m uint64 = 0x43e0000000000000 // float64 1<<63 + if fgt64(m, x) { + return uint64(f64toint64(x)) + } + y := fadd64(x, -m) + z := uint64(f64toint64(y)) + return z | (1 << 63) } func f32touint64(x uint32) uint64 { - val, _ := f64touint(f32to64(x)) - return val + var m uint32 = 0x5f000000 // float32 1<<63 + if fgt32(m, x) { + return uint64(f32toint64(x)) + } + y := fadd32(x, -m) + z := uint64(f32toint64(y)) + return z | (1 << 63) } func fuint64to64(x uint64) uint64 { diff --git a/src/runtime/softfloat64_test.go b/src/runtime/softfloat64_test.go index 233d5e01c0ea60..3f53e8bc55810c 100644 --- a/src/runtime/softfloat64_test.go +++ b/src/runtime/softfloat64_test.go @@ -28,15 +28,6 @@ func div(x, y float64) float64 { return x / y } func TestFloat64(t *testing.T) { base := []float64{ 0, - 1, - -9223372036854775808, - -9223372036854775808 + 4096, - 18446744073709551615, - 18446744073709551615 + 1, - 18446744073709551615 - 1, - 9223372036854775808 + 4096, - 0.5, - 0.75, math.Copysign(0, -1), -1, 1, @@ -44,8 +35,6 @@ func TestFloat64(t *testing.T) { math.Inf(+1), math.Inf(-1), 0.1, - 0.5, - 0.75, 1.5, 1.9999999999999998, // all 1s mantissa 1.3333333333333333, // 1.010101010101... @@ -81,7 +70,7 @@ func TestFloat64(t *testing.T) { 1e+307, 1e+308, } - all := make([]float64, 250) + all := make([]float64, 200) copy(all, base) for i := len(base); i < len(all); i++ { all[i] = rand.NormFloat64() @@ -93,7 +82,6 @@ func TestFloat64(t *testing.T) { test(t, "*", mul, fop(Fmul64), all) test(t, "/", div, fop(Fdiv64), all) } - } // 64 -hw-> 32 -hw-> 64 @@ -116,11 +104,6 @@ func hwint64(f float64) float64 { return float64(int64(f)) } -// float64 -hw-> uint64 -hw-> float64 -func hwuint64(f float64) float64 { - return float64(uint64(f)) -} - // float64 -hw-> int32 -hw-> float64 func hwint32(f float64) float64 { return float64(int32(f)) @@ -130,23 +113,13 @@ func hwint32(f float64) float64 { func toint64sw(f float64) float64 { i, ok := F64toint(math.Float64bits(f)) if !ok { - // There's no right answer for NaN. + // There's no right answer for out of range. // Match the hardware to pass the test. i = int64(f) } return float64(i) } -func touint64sw(f float64) float64 { - i := F64touint(math.Float64bits(f)) - if f != f { - // There's no right answer for NaN. - // Match the hardware to pass the test. 
- i = uint64(f) - } - return float64(i) -} - // float64 -hw-> int64 -sw-> float64 func fromint64sw(f float64) float64 { return math.Float64frombits(Fintto64(int64(f))) @@ -177,7 +150,6 @@ func test(t *testing.T, op string, hw, sw func(float64, float64) float64, all [] testu(t, "to32", trunc32, to32sw, h) testu(t, "to64", trunc32, to64sw, h) testu(t, "toint64", hwint64, toint64sw, h) - testu(t, "touint64", hwuint64, touint64sw, h) testu(t, "fromint64", hwint64, fromint64sw, h) testcmp(t, f, h) testcmp(t, h, f) @@ -191,7 +163,6 @@ func testu(t *testing.T, op string, hw, sw func(float64) float64, v float64) { h := hw(v) s := sw(v) if !same(h, s) { - s = sw(v) // debug me err(t, "%s %g = sw %g, hw %g\n", op, v, s, h) } } diff --git a/test/convert5.go b/test/convert5.go index 27aa7867f42824..57585ef76e1673 100644 --- a/test/convert5.go +++ b/test/convert5.go @@ -62,8 +62,6 @@ func main() { p64_plus4k_plus1 := id(float64(p64 + 4096 + 1)) // want this to be precise and fit in 53 bits mantissa n32_minus4k := id(float32(n32 - 4096)) n64_minus4k := id(float64(n64 - 4096)) - n32_plus4k := id(float32(n32 + 4096)) - n64_plus4k := id(float64(n64 + 4096)) inf_32 := id(float32(one / 0)) inf_64 := id(float64(one / 0)) ninf_32 := id(float32(-one / 0)) @@ -81,7 +79,6 @@ func main() { {"p64_plus4k_plus1", p64_plus4k_plus1, p32}, {"n32_minus4k", n32_minus4k, n32}, {"n64_minus4k", n64_minus4k, n32}, - {"n32_plus4k", n32_plus4k, n32 + 4096}, {"inf_32", inf_32, p32}, {"inf_64", inf_64, p32}, {"ninf_32", ninf_32, n32}, @@ -111,8 +108,6 @@ func main() { {"p64_plus4k_plus1", p64_plus4k_plus1, p64}, {"n32_minus4k", n32_minus4k, n32 - 4096}, {"n64_minus4k", n64_minus4k, n64}, - {"n32_plus4k", n32_plus4k, n32 + 4096}, - {"n64_plus4k", n64_plus4k, n64 + 4096}, {"inf_32", inf_32, p64}, {"inf_64", inf_64, p64}, {"ninf_32", ninf_32, n64}, From 6d5b13793f51c77e7ea730ca140d5c1cb583af92 Mon Sep 17 00:00:00 2001 From: David Chase Date: Tue, 14 Oct 2025 12:13:14 -0700 Subject: [PATCH 144/152] Revert "cmd/compile: make 386 float-to-int conversions match amd64" This reverts commit 78d75b37992be01326b9bd2666195aaba9bf2ae2. Reason for revert: we need to do this more carefully, at minimum gated by a module version (This should follow the softfloat FP conversion revert) Change-Id: I736bec6cd860285dcc3b11fac85b377a149435c3 Reviewed-on: https://go-review.googlesource.com/c/go/+/711842 LUCI-TryBot-Result: Go LUCI Reviewed-by: Keith Randall Reviewed-by: Keith Randall --- src/cmd/compile/internal/ssa/_gen/386.rules | 6 +- src/cmd/compile/internal/ssa/_gen/386Ops.go | 2 - src/cmd/compile/internal/ssa/opGen.go | 13 --- src/cmd/compile/internal/ssa/rewrite386.go | 99 +-------------------- src/cmd/compile/internal/x86/ssa.go | 7 -- src/runtime/asm_386.s | 10 +++ src/runtime/stubs_386.go | 1 + src/runtime/vlrt.go | 63 +------------ test/convert5.go | 4 +- 9 files changed, 23 insertions(+), 182 deletions(-) diff --git a/src/cmd/compile/internal/ssa/_gen/386.rules b/src/cmd/compile/internal/ssa/_gen/386.rules index 4e3d3203c79041..5f1150241929eb 100644 --- a/src/cmd/compile/internal/ssa/_gen/386.rules +++ b/src/cmd/compile/internal/ssa/_gen/386.rules @@ -88,10 +88,8 @@ (Cvt32to32F ...) => (CVTSL2SS ...) (Cvt32to64F ...) => (CVTSL2SD ...) 
-(Cvt32Fto32 x) && base.ConvertHash.MatchPos(v.Pos, nil) => (XORL y (SARLconst [31] (ANDL y:(CVTTSS2SL x) (NOTL (MOVLf2i x))))) -(Cvt64Fto32 x) && base.ConvertHash.MatchPos(v.Pos, nil) => (XORL y (SARLconst [31] (ANDL y:(CVTTSD2SL x) (NOTL (MOVLf2i (CVTSD2SS x)))))) -(Cvt32Fto32 x) && !base.ConvertHash.MatchPos(v.Pos, nil) => (CVTTSS2SL x) -(Cvt64Fto32 x) && !base.ConvertHash.MatchPos(v.Pos, nil) => (CVTTSD2SL x) +(Cvt32Fto32 ...) => (CVTTSS2SL ...) +(Cvt64Fto32 ...) => (CVTTSD2SL ...) (Cvt32Fto64F ...) => (CVTSS2SD ...) (Cvt64Fto32F ...) => (CVTSD2SS ...) diff --git a/src/cmd/compile/internal/ssa/_gen/386Ops.go b/src/cmd/compile/internal/ssa/_gen/386Ops.go index 86c2f9c8f080f8..60599a33abb587 100644 --- a/src/cmd/compile/internal/ssa/_gen/386Ops.go +++ b/src/cmd/compile/internal/ssa/_gen/386Ops.go @@ -342,8 +342,6 @@ func init() { {name: "MOVWLSX", argLength: 1, reg: gp11, asm: "MOVWLSX"}, // sign extend arg0 from int16 to int32 {name: "MOVWLZX", argLength: 1, reg: gp11, asm: "MOVWLZX"}, // zero extend arg0 from int16 to int32 - {name: "MOVLf2i", argLength: 1, reg: fpgp, typ: "UInt32"}, // move 32 bits from float to int reg, zero extend - {name: "MOVLconst", reg: gp01, asm: "MOVL", typ: "UInt32", aux: "Int32", rematerializeable: true}, // 32 low bits of auxint {name: "CVTTSD2SL", argLength: 1, reg: fpgp, asm: "CVTTSD2SL"}, // convert float64 to int32 diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index ee0eb657dcb14f..9b38e66a23f019 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -499,7 +499,6 @@ const ( Op386MOVBLZX Op386MOVWLSX Op386MOVWLZX - Op386MOVLf2i Op386MOVLconst Op386CVTTSD2SL Op386CVTTSS2SL @@ -5602,18 +5601,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "MOVLf2i", - argLen: 1, - reg: regInfo{ - inputs: []inputInfo{ - {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 - }, - outputs: []outputInfo{ - {0, 239}, // AX CX DX BX BP SI DI - }, - }, - }, { name: "MOVLconst", auxType: auxInt32, diff --git a/src/cmd/compile/internal/ssa/rewrite386.go b/src/cmd/compile/internal/ssa/rewrite386.go index 4845f1e0250ea8..0495438710659e 100644 --- a/src/cmd/compile/internal/ssa/rewrite386.go +++ b/src/cmd/compile/internal/ssa/rewrite386.go @@ -3,7 +3,6 @@ package ssa import "math" -import "cmd/compile/internal/base" import "cmd/compile/internal/types" func rewriteValue386(v *Value) bool { @@ -341,7 +340,8 @@ func rewriteValue386(v *Value) bool { v.Op = Op386BSFL return true case OpCvt32Fto32: - return rewriteValue386_OpCvt32Fto32(v) + v.Op = Op386CVTTSS2SL + return true case OpCvt32Fto64F: v.Op = Op386CVTSS2SD return true @@ -352,7 +352,8 @@ func rewriteValue386(v *Value) bool { v.Op = Op386CVTSL2SD return true case OpCvt64Fto32: - return rewriteValue386_OpCvt64Fto32(v) + v.Op = Op386CVTTSD2SL + return true case OpCvt64Fto32F: v.Op = Op386CVTSD2SS return true @@ -7963,98 +7964,6 @@ func rewriteValue386_OpCtz8(v *Value) bool { return true } } -func rewriteValue386_OpCvt32Fto32(v *Value) bool { - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Cvt32Fto32 x) - // cond: base.ConvertHash.MatchPos(v.Pos, nil) - // result: (XORL y (SARLconst [31] (ANDL y:(CVTTSS2SL x) (NOTL (MOVLf2i x))))) - for { - t := v.Type - x := v_0 - if !(base.ConvertHash.MatchPos(v.Pos, nil)) { - break - } - v.reset(Op386XORL) - v.Type = t - v0 := b.NewValue0(v.Pos, Op386SARLconst, t) - v0.AuxInt = int32ToAuxInt(31) - v1 := b.NewValue0(v.Pos, Op386ANDL, t) - y := b.NewValue0(v.Pos, Op386CVTTSS2SL, t) - 
y.AddArg(x) - v3 := b.NewValue0(v.Pos, Op386NOTL, typ.Int32) - v4 := b.NewValue0(v.Pos, Op386MOVLf2i, typ.UInt32) - v4.AddArg(x) - v3.AddArg(v4) - v1.AddArg2(y, v3) - v0.AddArg(v1) - v.AddArg2(y, v0) - return true - } - // match: (Cvt32Fto32 x) - // cond: !base.ConvertHash.MatchPos(v.Pos, nil) - // result: (CVTTSS2SL x) - for { - t := v.Type - x := v_0 - if !(!base.ConvertHash.MatchPos(v.Pos, nil)) { - break - } - v.reset(Op386CVTTSS2SL) - v.Type = t - v.AddArg(x) - return true - } - return false -} -func rewriteValue386_OpCvt64Fto32(v *Value) bool { - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Cvt64Fto32 x) - // cond: base.ConvertHash.MatchPos(v.Pos, nil) - // result: (XORL y (SARLconst [31] (ANDL y:(CVTTSD2SL x) (NOTL (MOVLf2i (CVTSD2SS x)))))) - for { - t := v.Type - x := v_0 - if !(base.ConvertHash.MatchPos(v.Pos, nil)) { - break - } - v.reset(Op386XORL) - v.Type = t - v0 := b.NewValue0(v.Pos, Op386SARLconst, t) - v0.AuxInt = int32ToAuxInt(31) - v1 := b.NewValue0(v.Pos, Op386ANDL, t) - y := b.NewValue0(v.Pos, Op386CVTTSD2SL, t) - y.AddArg(x) - v3 := b.NewValue0(v.Pos, Op386NOTL, typ.Int32) - v4 := b.NewValue0(v.Pos, Op386MOVLf2i, typ.UInt32) - v5 := b.NewValue0(v.Pos, Op386CVTSD2SS, typ.Float32) - v5.AddArg(x) - v4.AddArg(v5) - v3.AddArg(v4) - v1.AddArg2(y, v3) - v0.AddArg(v1) - v.AddArg2(y, v0) - return true - } - // match: (Cvt64Fto32 x) - // cond: !base.ConvertHash.MatchPos(v.Pos, nil) - // result: (CVTTSD2SL x) - for { - t := v.Type - x := v_0 - if !(!base.ConvertHash.MatchPos(v.Pos, nil)) { - break - } - v.reset(Op386CVTTSD2SL) - v.Type = t - v.AddArg(x) - return true - } - return false -} func rewriteValue386_OpDiv8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] diff --git a/src/cmd/compile/internal/x86/ssa.go b/src/cmd/compile/internal/x86/ssa.go index 2858a81b4b977b..d0aad088496351 100644 --- a/src/cmd/compile/internal/x86/ssa.go +++ b/src/cmd/compile/internal/x86/ssa.go @@ -538,13 +538,6 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p.To.Type = obj.TYPE_MEM p.To.Reg = v.Args[0].Reg() ssagen.AddAux(&p.To, v) - case ssa.Op386MOVLf2i: - var p *obj.Prog - p = s.Prog(x86.AMOVL) - p.From.Type = obj.TYPE_REG - p.From.Reg = v.Args[0].Reg() - p.To.Type = obj.TYPE_REG - p.To.Reg = v.Reg() case ssa.Op386ADDLconstmodify: sc := v.AuxValAndOff() val := sc.Val() diff --git a/src/runtime/asm_386.s b/src/runtime/asm_386.s index 2a6de64f9fbdb3..df32e90fda8416 100644 --- a/src/runtime/asm_386.s +++ b/src/runtime/asm_386.s @@ -1407,6 +1407,16 @@ TEXT runtime·uint32tofloat64(SB),NOSPLIT,$8-12 FMOVDP F0, ret+4(FP) RET +TEXT runtime·float64touint32(SB),NOSPLIT,$12-12 + FMOVD a+0(FP), F0 + FSTCW 0(SP) + FLDCW runtime·controlWord64trunc(SB) + FMOVVP F0, 4(SP) + FLDCW 0(SP) + MOVL 4(SP), AX + MOVL AX, ret+8(FP) + RET + // gcWriteBarrier informs the GC about heap pointer writes. // // gcWriteBarrier returns space in a write barrier buffer which diff --git a/src/runtime/stubs_386.go b/src/runtime/stubs_386.go index 4f3dcd4fd9b250..a1dd023974a0c6 100644 --- a/src/runtime/stubs_386.go +++ b/src/runtime/stubs_386.go @@ -6,6 +6,7 @@ package runtime import "unsafe" +func float64touint32(a float64) uint32 func uint32tofloat64(a uint32) float64 // stackcheck checks that SP is in range [g->stack.lo, g->stack.hi). 
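The 386 runtime code restored below returns to the earlier, non-saturating handling of out-of-range inputs, in line with the softfloat revert above. Code that needs one deterministic answer on every platform can clamp before converting; a small, hypothetical helper (not part of this CL) illustrates the idea:

package convutil

import "math"

// ToUint32Sat converts f to uint32, mapping NaN and out-of-range inputs
// to 0 or math.MaxUint32 instead of relying on the implementation-defined
// result of a direct conversion.
func ToUint32Sat(f float64) uint32 {
	switch {
	case math.IsNaN(f) || f <= 0:
		return 0
	case f >= math.MaxUint32:
		return math.MaxUint32
	default:
		return uint32(f)
	}
}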
diff --git a/src/runtime/vlrt.go b/src/runtime/vlrt.go index 511eb0dd4edf40..4b12f593c8a8ec 100644 --- a/src/runtime/vlrt.go +++ b/src/runtime/vlrt.go @@ -40,17 +40,10 @@ func float64toint64(d float64) (y uint64) { } func float64touint64(d float64) (y uint64) { - _d2vu(&y, d) + _d2v(&y, d) return } -func float64touint32(a float64) uint32 { - if a >= 0xffffffff { - return 0xffffffff - } - return uint32(float64touint64(a)) -} - func int64tofloat64(y int64) float64 { if y < 0 { return -uint64tofloat64(-uint64(y)) @@ -124,16 +117,12 @@ func _d2v(y *uint64, d float64) { } else { /* v = (hi||lo) << -sh */ sh := uint32(-sh) - if sh <= 10 { + if sh <= 11 { ylo = xlo << sh yhi = xhi<>(32-sh) } else { - if x&sign64 != 0 { - *y = 0x8000000000000000 - } else { - *y = 0x7fffffffffffffff - } - return + /* overflow */ + yhi = uint32(d) /* causes something awful */ } } if x&sign64 != 0 { @@ -147,50 +136,6 @@ func _d2v(y *uint64, d float64) { *y = uint64(yhi)<<32 | uint64(ylo) } -func _d2vu(y *uint64, d float64) { - x := *(*uint64)(unsafe.Pointer(&d)) - if x&sign64 != 0 { - *y = 0 - return - } - - xhi := uint32(x>>32)&0xfffff | 0x100000 - xlo := uint32(x) - sh := 1075 - int32(uint32(x>>52)&0x7ff) - - var ylo, yhi uint32 - if sh >= 0 { - sh := uint32(sh) - /* v = (hi||lo) >> sh */ - if sh < 32 { - if sh == 0 { - ylo = xlo - yhi = xhi - } else { - ylo = xlo>>sh | xhi<<(32-sh) - yhi = xhi >> sh - } - } else { - if sh == 32 { - ylo = xhi - } else if sh < 64 { - ylo = xhi >> (sh - 32) - } - } - } else { - /* v = (hi||lo) << -sh */ - sh := uint32(-sh) - if sh <= 11 { - ylo = xlo << sh - yhi = xhi<>(32-sh) - } else { - /* overflow */ - *y = 0xffffffffffffffff - return - } - } - *y = uint64(yhi)<<32 | uint64(ylo) -} func uint64div(n, d uint64) uint64 { // Check for 32 bit operands if uint32(n>>32) == 0 && uint32(d>>32) == 0 { diff --git a/test/convert5.go b/test/convert5.go index 57585ef76e1673..1bd74abdad0715 100644 --- a/test/convert5.go +++ b/test/convert5.go @@ -4,9 +4,9 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !wasm +//go:build !wasm && !386 && !arm && !mips -// TODO fix this to work for wasm +// TODO fix this to work for wasm and 32-bit architectures. // Doing more than this, however, expands the change. package main From 7056c71d320bea2e1cafbb4f8863513ed50a4256 Mon Sep 17 00:00:00 2001 From: David Chase Date: Tue, 14 Oct 2025 16:18:49 -0400 Subject: [PATCH 145/152] cmd/compile: disable use of new saturating float-to-int conversions The new conversions can be activated (or bisected) with -gcflags=all=-d=converthash=PATTERN where PATTERN is either a hash string or n, qn, y, qy for no, quietly no, yes, quietly yes. This CL makes the default pattern be "qn" instead of the default-default which is an efficient encoding of "qy". 
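As a usage sketch for the gate described above (the flag spelling comes from this description; the ./... package pattern is only a placeholder):

# Re-enable the new saturating conversions for a whole build:
go build -gcflags=all=-d=converthash=qy ./...

# Quietly keep them disabled, the default this CL establishes:
go test -gcflags=all=-d=converthash=qn ./...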
Updates #75834 Change-Id: I88a9fd7880bc999132420c8d0a22a8fdc1e95a2a Reviewed-on: https://go-review.googlesource.com/c/go/+/711845 Reviewed-by: Cherry Mui TryBot-Bypass: David Chase --- src/cmd/compile/internal/base/flag.go | 3 +++ test/codegen/math.go | 2 +- test/convert5.go | 2 +- 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/src/cmd/compile/internal/base/flag.go b/src/cmd/compile/internal/base/flag.go index 1ac2cecc61ec7e..1d211e0a2dd9f4 100644 --- a/src/cmd/compile/internal/base/flag.go +++ b/src/cmd/compile/internal/base/flag.go @@ -264,6 +264,9 @@ func ParseFlags() { if Debug.Converthash != "" { ConvertHash = NewHashDebug("converthash", Debug.Converthash, nil) + } else { + // quietly disable the convert hash changes + ConvertHash = NewHashDebug("converthash", "qn", nil) } if Debug.Fmahash != "" { FmaHash = NewHashDebug("fmahash", Debug.Fmahash, nil) diff --git a/test/codegen/math.go b/test/codegen/math.go index 5787657d2bc3df..9ef881a9afc2f5 100644 --- a/test/codegen/math.go +++ b/test/codegen/math.go @@ -1,4 +1,4 @@ -// asmcheck +// asmcheck -gcflags=-d=converthash=qy // Copyright 2018 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style diff --git a/test/convert5.go b/test/convert5.go index 1bd74abdad0715..df247ca0b9b238 100644 --- a/test/convert5.go +++ b/test/convert5.go @@ -1,4 +1,4 @@ -// run +// run -gcflags=-d=converthash=qy // Copyright 2020 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style From 36086e85f842e8ed2c03be2542a6cc211603abbc Mon Sep 17 00:00:00 2001 From: Ian Alexander Date: Thu, 2 Oct 2025 11:15:34 -0400 Subject: [PATCH 146/152] cmd/go: create temporary cleanup script This is a large series of sed commands to cleanup after successful use of the `rf inject` command. This script will be used to refactor the codebase to eliminate global state within the module loader. Once that effort is complete, this script will be removed. This commit is part of the overall effort to eliminate global modloader state. Change-Id: If04926b5ca5b7230f91ac98fe4a82c20ef5f73ee Reviewed-on: https://go-review.googlesource.com/c/go/+/709978 Reviewed-by: Michael Matloob TryBot-Bypass: Ian Alexander Commit-Queue: Ian Alexander Reviewed-by: Michael Pratt --- src/cmd/go/internal/rf-cleanup.zsh | 43 ++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) create mode 100755 src/cmd/go/internal/rf-cleanup.zsh diff --git a/src/cmd/go/internal/rf-cleanup.zsh b/src/cmd/go/internal/rf-cleanup.zsh new file mode 100755 index 00000000000000..c805db56e3611e --- /dev/null +++ b/src/cmd/go/internal/rf-cleanup.zsh @@ -0,0 +1,43 @@ +#!/usr/bin/env zsh + +set -eu -o pipefail + +# This is a large series of sed commands to cleanup after successful use of the +# `rf inject` command. This script will be used to refactor the codebase to +# eliminate global state within the module loader. Once that effort is +# complete, this script will be removed. + +find . -name '*.go' -exec \ + sed -i ' + # + # CompileAction does not use loaderstate. + # + s/CompileAction(loaderstate[^ ]* \*modload.State, /CompileAction(/g + s/CompileAction(modload.LoaderState[^,]*, /CompileAction(/g + s/CompileAction(loaderstate[^,]*, /CompileAction(/g + # + # cgoAction does not use loaderstate. + # + s/cgoAction(loaderstate \*modload\.State, /cgoAction(/g + s/cgoAction(loaderstate, /cgoAction(/g + s/cgoAction(loaderstate_, /cgoAction(/g + # + # Remove redundant mentions of LoaderState from function call sites. 
+ # + s/(modload\.LoaderState_*, loaderstate,/(loaderstate,/g + s/(modload\.LoaderState_*, moduleLoaderState,/(moduleLoaderState,/g + s/(modload\.LoaderState_*, modload\.LoaderState/(modload.LoaderState/g + s/(modload\.LoaderState_*, loaderstate,/(loaderstate,/g + s/(modload\.LoaderState_*, moduleLoaderState,/(moduleLoaderState,/g + s/(modload\.LoaderState_*, modload\.LoaderState,/(modload.LoaderState,/g + s/(loaderstate_* \*modload.State, loaderstate \*modload.State/(loaderstate *modload.State/g + s/(loaderstate_* \*State, loaderstate \*State/(loaderstate *State/g + s/(loaderstate_*, loaderstate,/(loaderstate,/g + s/(LoaderState_*, loaderstate,/(loaderstate,/g + s/(LoaderState_*, loaderState,/(loaderState,/g + s/(LoaderState_*, LoaderState,/(LoaderState,/g + s/(LoaderState_*, LoaderState,/(LoaderState,/g + s/(moduleLoaderState_*, loaderstate,/(loaderstate,/g + s/(moduleLoaderState_*, moduleLoaderState,/(moduleLoaderState,/g + ' {} \; + From 51134968050270a08d6a2456d0ea72c8a99b6e96 Mon Sep 17 00:00:00 2001 From: matloob Date: Wed, 15 Oct 2025 11:23:59 -0400 Subject: [PATCH 147/152] runtime/pprof: skip flaky test TestProfilerStackDepth/heap for now The test has been causing a lot of flakes on the builders. Skip it while I'm debugging it. For #74029 Change-Id: I6a6a696450c23f65bc310a2d0ab61b22dba88f00 Reviewed-on: https://go-review.googlesource.com/c/go/+/712060 TryBot-Bypass: Michael Matloob Reviewed-by: Michael Matloob Reviewed-by: Michael Knyszek --- src/runtime/pprof/pprof_test.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/runtime/pprof/pprof_test.go b/src/runtime/pprof/pprof_test.go index 6e6f4313a83dfa..23d3cf585e59f1 100644 --- a/src/runtime/pprof/pprof_test.go +++ b/src/runtime/pprof/pprof_test.go @@ -2549,6 +2549,9 @@ func TestProfilerStackDepth(t *testing.T) { for _, test := range tests { t.Run(test.profiler, func(t *testing.T) { + if test.profiler == "heap" { + testenv.SkipFlaky(t, 74029) + } var buf bytes.Buffer if err := Lookup(test.profiler).WriteTo(&buf, 0); err != nil { t.Fatalf("failed to write heap profile: %v", err) From 5b29875c8eb1002c4509eb9ebd9a4d32cfd7d494 Mon Sep 17 00:00:00 2001 From: Ian Alexander Date: Wed, 20 Aug 2025 21:59:08 -0400 Subject: [PATCH 148/152] cmd/go: inject State parameter into `run.runRun` This command modifies the call tree starting at `run.runRun` to inject a `State` parameter to every function that is currently using the global `modload.LoaderState` variable. By explicilty passing a `State` parameter, we can begin to eliminate the usage of the global `modload.LoaderState`. This commit is part of the overall effort to eliminate global modloader state. [git-generate] cd src/cmd/go/internal/run rf 'inject modload.LoaderState runRun' cd .. 
./rf-cleanup.zsh Change-Id: I337323c087ed4e43af28973fad27152791eefbc2 Reviewed-on: https://go-review.googlesource.com/c/go/+/698063 TryBot-Bypass: Ian Alexander Reviewed-by: Michael Matloob Reviewed-by: Michael Matloob --- src/cmd/go/internal/bug/bug.go | 3 +- src/cmd/go/internal/clean/clean.go | 6 +- src/cmd/go/internal/envcmd/env.go | 12 +- src/cmd/go/internal/fmtcmd/fmt.go | 6 +- src/cmd/go/internal/generate/generate.go | 6 +- src/cmd/go/internal/list/list.go | 18 +- src/cmd/go/internal/load/flag.go | 3 +- src/cmd/go/internal/load/godebug.go | 8 +- src/cmd/go/internal/load/pkg.go | 118 +++--- src/cmd/go/internal/load/search.go | 10 +- src/cmd/go/internal/load/test.go | 11 +- src/cmd/go/internal/modcmd/download.go | 18 +- src/cmd/go/internal/modcmd/graph.go | 2 +- src/cmd/go/internal/modcmd/tidy.go | 2 +- src/cmd/go/internal/modcmd/vendor.go | 22 +- src/cmd/go/internal/modcmd/verify.go | 2 +- src/cmd/go/internal/modcmd/why.go | 8 +- .../modfetch/zip_sum_test/zip_sum_test.go | 2 +- src/cmd/go/internal/modget/get.go | 46 +-- src/cmd/go/internal/modget/query.go | 2 +- src/cmd/go/internal/modload/build.go | 56 +-- src/cmd/go/internal/modload/buildlist.go | 200 +++++----- src/cmd/go/internal/modload/edit.go | 30 +- src/cmd/go/internal/modload/import.go | 68 ++-- src/cmd/go/internal/modload/import_test.go | 4 +- src/cmd/go/internal/modload/init.go | 342 +++++++++--------- src/cmd/go/internal/modload/list.go | 32 +- src/cmd/go/internal/modload/load.go | 268 +++++++------- src/cmd/go/internal/modload/modfile.go | 102 +++--- src/cmd/go/internal/modload/mvs.go | 14 +- src/cmd/go/internal/modload/query.go | 72 ++-- src/cmd/go/internal/modload/query_test.go | 2 +- src/cmd/go/internal/modload/search.go | 44 +-- src/cmd/go/internal/modload/vendor.go | 24 +- src/cmd/go/internal/run/run.go | 18 +- .../internal/telemetrystats/telemetrystats.go | 2 +- src/cmd/go/internal/test/test.go | 16 +- src/cmd/go/internal/tool/tool.go | 14 +- src/cmd/go/internal/toolchain/select.go | 8 +- src/cmd/go/internal/vet/vet.go | 6 +- src/cmd/go/internal/work/action.go | 27 +- src/cmd/go/internal/work/build.go | 26 +- src/cmd/go/internal/work/exec.go | 16 +- src/cmd/go/internal/work/init.go | 4 +- src/cmd/go/internal/workcmd/edit.go | 4 +- src/cmd/go/internal/workcmd/init.go | 4 +- src/cmd/go/internal/workcmd/sync.go | 14 +- src/cmd/go/internal/workcmd/use.go | 4 +- src/cmd/go/internal/workcmd/vendor.go | 4 +- 49 files changed, 867 insertions(+), 863 deletions(-) diff --git a/src/cmd/go/internal/bug/bug.go b/src/cmd/go/internal/bug/bug.go index 4ff45d2d888c96..4e9ae1e9b499ca 100644 --- a/src/cmd/go/internal/bug/bug.go +++ b/src/cmd/go/internal/bug/bug.go @@ -21,6 +21,7 @@ import ( "cmd/go/internal/base" "cmd/go/internal/cfg" "cmd/go/internal/envcmd" + "cmd/go/internal/modload" "cmd/go/internal/web" "cmd/go/internal/work" ) @@ -44,7 +45,7 @@ func runBug(ctx context.Context, cmd *base.Command, args []string) { if len(args) > 0 { base.Fatalf("go: bug takes no arguments") } - work.BuildInit() + work.BuildInit(modload.LoaderState) var buf strings.Builder buf.WriteString(bugHeader) diff --git a/src/cmd/go/internal/clean/clean.go b/src/cmd/go/internal/clean/clean.go index c6f311e0263af8..1c05977de554f8 100644 --- a/src/cmd/go/internal/clean/clean.go +++ b/src/cmd/go/internal/clean/clean.go @@ -120,7 +120,7 @@ func init() { } func runClean(ctx context.Context, cmd *base.Command, args []string) { - modload.InitWorkfile() + modload.InitWorkfile(modload.LoaderState) if len(args) > 0 { cacheFlag := "" switch { @@ -142,13 +142,13 @@ func 
runClean(ctx context.Context, cmd *base.Command, args []string) { // either the flags and arguments explicitly imply a package, // or no other target (such as a cache) was requested to be cleaned. cleanPkg := len(args) > 0 || cleanI || cleanR - if (!modload.Enabled() || modload.HasModRoot()) && + if (!modload.Enabled(modload.LoaderState) || modload.HasModRoot(modload.LoaderState)) && !cleanCache && !cleanModcache && !cleanTestcache && !cleanFuzzcache { cleanPkg = true } if cleanPkg { - for _, pkg := range load.PackagesAndErrors(ctx, load.PackageOpts{}, args) { + for _, pkg := range load.PackagesAndErrors(modload.LoaderState, ctx, load.PackageOpts{}, args) { clean(pkg) } } diff --git a/src/cmd/go/internal/envcmd/env.go b/src/cmd/go/internal/envcmd/env.go index 6ad6954dd52125..13708ae170c1d8 100644 --- a/src/cmd/go/internal/envcmd/env.go +++ b/src/cmd/go/internal/envcmd/env.go @@ -191,14 +191,14 @@ func findEnv(env []cfg.EnvVar, name string) string { // ExtraEnvVars returns environment variables that should not leak into child processes. func ExtraEnvVars() []cfg.EnvVar { gomod := "" - modload.Init() - if modload.HasModRoot() { + modload.Init(modload.LoaderState) + if modload.HasModRoot(modload.LoaderState) { gomod = modload.ModFilePath() - } else if modload.Enabled() { + } else if modload.Enabled(modload.LoaderState) { gomod = os.DevNull } - modload.InitWorkfile() - gowork := modload.WorkFilePath() + modload.InitWorkfile(modload.LoaderState) + gowork := modload.WorkFilePath(modload.LoaderState) // As a special case, if a user set off explicitly, report that in GOWORK. if cfg.Getenv("GOWORK") == "off" { gowork = "off" @@ -336,7 +336,7 @@ func runEnv(ctx context.Context, cmd *base.Command, args []string) { } } if needCostly { - work.BuildInit() + work.BuildInit(modload.LoaderState) env = append(env, ExtraEnvVarsCostly()...) } diff --git a/src/cmd/go/internal/fmtcmd/fmt.go b/src/cmd/go/internal/fmtcmd/fmt.go index 83fba9661a66fc..a42e7753050356 100644 --- a/src/cmd/go/internal/fmtcmd/fmt.go +++ b/src/cmd/go/internal/fmtcmd/fmt.go @@ -59,8 +59,8 @@ func runFmt(ctx context.Context, cmd *base.Command, args []string) { baseGofmtArgs := len(gofmtArgs) baseGofmtArgLen := gofmtArgLen - for _, pkg := range load.PackagesAndErrors(ctx, load.PackageOpts{}, args) { - if modload.Enabled() && pkg.Module != nil && !pkg.Module.Main { + for _, pkg := range load.PackagesAndErrors(modload.LoaderState, ctx, load.PackageOpts{}, args) { + if modload.Enabled(modload.LoaderState) && pkg.Module != nil && !pkg.Module.Main { if !printed { fmt.Fprintf(os.Stderr, "go: not formatting packages in dependency modules\n") printed = true @@ -70,7 +70,7 @@ func runFmt(ctx context.Context, cmd *base.Command, args []string) { if pkg.Error != nil { if _, ok := errors.AsType[*load.NoGoError](pkg.Error); ok { // Skip this error, as we will format all files regardless. - } else if _, ok := errors.AsType[*load.EmbedError](pkg.Error); ok && len(pkg.InternalAllGoFiles()) > 0 { + } else if _, ok := errors.AsType[*load.EmbedError](pkg.Error); ok && len(pkg.InternalAllGoFiles()) > 0 { // Skip this error, as we will format all files regardless. 
} else { base.Errorf("%v", pkg.Error) diff --git a/src/cmd/go/internal/generate/generate.go b/src/cmd/go/internal/generate/generate.go index 0f4b4a972e9107..4250916b8d09d6 100644 --- a/src/cmd/go/internal/generate/generate.go +++ b/src/cmd/go/internal/generate/generate.go @@ -182,7 +182,7 @@ func init() { } func runGenerate(ctx context.Context, cmd *base.Command, args []string) { - modload.InitWorkfile() + modload.InitWorkfile(modload.LoaderState) if generateRunFlag != "" { var err error @@ -204,8 +204,8 @@ func runGenerate(ctx context.Context, cmd *base.Command, args []string) { // Even if the arguments are .go files, this loop suffices. printed := false pkgOpts := load.PackageOpts{IgnoreImports: true} - for _, pkg := range load.PackagesAndErrors(ctx, pkgOpts, args) { - if modload.Enabled() && pkg.Module != nil && !pkg.Module.Main { + for _, pkg := range load.PackagesAndErrors(modload.LoaderState, ctx, pkgOpts, args) { + if modload.Enabled(modload.LoaderState) && pkg.Module != nil && !pkg.Module.Main { if !printed { fmt.Fprintf(os.Stderr, "go: not generating in packages in dependency modules\n") printed = true diff --git a/src/cmd/go/internal/list/list.go b/src/cmd/go/internal/list/list.go index bee7dc8053ee6f..0bf86ae004a26e 100644 --- a/src/cmd/go/internal/list/list.go +++ b/src/cmd/go/internal/list/list.go @@ -419,7 +419,7 @@ func (v *jsonFlag) needAny(fields ...string) bool { var nl = []byte{'\n'} func runList(ctx context.Context, cmd *base.Command, args []string) { - modload.InitWorkfile() + modload.InitWorkfile(modload.LoaderState) if *listFmt != "" && listJson { base.Fatalf("go list -f cannot be used with -json") @@ -427,11 +427,11 @@ func runList(ctx context.Context, cmd *base.Command, args []string) { if *listReuse != "" && !*listM { base.Fatalf("go list -reuse cannot be used without -m") } - if *listReuse != "" && modload.HasModRoot() { + if *listReuse != "" && modload.HasModRoot(modload.LoaderState) { base.Fatalf("go list -reuse cannot be used inside a module") } - work.BuildInit() + work.BuildInit(modload.LoaderState) out := newTrackingWriter(os.Stdout) defer out.w.Flush() @@ -496,12 +496,12 @@ func runList(ctx context.Context, cmd *base.Command, args []string) { } } - modload.Init() + modload.Init(modload.LoaderState) if *listRetracted { if cfg.BuildMod == "vendor" { base.Fatalf("go list -retracted cannot be used when vendoring is enabled") } - if !modload.Enabled() { + if !modload.Enabled(modload.LoaderState) { base.Fatalf("go list -retracted can only be used in module-aware mode") } } @@ -525,11 +525,11 @@ func runList(ctx context.Context, cmd *base.Command, args []string) { base.Fatalf("go list -test cannot be used with -m") } - if modload.Init(); !modload.Enabled() { + if modload.Init(modload.LoaderState); !modload.Enabled(modload.LoaderState) { base.Fatalf("go: list -m cannot be used with GO111MODULE=off") } - modload.LoadModFile(ctx) // Sets cfg.BuildMod as a side-effect. + modload.LoadModFile(modload.LoaderState, ctx) // Sets cfg.BuildMod as a side-effect. 
if cfg.BuildMod == "vendor" { const actionDisabledFormat = "go: can't %s using the vendor directory\n\t(Use -mod=mod or -mod=readonly to bypass.)" @@ -613,7 +613,7 @@ func runList(ctx context.Context, cmd *base.Command, args []string) { SuppressBuildInfo: !*listExport && !listJsonFields.needAny("Stale", "StaleReason"), SuppressEmbedFiles: !*listExport && !listJsonFields.needAny("EmbedFiles", "TestEmbedFiles", "XTestEmbedFiles"), } - pkgs := load.PackagesAndErrors(ctx, pkgOpts, args) + pkgs := load.PackagesAndErrors(modload.LoaderState, ctx, pkgOpts, args) if !*listE { w := 0 for _, pkg := range pkgs { @@ -727,7 +727,7 @@ func runList(ctx context.Context, cmd *base.Command, args []string) { b.NeedExport = *listExport b.NeedCompiledGoFiles = *listCompiled if cfg.BuildCover { - load.PrepareForCoverageBuild(pkgs) + load.PrepareForCoverageBuild(modload.LoaderState, pkgs) } a := &work.Action{} // TODO: Use pkgsFilter? diff --git a/src/cmd/go/internal/load/flag.go b/src/cmd/go/internal/load/flag.go index 55bdab013505ab..86a922bc103a82 100644 --- a/src/cmd/go/internal/load/flag.go +++ b/src/cmd/go/internal/load/flag.go @@ -6,6 +6,7 @@ package load import ( "cmd/go/internal/base" + "cmd/go/internal/modload" "cmd/internal/quoted" "fmt" "strings" @@ -63,7 +64,7 @@ func (f *PerPackageFlag) set(v, cwd string) error { return fmt.Errorf("parameter may not start with quote character %c", v[0]) } pattern := strings.TrimSpace(v[:i]) - match = MatchPackage(pattern, cwd) + match = MatchPackage(modload.LoaderState, pattern, cwd) v = v[i+1:] } flags, err := quoted.Split(v) diff --git a/src/cmd/go/internal/load/godebug.go b/src/cmd/go/internal/load/godebug.go index c795d42f117f0e..817cc4faebf7b4 100644 --- a/src/cmd/go/internal/load/godebug.go +++ b/src/cmd/go/internal/load/godebug.go @@ -45,12 +45,12 @@ func ParseGoDebug(text string) (key, value string, err error) { // defaultGODEBUG returns the default GODEBUG setting for the main package p. // When building a test binary, directives, testDirectives, and xtestDirectives // list additional directives from the package under test. -func defaultGODEBUG(p *Package, directives, testDirectives, xtestDirectives []build.Directive) string { +func defaultGODEBUG(loaderstate *modload.State, p *Package, directives, testDirectives, xtestDirectives []build.Directive) string { if p.Name != "main" { return "" } - goVersion := modload.LoaderState.MainModules.GoVersion() - if modload.LoaderState.RootMode == modload.NoRoot && p.Module != nil { + goVersion := loaderstate.MainModules.GoVersion(loaderstate) + if loaderstate.RootMode == modload.NoRoot && p.Module != nil { // This is go install pkg@version or go run pkg@version. // Use the Go version from the package. // If there isn't one, then assume Go 1.20, @@ -73,7 +73,7 @@ func defaultGODEBUG(p *Package, directives, testDirectives, xtestDirectives []bu } // Add directives from main module go.mod. 
- for _, g := range modload.LoaderState.MainModules.Godebugs() { + for _, g := range loaderstate.MainModules.Godebugs(loaderstate) { if m == nil { m = make(map[string]string) } diff --git a/src/cmd/go/internal/load/pkg.go b/src/cmd/go/internal/load/pkg.go index a894affc844066..cfaece2072dbb9 100644 --- a/src/cmd/go/internal/load/pkg.go +++ b/src/cmd/go/internal/load/pkg.go @@ -686,8 +686,8 @@ const ( ) // LoadPackage does Load import, but without a parent package load context -func LoadPackage(ctx context.Context, opts PackageOpts, path, srcDir string, stk *ImportStack, importPos []token.Position, mode int) *Package { - p, err := loadImport(ctx, opts, nil, path, srcDir, nil, stk, importPos, mode) +func LoadPackage(loaderstate *modload.State, ctx context.Context, opts PackageOpts, path, srcDir string, stk *ImportStack, importPos []token.Position, mode int) *Package { + p, err := loadImport(loaderstate, ctx, opts, nil, path, srcDir, nil, stk, importPos, mode) if err != nil { base.Fatalf("internal error: loadImport of %q with nil parent returned an error", path) } @@ -703,7 +703,7 @@ func LoadPackage(ctx context.Context, opts PackageOpts, path, srcDir string, stk // The returned PackageError, if any, describes why parent is not allowed // to import the named package, with the error referring to importPos. // The PackageError can only be non-nil when parent is not nil. -func loadImport(ctx context.Context, opts PackageOpts, pre *preload, path, srcDir string, parent *Package, stk *ImportStack, importPos []token.Position, mode int) (*Package, *PackageError) { +func loadImport(loaderstate *modload.State, ctx context.Context, opts PackageOpts, pre *preload, path, srcDir string, parent *Package, stk *ImportStack, importPos []token.Position, mode int) (*Package, *PackageError) { ctx, span := trace.StartSpan(ctx, "modload.loadImport "+path) defer span.Done() @@ -718,9 +718,9 @@ func loadImport(ctx context.Context, opts PackageOpts, pre *preload, path, srcDi parentRoot = parent.Root parentIsStd = parent.Standard } - bp, loaded, err := loadPackageData(ctx, path, parentPath, srcDir, parentRoot, parentIsStd, mode) + bp, loaded, err := loadPackageData(loaderstate, ctx, path, parentPath, srcDir, parentRoot, parentIsStd, mode) if loaded && pre != nil && !opts.IgnoreImports { - pre.preloadImports(ctx, opts, bp.Imports, bp) + pre.preloadImports(loaderstate, ctx, opts, bp.Imports, bp) } if bp == nil { p := &Package{ @@ -771,7 +771,7 @@ func loadImport(ctx context.Context, opts PackageOpts, pre *preload, path, srcDi // Load package. // loadPackageData may return bp != nil even if an error occurs, // in order to return partial information. - p.load(ctx, opts, path, stk, importPos, bp, err) + p.load(loaderstate, ctx, opts, path, stk, importPos, bp, err) if !cfg.ModulesEnabled && path != cleanImport(path) { p.Error = &PackageError{ @@ -784,7 +784,7 @@ func loadImport(ctx context.Context, opts PackageOpts, pre *preload, path, srcDi } // Checked on every import because the rules depend on the code doing the importing. - if perr := disallowInternal(ctx, srcDir, parent, parentPath, p, stk); perr != nil { + if perr := disallowInternal(loaderstate, ctx, srcDir, parent, parentPath, p, stk); perr != nil { perr.setPos(importPos) return p, perr } @@ -838,7 +838,7 @@ func extractFirstImport(importPos []token.Position) *token.Position { // // loadPackageData returns a boolean, loaded, which is true if this is the // first time the package was loaded. Callers may preload imports in this case. 
-func loadPackageData(ctx context.Context, path, parentPath, parentDir, parentRoot string, parentIsStd bool, mode int) (bp *build.Package, loaded bool, err error) { +func loadPackageData(loaderstate *modload.State, ctx context.Context, path, parentPath, parentDir, parentRoot string, parentIsStd bool, mode int) (bp *build.Package, loaded bool, err error) { ctx, span := trace.StartSpan(ctx, "load.loadPackageData "+path) defer span.Done() @@ -883,7 +883,7 @@ func loadPackageData(ctx context.Context, path, parentPath, parentDir, parentRoo r.path = newPath r.dir = dir } else if cfg.ModulesEnabled { - r.dir, r.path, r.err = modload.Lookup(parentPath, parentIsStd, path) + r.dir, r.path, r.err = modload.Lookup(loaderstate, parentPath, parentIsStd, path) } else if build.IsLocalImport(path) { r.dir = filepath.Join(parentDir, path) r.path = dirToImportPath(r.dir) @@ -892,7 +892,7 @@ func loadPackageData(ctx context.Context, path, parentPath, parentDir, parentRoo // find out the key to use in packageCache without the // overhead of repeated calls to buildContext.Import. // The code is also needed in a few other places anyway. - r.path = resolveImportPath(path, parentPath, parentDir, parentRoot, parentIsStd) + r.path = resolveImportPath(loaderstate, path, parentPath, parentDir, parentRoot, parentIsStd) } else if mode&ResolveModule != 0 { r.path = moduleImportPath(path, parentPath, parentDir, parentRoot) } @@ -921,7 +921,7 @@ func loadPackageData(ctx context.Context, path, parentPath, parentDir, parentRoo } else { buildContext.GOPATH = "" // Clear GOPATH so packages are imported as pure module packages } - modroot := modload.PackageModRoot(ctx, r.path) + modroot := modload.PackageModRoot(loaderstate, ctx, r.path) if modroot == "" && str.HasPathPrefix(r.dir, cfg.GOROOTsrc) { modroot = cfg.GOROOTsrc gorootSrcCmd := filepath.Join(cfg.GOROOTsrc, "cmd") @@ -942,7 +942,7 @@ func loadPackageData(ctx context.Context, path, parentPath, parentDir, parentRoo if cfg.ModulesEnabled { // Override data.p.Root, since ImportDir sets it to $GOPATH, if // the module is inside $GOPATH/src. - if info := modload.PackageModuleInfo(ctx, path); info != nil { + if info := modload.PackageModuleInfo(loaderstate, ctx, path); info != nil { data.p.Root = info.Dir } } @@ -989,7 +989,7 @@ func loadPackageData(ctx context.Context, path, parentPath, parentDir, parentRoo if cfg.GOBIN != "" { data.p.BinDir = cfg.GOBIN } else if cfg.ModulesEnabled { - data.p.BinDir = modload.BinDir() + data.p.BinDir = modload.BinDir(loaderstate) } } @@ -1068,7 +1068,7 @@ func newPreload() *preload { // preloadMatches loads data for package paths matched by patterns. // When preloadMatches returns, some packages may not be loaded yet, but // loadPackageData and loadImport are always safe to call. 
-func (pre *preload) preloadMatches(ctx context.Context, opts PackageOpts, matches []*search.Match) { +func (pre *preload) preloadMatches(loaderstate *modload.State, ctx context.Context, opts PackageOpts, matches []*search.Match) { for _, m := range matches { for _, pkg := range m.Pkgs { select { @@ -1077,10 +1077,10 @@ func (pre *preload) preloadMatches(ctx context.Context, opts PackageOpts, matche case pre.sema <- struct{}{}: go func(pkg string) { mode := 0 // don't use vendoring or module import resolution - bp, loaded, err := loadPackageData(ctx, pkg, "", base.Cwd(), "", false, mode) + bp, loaded, err := loadPackageData(loaderstate, ctx, pkg, "", base.Cwd(), "", false, mode) <-pre.sema if bp != nil && loaded && err == nil && !opts.IgnoreImports { - pre.preloadImports(ctx, opts, bp.Imports, bp) + pre.preloadImports(loaderstate, ctx, opts, bp.Imports, bp) } }(pkg) } @@ -1091,7 +1091,7 @@ func (pre *preload) preloadMatches(ctx context.Context, opts PackageOpts, matche // preloadImports queues a list of imports for preloading. // When preloadImports returns, some packages may not be loaded yet, // but loadPackageData and loadImport are always safe to call. -func (pre *preload) preloadImports(ctx context.Context, opts PackageOpts, imports []string, parent *build.Package) { +func (pre *preload) preloadImports(loaderstate *modload.State, ctx context.Context, opts PackageOpts, imports []string, parent *build.Package) { parentIsStd := parent.Goroot && parent.ImportPath != "" && search.IsStandardImportPath(parent.ImportPath) for _, path := range imports { if path == "C" || path == "unsafe" { @@ -1102,10 +1102,10 @@ func (pre *preload) preloadImports(ctx context.Context, opts PackageOpts, import return case pre.sema <- struct{}{}: go func(path string) { - bp, loaded, err := loadPackageData(ctx, path, parent.ImportPath, parent.Dir, parent.Root, parentIsStd, ResolveImport) + bp, loaded, err := loadPackageData(loaderstate, ctx, path, parent.ImportPath, parent.Dir, parent.Root, parentIsStd, ResolveImport) <-pre.sema if bp != nil && loaded && err == nil && !opts.IgnoreImports { - pre.preloadImports(ctx, opts, bp.Imports, bp) + pre.preloadImports(loaderstate, ctx, opts, bp.Imports, bp) } }(path) } @@ -1160,12 +1160,12 @@ func ResolveImportPath(parent *Package, path string) (found string) { parentRoot = parent.Root parentIsStd = parent.Standard } - return resolveImportPath(path, parentPath, parentDir, parentRoot, parentIsStd) + return resolveImportPath(modload.LoaderState, path, parentPath, parentDir, parentRoot, parentIsStd) } -func resolveImportPath(path, parentPath, parentDir, parentRoot string, parentIsStd bool) (found string) { +func resolveImportPath(loaderstate *modload.State, path, parentPath, parentDir, parentRoot string, parentIsStd bool) (found string) { if cfg.ModulesEnabled { - if _, p, e := modload.Lookup(parentPath, parentIsStd, path); e == nil { + if _, p, e := modload.Lookup(loaderstate, parentPath, parentIsStd, path); e == nil { return p } return path @@ -1463,7 +1463,7 @@ func reusePackage(p *Package, stk *ImportStack) *Package { // is allowed to import p. // If the import is allowed, disallowInternal returns the original package p. // If not, it returns a new package containing just an appropriate error. 
-func disallowInternal(ctx context.Context, srcDir string, importer *Package, importerPath string, p *Package, stk *ImportStack) *PackageError { +func disallowInternal(loaderstate *modload.State, ctx context.Context, srcDir string, importer *Package, importerPath string, p *Package, stk *ImportStack) *PackageError { // golang.org/s/go14internal: // An import of a path containing the element “internal” // is disallowed if the importing code is outside the tree @@ -1552,7 +1552,7 @@ func disallowInternal(ctx context.Context, srcDir string, importer *Package, imp // directory containing them. // If the directory is outside the main modules, this will resolve to ".", // which is not a prefix of any valid module. - importerPath, _ = modload.LoaderState.MainModules.DirImportPath(ctx, importer.Dir) + importerPath, _ = loaderstate.MainModules.DirImportPath(loaderstate, ctx, importer.Dir) } parentOfInternal := p.ImportPath[:i] if str.HasPathPrefix(importerPath, parentOfInternal) { @@ -1771,7 +1771,7 @@ func (p *Package) DefaultExecName() string { // load populates p using information from bp, err, which should // be the result of calling build.Context.Import. // stk contains the import stack, not including path itself. -func (p *Package) load(ctx context.Context, opts PackageOpts, path string, stk *ImportStack, importPos []token.Position, bp *build.Package, err error) { +func (p *Package) load(loaderstate *modload.State, ctx context.Context, opts PackageOpts, path string, stk *ImportStack, importPos []token.Position, bp *build.Package, err error) { p.copyBuild(opts, bp) // The localPrefix is the path we interpret ./ imports relative to, @@ -1835,7 +1835,7 @@ func (p *Package) load(ctx context.Context, opts PackageOpts, path string, stk * elem = full } if p.Internal.Build.BinDir == "" && cfg.ModulesEnabled { - p.Internal.Build.BinDir = modload.BinDir() + p.Internal.Build.BinDir = modload.BinDir(loaderstate) } if p.Internal.Build.BinDir != "" { // Install to GOBIN or bin of GOPATH entry. @@ -1973,9 +1973,9 @@ func (p *Package) load(ctx context.Context, opts PackageOpts, path string, stk * pkgPath = "command-line-arguments" } if cfg.ModulesEnabled { - p.Module = modload.PackageModuleInfo(ctx, pkgPath) + p.Module = modload.PackageModuleInfo(loaderstate, ctx, pkgPath) } - p.DefaultGODEBUG = defaultGODEBUG(p, nil, nil, nil) + p.DefaultGODEBUG = defaultGODEBUG(loaderstate, p, nil, nil, nil) if !opts.SuppressEmbedFiles { p.EmbedFiles, p.Internal.Embed, err = resolveEmbed(p.Dir, p.EmbedPatterns) @@ -2026,7 +2026,7 @@ func (p *Package) load(ctx context.Context, opts PackageOpts, path string, stk * if path == "C" { continue } - p1, err := loadImport(ctx, opts, nil, path, p.Dir, p, stk, p.Internal.Build.ImportPos[path], ResolveImport) + p1, err := loadImport(loaderstate, ctx, opts, nil, path, p.Dir, p, stk, p.Internal.Build.ImportPos[path], ResolveImport) if err != nil && p.Error == nil { p.Error = err p.Incomplete = true @@ -2813,7 +2813,7 @@ func TestPackageList(ctx context.Context, opts PackageOpts, roots []*Package) [] } walkTest := func(root *Package, path string) { var stk ImportStack - p1, err := loadImport(ctx, opts, nil, path, root.Dir, root, &stk, root.Internal.Build.TestImportPos[path], ResolveImport) + p1, err := loadImport(modload.LoaderState, ctx, opts, nil, path, root.Dir, root, &stk, root.Internal.Build.TestImportPos[path], ResolveImport) if err != nil && root.Error == nil { // Assign error importing the package to the importer. 
root.Error = err @@ -2840,16 +2840,16 @@ func TestPackageList(ctx context.Context, opts PackageOpts, roots []*Package) [] // dependencies (like sync/atomic for coverage). // TODO(jayconrod): delete this function and set flags automatically // in LoadImport instead. -func LoadImportWithFlags(path, srcDir string, parent *Package, stk *ImportStack, importPos []token.Position, mode int) (*Package, *PackageError) { - p, err := loadImport(context.TODO(), PackageOpts{}, nil, path, srcDir, parent, stk, importPos, mode) +func LoadImportWithFlags(loaderstate *modload.State, path, srcDir string, parent *Package, stk *ImportStack, importPos []token.Position, mode int) (*Package, *PackageError) { + p, err := loadImport(loaderstate, context.TODO(), PackageOpts{}, nil, path, srcDir, parent, stk, importPos, mode) setToolFlags(p) return p, err } // LoadPackageWithFlags is the same as LoadImportWithFlags but without a parent. // It's then guaranteed to not return an error -func LoadPackageWithFlags(path, srcDir string, stk *ImportStack, importPos []token.Position, mode int) *Package { - p := LoadPackage(context.TODO(), PackageOpts{}, path, srcDir, stk, importPos, mode) +func LoadPackageWithFlags(loaderstate *modload.State, path, srcDir string, stk *ImportStack, importPos []token.Position, mode int) *Package { + p := LoadPackage(loaderstate, context.TODO(), PackageOpts{}, path, srcDir, stk, importPos, mode) setToolFlags(p) return p } @@ -2899,7 +2899,7 @@ type PackageOpts struct { // // To obtain a flat list of packages, use PackageList. // To report errors loading packages, use ReportPackageErrors. -func PackagesAndErrors(ctx context.Context, opts PackageOpts, patterns []string) []*Package { +func PackagesAndErrors(loaderstate *modload.State, ctx context.Context, opts PackageOpts, patterns []string) []*Package { ctx, span := trace.StartSpan(ctx, "load.PackagesAndErrors") defer span.Done() @@ -2911,7 +2911,7 @@ func PackagesAndErrors(ctx context.Context, opts PackageOpts, patterns []string) // We need to test whether the path is an actual Go file and not a // package path or pattern ending in '.go' (see golang.org/issue/34653). if fi, err := fsys.Stat(p); err == nil && !fi.IsDir() { - pkgs := []*Package{GoFilesPackage(ctx, opts, patterns)} + pkgs := []*Package{GoFilesPackage(loaderstate, ctx, opts, patterns)} setPGOProfilePath(pkgs) return pkgs } @@ -2919,13 +2919,13 @@ func PackagesAndErrors(ctx context.Context, opts PackageOpts, patterns []string) } var matches []*search.Match - if modload.Init(); cfg.ModulesEnabled { + if modload.Init(loaderstate); cfg.ModulesEnabled { modOpts := modload.PackageOpts{ ResolveMissingImports: true, LoadTests: opts.ModResolveTests, SilencePackageErrors: true, } - matches, _ = modload.LoadPackages(ctx, modOpts, patterns...) + matches, _ = modload.LoadPackages(loaderstate, ctx, modOpts, patterns...) } else { matches = search.ImportPaths(patterns) } @@ -2938,7 +2938,7 @@ func PackagesAndErrors(ctx context.Context, opts PackageOpts, patterns []string) pre := newPreload() defer pre.flush() - pre.preloadMatches(ctx, opts, matches) + pre.preloadMatches(loaderstate, ctx, opts, matches) for _, m := range matches { for _, pkg := range m.Pkgs { @@ -2952,7 +2952,7 @@ func PackagesAndErrors(ctx context.Context, opts PackageOpts, patterns []string) // a literal and also a non-literal pattern. 
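// The loop above marks command-line packages with bit flags (cmdlinePkg,
// cmdlinePkgLiteral, as seen in the "mode |=" lines of the diff). This is a
// tiny sketch of that flag style; the constant values and names here are
// assumed for illustration, not copied from the real declarations.
package main

import "fmt"

type pkgMode uint8

const (
	cmdlinePkg        pkgMode = 1 << iota // named on the command line
	cmdlinePkgLiteral                     // named by a literal path, not a pattern
)

func main() {
	mode := cmdlinePkg
	isLiteral := true
	if isLiteral {
		mode |= cmdlinePkgLiteral
	}
	fmt.Println(mode&cmdlinePkg != 0, mode&cmdlinePkgLiteral != 0) // true true
}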
mode |= cmdlinePkgLiteral } - p, perr := loadImport(ctx, opts, pre, pkg, base.Cwd(), nil, &stk, nil, mode) + p, perr := loadImport(loaderstate, ctx, opts, pre, pkg, base.Cwd(), nil, &stk, nil, mode) if perr != nil { base.Fatalf("internal error: loadImport of %q with nil parent returned an error", pkg) } @@ -3243,8 +3243,8 @@ func setToolFlags(pkgs ...*Package) { // GoFilesPackage creates a package for building a collection of Go files // (typically named on the command line). The target is named p.a for // package p or named after the first Go file for package main. -func GoFilesPackage(ctx context.Context, opts PackageOpts, gofiles []string) *Package { - modload.Init() +func GoFilesPackage(loaderstate *modload.State, ctx context.Context, opts PackageOpts, gofiles []string) *Package { + modload.Init(loaderstate) for _, f := range gofiles { if !strings.HasSuffix(f, ".go") { @@ -3289,7 +3289,7 @@ func GoFilesPackage(ctx context.Context, opts PackageOpts, gofiles []string) *Pa ctxt.ReadDir = func(string) ([]fs.FileInfo, error) { return dirent, nil } if cfg.ModulesEnabled { - modload.ImportFromFiles(ctx, gofiles) + modload.ImportFromFiles(loaderstate, ctx, gofiles) } var err error @@ -3305,7 +3305,7 @@ func GoFilesPackage(ctx context.Context, opts PackageOpts, gofiles []string) *Pa pkg := new(Package) pkg.Internal.Local = true pkg.Internal.CmdlineFiles = true - pkg.load(ctx, opts, "command-line-arguments", &stk, nil, bp, err) + pkg.load(loaderstate, ctx, opts, "command-line-arguments", &stk, nil, bp, err) if !cfg.ModulesEnabled { pkg.Internal.LocalPrefix = dirToImportPath(dir) } @@ -3319,7 +3319,7 @@ func GoFilesPackage(ctx context.Context, opts PackageOpts, gofiles []string) *Pa if cfg.GOBIN != "" { pkg.Target = filepath.Join(cfg.GOBIN, exe) } else if cfg.ModulesEnabled { - pkg.Target = filepath.Join(modload.BinDir(), exe) + pkg.Target = filepath.Join(modload.BinDir(loaderstate), exe) } } @@ -3347,11 +3347,11 @@ func GoFilesPackage(ctx context.Context, opts PackageOpts, gofiles []string) *Pa // module, but its go.mod file (if it has one) must not contain directives that // would cause it to be interpreted differently if it were the main module // (replace, exclude). 
-func PackagesAndErrorsOutsideModule(ctx context.Context, opts PackageOpts, args []string) ([]*Package, error) { - if !modload.LoaderState.ForceUseModules { +func PackagesAndErrorsOutsideModule(loaderstate *modload.State, ctx context.Context, opts PackageOpts, args []string) ([]*Package, error) { + if !loaderstate.ForceUseModules { panic("modload.ForceUseModules must be true") } - if modload.LoaderState.RootMode != modload.NoRoot { + if loaderstate.RootMode != modload.NoRoot { panic("modload.RootMode must be NoRoot") } @@ -3404,12 +3404,12 @@ func PackagesAndErrorsOutsideModule(ctx context.Context, opts PackageOpts, args allowed = nil } noneSelected := func(path string) (version string) { return "none" } - qrs, err := modload.QueryPackages(ctx, patterns[0], version, noneSelected, allowed) + qrs, err := modload.QueryPackages(loaderstate, ctx, patterns[0], version, noneSelected, allowed) if err != nil { return nil, fmt.Errorf("%s: %w", args[0], err) } rootMod := qrs[0].Mod - deprecation, err := modload.CheckDeprecation(ctx, rootMod) + deprecation, err := modload.CheckDeprecation(loaderstate, ctx, rootMod) if err != nil { return nil, fmt.Errorf("%s: %w", args[0], err) } @@ -3438,12 +3438,12 @@ func PackagesAndErrorsOutsideModule(ctx context.Context, opts PackageOpts, args // Since we are in NoRoot mode, the build list initially contains only // the dummy command-line-arguments module. Add a requirement on the // module that provides the packages named on the command line. - if _, err := modload.EditBuildList(ctx, nil, []module.Version{rootMod}); err != nil { + if _, err := modload.EditBuildList(loaderstate, ctx, nil, []module.Version{rootMod}); err != nil { return nil, fmt.Errorf("%s: %w", args[0], err) } // Load packages for all arguments. - pkgs := PackagesAndErrors(ctx, opts, patterns) + pkgs := PackagesAndErrors(loaderstate, ctx, opts, patterns) // Check that named packages are all provided by the same module. for _, pkg := range pkgs { @@ -3471,14 +3471,14 @@ func PackagesAndErrorsOutsideModule(ctx context.Context, opts PackageOpts, args } // EnsureImport ensures that package p imports the named package. -func EnsureImport(p *Package, pkg string) { +func EnsureImport(loaderstate *modload.State, p *Package, pkg string) { for _, d := range p.Internal.Imports { if d.Name == pkg { return } } - p1, err := LoadImportWithFlags(pkg, p.Dir, p, &ImportStack{}, nil, 0) + p1, err := LoadImportWithFlags(loaderstate, pkg, p.Dir, p, &ImportStack{}, nil, 0) if err != nil { base.Fatalf("load %s: %v", pkg, err) } @@ -3494,7 +3494,7 @@ func EnsureImport(p *Package, pkg string) { // "go test -cover"). It walks through the packages being built (and // dependencies) and marks them for coverage instrumentation when // appropriate, and possibly adding additional deps where needed. -func PrepareForCoverageBuild(pkgs []*Package) { +func PrepareForCoverageBuild(loaderstate *modload.State, pkgs []*Package) { var match []func(*Package) bool matchMainModAndCommandLine := func(p *Package) bool { @@ -3507,7 +3507,7 @@ func PrepareForCoverageBuild(pkgs []*Package) { // the specific packages selected by the user-specified pattern(s). 
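// A sketch, with invented types, of the refactoring pattern running through
// this whole patch: helpers stop reading a package-level singleton and instead
// take the loader state as an explicit first parameter, while the entry points
// pass the (still existing) global. The field and constant names used here
// (ForceUseModules, RootMode, NoRoot) appear in the diff above; everything
// else in this file is illustrative only.
package main

import "fmt"

type RootMode int

const (
	NeedRoot RootMode = iota
	NoRoot
)

// State mimics the shape of the loader state being threaded through cmd/go.
type State struct {
	ForceUseModules bool
	RootMode        RootMode
}

// LoaderState plays the role of modload.LoaderState: a transitional global
// that callers pass explicitly while the refactoring is in progress.
var LoaderState = &State{}

// requireNoRoot is analogous to the precondition checks in
// PackagesAndErrorsOutsideModule: it validates the state it was handed
// rather than reaching for the global.
func requireNoRoot(loaderstate *State) error {
	if !loaderstate.ForceUseModules {
		return fmt.Errorf("ForceUseModules must be true")
	}
	if loaderstate.RootMode != NoRoot {
		return fmt.Errorf("RootMode must be NoRoot")
	}
	return nil
}

func main() {
	LoaderState.ForceUseModules = true
	LoaderState.RootMode = NoRoot
	// The entry point is the only place that mentions the global.
	if err := requireNoRoot(LoaderState); err != nil {
		fmt.Println("precondition failed:", err)
		return
	}
	fmt.Println("preconditions hold; helpers never touch the global directly")
}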
match = make([]func(*Package) bool, len(cfg.BuildCoverPkg)) for i := range cfg.BuildCoverPkg { - match[i] = MatchPackage(cfg.BuildCoverPkg[i], base.Cwd()) + match[i] = MatchPackage(loaderstate, cfg.BuildCoverPkg[i], base.Cwd()) } } else { // Without -coverpkg, instrument only packages in the main module @@ -3519,10 +3519,10 @@ func PrepareForCoverageBuild(pkgs []*Package) { // Visit the packages being built or installed, along with all of // their dependencies, and mark them to be instrumented, taking // into account the matchers we've set up in the sequence above. - SelectCoverPackages(PackageList(pkgs), match, "build") + SelectCoverPackages(loaderstate, PackageList(pkgs), match, "build") } -func SelectCoverPackages(roots []*Package, match []func(*Package) bool, op string) []*Package { +func SelectCoverPackages(loaderstate *modload.State, roots []*Package, match []func(*Package) bool, op string) []*Package { var warntag string var includeMain bool switch op { @@ -3602,7 +3602,7 @@ func SelectCoverPackages(roots []*Package, match []func(*Package) bool, op strin // Force import of sync/atomic into package if atomic mode. if cfg.BuildCoverMode == "atomic" { - EnsureImport(p, "sync/atomic") + EnsureImport(loaderstate, p, "sync/atomic") } } diff --git a/src/cmd/go/internal/load/search.go b/src/cmd/go/internal/load/search.go index 51c8cc0932406e..09e32a4f46a69b 100644 --- a/src/cmd/go/internal/load/search.go +++ b/src/cmd/go/internal/load/search.go @@ -14,7 +14,7 @@ import ( ) // MatchPackage(pattern, cwd)(p) reports whether package p matches pattern in the working directory cwd. -func MatchPackage(pattern, cwd string) func(*Package) bool { +func MatchPackage(loaderstate *modload.State, pattern, cwd string) func(*Package) bool { switch { case search.IsRelativePath(pattern): // Split pattern into leading pattern-free directory path @@ -54,13 +54,13 @@ func MatchPackage(pattern, cwd string) func(*Package) bool { return func(p *Package) bool { return p.Standard } case pattern == "cmd": return func(p *Package) bool { return p.Standard && strings.HasPrefix(p.ImportPath, "cmd/") } - case pattern == "tool" && modload.Enabled(): + case pattern == "tool" && modload.Enabled(loaderstate): return func(p *Package) bool { - return modload.LoaderState.MainModules.Tools()[p.ImportPath] + return loaderstate.MainModules.Tools()[p.ImportPath] } - case pattern == "work" && modload.Enabled(): + case pattern == "work" && modload.Enabled(loaderstate): return func(p *Package) bool { - return p.Module != nil && modload.LoaderState.MainModules.Contains(p.Module.Path) + return p.Module != nil && loaderstate.MainModules.Contains(p.Module.Path) } default: diff --git a/src/cmd/go/internal/load/test.go b/src/cmd/go/internal/load/test.go index 9849ee138a5781..9019545b4b8d82 100644 --- a/src/cmd/go/internal/load/test.go +++ b/src/cmd/go/internal/load/test.go @@ -24,6 +24,7 @@ import ( "unicode/utf8" "cmd/go/internal/fsys" + "cmd/go/internal/modload" "cmd/go/internal/str" "cmd/go/internal/trace" ) @@ -106,7 +107,7 @@ func TestPackagesAndErrors(ctx context.Context, done func(), opts PackageOpts, p defer pre.flush() allImports := append([]string{}, p.TestImports...) allImports = append(allImports, p.XTestImports...) 
- pre.preloadImports(ctx, opts, allImports, p.Internal.Build) + pre.preloadImports(modload.LoaderState, ctx, opts, allImports, p.Internal.Build) var ptestErr, pxtestErr *PackageError var imports, ximports []*Package @@ -116,7 +117,7 @@ func TestPackagesAndErrors(ctx context.Context, done func(), opts PackageOpts, p stk.Push(ImportInfo{Pkg: p.ImportPath + " (test)"}) rawTestImports := str.StringList(p.TestImports) for i, path := range p.TestImports { - p1, err := loadImport(ctx, opts, pre, path, p.Dir, p, &stk, p.Internal.Build.TestImportPos[path], ResolveImport) + p1, err := loadImport(modload.LoaderState, ctx, opts, pre, path, p.Dir, p, &stk, p.Internal.Build.TestImportPos[path], ResolveImport) if err != nil && ptestErr == nil { ptestErr = err incomplete = true @@ -145,7 +146,7 @@ func TestPackagesAndErrors(ctx context.Context, done func(), opts PackageOpts, p var pxtestIncomplete bool rawXTestImports := str.StringList(p.XTestImports) for i, path := range p.XTestImports { - p1, err := loadImport(ctx, opts, pre, path, p.Dir, p, &stk, p.Internal.Build.XTestImportPos[path], ResolveImport) + p1, err := loadImport(modload.LoaderState, ctx, opts, pre, path, p.Dir, p, &stk, p.Internal.Build.XTestImportPos[path], ResolveImport) if err != nil && pxtestErr == nil { pxtestErr = err } @@ -292,7 +293,7 @@ func TestPackagesAndErrors(ctx context.Context, done func(), opts PackageOpts, p } pb := p.Internal.Build - pmain.DefaultGODEBUG = defaultGODEBUG(pmain, pb.Directives, pb.TestDirectives, pb.XTestDirectives) + pmain.DefaultGODEBUG = defaultGODEBUG(modload.LoaderState, pmain, pb.Directives, pb.TestDirectives, pb.XTestDirectives) if pmain.Internal.BuildInfo == nil || pmain.DefaultGODEBUG != p.DefaultGODEBUG { // Either we didn't generate build info for the package under test (because it wasn't package main), or // the DefaultGODEBUG used to build the test main package is different from the DefaultGODEBUG @@ -321,7 +322,7 @@ func TestPackagesAndErrors(ctx context.Context, done func(), opts PackageOpts, p if dep == ptest.ImportPath { pmain.Internal.Imports = append(pmain.Internal.Imports, ptest) } else { - p1, err := loadImport(ctx, opts, pre, dep, "", nil, &stk, nil, 0) + p1, err := loadImport(modload.LoaderState, ctx, opts, pre, dep, "", nil, &stk, nil, 0) if err != nil && pmain.Error == nil { pmain.Error = err pmain.Incomplete = true diff --git a/src/cmd/go/internal/modcmd/download.go b/src/cmd/go/internal/modcmd/download.go index 8df11bfa59fa43..f4ed4b45834e89 100644 --- a/src/cmd/go/internal/modcmd/download.go +++ b/src/cmd/go/internal/modcmd/download.go @@ -109,15 +109,15 @@ type ModuleJSON struct { } func runDownload(ctx context.Context, cmd *base.Command, args []string) { - modload.InitWorkfile() + modload.InitWorkfile(modload.LoaderState) // Check whether modules are enabled and whether we're in a module. 
modload.LoaderState.ForceUseModules = true modload.ExplicitWriteGoMod = true haveExplicitArgs := len(args) > 0 - if modload.HasModRoot() || modload.WorkFilePath() != "" { - modload.LoadModFile(ctx) // to fill MainModules + if modload.HasModRoot(modload.LoaderState) || modload.WorkFilePath(modload.LoaderState) != "" { + modload.LoadModFile(modload.LoaderState, ctx) // to fill MainModules if haveExplicitArgs { for _, mainModule := range modload.LoaderState.MainModules.Versions() { @@ -130,7 +130,7 @@ func runDownload(ctx context.Context, cmd *base.Command, args []string) { } } } - } else if modload.WorkFilePath() != "" { + } else if modload.WorkFilePath(modload.LoaderState) != "" { // TODO(#44435): Think about what the correct query is to download the // right set of modules. Also see code review comment at // https://go-review.googlesource.com/c/go/+/359794/comments/ce946a80_6cf53992. @@ -169,7 +169,7 @@ func runDownload(ctx context.Context, cmd *base.Command, args []string) { } if len(args) == 0 { - if modload.HasModRoot() { + if modload.HasModRoot(modload.LoaderState) { os.Stderr.WriteString("go: no module dependencies to download\n") } else { base.Errorf("go: no modules specified (see 'go help mod download')") @@ -177,7 +177,7 @@ func runDownload(ctx context.Context, cmd *base.Command, args []string) { base.Exit() } - if *downloadReuse != "" && modload.HasModRoot() { + if *downloadReuse != "" && modload.HasModRoot(modload.LoaderState) { base.Fatalf("go mod download -reuse cannot be used inside a module") } @@ -220,7 +220,7 @@ func runDownload(ctx context.Context, cmd *base.Command, args []string) { // when we can. } - if !haveExplicitArgs && modload.WorkFilePath() == "" { + if !haveExplicitArgs && modload.WorkFilePath(modload.LoaderState) == "" { // 'go mod download' is sometimes run without arguments to pre-populate the // module cache. In modules that aren't at go 1.17 or higher, it may fetch // modules that aren't needed to build packages in the main module. This is @@ -291,7 +291,7 @@ func runDownload(ctx context.Context, cmd *base.Command, args []string) { // with no arguments we download the module pattern "all", // which may include dependencies that are normally pruned out // of the individual modules in the workspace. - if haveExplicitArgs || modload.WorkFilePath() != "" { + if haveExplicitArgs || modload.WorkFilePath(modload.LoaderState) != "" { var sw toolchain.Switcher // Add errors to the Switcher in deterministic order so that they will be // logged deterministically. @@ -347,7 +347,7 @@ func runDownload(ctx context.Context, cmd *base.Command, args []string) { // // Don't save sums for 'go mod download' without arguments unless we're in // workspace mode; see comment above. 
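// A small sketch, with invented fields, of the accessor style the download.go
// hunks above converge on: helpers such as HasModRoot and WorkFilePath answer
// questions about the State value they are given instead of consulting hidden
// globals, so the same decision logic can be exercised against any state.
package main

import "fmt"

type State struct {
	modRoots     []string
	workFilePath string
}

func HasModRoot(s *State) bool     { return len(s.modRoots) > 0 }
func WorkFilePath(s *State) string { return s.workFilePath }

// shouldLoadModFile mirrors the condition in runDownload above:
// load the module graph when we are inside a module or a workspace.
func shouldLoadModFile(s *State) bool {
	return HasModRoot(s) || WorkFilePath(s) != ""
}

func main() {
	inModule := &State{modRoots: []string{"/src/mymod"}}
	outside := &State{}
	fmt.Println(shouldLoadModFile(inModule)) // true
	fmt.Println(shouldLoadModFile(outside))  // false
}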
- if haveExplicitArgs || modload.WorkFilePath() != "" { + if haveExplicitArgs || modload.WorkFilePath(modload.LoaderState) != "" { if err := modload.WriteGoMod(ctx, modload.WriteOpts{}); err != nil { base.Error(err) } diff --git a/src/cmd/go/internal/modcmd/graph.go b/src/cmd/go/internal/modcmd/graph.go index 5f47260e188292..3bc6009b57b595 100644 --- a/src/cmd/go/internal/modcmd/graph.go +++ b/src/cmd/go/internal/modcmd/graph.go @@ -52,7 +52,7 @@ func init() { } func runGraph(ctx context.Context, cmd *base.Command, args []string) { - modload.InitWorkfile() + modload.InitWorkfile(modload.LoaderState) if len(args) > 0 { base.Fatalf("go: 'go mod graph' accepts no arguments") diff --git a/src/cmd/go/internal/modcmd/tidy.go b/src/cmd/go/internal/modcmd/tidy.go index 0314dcef250d81..c693bd52a38af9 100644 --- a/src/cmd/go/internal/modcmd/tidy.go +++ b/src/cmd/go/internal/modcmd/tidy.go @@ -130,7 +130,7 @@ func runTidy(ctx context.Context, cmd *base.Command, args []string) { }) } - modload.LoadPackages(ctx, modload.PackageOpts{ + modload.LoadPackages(modload.LoaderState, ctx, modload.PackageOpts{ TidyGoVersion: tidyGo.String(), Tags: imports.AnyTags(), Tidy: true, diff --git a/src/cmd/go/internal/modcmd/vendor.go b/src/cmd/go/internal/modcmd/vendor.go index df673e885c1473..8d9672d5365523 100644 --- a/src/cmd/go/internal/modcmd/vendor.go +++ b/src/cmd/go/internal/modcmd/vendor.go @@ -66,8 +66,8 @@ func init() { } func runVendor(ctx context.Context, cmd *base.Command, args []string) { - modload.InitWorkfile() - if modload.WorkFilePath() != "" { + modload.InitWorkfile(modload.LoaderState) + if modload.WorkFilePath(modload.LoaderState) != "" { base.Fatalf("go: 'go mod vendor' cannot be run in workspace mode. Run 'go work vendor' to vendor the workspace or set 'GOWORK=off' to exit workspace mode.") } RunVendor(ctx, vendorE, vendorO, args) @@ -88,7 +88,7 @@ func RunVendor(ctx context.Context, vendorE bool, vendorO string, args []string) AllowErrors: vendorE, SilenceMissingStdImports: true, } - _, pkgs := modload.LoadPackages(ctx, loadOpts, "all") + _, pkgs := modload.LoadPackages(modload.LoaderState, ctx, loadOpts, "all") var vdir string switch { @@ -97,7 +97,7 @@ func RunVendor(ctx context.Context, vendorE bool, vendorO string, args []string) case vendorO != "": vdir = filepath.Join(base.Cwd(), vendorO) default: - vdir = filepath.Join(modload.VendorDir()) + vdir = filepath.Join(modload.VendorDir(modload.LoaderState)) } if err := os.RemoveAll(vdir); err != nil { base.Fatal(err) @@ -116,8 +116,8 @@ func RunVendor(ctx context.Context, vendorE bool, vendorO string, args []string) includeAllReplacements := false includeGoVersions := false isExplicit := map[module.Version]bool{} - gv := modload.LoaderState.MainModules.GoVersion() - if gover.Compare(gv, "1.14") >= 0 && (modload.FindGoWork(base.Cwd()) != "" || modload.ModFile().Go != nil) { + gv := modload.LoaderState.MainModules.GoVersion(modload.LoaderState) + if gover.Compare(gv, "1.14") >= 0 && (modload.FindGoWork(modload.LoaderState, base.Cwd()) != "" || modload.ModFile().Go != nil) { // If the Go version is at least 1.14, annotate all explicit 'require' and // 'replace' targets found in the go.mod file so that we can perform a // stronger consistency check when -mod=vendor is set. 
@@ -162,7 +162,7 @@ func RunVendor(ctx context.Context, vendorE bool, vendorO string, args []string) replacementWritten := make(map[module.Version]bool) for _, m := range vendorMods { - replacement := modload.Replacement(m) + replacement := modload.Replacement(modload.LoaderState, m) line := moduleLine(m, replacement) replacementWritten[m] = true io.WriteString(w, line) @@ -215,7 +215,7 @@ func RunVendor(ctx context.Context, vendorE bool, vendorO string, args []string) continue } replacementWritten[r.Old] = true - rNew := modload.Replacement(r.Old) + rNew := modload.Replacement(modload.LoaderState, r.Old) if rNew == (module.Version{}) { // There is no replacement. Don't try to write it. continue @@ -269,7 +269,7 @@ func moduleLine(m, r module.Version) string { } func vendorPkg(vdir, pkg string) { - src, realPath, _ := modload.Lookup("", false, pkg) + src, realPath, _ := modload.Lookup(modload.LoaderState, "", false, pkg) if src == "" { base.Errorf("internal error: no pkg for %s\n", pkg) return @@ -315,7 +315,7 @@ func vendorPkg(vdir, pkg string) { } } var embedPatterns []string - if gover.Compare(modload.LoaderState.MainModules.GoVersion(), "1.22") >= 0 { + if gover.Compare(modload.LoaderState.MainModules.GoVersion(modload.LoaderState), "1.22") >= 0 { embedPatterns = bp.EmbedPatterns } else { // Maintain the behavior of https://github.com/golang/go/issues/63473 @@ -431,7 +431,7 @@ func matchPotentialSourceFile(dir string, info fs.DirEntry) bool { return false } if info.Name() == "go.mod" || info.Name() == "go.sum" { - if gv := modload.LoaderState.MainModules.GoVersion(); gover.Compare(gv, "1.17") >= 0 { + if gv := modload.LoaderState.MainModules.GoVersion(modload.LoaderState); gover.Compare(gv, "1.17") >= 0 { // As of Go 1.17, we strip go.mod and go.sum files from dependency modules. // Otherwise, 'go' commands invoked within the vendor subtree may misidentify // an arbitrary directory within the vendor tree as a module root. diff --git a/src/cmd/go/internal/modcmd/verify.go b/src/cmd/go/internal/modcmd/verify.go index 8de444ff06ad1d..d8227bcd5455a3 100644 --- a/src/cmd/go/internal/modcmd/verify.go +++ b/src/cmd/go/internal/modcmd/verify.go @@ -44,7 +44,7 @@ func init() { } func runVerify(ctx context.Context, cmd *base.Command, args []string) { - modload.InitWorkfile() + modload.InitWorkfile(modload.LoaderState) if len(args) != 0 { // NOTE(rsc): Could take a module pattern. diff --git a/src/cmd/go/internal/modcmd/why.go b/src/cmd/go/internal/modcmd/why.go index 62a5387ed8841c..b37d9fded0f47c 100644 --- a/src/cmd/go/internal/modcmd/why.go +++ b/src/cmd/go/internal/modcmd/why.go @@ -63,7 +63,7 @@ func init() { } func runWhy(ctx context.Context, cmd *base.Command, args []string) { - modload.InitWorkfile() + modload.InitWorkfile(modload.LoaderState) modload.LoaderState.ForceUseModules = true modload.LoaderState.RootMode = modload.NeedRoot modload.ExplicitWriteGoMod = true // don't write go.mod in ListModules @@ -89,7 +89,7 @@ func runWhy(ctx context.Context, cmd *base.Command, args []string) { } byModule := make(map[string][]string) - _, pkgs := modload.LoadPackages(ctx, loadOpts, "all") + _, pkgs := modload.LoadPackages(modload.LoaderState, ctx, loadOpts, "all") for _, path := range pkgs { m := modload.PackageModule(path) if m.Path != "" { @@ -120,9 +120,9 @@ func runWhy(ctx context.Context, cmd *base.Command, args []string) { } } else { // Resolve to packages. - matches, _ := modload.LoadPackages(ctx, loadOpts, args...) 
+ matches, _ := modload.LoadPackages(modload.LoaderState, ctx, loadOpts, args...) - modload.LoadPackages(ctx, loadOpts, "all") // rebuild graph, from main module (not from named packages) + modload.LoadPackages(modload.LoaderState, ctx, loadOpts, "all") // rebuild graph, from main module (not from named packages) sep := "" for _, m := range matches { diff --git a/src/cmd/go/internal/modfetch/zip_sum_test/zip_sum_test.go b/src/cmd/go/internal/modfetch/zip_sum_test/zip_sum_test.go index 16cc1457058933..9d59d1a8ea8218 100644 --- a/src/cmd/go/internal/modfetch/zip_sum_test/zip_sum_test.go +++ b/src/cmd/go/internal/modfetch/zip_sum_test/zip_sum_test.go @@ -94,7 +94,7 @@ func TestZipSums(t *testing.T) { cfg.GOPROXY = "direct" cfg.GOSUMDB = "off" - modload.Init() + modload.Init(modload.LoaderState) // Shard tests by downloading only every nth module when shard flags are set. // This makes it easier to test small groups of modules quickly. We avoid diff --git a/src/cmd/go/internal/modget/get.go b/src/cmd/go/internal/modget/get.go index 5017f878ed3647..141e1708fa6ded 100644 --- a/src/cmd/go/internal/modget/get.go +++ b/src/cmd/go/internal/modget/get.go @@ -307,14 +307,14 @@ func runGet(ctx context.Context, cmd *base.Command, args []string) { // Allow looking up modules for import paths when outside of a module. // 'go get' is expected to do this, unlike other commands. - modload.AllowMissingModuleImports() + modload.AllowMissingModuleImports(modload.LoaderState) // 'go get' no longer builds or installs packages, so there's nothing to do // if there's no go.mod file. // TODO(#40775): make modload.Init return ErrNoModRoot instead of exiting. // We could handle that here by printing a different message. - modload.Init() - if !modload.HasModRoot() { + modload.Init(modload.LoaderState) + if !modload.HasModRoot(modload.LoaderState) { base.Fatalf("go: go.mod file not found in current directory or any parent directory.\n" + "\t'go get' is no longer supported outside a module.\n" + "\tTo build and install a command, use 'go install' with a version,\n" + @@ -424,9 +424,9 @@ func runGet(ctx context.Context, cmd *base.Command, args []string) { newReqs := reqsFromGoMod(modload.ModFile()) r.reportChanges(oldReqs, newReqs) - if gowork := modload.FindGoWork(base.Cwd()); gowork != "" { + if gowork := modload.FindGoWork(modload.LoaderState, base.Cwd()); gowork != "" { wf, err := modload.ReadWorkFile(gowork) - if err == nil && modload.UpdateWorkGoVersion(wf, modload.LoaderState.MainModules.GoVersion()) { + if err == nil && modload.UpdateWorkGoVersion(wf, modload.LoaderState.MainModules.GoVersion(modload.LoaderState)) { modload.WriteWorkFile(gowork, wf) } } @@ -448,7 +448,7 @@ func updateTools(ctx context.Context, queries []*query, opts *modload.WriteOpts) patterns = append(patterns, q.pattern) } - matches, _ := modload.LoadPackages(ctx, pkgOpts, patterns...) + matches, _ := modload.LoadPackages(modload.LoaderState, ctx, pkgOpts, patterns...) for i, m := range matches { if queries[i].version == "none" { opts.DropTools = append(opts.DropTools, m.Pkgs...) @@ -574,7 +574,7 @@ func newResolver(ctx context.Context, queries []*query) *resolver { buildListVersion: initialVersion, initialVersion: initialVersion, nonesByPath: map[string]*query{}, - workspace: loadWorkspace(modload.FindGoWork(base.Cwd())), + workspace: loadWorkspace(modload.FindGoWork(modload.LoaderState, base.Cwd())), } for _, q := range queries { @@ -645,7 +645,7 @@ func (r *resolver) noneForPath(mPath string) (nq *query, found bool) { // allowed versions. 
func (r *resolver) queryModule(ctx context.Context, mPath, query string, selected func(string) string) (module.Version, error) { current := r.initialSelected(mPath) - rev, err := modload.Query(ctx, mPath, query, current, r.checkAllowedOr(query, selected)) + rev, err := modload.Query(modload.LoaderState, ctx, mPath, query, current, r.checkAllowedOr(query, selected)) if err != nil { return module.Version{}, err } @@ -655,7 +655,7 @@ func (r *resolver) queryModule(ctx context.Context, mPath, query string, selecte // queryPackages wraps modload.QueryPackage, substituting r.checkAllowedOr to // decide allowed versions. func (r *resolver) queryPackages(ctx context.Context, pattern, query string, selected func(string) string) (pkgMods []module.Version, err error) { - results, err := modload.QueryPackages(ctx, pattern, query, selected, r.checkAllowedOr(query, selected)) + results, err := modload.QueryPackages(modload.LoaderState, ctx, pattern, query, selected, r.checkAllowedOr(query, selected)) if len(results) > 0 { pkgMods = make([]module.Version, 0, len(results)) for _, qr := range results { @@ -668,7 +668,7 @@ func (r *resolver) queryPackages(ctx context.Context, pattern, query string, sel // queryPattern wraps modload.QueryPattern, substituting r.checkAllowedOr to // decide allowed versions. func (r *resolver) queryPattern(ctx context.Context, pattern, query string, selected func(string) string) (pkgMods []module.Version, mod module.Version, err error) { - results, modOnly, err := modload.QueryPattern(ctx, pattern, query, selected, r.checkAllowedOr(query, selected)) + results, modOnly, err := modload.QueryPattern(modload.LoaderState, ctx, pattern, query, selected, r.checkAllowedOr(query, selected)) if len(results) > 0 { pkgMods = make([]module.Version, 0, len(results)) for _, qr := range results { @@ -721,7 +721,7 @@ func (r *resolver) queryNone(ctx context.Context, q *query) { if !q.isWildcard() { q.pathOnce(q.pattern, func() pathSet { - hasModRoot := modload.HasModRoot() + hasModRoot := modload.HasModRoot(modload.LoaderState) if hasModRoot && modload.LoaderState.MainModules.Contains(q.pattern) { v := module.Version{Path: q.pattern} // The user has explicitly requested to downgrade their own module to @@ -746,7 +746,7 @@ func (r *resolver) queryNone(ctx context.Context, q *query) { continue } q.pathOnce(curM.Path, func() pathSet { - if modload.HasModRoot() && curM.Version == "" && modload.LoaderState.MainModules.Contains(curM.Path) { + if modload.HasModRoot(modload.LoaderState) && curM.Version == "" && modload.LoaderState.MainModules.Contains(curM.Path) { return errSet(&modload.QueryMatchesMainModulesError{MainModules: []module.Version{curM}, Pattern: q.pattern, Query: q.version}) } return pathSet{mod: module.Version{Path: curM.Path, Version: "none"}} @@ -766,7 +766,7 @@ func (r *resolver) performLocalQueries(ctx context.Context) { // Absolute paths like C:\foo and relative paths like ../foo... are // restricted to matching packages in the main module. - pkgPattern, mainModule := modload.LoaderState.MainModules.DirImportPath(ctx, q.pattern) + pkgPattern, mainModule := modload.LoaderState.MainModules.DirImportPath(modload.LoaderState, ctx, q.pattern) if pkgPattern == "." { modload.MustHaveModRoot() versions := modload.LoaderState.MainModules.Versions() @@ -1275,13 +1275,13 @@ func (r *resolver) loadPackages(ctx context.Context, patterns []string, findPack return nil } - _, pkgs := modload.LoadPackages(ctx, opts, patterns...) 
+ _, pkgs := modload.LoadPackages(modload.LoaderState, ctx, opts, patterns...) for _, pkgPath := range pkgs { const ( parentPath = "" parentIsStd = false ) - _, _, err := modload.Lookup(parentPath, parentIsStd, pkgPath) + _, _, err := modload.Lookup(modload.LoaderState, parentPath, parentIsStd, pkgPath) if err == nil { continue } @@ -1651,7 +1651,7 @@ func (r *resolver) checkPackageProblems(ctx context.Context, pkgPatterns []strin AllowErrors: true, SilenceNoGoErrors: true, } - matches, pkgs := modload.LoadPackages(ctx, pkgOpts, pkgPatterns...) + matches, pkgs := modload.LoadPackages(modload.LoaderState, ctx, pkgOpts, pkgPatterns...) for _, m := range matches { if len(m.Errs) > 0 { base.SetExitStatus(1) @@ -1659,7 +1659,7 @@ func (r *resolver) checkPackageProblems(ctx context.Context, pkgPatterns []strin } } for _, pkg := range pkgs { - if dir, _, err := modload.Lookup("", false, pkg); err != nil { + if dir, _, err := modload.Lookup(modload.LoaderState, "", false, pkg); err != nil { if dir != "" && errors.Is(err, imports.ErrNoGo) { // Since dir is non-empty, we must have located source files // associated with either the package or its test — ErrNoGo must @@ -1690,7 +1690,7 @@ func (r *resolver) checkPackageProblems(ctx context.Context, pkgPatterns []strin } } - reqs := modload.LoadModFile(ctx) + reqs := modload.LoadModFile(modload.LoaderState, ctx) for m := range relevantMods { if reqs.IsDirect(m.Path) { relevantMods[m] |= direct @@ -1714,7 +1714,7 @@ func (r *resolver) checkPackageProblems(ctx context.Context, pkgPatterns []strin for i := range retractions { i := i r.work.Add(func() { - err := modload.CheckRetractions(ctx, retractions[i].m) + err := modload.CheckRetractions(modload.LoaderState, ctx, retractions[i].m) if _, ok := errors.AsType[*modload.ModuleRetractedError](err); ok { retractions[i].message = err.Error() } @@ -1735,7 +1735,7 @@ func (r *resolver) checkPackageProblems(ctx context.Context, pkgPatterns []strin for i := range deprecations { i := i r.work.Add(func() { - deprecation, err := modload.CheckDeprecation(ctx, deprecations[i].m) + deprecation, err := modload.CheckDeprecation(modload.LoaderState, ctx, deprecations[i].m) if err != nil || deprecation == "" { return } @@ -1765,7 +1765,7 @@ func (r *resolver) checkPackageProblems(ctx context.Context, pkgPatterns []strin i := i m := r.buildList[i] mActual := m - if mRepl := modload.Replacement(m); mRepl.Path != "" { + if mRepl := modload.Replacement(modload.LoaderState, m); mRepl.Path != "" { mActual = mRepl } old := module.Version{Path: m.Path, Version: r.initialVersion[m.Path]} @@ -1773,7 +1773,7 @@ func (r *resolver) checkPackageProblems(ctx context.Context, pkgPatterns []strin continue } oldActual := old - if oldRepl := modload.Replacement(old); oldRepl.Path != "" { + if oldRepl := modload.Replacement(modload.LoaderState, old); oldRepl.Path != "" { oldActual = oldRepl } if mActual == oldActual || mActual.Version == "" || !modfetch.HaveSum(oldActual) { @@ -1986,7 +1986,7 @@ func (r *resolver) updateBuildList(ctx context.Context, additions []module.Versi } } - changed, err := modload.EditBuildList(ctx, additions, resolved) + changed, err := modload.EditBuildList(modload.LoaderState, ctx, additions, resolved) if err != nil { if errors.Is(err, gover.ErrTooNew) { toolchain.SwitchOrFatal(ctx, err) diff --git a/src/cmd/go/internal/modget/query.go b/src/cmd/go/internal/modget/query.go index 59f3023ffc4f41..7076bbadce898b 100644 --- a/src/cmd/go/internal/modget/query.go +++ b/src/cmd/go/internal/modget/query.go @@ -184,7 +184,7 
@@ func (q *query) validate() error { if q.pattern == "all" { // If there is no main module, "all" is not meaningful. - if !modload.HasModRoot() { + if !modload.HasModRoot(modload.LoaderState) { return fmt.Errorf(`cannot match "all": %v`, modload.ErrNoModRoot) } if !versionOkForMainModule(q.version) { diff --git a/src/cmd/go/internal/modload/build.go b/src/cmd/go/internal/modload/build.go index a8ab82d1ecb246..5b1b643d272dfd 100644 --- a/src/cmd/go/internal/modload/build.go +++ b/src/cmd/go/internal/modload/build.go @@ -51,8 +51,8 @@ func findStandardImportPath(path string) string { // a given package. If modules are not enabled or if the package is in the // standard library or if the package was not successfully loaded with // LoadPackages or ImportFromFiles, nil is returned. -func PackageModuleInfo(ctx context.Context, pkgpath string) *modinfo.ModulePublic { - if isStandardImportPath(pkgpath) || !Enabled() { +func PackageModuleInfo(loaderstate *State, ctx context.Context, pkgpath string) *modinfo.ModulePublic { + if isStandardImportPath(pkgpath) || !Enabled(loaderstate) { return nil } m, ok := findModule(loaded, pkgpath) @@ -60,23 +60,23 @@ func PackageModuleInfo(ctx context.Context, pkgpath string) *modinfo.ModulePubli return nil } - rs := LoadModFile(ctx) - return moduleInfo(ctx, rs, m, 0, nil) + rs := LoadModFile(loaderstate, ctx) + return moduleInfo(loaderstate, ctx, rs, m, 0, nil) } // PackageModRoot returns the module root directory for the module that provides // a given package. If modules are not enabled or if the package is in the // standard library or if the package was not successfully loaded with // LoadPackages or ImportFromFiles, the empty string is returned. -func PackageModRoot(ctx context.Context, pkgpath string) string { - if isStandardImportPath(pkgpath) || !Enabled() || cfg.BuildMod == "vendor" { +func PackageModRoot(loaderstate *State, ctx context.Context, pkgpath string) string { + if isStandardImportPath(pkgpath) || !Enabled(loaderstate) || cfg.BuildMod == "vendor" { return "" } m, ok := findModule(loaded, pkgpath) if !ok { return "" } - root, _, err := fetch(ctx, m) + root, _, err := fetch(loaderstate, ctx, m) if err != nil { return "" } @@ -84,26 +84,26 @@ func PackageModRoot(ctx context.Context, pkgpath string) string { } func ModuleInfo(ctx context.Context, path string) *modinfo.ModulePublic { - if !Enabled() { + if !Enabled(LoaderState) { return nil } if path, vers, found := strings.Cut(path, "@"); found { m := module.Version{Path: path, Version: vers} - return moduleInfo(ctx, nil, m, 0, nil) + return moduleInfo(LoaderState, ctx, nil, m, 0, nil) } - rs := LoadModFile(ctx) + rs := LoadModFile(LoaderState, ctx) var ( v string ok bool ) if rs.pruning == pruned { - v, ok = rs.rootSelected(path) + v, ok = rs.rootSelected(LoaderState, path) } if !ok { - mg, err := rs.Graph(ctx) + mg, err := rs.Graph(LoaderState, ctx) if err != nil { base.Fatal(err) } @@ -119,7 +119,7 @@ func ModuleInfo(ctx context.Context, path string) *modinfo.ModulePublic { } } - return moduleInfo(ctx, rs, module.Version{Path: path, Version: v}, 0, nil) + return moduleInfo(LoaderState, ctx, rs, module.Version{Path: path, Version: v}, 0, nil) } // addUpdate fills in m.Update if an updated version is available. 
@@ -128,7 +128,7 @@ func addUpdate(ctx context.Context, m *modinfo.ModulePublic) { return } - info, err := Query(ctx, m.Path, "upgrade", m.Version, CheckAllowed) + info, err := Query(LoaderState, ctx, m.Path, "upgrade", m.Version, CheckAllowed) if _, ok := errors.AsType[*NoMatchingVersionError](err); ok || errors.Is(err, fs.ErrNotExist) || errors.Is(err, ErrDisallowed) { @@ -221,7 +221,7 @@ func addVersions(ctx context.Context, m *modinfo.ModulePublic, listRetracted boo if listRetracted { allowed = CheckExclusions } - v, origin, err := versions(ctx, m.Path, allowed) + v, origin, err := versions(LoaderState, ctx, m.Path, allowed) if err != nil && m.Error == nil { m.Error = &modinfo.ModuleError{Err: err.Error()} } @@ -231,12 +231,12 @@ func addVersions(ctx context.Context, m *modinfo.ModulePublic, listRetracted boo // addRetraction fills in m.Retracted if the module was retracted by its author. // m.Error is set if there's an error loading retraction information. -func addRetraction(ctx context.Context, m *modinfo.ModulePublic) { +func addRetraction(loaderstate *State, ctx context.Context, m *modinfo.ModulePublic) { if m.Version == "" { return } - err := CheckRetractions(ctx, module.Version{Path: m.Path, Version: m.Version}) + err := CheckRetractions(loaderstate, ctx, module.Version{Path: m.Path, Version: m.Version}) if err == nil { return } else if _, ok := errors.AsType[*NoMatchingVersionError](err); ok || errors.Is(err, fs.ErrNotExist) { @@ -263,7 +263,7 @@ func addRetraction(ctx context.Context, m *modinfo.ModulePublic) { // addDeprecation fills in m.Deprecated if the module was deprecated by its // author. m.Error is set if there's an error loading deprecation information. func addDeprecation(ctx context.Context, m *modinfo.ModulePublic) { - deprecation, err := CheckDeprecation(ctx, module.Version{Path: m.Path, Version: m.Version}) + deprecation, err := CheckDeprecation(LoaderState, ctx, module.Version{Path: m.Path, Version: m.Version}) if _, ok := errors.AsType[*NoMatchingVersionError](err); ok || errors.Is(err, fs.ErrNotExist) { // Ignore "no matching version" and "not found" errors. // This means the proxy has no matching version or no versions at all. @@ -287,8 +287,8 @@ func addDeprecation(ctx context.Context, m *modinfo.ModulePublic) { // moduleInfo returns information about module m, loaded from the requirements // in rs (which may be nil to indicate that m was not loaded from a requirement // graph). 
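// addRetraction and addDeprecation in the hunks above deliberately swallow
// "no matching version" and "not found" failures and only record unexpected
// errors on the module. A minimal sketch of that error triage, using a
// made-up NoMatchingVersionError in place of modload's:
package main

import (
	"errors"
	"fmt"
	"io/fs"
)

type NoMatchingVersionError struct{ Query string }

func (e *NoMatchingVersionError) Error() string {
	return "no matching version for " + e.Query
}

// triage returns the message to attach to the module, or "" to stay silent.
func triage(err error) string {
	if err == nil {
		return ""
	}
	var nmv *NoMatchingVersionError
	if errors.As(err, &nmv) || errors.Is(err, fs.ErrNotExist) {
		// The proxy simply has nothing for us; that is not the module's error.
		return ""
	}
	return err.Error()
}

func main() {
	fmt.Printf("%q\n", triage(nil))
	fmt.Printf("%q\n", triage(&NoMatchingVersionError{Query: "latest"}))
	fmt.Printf("%q\n", triage(fmt.Errorf("proxy: %w", fs.ErrNotExist)))
	fmt.Printf("%q\n", triage(errors.New("checksum mismatch")))
}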
-func moduleInfo(ctx context.Context, rs *Requirements, m module.Version, mode ListMode, reuse map[module.Version]*modinfo.ModulePublic) *modinfo.ModulePublic { - if m.Version == "" && LoaderState.MainModules.Contains(m.Path) { +func moduleInfo(loaderstate *State, ctx context.Context, rs *Requirements, m module.Version, mode ListMode, reuse map[module.Version]*modinfo.ModulePublic) *modinfo.ModulePublic { + if m.Version == "" && loaderstate.MainModules.Contains(m.Path) { info := &modinfo.ModulePublic{ Path: m.Path, Version: m.Version, @@ -299,7 +299,7 @@ func moduleInfo(ctx context.Context, rs *Requirements, m module.Version, mode Li } else { panic("internal error: GoVersion not set for main module") } - if modRoot := LoaderState.MainModules.ModRoot(m); modRoot != "" { + if modRoot := loaderstate.MainModules.ModRoot(m); modRoot != "" { info.Dir = modRoot info.GoMod = modFilePath(modRoot) } @@ -322,7 +322,7 @@ func moduleInfo(ctx context.Context, rs *Requirements, m module.Version, mode Li } checksumOk := func(suffix string) bool { - return rs == nil || m.Version == "" || !mustHaveSums() || + return rs == nil || m.Version == "" || !mustHaveSums(loaderstate) || modfetch.HaveSum(module.Version{Path: m.Path, Version: m.Version + suffix}) } @@ -330,7 +330,7 @@ func moduleInfo(ctx context.Context, rs *Requirements, m module.Version, mode Li if m.Version != "" { if old := reuse[mod]; old != nil { - if err := checkReuse(ctx, mod, old.Origin); err == nil { + if err := checkReuse(loaderstate, ctx, mod, old.Origin); err == nil { *m = *old m.Query = "" m.Dir = "" @@ -338,7 +338,7 @@ func moduleInfo(ctx context.Context, rs *Requirements, m module.Version, mode Li } } - if q, err := Query(ctx, m.Path, m.Version, "", nil); err != nil { + if q, err := Query(loaderstate, ctx, m.Path, m.Version, "", nil); err != nil { m.Error = &modinfo.ModuleError{Err: err.Error()} } else { m.Version = q.Version @@ -349,7 +349,7 @@ func moduleInfo(ctx context.Context, rs *Requirements, m module.Version, mode Li if m.GoVersion == "" && checksumOk("/go.mod") { // Load the go.mod file to determine the Go version, since it hasn't // already been populated from rawGoVersion. 
- if summary, err := rawGoModSummary(mod); err == nil && summary.goVersion != "" { + if summary, err := rawGoModSummary(loaderstate, mod); err == nil && summary.goVersion != "" { m.GoVersion = summary.goVersion } } @@ -377,7 +377,7 @@ func moduleInfo(ctx context.Context, rs *Requirements, m module.Version, mode Li } if mode&ListRetracted != 0 { - addRetraction(ctx, m) + addRetraction(loaderstate, ctx, m) } } } @@ -389,7 +389,7 @@ func moduleInfo(ctx context.Context, rs *Requirements, m module.Version, mode Li return info } - r := Replacement(m) + r := Replacement(loaderstate, m) if r.Path == "" { if cfg.BuildMod == "vendor" { // It's tempting to fill in the "Dir" field to point within the vendor @@ -418,7 +418,7 @@ func moduleInfo(ctx context.Context, rs *Requirements, m module.Version, mode Li if filepath.IsAbs(r.Path) { info.Replace.Dir = r.Path } else { - info.Replace.Dir = filepath.Join(replaceRelativeTo(), r.Path) + info.Replace.Dir = filepath.Join(replaceRelativeTo(loaderstate), r.Path) } info.Replace.GoMod = filepath.Join(info.Replace.Dir, "go.mod") } diff --git a/src/cmd/go/internal/modload/buildlist.go b/src/cmd/go/internal/modload/buildlist.go index cf64ee1dc210f9..54ec4d23ebe08e 100644 --- a/src/cmd/go/internal/modload/buildlist.go +++ b/src/cmd/go/internal/modload/buildlist.go @@ -104,21 +104,21 @@ func mustHaveGoRoot(roots []module.Version) { // // If vendoring is in effect, the caller must invoke initVendor on the returned // *Requirements before any other method. -func newRequirements(pruning modPruning, rootModules []module.Version, direct map[string]bool) *Requirements { +func newRequirements(loaderstate *State, pruning modPruning, rootModules []module.Version, direct map[string]bool) *Requirements { mustHaveGoRoot(rootModules) if pruning != workspace { - if LoaderState.workFilePath != "" { + if loaderstate.workFilePath != "" { panic("in workspace mode, but pruning is not workspace in newRequirements") } } if pruning != workspace { - if LoaderState.workFilePath != "" { + if loaderstate.workFilePath != "" { panic("in workspace mode, but pruning is not workspace in newRequirements") } for i, m := range rootModules { - if m.Version == "" && LoaderState.MainModules.Contains(m.Path) { + if m.Version == "" && loaderstate.MainModules.Contains(m.Path) { panic(fmt.Sprintf("newRequirements called with untrimmed build list: rootModules[%v] is a main module", i)) } if m.Path == "" || m.Version == "" { @@ -162,10 +162,10 @@ func (rs *Requirements) String() string { // initVendor initializes rs.graph from the given list of vendored module // dependencies, overriding the graph that would normally be loaded from module // requirements. -func (rs *Requirements) initVendor(vendorList []module.Version) { +func (rs *Requirements) initVendor(loaderstate *State, vendorList []module.Version) { rs.graphOnce.Do(func() { - roots := LoaderState.MainModules.Versions() - if inWorkspaceMode() { + roots := loaderstate.MainModules.Versions() + if inWorkspaceMode(loaderstate) { // Use rs.rootModules to pull in the go and toolchain roots // from the go.work file and preserve the invariant that all // of rs.rootModules are in mg.g. 
@@ -176,7 +176,7 @@ func (rs *Requirements) initVendor(vendorList []module.Version) { } if rs.pruning == pruned { - mainModule := LoaderState.MainModules.mustGetSingleMainModule() + mainModule := loaderstate.MainModules.mustGetSingleMainModule(loaderstate) // The roots of a single pruned module should already include every module in the // vendor list, because the vendored modules are the same as those needed // for graph pruning. @@ -184,7 +184,7 @@ func (rs *Requirements) initVendor(vendorList []module.Version) { // Just to be sure, we'll double-check that here. inconsistent := false for _, m := range vendorList { - if v, ok := rs.rootSelected(m.Path); !ok || v != m.Version { + if v, ok := rs.rootSelected(loaderstate, m.Path); !ok || v != m.Version { base.Errorf("go: vendored module %v should be required explicitly in go.mod", m) inconsistent = true } @@ -208,15 +208,15 @@ func (rs *Requirements) initVendor(vendorList []module.Version) { // graph, but still distinguishes between direct and indirect // dependencies. vendorMod := module.Version{Path: "vendor/modules.txt", Version: ""} - if inWorkspaceMode() { - for _, m := range LoaderState.MainModules.Versions() { - reqs, _ := rootsFromModFile(m, LoaderState.MainModules.ModFile(m), omitToolchainRoot) + if inWorkspaceMode(loaderstate) { + for _, m := range loaderstate.MainModules.Versions() { + reqs, _ := rootsFromModFile(loaderstate, m, loaderstate.MainModules.ModFile(m), omitToolchainRoot) mg.g.Require(m, append(reqs, vendorMod)) } mg.g.Require(vendorMod, vendorList) } else { - mainModule := LoaderState.MainModules.mustGetSingleMainModule() + mainModule := loaderstate.MainModules.mustGetSingleMainModule(loaderstate) mg.g.Require(mainModule, append(rs.rootModules, vendorMod)) mg.g.Require(vendorMod, vendorList) } @@ -227,8 +227,8 @@ func (rs *Requirements) initVendor(vendorList []module.Version) { } // GoVersion returns the Go language version for the Requirements. -func (rs *Requirements) GoVersion() string { - v, _ := rs.rootSelected("go") +func (rs *Requirements) GoVersion(loaderstate *State) string { + v, _ := rs.rootSelected(loaderstate, "go") if v == "" { panic("internal error: missing go version in modload.Requirements") } @@ -238,8 +238,8 @@ func (rs *Requirements) GoVersion() string { // rootSelected returns the version of the root dependency with the given module // path, or the zero module.Version and ok=false if the module is not a root // dependency. -func (rs *Requirements) rootSelected(path string) (version string, ok bool) { - if LoaderState.MainModules.Contains(path) { +func (rs *Requirements) rootSelected(loaderstate *State, path string) (version string, ok bool) { + if loaderstate.MainModules.Contains(path) { return "", true } if v, ok := rs.maxRootVersion[path]; ok { @@ -252,9 +252,9 @@ func (rs *Requirements) rootSelected(path string) (version string, ok bool) { // of the same module or a requirement on any version of the main module. // Redundant requirements should be pruned, but they may influence version // selection. 
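// A sketch, with invented types, of the root-version lookup that rootSelected
// performs in the hunks above: main modules always answer with an empty
// version, and other roots are served from a precomputed path-to-max-version
// map so the full module graph never has to be loaded for a root query.
package main

import "fmt"

type requirements struct {
	mainModules    map[string]bool
	maxRootVersion map[string]string
}

func (rs *requirements) rootSelected(path string) (version string, ok bool) {
	if rs.mainModules[path] {
		return "", true
	}
	if v, ok := rs.maxRootVersion[path]; ok {
		return v, true
	}
	return "", false
}

func main() {
	rs := &requirements{
		mainModules:    map[string]bool{"example.com/main": true},
		maxRootVersion: map[string]string{"golang.org/x/text": "v0.14.0"},
	}
	fmt.Println(rs.rootSelected("example.com/main"))   // "" true
	fmt.Println(rs.rootSelected("golang.org/x/text"))  // v0.14.0 true
	fmt.Println(rs.rootSelected("example.com/absent")) // "" false
}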
-func (rs *Requirements) hasRedundantRoot() bool { +func (rs *Requirements) hasRedundantRoot(loaderstate *State) bool { for i, m := range rs.rootModules { - if LoaderState.MainModules.Contains(m.Path) || (i > 0 && m.Path == rs.rootModules[i-1].Path) { + if loaderstate.MainModules.Contains(m.Path) || (i > 0 && m.Path == rs.rootModules[i-1].Path) { return true } } @@ -269,9 +269,9 @@ func (rs *Requirements) hasRedundantRoot() bool { // // If the requirements of any relevant module fail to load, Graph also // returns a non-nil error of type *mvs.BuildListError. -func (rs *Requirements) Graph(ctx context.Context) (*ModuleGraph, error) { +func (rs *Requirements) Graph(loaderstate *State, ctx context.Context) (*ModuleGraph, error) { rs.graphOnce.Do(func() { - mg, mgErr := readModGraph(ctx, rs.pruning, rs.rootModules, nil) + mg, mgErr := readModGraph(loaderstate, ctx, rs.pruning, rs.rootModules, nil) rs.graph.Store(&cachedGraph{mg, mgErr}) }) cached := rs.graph.Load() @@ -307,7 +307,7 @@ var readModGraphDebugOnce sync.Once // // Unlike LoadModGraph, readModGraph does not attempt to diagnose or update // inconsistent roots. -func readModGraph(ctx context.Context, pruning modPruning, roots []module.Version, unprune map[module.Version]bool) (*ModuleGraph, error) { +func readModGraph(loaderstate *State, ctx context.Context, pruning modPruning, roots []module.Version, unprune map[module.Version]bool) (*ModuleGraph, error) { mustHaveGoRoot(roots) if pruning == pruned { // Enable diagnostics for lazy module loading @@ -333,10 +333,10 @@ func readModGraph(ctx context.Context, pruning modPruning, roots []module.Versio } var graphRoots []module.Version - if inWorkspaceMode() { + if inWorkspaceMode(loaderstate) { graphRoots = roots } else { - graphRoots = LoaderState.MainModules.Versions() + graphRoots = loaderstate.MainModules.Versions() } var ( mu sync.Mutex // guards mg.g and hasError during loading @@ -347,10 +347,10 @@ func readModGraph(ctx context.Context, pruning modPruning, roots []module.Versio ) if pruning != workspace { - if inWorkspaceMode() { + if inWorkspaceMode(loaderstate) { panic("pruning is not workspace in workspace mode") } - mg.g.Require(LoaderState.MainModules.mustGetSingleMainModule(), roots) + mg.g.Require(loaderstate.MainModules.mustGetSingleMainModule(loaderstate), roots) } type dedupKey struct { @@ -367,7 +367,7 @@ func readModGraph(ctx context.Context, pruning modPruning, roots []module.Versio // m's go.mod file indicates that it supports graph pruning. 
loadOne := func(m module.Version) (*modFileSummary, error) { return mg.loadCache.Do(m, func() (*modFileSummary, error) { - summary, err := goModSummary(m) + summary, err := goModSummary(loaderstate, m) mu.Lock() if err == nil { @@ -527,12 +527,12 @@ func (mg *ModuleGraph) findError() error { return nil } -func (mg *ModuleGraph) allRootsSelected() bool { +func (mg *ModuleGraph) allRootsSelected(loaderstate *State) bool { var roots []module.Version - if inWorkspaceMode() { - roots = LoaderState.MainModules.Versions() + if inWorkspaceMode(loaderstate) { + roots = loaderstate.MainModules.Versions() } else { - roots, _ = mg.g.RequiredBy(LoaderState.MainModules.mustGetSingleMainModule()) + roots, _ = mg.g.RequiredBy(loaderstate.MainModules.mustGetSingleMainModule(loaderstate)) } for _, m := range roots { if mg.Selected(m.Path) != m.Version { @@ -553,13 +553,13 @@ func (mg *ModuleGraph) allRootsSelected() bool { // LoadModGraph need only be called if LoadPackages is not, // typically in commands that care about modules but no particular package. func LoadModGraph(ctx context.Context, goVersion string) (*ModuleGraph, error) { - rs, err := loadModFile(ctx, nil) + rs, err := loadModFile(LoaderState, ctx, nil) if err != nil { return nil, err } if goVersion != "" { - v, _ := rs.rootSelected("go") + v, _ := rs.rootSelected(LoaderState, "go") if gover.Compare(v, gover.GoStrictVersion) >= 0 && gover.Compare(goVersion, v) < 0 { return nil, fmt.Errorf("requested Go version %s cannot load module graph (requires Go >= %s)", goVersion, v) } @@ -569,13 +569,13 @@ func LoadModGraph(ctx context.Context, goVersion string) (*ModuleGraph, error) { // Use newRequirements instead of convertDepth because convertDepth // also updates roots; here, we want to report the unmodified roots // even though they may seem inconsistent. - rs = newRequirements(unpruned, rs.rootModules, rs.direct) + rs = newRequirements(LoaderState, unpruned, rs.rootModules, rs.direct) } - return rs.Graph(ctx) + return rs.Graph(LoaderState, ctx) } - rs, mg, err := expandGraph(ctx, rs) + rs, mg, err := expandGraph(LoaderState, ctx, rs) if err != nil { return nil, err } @@ -594,22 +594,22 @@ func LoadModGraph(ctx context.Context, goVersion string) (*ModuleGraph, error) { // from those roots and any error encountered while loading that graph. // expandGraph returns non-nil requirements and a non-nil graph regardless of // errors. On error, the roots might not be updated to be consistent. -func expandGraph(ctx context.Context, rs *Requirements) (*Requirements, *ModuleGraph, error) { - mg, mgErr := rs.Graph(ctx) +func expandGraph(loaderstate *State, ctx context.Context, rs *Requirements) (*Requirements, *ModuleGraph, error) { + mg, mgErr := rs.Graph(loaderstate, ctx) if mgErr != nil { // Without the graph, we can't update the roots: we don't know which // versions of transitive dependencies would be selected. return rs, mg, mgErr } - if !mg.allRootsSelected() { + if !mg.allRootsSelected(loaderstate) { // The roots of rs are not consistent with the rest of the graph. Update // them. In an unpruned module this is a no-op for the build list as a whole — // it just promotes what were previously transitive requirements to be // roots — but in a pruned module it may pull in previously-irrelevant // transitive dependencies. 
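// A sketch of the consistency check behind allRootsSelected in the hunk above:
// the roots recorded in the requirements must match the versions that module
// version selection actually picked, otherwise expandGraph has to update the
// roots. The types and example data here are invented for illustration.
package main

import "fmt"

type version struct{ path, v string }

// allRootsSelected reports whether every recorded root is still the version
// chosen by selection (selected maps module path to chosen version).
func allRootsSelected(roots []version, selected map[string]string) bool {
	for _, r := range roots {
		if selected[r.path] != r.v {
			return false
		}
	}
	return true
}

func main() {
	roots := []version{{"golang.org/x/mod", "v0.19.0"}, {"golang.org/x/text", "v0.14.0"}}
	selected := map[string]string{
		"golang.org/x/mod":  "v0.19.0",
		"golang.org/x/text": "v0.16.0", // a transitive requirement bumped it
	}
	fmt.Println(allRootsSelected(roots, selected)) // false: the roots need updating
}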
- newRS, rsErr := updateRoots(ctx, rs.direct, rs, nil, nil, false) + newRS, rsErr := updateRoots(loaderstate, ctx, rs.direct, rs, nil, nil, false) if rsErr != nil { // Failed to update roots, perhaps because of an error in a transitive // dependency needed for the update. Return the original Requirements @@ -617,7 +617,7 @@ func expandGraph(ctx context.Context, rs *Requirements) (*Requirements, *ModuleG return rs, mg, rsErr } rs = newRS - mg, mgErr = rs.Graph(ctx) + mg, mgErr = rs.Graph(loaderstate, ctx) } return rs, mg, mgErr @@ -639,16 +639,16 @@ func expandGraph(ctx context.Context, rs *Requirements) (*Requirements, *ModuleG // On success, EditBuildList reports whether the selected version of any module // in the build list may have been changed (possibly to or from "none") as a // result. -func EditBuildList(ctx context.Context, add, mustSelect []module.Version) (changed bool, err error) { - rs, changed, err := editRequirements(ctx, LoadModFile(ctx), add, mustSelect) +func EditBuildList(loaderstate *State, ctx context.Context, add, mustSelect []module.Version) (changed bool, err error) { + rs, changed, err := editRequirements(loaderstate, ctx, LoadModFile(loaderstate, ctx), add, mustSelect) if err != nil { return false, err } - LoaderState.requirements = rs + loaderstate.requirements = rs return changed, nil } -func overrideRoots(ctx context.Context, rs *Requirements, replace []module.Version) *Requirements { +func overrideRoots(loaderstate *State, ctx context.Context, rs *Requirements, replace []module.Version) *Requirements { drop := make(map[string]bool) for _, m := range replace { drop[m.Path] = true @@ -661,7 +661,7 @@ func overrideRoots(ctx context.Context, rs *Requirements, replace []module.Versi } roots = append(roots, replace...) gover.ModSort(roots) - return newRequirements(rs.pruning, roots, rs.direct) + return newRequirements(loaderstate, rs.pruning, roots, rs.direct) } // A ConstraintError describes inconsistent constraints in EditBuildList @@ -765,28 +765,28 @@ func (c Conflict) String() string { // tidyRoots trims the root dependencies to the minimal requirements needed to // both retain the same versions of all packages in pkgs and satisfy the // graph-pruning invariants (if applicable). 
-func tidyRoots(ctx context.Context, rs *Requirements, pkgs []*loadPkg) (*Requirements, error) { - mainModule := LoaderState.MainModules.mustGetSingleMainModule() +func tidyRoots(loaderstate *State, ctx context.Context, rs *Requirements, pkgs []*loadPkg) (*Requirements, error) { + mainModule := loaderstate.MainModules.mustGetSingleMainModule(loaderstate) if rs.pruning == unpruned { - return tidyUnprunedRoots(ctx, mainModule, rs, pkgs) + return tidyUnprunedRoots(loaderstate, ctx, mainModule, rs, pkgs) } - return tidyPrunedRoots(ctx, mainModule, rs, pkgs) + return tidyPrunedRoots(loaderstate, ctx, mainModule, rs, pkgs) } -func updateRoots(ctx context.Context, direct map[string]bool, rs *Requirements, pkgs []*loadPkg, add []module.Version, rootsImported bool) (*Requirements, error) { +func updateRoots(loaderstate *State, ctx context.Context, direct map[string]bool, rs *Requirements, pkgs []*loadPkg, add []module.Version, rootsImported bool) (*Requirements, error) { switch rs.pruning { case unpruned: - return updateUnprunedRoots(ctx, direct, rs, add) + return updateUnprunedRoots(loaderstate, ctx, direct, rs, add) case pruned: - return updatePrunedRoots(ctx, direct, rs, pkgs, add, rootsImported) + return updatePrunedRoots(loaderstate, ctx, direct, rs, pkgs, add, rootsImported) case workspace: - return updateWorkspaceRoots(ctx, direct, rs, add) + return updateWorkspaceRoots(loaderstate, ctx, direct, rs, add) default: panic(fmt.Sprintf("unsupported pruning mode: %v", rs.pruning)) } } -func updateWorkspaceRoots(ctx context.Context, direct map[string]bool, rs *Requirements, add []module.Version) (*Requirements, error) { +func updateWorkspaceRoots(loaderstate *State, ctx context.Context, direct map[string]bool, rs *Requirements, add []module.Version) (*Requirements, error) { if len(add) != 0 { // add should be empty in workspace mode because workspace mode implies // -mod=readonly, which in turn implies no new requirements. The code path @@ -797,7 +797,7 @@ func updateWorkspaceRoots(ctx context.Context, direct map[string]bool, rs *Requi // return an error. panic("add is not empty") } - return newRequirements(workspace, rs.rootModules, direct), nil + return newRequirements(loaderstate, workspace, rs.rootModules, direct), nil } // tidyPrunedRoots returns a minimal set of root requirements that maintains the @@ -816,16 +816,16 @@ func updateWorkspaceRoots(ctx context.Context, direct map[string]bool, rs *Requi // To ensure that the loading process eventually converges, the caller should // add any needed roots from the tidy root set (without removing existing untidy // roots) until the set of roots has converged. 
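updateRoots above fans out to one helper per pruning mode and panics on anything unexpected. A small stand-alone sketch of that dispatch shape (the modPruning constants here are local stand-ins, not values imported from modload):

    package main

    import "fmt"

    type modPruning int

    const (
    	unpruned modPruning = iota
    	pruned
    	workspace
    )

    // dispatchByPruning mirrors the shape of the updateRoots switch: one
    // entry point, one helper per mode, and a panic on an impossible mode
    // so a bug surfaces loudly instead of being treated as a valid case.
    func dispatchByPruning(p modPruning) string {
    	switch p {
    	case unpruned:
    		return "update unpruned roots"
    	case pruned:
    		return "update pruned roots"
    	case workspace:
    		return "update workspace roots"
    	default:
    		panic(fmt.Sprintf("unsupported pruning mode: %v", p))
    	}
    }

    func main() {
    	for _, p := range []modPruning{unpruned, pruned, workspace} {
    		fmt.Println(dispatchByPruning(p))
    	}
    }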
-func tidyPrunedRoots(ctx context.Context, mainModule module.Version, old *Requirements, pkgs []*loadPkg) (*Requirements, error) { +func tidyPrunedRoots(loaderstate *State, ctx context.Context, mainModule module.Version, old *Requirements, pkgs []*loadPkg) (*Requirements, error) { var ( roots []module.Version pathIsRoot = map[string]bool{mainModule.Path: true} ) - if v, ok := old.rootSelected("go"); ok { + if v, ok := old.rootSelected(loaderstate, "go"); ok { roots = append(roots, module.Version{Path: "go", Version: v}) pathIsRoot["go"] = true } - if v, ok := old.rootSelected("toolchain"); ok { + if v, ok := old.rootSelected(loaderstate, "toolchain"); ok { roots = append(roots, module.Version{Path: "toolchain", Version: v}) pathIsRoot["toolchain"] = true } @@ -847,7 +847,7 @@ func tidyPrunedRoots(ctx context.Context, mainModule module.Version, old *Requir if !pkg.flags.has(pkgInAll) { continue } - if pkg.fromExternalModule() && !pathIsRoot[pkg.mod.Path] { + if pkg.fromExternalModule(loaderstate) && !pathIsRoot[pkg.mod.Path] { roots = append(roots, pkg.mod) pathIsRoot[pkg.mod.Path] = true } @@ -855,11 +855,11 @@ func tidyPrunedRoots(ctx context.Context, mainModule module.Version, old *Requir queued[pkg] = true } gover.ModSort(roots) - tidy := newRequirements(pruned, roots, old.direct) + tidy := newRequirements(loaderstate, pruned, roots, old.direct) for len(queue) > 0 { roots = tidy.rootModules - mg, err := tidy.Graph(ctx) + mg, err := tidy.Graph(loaderstate, ctx) if err != nil { return nil, err } @@ -892,12 +892,12 @@ func tidyPrunedRoots(ctx context.Context, mainModule module.Version, old *Requir if len(roots) > len(tidy.rootModules) { gover.ModSort(roots) - tidy = newRequirements(pruned, roots, tidy.direct) + tidy = newRequirements(loaderstate, pruned, roots, tidy.direct) } } roots = tidy.rootModules - _, err := tidy.Graph(ctx) + _, err := tidy.Graph(loaderstate, ctx) if err != nil { return nil, err } @@ -921,7 +921,7 @@ func tidyPrunedRoots(ctx context.Context, mainModule module.Version, old *Requir pkg := pkg q.Add(func() { skipModFile := true - _, _, _, _, err := importFromModules(ctx, pkg.path, tidy, nil, skipModFile) + _, _, _, _, err := importFromModules(loaderstate, ctx, pkg.path, tidy, nil, skipModFile) if _, ok := errors.AsType[*AmbiguousImportError](err); ok { disambiguateRoot.Store(pkg.mod, true) } @@ -938,8 +938,8 @@ func tidyPrunedRoots(ctx context.Context, mainModule module.Version, old *Requir if len(roots) > len(tidy.rootModules) { module.Sort(roots) - tidy = newRequirements(pruned, roots, tidy.direct) - _, err = tidy.Graph(ctx) + tidy = newRequirements(loaderstate, pruned, roots, tidy.direct) + _, err = tidy.Graph(loaderstate, ctx) if err != nil { return nil, err } @@ -999,7 +999,7 @@ func tidyPrunedRoots(ctx context.Context, mainModule module.Version, old *Requir // // (See https://golang.org/design/36460-lazy-module-loading#invariants for more // detail.) 
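Both tidyPrunedRoots above and updatePrunedRoots below test per-package flag bits such as pkgInAll and pkgIsRoot before deciding whether a package's module must become a root. A self-contained sketch of that bitmask pattern, with the flag type and the has method reduced to illustrative stand-ins:

    package main

    import "fmt"

    type loadFlags uint8

    const (
    	pkgInAll loadFlags = 1 << iota
    	pkgIsRoot
    	pkgImportsLoaded
    )

    // has reports whether every bit in want is set, matching the way the
    // loader code asks pkg.flags.has(pkgInAll) in the hunks around this note.
    func (f loadFlags) has(want loadFlags) bool { return f&want == want }

    func main() {
    	var f loadFlags
    	f |= pkgInAll | pkgIsRoot
    	fmt.Println(f.has(pkgInAll))             // true
    	fmt.Println(f.has(pkgImportsLoaded))     // false
    	fmt.Println(f.has(pkgInAll | pkgIsRoot)) // true
    }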
-func updatePrunedRoots(ctx context.Context, direct map[string]bool, rs *Requirements, pkgs []*loadPkg, add []module.Version, rootsImported bool) (*Requirements, error) { +func updatePrunedRoots(loaderstate *State, ctx context.Context, direct map[string]bool, rs *Requirements, pkgs []*loadPkg, add []module.Version, rootsImported bool) (*Requirements, error) { roots := rs.rootModules rootsUpgraded := false @@ -1009,7 +1009,7 @@ func updatePrunedRoots(ctx context.Context, direct map[string]bool, rs *Requirem // either pkgInAll or pkgIsRoot is included as a root.” needSort := false for _, pkg := range pkgs { - if !pkg.fromExternalModule() { + if !pkg.fromExternalModule(loaderstate) { // pkg was not loaded from a module dependency, so we don't need // to do anything special to maintain that dependency. continue @@ -1058,7 +1058,7 @@ func updatePrunedRoots(ctx context.Context, direct map[string]bool, rs *Requirem continue } - if _, ok := rs.rootSelected(pkg.mod.Path); ok { + if _, ok := rs.rootSelected(loaderstate, pkg.mod.Path); ok { // It is possible that the main module's go.mod file is incomplete or // otherwise erroneous — for example, perhaps the author forgot to 'git // add' their updated go.mod file after adding a new package import, or @@ -1094,7 +1094,7 @@ func updatePrunedRoots(ctx context.Context, direct map[string]bool, rs *Requirem } for _, m := range add { - if v, ok := rs.rootSelected(m.Path); !ok || gover.ModCompare(m.Path, v, m.Version) < 0 { + if v, ok := rs.rootSelected(loaderstate, m.Path); !ok || gover.ModCompare(m.Path, v, m.Version) < 0 { roots = append(roots, m) rootsUpgraded = true needSort = true @@ -1111,7 +1111,7 @@ func updatePrunedRoots(ctx context.Context, direct map[string]bool, rs *Requirem // We've added or upgraded one or more roots, so load the full module // graph so that we can update those roots to be consistent with other // requirements. - if mustHaveCompleteRequirements() { + if mustHaveCompleteRequirements(loaderstate) { // Our changes to the roots may have moved dependencies into or out of // the graph-pruning horizon, which could in turn change the selected // versions of other modules. (For pruned modules adding or removing an @@ -1119,9 +1119,9 @@ func updatePrunedRoots(ctx context.Context, direct map[string]bool, rs *Requirem return rs, errGoModDirty } - rs = newRequirements(pruned, roots, direct) + rs = newRequirements(loaderstate, pruned, roots, direct) var err error - mg, err = rs.Graph(ctx) + mg, err = rs.Graph(loaderstate, ctx) if err != nil { return rs, err } @@ -1135,20 +1135,20 @@ func updatePrunedRoots(ctx context.Context, direct map[string]bool, rs *Requirem // We've already loaded the full module graph, which includes the // requirements of all of the root modules — even the transitive // requirements, if they are unpruned! - mg, _ = rs.Graph(ctx) + mg, _ = rs.Graph(loaderstate, ctx) } else if cfg.BuildMod == "vendor" { // We can't spot-check the requirements of other modules because we // don't in general have their go.mod files available in the vendor // directory. (Fortunately this case is impossible, because mg.graph is // always non-nil in vendor mode!) panic("internal error: rs.graph is unexpectedly nil with -mod=vendor") - } else if !spotCheckRoots(ctx, rs, spotCheckRoot) { + } else if !spotCheckRoots(loaderstate, ctx, rs, spotCheckRoot) { // We spot-checked the explicit requirements of the roots that are // relevant to the packages we've loaded. 
Unfortunately, they're // inconsistent in some way; we need to load the full module graph // so that we can fix the roots properly. var err error - mg, err = rs.Graph(ctx) + mg, err = rs.Graph(loaderstate, ctx) if err != nil { return rs, err } @@ -1158,7 +1158,7 @@ func updatePrunedRoots(ctx context.Context, direct map[string]bool, rs *Requirem roots = make([]module.Version, 0, len(rs.rootModules)) rootsUpgraded = false inRootPaths := make(map[string]bool, len(rs.rootModules)+1) - for _, mm := range LoaderState.MainModules.Versions() { + for _, mm := range loaderstate.MainModules.Versions() { inRootPaths[mm.Path] = true } for _, m := range rs.rootModules { @@ -1184,7 +1184,7 @@ func updatePrunedRoots(ctx context.Context, direct map[string]bool, rs *Requirem var v string if mg == nil { - v, _ = rs.rootSelected(m.Path) + v, _ = rs.rootSelected(loaderstate, m.Path) } else { v = mg.Selected(m.Path) } @@ -1218,12 +1218,12 @@ func updatePrunedRoots(ctx context.Context, direct map[string]bool, rs *Requirem // preserve its cached ModuleGraph (if any). return rs, nil } - return newRequirements(pruned, roots, direct), nil + return newRequirements(loaderstate, pruned, roots, direct), nil } // spotCheckRoots reports whether the versions of the roots in rs satisfy the // explicit requirements of the modules in mods. -func spotCheckRoots(ctx context.Context, rs *Requirements, mods map[module.Version]bool) bool { +func spotCheckRoots(loaderstate *State, ctx context.Context, rs *Requirements, mods map[module.Version]bool) bool { ctx, cancel := context.WithCancel(ctx) defer cancel() @@ -1235,14 +1235,14 @@ func spotCheckRoots(ctx context.Context, rs *Requirements, mods map[module.Versi return } - summary, err := goModSummary(m) + summary, err := goModSummary(loaderstate, m) if err != nil { cancel() return } for _, r := range summary.require { - if v, ok := rs.rootSelected(r.Path); ok && gover.ModCompare(r.Path, v, r.Version) < 0 { + if v, ok := rs.rootSelected(loaderstate, r.Path); ok && gover.ModCompare(r.Path, v, r.Version) < 0 { cancel() return } @@ -1264,7 +1264,7 @@ func spotCheckRoots(ctx context.Context, rs *Requirements, mods map[module.Versi // the selected version of every module that provided or lexically could have // provided a package in pkgs, and includes the selected version of every such // module in direct as a root. -func tidyUnprunedRoots(ctx context.Context, mainModule module.Version, old *Requirements, pkgs []*loadPkg) (*Requirements, error) { +func tidyUnprunedRoots(loaderstate *State, ctx context.Context, mainModule module.Version, old *Requirements, pkgs []*loadPkg) (*Requirements, error) { var ( // keep is a set of modules that provide packages or are needed to // disambiguate imports. @@ -1292,16 +1292,16 @@ func tidyUnprunedRoots(ctx context.Context, mainModule module.Version, old *Requ // without its sum. See #47738. 
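spotCheckRoots above checks each candidate root's go.mod summary concurrently and cancels the shared context on the first inconsistency so the remaining lookups can stop early. Below is a standard-library-only sketch of that cancel-on-first-failure shape; the check function and the string items are placeholders for the real summary lookups, and the scheduling here is deliberately simpler than the real function's:

    package main

    import (
    	"context"
    	"fmt"
    	"sync"
    )

    // allPass runs check for every item concurrently and reports whether all
    // of them succeeded. The first failure cancels the shared context so
    // workers that have not started real work yet can bail out early.
    func allPass(ctx context.Context, items []string, check func(context.Context, string) bool) bool {
    	ctx, cancel := context.WithCancel(ctx)
    	defer cancel()

    	var wg sync.WaitGroup
    	var mu sync.Mutex
    	ok := true
    	for _, it := range items {
    		wg.Add(1)
    		go func(it string) {
    			defer wg.Done()
    			if ctx.Err() != nil {
    				return // already cancelled by an earlier failure
    			}
    			if !check(ctx, it) {
    				mu.Lock()
    				ok = false
    				mu.Unlock()
    				cancel()
    			}
    		}(it)
    	}
    	wg.Wait()
    	return ok
    }

    func main() {
    	good := func(context.Context, string) bool { return true }
    	fmt.Println(allPass(context.Background(), []string{"a", "b", "c"}, good)) // true
    }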
altMods = map[string]string{} ) - if v, ok := old.rootSelected("go"); ok { + if v, ok := old.rootSelected(loaderstate, "go"); ok { keep = append(keep, module.Version{Path: "go", Version: v}) keptPath["go"] = true } - if v, ok := old.rootSelected("toolchain"); ok { + if v, ok := old.rootSelected(loaderstate, "toolchain"); ok { keep = append(keep, module.Version{Path: "toolchain", Version: v}) keptPath["toolchain"] = true } for _, pkg := range pkgs { - if !pkg.fromExternalModule() { + if !pkg.fromExternalModule(loaderstate) { continue } if m := pkg.mod; !keptPath[m.Path] { @@ -1350,7 +1350,7 @@ func tidyUnprunedRoots(ctx context.Context, mainModule module.Version, old *Requ } } - return newRequirements(unpruned, min, old.direct), nil + return newRequirements(loaderstate, unpruned, min, old.direct), nil } // updateUnprunedRoots returns a set of root requirements that includes the selected @@ -1367,8 +1367,8 @@ func tidyUnprunedRoots(ctx context.Context, mainModule module.Version, old *Requ // by a dependency in add. // 4. Every version in add is selected at its given version unless upgraded by // (the dependencies of) an existing root or another module in add. -func updateUnprunedRoots(ctx context.Context, direct map[string]bool, rs *Requirements, add []module.Version) (*Requirements, error) { - mg, err := rs.Graph(ctx) +func updateUnprunedRoots(loaderstate *State, ctx context.Context, direct map[string]bool, rs *Requirements, add []module.Version) (*Requirements, error) { + mg, err := rs.Graph(loaderstate, ctx) if err != nil { // We can't ignore errors in the module graph even if the user passed the -e // flag to try to push past them. If we can't load the complete module @@ -1376,7 +1376,7 @@ func updateUnprunedRoots(ctx context.Context, direct map[string]bool, rs *Requir return rs, err } - if mustHaveCompleteRequirements() { + if mustHaveCompleteRequirements(loaderstate) { // Instead of actually updating the requirements, just check that no updates // are needed. if rs == nil { @@ -1396,7 +1396,7 @@ func updateUnprunedRoots(ctx context.Context, direct map[string]bool, rs *Requir } } for mPath := range direct { - if _, ok := rs.rootSelected(mPath); !ok { + if _, ok := rs.rootSelected(loaderstate, mPath); !ok { // Module m is supposed to be listed explicitly, but isn't. // // Note that this condition is also detected (and logged with more @@ -1435,7 +1435,7 @@ func updateUnprunedRoots(ctx context.Context, direct map[string]bool, rs *Requir // This is only for convenience and clarity for end users: in an unpruned module, // the choice of explicit vs. implicit dependency has no impact on MVS // selection (for itself or any other module). - keep := append(mg.BuildList()[LoaderState.MainModules.Len():], add...) + keep := append(mg.BuildList()[loaderstate.MainModules.Len():], add...) for _, m := range keep { if direct[m.Path] && !inRootPaths[m.Path] { rootPaths = append(rootPaths, m.Path) @@ -1444,14 +1444,14 @@ func updateUnprunedRoots(ctx context.Context, direct map[string]bool, rs *Requir } var roots []module.Version - for _, mainModule := range LoaderState.MainModules.Versions() { + for _, mainModule := range loaderstate.MainModules.Versions() { min, err := mvs.Req(mainModule, rootPaths, &mvsReqs{roots: keep}) if err != nil { return rs, err } roots = append(roots, min...) 
} - if LoaderState.MainModules.Len() > 1 { + if loaderstate.MainModules.Len() > 1 { gover.ModSort(roots) } if rs.pruning == unpruned && slices.Equal(roots, rs.rootModules) && maps.Equal(direct, rs.direct) { @@ -1460,12 +1460,12 @@ func updateUnprunedRoots(ctx context.Context, direct map[string]bool, rs *Requir return rs, nil } - return newRequirements(unpruned, roots, direct), nil + return newRequirements(loaderstate, unpruned, roots, direct), nil } // convertPruning returns a version of rs with the given pruning behavior. // If rs already has the given pruning, convertPruning returns rs unmodified. -func convertPruning(ctx context.Context, rs *Requirements, pruning modPruning) (*Requirements, error) { +func convertPruning(loaderstate *State, ctx context.Context, rs *Requirements, pruning modPruning) (*Requirements, error) { if rs.pruning == pruning { return rs, nil } else if rs.pruning == workspace || pruning == workspace { @@ -1477,7 +1477,7 @@ func convertPruning(ctx context.Context, rs *Requirements, pruning modPruning) ( // pruned module graph are a superset of the roots of an unpruned one, so // we don't need to add any new roots — we just need to drop the ones that // are redundant, which is exactly what updateUnprunedRoots does. - return updateUnprunedRoots(ctx, rs.direct, rs, nil) + return updateUnprunedRoots(loaderstate, ctx, rs.direct, rs, nil) } // We are converting an unpruned module to a pruned one. @@ -1487,9 +1487,9 @@ func convertPruning(ctx context.Context, rs *Requirements, pruning modPruning) ( // root set! “Include the transitive dependencies of every module in the build // list” is exactly what happens in a pruned module if we promote every module // in the build list to a root. - mg, err := rs.Graph(ctx) + mg, err := rs.Graph(loaderstate, ctx) if err != nil { return rs, err } - return newRequirements(pruned, mg.BuildList()[LoaderState.MainModules.Len():], rs.direct), nil + return newRequirements(loaderstate, pruned, mg.BuildList()[loaderstate.MainModules.Len():], rs.direct), nil } diff --git a/src/cmd/go/internal/modload/edit.go b/src/cmd/go/internal/modload/edit.go index 96d864545d042a..72d0f754224456 100644 --- a/src/cmd/go/internal/modload/edit.go +++ b/src/cmd/go/internal/modload/edit.go @@ -42,7 +42,7 @@ import ( // If pruning is enabled, the roots of the edited requirements include an // explicit entry for each module path in tryUpgrade, mustSelect, and the roots // of rs, unless the selected version for the module path is "none". -func editRequirements(ctx context.Context, rs *Requirements, tryUpgrade, mustSelect []module.Version) (edited *Requirements, changed bool, err error) { +func editRequirements(loaderstate *State, ctx context.Context, rs *Requirements, tryUpgrade, mustSelect []module.Version) (edited *Requirements, changed bool, err error) { if rs.pruning == workspace { panic("editRequirements cannot edit workspace requirements") } @@ -82,7 +82,7 @@ func editRequirements(ctx context.Context, rs *Requirements, tryUpgrade, mustSel } if rootPruning != rs.pruning { - rs, err = convertPruning(ctx, rs, rootPruning) + rs, err = convertPruning(loaderstate, ctx, rs, rootPruning) if err != nil { return orig, false, err } @@ -100,13 +100,13 @@ func editRequirements(ctx context.Context, rs *Requirements, tryUpgrade, mustSel // dependencies, so we need to treat everything in the build list as // potentially relevant — that is, as what would be a “root” in a module // with graph pruning enabled. 
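Several of the root-computation loops in these hunks build a rootPaths slice guarded by an inRootPaths map so that each module path is recorded once, in first-seen order, before being handed to mvs.Req. A minimal sketch of that order-preserving deduplication (the helper name is illustrative):

    package main

    import "fmt"

    // appendUnique keeps the first occurrence of each path and preserves the
    // original order, the same bookkeeping the loops above do with a
    // rootPaths slice plus an inRootPaths map.
    func appendUnique(paths []string) []string {
    	seen := make(map[string]bool, len(paths))
    	var out []string
    	for _, p := range paths {
    		if !seen[p] {
    			seen[p] = true
    			out = append(out, p)
    		}
    	}
    	return out
    }

    func main() {
    	fmt.Println(appendUnique([]string{"go", "toolchain", "go", "example.com/m"}))
    	// Output: [go toolchain example.com/m]
    }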
- mg, err := rs.Graph(ctx) + mg, err := rs.Graph(loaderstate, ctx) if err != nil { // If we couldn't load the graph, we don't know what its requirements were // to begin with, so we can't edit those requirements in a coherent way. return orig, false, err } - bl := mg.BuildList()[LoaderState.MainModules.Len():] + bl := mg.BuildList()[loaderstate.MainModules.Len():] selectedRoot = make(map[string]string, len(bl)) for _, m := range bl { selectedRoot[m.Path] = m.Version @@ -224,7 +224,7 @@ func editRequirements(ctx context.Context, rs *Requirements, tryUpgrade, mustSel // of every root. The upgraded roots are in addition to the original // roots, so we will have enough information to trace a path to each // conflict we discover from one or more of the original roots. - mg, upgradedRoots, err := extendGraph(ctx, rootPruning, roots, selectedRoot) + mg, upgradedRoots, err := extendGraph(loaderstate, ctx, rootPruning, roots, selectedRoot) if err != nil { if mg == nil { return orig, false, err @@ -391,7 +391,7 @@ func editRequirements(ctx context.Context, rs *Requirements, tryUpgrade, mustSel // the edit. We want to make sure we consider keeping it as-is, // even if it wouldn't normally be included. (For example, it might // be a pseudo-version or pre-release.) - origMG, _ := orig.Graph(ctx) + origMG, _ := orig.Graph(loaderstate, ctx) origV := origMG.Selected(m.Path) if conflict.Err != nil && origV == m.Version { @@ -415,7 +415,7 @@ func editRequirements(ctx context.Context, rs *Requirements, tryUpgrade, mustSel rejectedRoot[m] = true prev := m for { - prev, err = previousVersion(ctx, prev) + prev, err = previousVersion(loaderstate, ctx, prev) if gover.ModCompare(m.Path, m.Version, origV) > 0 && (gover.ModCompare(m.Path, prev.Version, origV) < 0 || err != nil) { // previousVersion skipped over origV. Insert it into the order. prev.Version = origV @@ -515,13 +515,13 @@ func editRequirements(ctx context.Context, rs *Requirements, tryUpgrade, mustSel // The modules in mustSelect are always promoted to be explicit. for _, m := range mustSelect { - if m.Version != "none" && !LoaderState.MainModules.Contains(m.Path) { + if m.Version != "none" && !loaderstate.MainModules.Contains(m.Path) { rootPaths = append(rootPaths, m.Path) } } for _, m := range roots { - if v, ok := rs.rootSelected(m.Path); ok && (v == m.Version || rs.direct[m.Path]) { + if v, ok := rs.rootSelected(loaderstate, m.Path); ok && (v == m.Version || rs.direct[m.Path]) { // m.Path was formerly a root, and either its version hasn't changed or // we believe that it provides a package directly imported by a package // or test in the main module. 
For now we'll assume that it is still @@ -532,7 +532,7 @@ func editRequirements(ctx context.Context, rs *Requirements, tryUpgrade, mustSel } } - roots, err = mvs.Req(LoaderState.MainModules.mustGetSingleMainModule(), rootPaths, &mvsReqs{roots: roots}) + roots, err = mvs.Req(loaderstate.MainModules.mustGetSingleMainModule(loaderstate), rootPaths, &mvsReqs{roots: roots}) if err != nil { return nil, false, err } @@ -563,7 +563,7 @@ func editRequirements(ctx context.Context, rs *Requirements, tryUpgrade, mustSel direct[m.Path] = true } } - edited = newRequirements(rootPruning, roots, direct) + edited = newRequirements(loaderstate, rootPruning, roots, direct) // If we ended up adding a dependency that upgrades our go version far enough // to activate pruning, we must convert the edited Requirements in order to @@ -578,7 +578,7 @@ func editRequirements(ctx context.Context, rs *Requirements, tryUpgrade, mustSel // those two modules will never be downgraded due to a conflict with any other // constraint. if rootPruning == unpruned { - if v, ok := edited.rootSelected("go"); ok && pruningForGoVersion(v) == pruned { + if v, ok := edited.rootSelected(loaderstate, "go"); ok && pruningForGoVersion(v) == pruned { // Since we computed the edit with the unpruned graph, and the pruned // graph is a strict subset of the unpruned graph, this conversion // preserves the exact (edited) build list that we already computed. @@ -587,7 +587,7 @@ func editRequirements(ctx context.Context, rs *Requirements, tryUpgrade, mustSel // the graph. 'go get' will check for that sort of transition and log a // message reminding the user how to clean up this mess we're about to // make. 😅 - edited, err = convertPruning(ctx, edited, pruned) + edited, err = convertPruning(loaderstate, ctx, edited, pruned) if err != nil { return orig, false, err } @@ -607,9 +607,9 @@ func editRequirements(ctx context.Context, rs *Requirements, tryUpgrade, mustSel // The extended graph is useful for diagnosing version conflicts: for each // selected module version, it can provide a complete path of requirements from // some root to that version. -func extendGraph(ctx context.Context, rootPruning modPruning, roots []module.Version, selectedRoot map[string]string) (mg *ModuleGraph, upgradedRoot map[module.Version]bool, err error) { +func extendGraph(loaderstate *State, ctx context.Context, rootPruning modPruning, roots []module.Version, selectedRoot map[string]string) (mg *ModuleGraph, upgradedRoot map[module.Version]bool, err error) { for { - mg, err = readModGraph(ctx, rootPruning, roots, upgradedRoot) + mg, err = readModGraph(loaderstate, ctx, rootPruning, roots, upgradedRoot) // We keep on going even if err is non-nil until we reach a steady state. // (Note that readModGraph returns a non-nil *ModuleGraph even in case of // errors.) The caller may be able to fix the errors by adjusting versions, diff --git a/src/cmd/go/internal/modload/import.go b/src/cmd/go/internal/modload/import.go index 392fe3edd77b52..a2a98289b0ed77 100644 --- a/src/cmd/go/internal/modload/import.go +++ b/src/cmd/go/internal/modload/import.go @@ -262,7 +262,7 @@ func (e *invalidImportError) Unwrap() error { // (https://go.dev/issue/56222) for modules with 'go' versions between 1.17 and // 1.20, preventing unnecessary go.sum churn and network access in those // modules. 
-func importFromModules(ctx context.Context, path string, rs *Requirements, mg *ModuleGraph, skipModFile bool) (m module.Version, modroot, dir string, altMods []module.Version, err error) { +func importFromModules(loaderstate *State, ctx context.Context, path string, rs *Requirements, mg *ModuleGraph, skipModFile bool) (m module.Version, modroot, dir string, altMods []module.Version, err error) { invalidf := func(format string, args ...interface{}) (module.Version, string, string, []module.Version, error) { return module.Version{}, "", "", nil, &invalidImportError{ importPath: path, @@ -299,12 +299,12 @@ func importFromModules(ctx context.Context, path string, rs *Requirements, mg *M // Is the package in the standard library? pathIsStd := search.IsStandardImportPath(path) if pathIsStd && modindex.IsStandardPackage(cfg.GOROOT, cfg.BuildContext.Compiler, path) { - for _, mainModule := range LoaderState.MainModules.Versions() { - if LoaderState.MainModules.InGorootSrc(mainModule) { - if dir, ok, err := dirInModule(path, LoaderState.MainModules.PathPrefix(mainModule), LoaderState.MainModules.ModRoot(mainModule), true); err != nil { - return module.Version{}, LoaderState.MainModules.ModRoot(mainModule), dir, nil, err + for _, mainModule := range loaderstate.MainModules.Versions() { + if loaderstate.MainModules.InGorootSrc(mainModule) { + if dir, ok, err := dirInModule(path, loaderstate.MainModules.PathPrefix(mainModule), loaderstate.MainModules.ModRoot(mainModule), true); err != nil { + return module.Version{}, loaderstate.MainModules.ModRoot(mainModule), dir, nil, err } else if ok { - return mainModule, LoaderState.MainModules.ModRoot(mainModule), dir, nil, nil + return mainModule, loaderstate.MainModules.ModRoot(mainModule), dir, nil, nil } } } @@ -321,10 +321,10 @@ func importFromModules(ctx context.Context, path string, rs *Requirements, mg *M // Everything must be in the main modules or the main module's or workspace's vendor directory. if cfg.BuildMod == "vendor" { var mainErr error - for _, mainModule := range LoaderState.MainModules.Versions() { - modRoot := LoaderState.MainModules.ModRoot(mainModule) + for _, mainModule := range loaderstate.MainModules.Versions() { + modRoot := loaderstate.MainModules.ModRoot(mainModule) if modRoot != "" { - dir, mainOK, err := dirInModule(path, LoaderState.MainModules.PathPrefix(mainModule), modRoot, true) + dir, mainOK, err := dirInModule(path, loaderstate.MainModules.PathPrefix(mainModule), modRoot, true) if mainErr == nil { mainErr = err } @@ -336,8 +336,8 @@ func importFromModules(ctx context.Context, path string, rs *Requirements, mg *M } } - if HasModRoot() { - vendorDir := VendorDir() + if HasModRoot(loaderstate) { + vendorDir := VendorDir(loaderstate) dir, inVendorDir, _ := dirInModule(path, "", vendorDir, false) if inVendorDir { readVendorList(vendorDir) @@ -345,13 +345,13 @@ func importFromModules(ctx context.Context, path string, rs *Requirements, mg *M // vendor/modules.txt does not exist or the user manually added directories to the vendor directory. // Go 1.23 and later require vendored packages to be present in modules.txt to be imported. 
_, ok := vendorPkgModule[path] - if ok || (gover.Compare(LoaderState.MainModules.GoVersion(), gover.ExplicitModulesTxtImportVersion) < 0) { + if ok || (gover.Compare(loaderstate.MainModules.GoVersion(loaderstate), gover.ExplicitModulesTxtImportVersion) < 0) { mods = append(mods, vendorPkgModule[path]) dirs = append(dirs, dir) roots = append(roots, vendorDir) } else { subCommand := "mod" - if inWorkspaceMode() { + if inWorkspaceMode(loaderstate) { subCommand = "work" } fmt.Fprintf(os.Stderr, "go: ignoring package %s which exists in the vendor directory but is missing from vendor/modules.txt. To sync the vendor directory run go %s vendor.\n", path, subCommand) @@ -399,7 +399,7 @@ func importFromModules(ctx context.Context, path string, rs *Requirements, mg *M ok bool ) if mg == nil { - v, ok = rs.rootSelected(prefix) + v, ok = rs.rootSelected(loaderstate, prefix) } else { v, ok = mg.Selected(prefix), true } @@ -408,7 +408,7 @@ func importFromModules(ctx context.Context, path string, rs *Requirements, mg *M } m := module.Version{Path: prefix, Version: v} - root, isLocal, err := fetch(ctx, m) + root, isLocal, err := fetch(loaderstate, ctx, m) if err != nil { if _, ok := errors.AsType[*sumMissingError](err); ok { // We are missing a sum needed to fetch a module in the build list. @@ -471,8 +471,8 @@ func importFromModules(ctx context.Context, path string, rs *Requirements, mg *M // If the module graph is pruned and this is a test-only dependency // of a package in "all", we didn't necessarily load that file // when we read the module graph, so do it now to be sure. - if !skipModFile && cfg.BuildMod != "vendor" && mods[0].Path != "" && !LoaderState.MainModules.Contains(mods[0].Path) { - if _, err := goModSummary(mods[0]); err != nil { + if !skipModFile && cfg.BuildMod != "vendor" && mods[0].Path != "" && !loaderstate.MainModules.Contains(mods[0].Path) { + if _, err := goModSummary(loaderstate, mods[0]); err != nil { return module.Version{}, "", "", nil, err } } @@ -483,7 +483,7 @@ func importFromModules(ctx context.Context, path string, rs *Requirements, mg *M // We checked the full module graph and still didn't find the // requested package. var queryErr error - if !HasModRoot() { + if !HasModRoot(loaderstate) { queryErr = ErrNoModRoot } return module.Version{}, "", "", nil, &ImportMissingError{Path: path, QueryErr: queryErr, isStd: pathIsStd} @@ -491,7 +491,7 @@ func importFromModules(ctx context.Context, path string, rs *Requirements, mg *M // So far we've checked the root dependencies. // Load the full module graph and try again. - mg, err = rs.Graph(ctx) + mg, err = rs.Graph(loaderstate, ctx) if err != nil { // We might be missing one or more transitive (implicit) dependencies from // the module graph, so we can't return an ImportMissingError here — one @@ -507,12 +507,12 @@ func importFromModules(ctx context.Context, path string, rs *Requirements, mg *M // // Unlike QueryPattern, queryImport prefers to add a replaced version of a // module *before* checking the proxies for a version to add. -func queryImport(ctx context.Context, path string, rs *Requirements) (module.Version, error) { +func queryImport(loaderstate *State, ctx context.Context, path string, rs *Requirements) (module.Version, error) { // To avoid spurious remote fetches, try the latest replacement for each // module (golang.org/issue/26241). var mods []module.Version - if LoaderState.MainModules != nil { // TODO(#48912): Ensure MainModules exists at this point, and remove the check. 
- for mp, mv := range LoaderState.MainModules.HighestReplaced() { + if loaderstate.MainModules != nil { // TODO(#48912): Ensure MainModules exists at this point, and remove the check. + for mp, mv := range loaderstate.MainModules.HighestReplaced() { if !maybeInModule(path, mp) { continue } @@ -528,7 +528,7 @@ func queryImport(ctx context.Context, path string, rs *Requirements) (module.Ver mv = module.ZeroPseudoVersion("v0") } } - mg, err := rs.Graph(ctx) + mg, err := rs.Graph(loaderstate, ctx) if err != nil { return module.Version{}, err } @@ -547,7 +547,7 @@ func queryImport(ctx context.Context, path string, rs *Requirements) (module.Ver return len(mods[i].Path) > len(mods[j].Path) }) for _, m := range mods { - root, isLocal, err := fetch(ctx, m) + root, isLocal, err := fetch(loaderstate, ctx, m) if err != nil { if _, ok := errors.AsType[*sumMissingError](err); ok { return module.Version{}, &ImportMissingSumError{importPath: path} @@ -567,7 +567,7 @@ func queryImport(ctx context.Context, path string, rs *Requirements) (module.Ver // The package path is not valid to fetch remotely, // so it can only exist in a replaced module, // and we know from the above loop that it is not. - replacement := Replacement(mods[0]) + replacement := Replacement(loaderstate, mods[0]) return module.Version{}, &PackageNotInModuleError{ Mod: mods[0], Query: "latest", @@ -607,12 +607,12 @@ func queryImport(ctx context.Context, path string, rs *Requirements) (module.Ver // and return m, dir, ImportMissingError. fmt.Fprintf(os.Stderr, "go: finding module for package %s\n", path) - mg, err := rs.Graph(ctx) + mg, err := rs.Graph(loaderstate, ctx) if err != nil { return module.Version{}, err } - candidates, err := QueryPackages(ctx, path, "latest", mg.Selected, CheckAllowed) + candidates, err := QueryPackages(loaderstate, ctx, path, "latest", mg.Selected, CheckAllowed) if err != nil { if errors.Is(err, fs.ErrNotExist) { // Return "cannot find module providing package […]" instead of whatever @@ -747,15 +747,15 @@ func dirInModule(path, mpath, mdir string, isLocal bool) (dir string, haveGoFile // // The isLocal return value reports whether the replacement, // if any, is local to the filesystem. -func fetch(ctx context.Context, mod module.Version) (dir string, isLocal bool, err error) { - if modRoot := LoaderState.MainModules.ModRoot(mod); modRoot != "" { +func fetch(loaderstate *State, ctx context.Context, mod module.Version) (dir string, isLocal bool, err error) { + if modRoot := loaderstate.MainModules.ModRoot(mod); modRoot != "" { return modRoot, true, nil } - if r := Replacement(mod); r.Path != "" { + if r := Replacement(loaderstate, mod); r.Path != "" { if r.Version == "" { dir = r.Path if !filepath.IsAbs(dir) { - dir = filepath.Join(replaceRelativeTo(), dir) + dir = filepath.Join(replaceRelativeTo(loaderstate), dir) } // Ensure that the replacement directory actually exists: // dirInModule does not report errors for missing modules, @@ -780,7 +780,7 @@ func fetch(ctx context.Context, mod module.Version) (dir string, isLocal bool, e mod = r } - if mustHaveSums() && !modfetch.HaveSum(mod) { + if mustHaveSums(loaderstate) && !modfetch.HaveSum(mod) { return "", false, module.VersionError(mod, &sumMissingError{}) } @@ -790,8 +790,8 @@ func fetch(ctx context.Context, mod module.Version) (dir string, isLocal bool, e // mustHaveSums reports whether we require that all checksums // needed to load or build packages are already present in the go.sum file. 
-func mustHaveSums() bool { - return HasModRoot() && cfg.BuildMod == "readonly" && !inWorkspaceMode() +func mustHaveSums(loaderstate *State) bool { + return HasModRoot(loaderstate) && cfg.BuildMod == "readonly" && !inWorkspaceMode(loaderstate) } type sumMissingError struct { diff --git a/src/cmd/go/internal/modload/import_test.go b/src/cmd/go/internal/modload/import_test.go index f6b8bb90992735..a5c4b837a0be12 100644 --- a/src/cmd/go/internal/modload/import_test.go +++ b/src/cmd/go/internal/modload/import_test.go @@ -69,12 +69,12 @@ func TestQueryImport(t *testing.T) { LoaderState.RootMode = NoRoot ctx := context.Background() - rs := LoadModFile(ctx) + rs := LoadModFile(LoaderState, ctx) for _, tt := range importTests { t.Run(strings.ReplaceAll(tt.path, "/", "_"), func(t *testing.T) { // Note that there is no build list, so Import should always fail. - m, err := queryImport(ctx, tt.path, rs) + m, err := queryImport(LoaderState, ctx, tt.path, rs) if tt.err == "" { if err != nil { diff --git a/src/cmd/go/internal/modload/init.go b/src/cmd/go/internal/modload/init.go index 31fe6327735736..20751528862ce2 100644 --- a/src/cmd/go/internal/modload/init.go +++ b/src/cmd/go/internal/modload/init.go @@ -64,7 +64,7 @@ func EnterModule(ctx context.Context, enterModroot string) { modfetch.Reset() LoaderState.modRoots = []string{enterModroot} - LoadModFile(ctx) + LoadModFile(LoaderState, ctx) } // EnterWorkspace enters workspace mode from module mode, applying the updated requirements to the main @@ -73,9 +73,9 @@ func EnterModule(ctx context.Context, enterModroot string) { // EnterWorkspace will modify the global state they depend on in a non-thread-safe way. func EnterWorkspace(ctx context.Context) (exit func(), err error) { // Find the identity of the main module that will be updated before we reset modload state. - mm := LoaderState.MainModules.mustGetSingleMainModule() + mm := LoaderState.MainModules.mustGetSingleMainModule(LoaderState) // Get the updated modfile we will use for that module. - _, _, updatedmodfile, err := UpdateGoModFromReqs(ctx, WriteOpts{}) + _, _, updatedmodfile, err := UpdateGoModFromReqs(LoaderState, ctx, WriteOpts{}) if err != nil { return nil, err } @@ -85,12 +85,12 @@ func EnterWorkspace(ctx context.Context) (exit func(), err error) { LoaderState.ForceUseModules = true // Load in workspace mode. - InitWorkfile() - LoadModFile(ctx) + InitWorkfile(LoaderState) + LoadModFile(LoaderState, ctx) // Update the content of the previous main module, and recompute the requirements. 
*LoaderState.MainModules.ModFile(mm) = *updatedmodfile - LoaderState.requirements = requirementsFromModFiles(ctx, LoaderState.MainModules.workFile, slices.Collect(maps.Values(LoaderState.MainModules.modFiles)), nil) + LoaderState.requirements = requirementsFromModFiles(LoaderState, ctx, LoaderState.MainModules.workFile, slices.Collect(maps.Values(LoaderState.MainModules.modFiles)), nil) return func() { setState(oldstate) @@ -182,12 +182,12 @@ func (mms *MainModuleSet) InGorootSrc(m module.Version) bool { return mms.inGorootSrc[m] } -func (mms *MainModuleSet) mustGetSingleMainModule() module.Version { +func (mms *MainModuleSet) mustGetSingleMainModule(loaderstate *State) module.Version { if mms == nil || len(mms.versions) == 0 { panic("internal error: mustGetSingleMainModule called in context with no main modules") } if len(mms.versions) != 1 { - if inWorkspaceMode() { + if inWorkspaceMode(loaderstate) { panic("internal error: mustGetSingleMainModule called in workspace mode") } else { panic("internal error: multiple main modules present outside of workspace mode") @@ -196,14 +196,14 @@ func (mms *MainModuleSet) mustGetSingleMainModule() module.Version { return mms.versions[0] } -func (mms *MainModuleSet) GetSingleIndexOrNil() *modFileIndex { +func (mms *MainModuleSet) GetSingleIndexOrNil(loaderstate *State) *modFileIndex { if mms == nil { return nil } if len(mms.versions) == 0 { return nil } - return mms.indices[mms.mustGetSingleMainModule()] + return mms.indices[mms.mustGetSingleMainModule(loaderstate)] } func (mms *MainModuleSet) Index(m module.Version) *modFileIndex { @@ -246,12 +246,12 @@ func (mms *MainModuleSet) HighestReplaced() map[string]string { // GoVersion returns the go version set on the single module, in module mode, // or the go.work file in workspace mode. -func (mms *MainModuleSet) GoVersion() string { - if inWorkspaceMode() { +func (mms *MainModuleSet) GoVersion(loaderstate *State) string { + if inWorkspaceMode(loaderstate) { return gover.FromGoWork(mms.workFile) } if mms != nil && len(mms.versions) == 1 { - f := mms.ModFile(mms.mustGetSingleMainModule()) + f := mms.ModFile(mms.mustGetSingleMainModule(loaderstate)) if f == nil { // Special case: we are outside a module, like 'go run x.go'. // Assume the local Go version. @@ -266,15 +266,15 @@ func (mms *MainModuleSet) GoVersion() string { // Godebugs returns the godebug lines set on the single module, in module mode, // or on the go.work file in workspace mode. // The caller must not modify the result. -func (mms *MainModuleSet) Godebugs() []*modfile.Godebug { - if inWorkspaceMode() { +func (mms *MainModuleSet) Godebugs(loaderstate *State) []*modfile.Godebug { + if inWorkspaceMode(loaderstate) { if mms.workFile != nil { return mms.workFile.Godebug } return nil } if mms != nil && len(mms.versions) == 1 { - f := mms.ModFile(mms.mustGetSingleMainModule()) + f := mms.ModFile(mms.mustGetSingleMainModule(loaderstate)) if f == nil { // Special case: we are outside a module, like 'go run x.go'. return nil @@ -315,16 +315,16 @@ const ( // To make permanent changes to the require statements // in go.mod, edit it before loading. 
func ModFile() *modfile.File { - Init() - modFile := LoaderState.MainModules.ModFile(LoaderState.MainModules.mustGetSingleMainModule()) + Init(LoaderState) + modFile := LoaderState.MainModules.ModFile(LoaderState.MainModules.mustGetSingleMainModule(LoaderState)) if modFile == nil { - die() + die(LoaderState) } return modFile } -func BinDir() string { - Init() +func BinDir(loaderstate *State) string { + Init(loaderstate) if cfg.GOBIN != "" { return cfg.GOBIN } @@ -337,13 +337,13 @@ func BinDir() string { // InitWorkfile initializes the workFilePath variable for commands that // operate in workspace mode. It should not be called by other commands, // for example 'go mod tidy', that don't operate in workspace mode. -func InitWorkfile() { +func InitWorkfile(loaderstate *State) { // Initialize fsys early because we need overlay to read go.work file. fips140.Init() if err := fsys.Init(); err != nil { base.Fatal(err) } - LoaderState.workFilePath = FindGoWork(base.Cwd()) + loaderstate.workFilePath = FindGoWork(loaderstate, base.Cwd()) } // FindGoWork returns the name of the go.work file for this command, @@ -351,8 +351,8 @@ func InitWorkfile() { // Most code should use Init and Enabled rather than use this directly. // It is exported mainly for Go toolchain switching, which must process // the go.work very early at startup. -func FindGoWork(wd string) string { - if LoaderState.RootMode == NoRoot { +func FindGoWork(loaderstate *State, wd string) string { + if loaderstate.RootMode == NoRoot { return "" } @@ -371,8 +371,8 @@ func FindGoWork(wd string) string { // WorkFilePath returns the absolute path of the go.work file, or "" if not in // workspace mode. WorkFilePath must be called after InitWorkfile. -func WorkFilePath() string { - return LoaderState.workFilePath +func WorkFilePath(loaderstate *State) string { + return loaderstate.workFilePath } // Reset clears all the initialized, cached state about the use of modules, @@ -451,11 +451,11 @@ var LoaderState = NewState() // current module (if any), sets environment variables for Git subprocesses, and // configures the cfg, codehost, load, modfetch, and search packages for use // with modules. -func Init() { - if LoaderState.initialized { +func Init(loaderstate *State) { + if loaderstate.initialized { return } - LoaderState.initialized = true + loaderstate.initialized = true fips140.Init() @@ -468,11 +468,11 @@ func Init() { default: base.Fatalf("go: unknown environment setting GO111MODULE=%s", env) case "auto": - mustUseModules = LoaderState.ForceUseModules + mustUseModules = loaderstate.ForceUseModules case "on", "": mustUseModules = true case "off": - if LoaderState.ForceUseModules { + if loaderstate.ForceUseModules { base.Fatalf("go: modules disabled by GO111MODULE=off; see 'go help modules'") } mustUseModules = false @@ -496,15 +496,15 @@ func Init() { if os.Getenv("GCM_INTERACTIVE") == "" { os.Setenv("GCM_INTERACTIVE", "never") } - if LoaderState.modRoots != nil { + if loaderstate.modRoots != nil { // modRoot set before Init was called ("go mod init" does this). // No need to search for go.mod. - } else if LoaderState.RootMode == NoRoot { + } else if loaderstate.RootMode == NoRoot { if cfg.ModFile != "" && !base.InGOFLAGS("-modfile") { base.Fatalf("go: -modfile cannot be used with commands that ignore the current module") } - LoaderState.modRoots = nil - } else if LoaderState.workFilePath != "" { + loaderstate.modRoots = nil + } else if loaderstate.workFilePath != "" { // We're in workspace mode, which implies module mode. 
if cfg.ModFile != "" { base.Fatalf("go: -modfile cannot be used in workspace mode") @@ -514,7 +514,7 @@ func Init() { if cfg.ModFile != "" { base.Fatalf("go: cannot find main module, but -modfile was set.\n\t-modfile cannot be used to set the module root directory.") } - if LoaderState.RootMode == NeedRoot { + if loaderstate.RootMode == NeedRoot { base.Fatal(ErrNoModRoot) } if !mustUseModules { @@ -529,14 +529,14 @@ func Init() { // It's a bit of a peculiar thing to disallow but quite mysterious // when it happens. See golang.org/issue/26708. fmt.Fprintf(os.Stderr, "go: warning: ignoring go.mod in system temp root %v\n", os.TempDir()) - if LoaderState.RootMode == NeedRoot { + if loaderstate.RootMode == NeedRoot { base.Fatal(ErrNoModRoot) } if !mustUseModules { return } } else { - LoaderState.modRoots = []string{modRoot} + loaderstate.modRoots = []string{modRoot} } } if cfg.ModFile != "" && !strings.HasSuffix(cfg.ModFile, ".mod") { @@ -545,13 +545,13 @@ func Init() { // We're in module mode. Set any global variables that need to be set. cfg.ModulesEnabled = true - setDefaultBuildMod() + setDefaultBuildMod(loaderstate) list := filepath.SplitList(cfg.BuildContext.GOPATH) if len(list) > 0 && list[0] != "" { gopath = list[0] if _, err := fsys.Stat(filepath.Join(gopath, "go.mod")); err == nil { fmt.Fprintf(os.Stderr, "go: warning: ignoring go.mod in $GOPATH %v\n", gopath) - if LoaderState.RootMode == NeedRoot { + if loaderstate.RootMode == NeedRoot { base.Fatal(ErrNoModRoot) } if !mustUseModules { @@ -622,49 +622,49 @@ func FindGoMod(wd string) string { // If modules are enabled but there is no main module, Enabled returns true // and then the first use of module information will call die // (usually through MustModRoot). -func Enabled() bool { - Init() - return LoaderState.modRoots != nil || cfg.ModulesEnabled +func Enabled(loaderstate *State) bool { + Init(loaderstate) + return loaderstate.modRoots != nil || cfg.ModulesEnabled } -func VendorDir() string { - if inWorkspaceMode() { - return filepath.Join(filepath.Dir(WorkFilePath()), "vendor") +func VendorDir(loaderstate *State) string { + if inWorkspaceMode(loaderstate) { + return filepath.Join(filepath.Dir(WorkFilePath(loaderstate)), "vendor") } // Even if -mod=vendor, we could be operating with no mod root (and thus no // vendor directory). As long as there are no dependencies that is expected // to work. See script/vendor_outside_module.txt. - modRoot := LoaderState.MainModules.ModRoot(LoaderState.MainModules.mustGetSingleMainModule()) + modRoot := loaderstate.MainModules.ModRoot(loaderstate.MainModules.mustGetSingleMainModule(loaderstate)) if modRoot == "" { panic("vendor directory does not exist when in single module mode outside of a module") } return filepath.Join(modRoot, "vendor") } -func inWorkspaceMode() bool { - if !LoaderState.initialized { +func inWorkspaceMode(loaderstate *State) bool { + if !loaderstate.initialized { panic("inWorkspaceMode called before modload.Init called") } - if !Enabled() { + if !Enabled(loaderstate) { return false } - return LoaderState.workFilePath != "" + return loaderstate.workFilePath != "" } // HasModRoot reports whether a main module or main modules are present. // HasModRoot may return false even if Enabled returns true: for example, 'get' // does not require a main module. 
-func HasModRoot() bool { - Init() - return LoaderState.modRoots != nil +func HasModRoot(loaderstate *State) bool { + Init(loaderstate) + return loaderstate.modRoots != nil } // MustHaveModRoot checks that a main module or main modules are present, // and calls base.Fatalf if there are no main modules. func MustHaveModRoot() { - Init() - if !HasModRoot() { - die() + Init(LoaderState) + if !HasModRoot(LoaderState) { + die(LoaderState) } } @@ -686,11 +686,11 @@ func modFilePath(modRoot string) string { return filepath.Join(modRoot, "go.mod") } -func die() { +func die(loaderstate *State) { if cfg.Getenv("GO111MODULE") == "off" { base.Fatalf("go: modules disabled by GO111MODULE=off; see 'go help modules'") } - if !inWorkspaceMode() { + if !inWorkspaceMode(loaderstate) { if dir, name := findAltConfig(base.Cwd()); dir != "" { rel, err := filepath.Rel(base.Cwd(), dir) if err != nil { @@ -711,7 +711,7 @@ func die() { type noMainModulesError struct{} func (e noMainModulesError) Error() string { - if inWorkspaceMode() { + if inWorkspaceMode(LoaderState) { return "no modules were found in the current workspace; see 'go help work'" } return "go.mod file not found in current directory or any parent directory; see 'go help modules'" @@ -868,33 +868,33 @@ func UpdateWorkFile(wf *modfile.WorkFile) { // other, but unlike LoadModGraph does not load the full module graph or check // it for global consistency. Most callers outside of the modload package should // use LoadModGraph instead. -func LoadModFile(ctx context.Context) *Requirements { - rs, err := loadModFile(ctx, nil) +func LoadModFile(loaderstate *State, ctx context.Context) *Requirements { + rs, err := loadModFile(loaderstate, ctx, nil) if err != nil { base.Fatal(err) } return rs } -func loadModFile(ctx context.Context, opts *PackageOpts) (*Requirements, error) { - if LoaderState.requirements != nil { - return LoaderState.requirements, nil +func loadModFile(loaderstate *State, ctx context.Context, opts *PackageOpts) (*Requirements, error) { + if loaderstate.requirements != nil { + return loaderstate.requirements, nil } - Init() + Init(loaderstate) var workFile *modfile.WorkFile - if inWorkspaceMode() { + if inWorkspaceMode(loaderstate) { var err error - workFile, LoaderState.modRoots, err = LoadWorkFile(LoaderState.workFilePath) + workFile, loaderstate.modRoots, err = LoadWorkFile(loaderstate.workFilePath) if err != nil { return nil, err } - for _, modRoot := range LoaderState.modRoots { + for _, modRoot := range loaderstate.modRoots { sumFile := strings.TrimSuffix(modFilePath(modRoot), ".mod") + ".sum" modfetch.WorkspaceGoSumFiles = append(modfetch.WorkspaceGoSumFiles, sumFile) } - modfetch.GoSumFile = LoaderState.workFilePath + ".sum" - } else if len(LoaderState.modRoots) == 0 { + modfetch.GoSumFile = loaderstate.workFilePath + ".sum" + } else if len(loaderstate.modRoots) == 0 { // We're in module mode, but not inside a module. // // Commands like 'go build', 'go run', 'go list' have no go.mod file to @@ -913,25 +913,25 @@ func loadModFile(ctx context.Context, opts *PackageOpts) (*Requirements, error) // // See golang.org/issue/32027. 
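loadModFile above returns loaderstate.requirements immediately when it has already been computed, so the expensive module-file work runs at most once per State. A simplified sketch of that per-state memoization, with stand-in State and Requirements types rather than the real ones:

    package main

    import "fmt"

    // Requirements is a stand-in for the computed module requirements.
    type Requirements struct{ roots []string }

    // State carries the cached result, playing the role of
    // loaderstate.requirements in the hunk above.
    type State struct{ requirements *Requirements }

    // loadRequirements computes the requirements once per State and returns
    // the cached value on every later call; the loading itself is faked.
    func loadRequirements(s *State) *Requirements {
    	if s.requirements != nil {
    		return s.requirements
    	}
    	s.requirements = &Requirements{roots: []string{"go"}}
    	return s.requirements
    }

    func main() {
    	s := &State{}
    	a := loadRequirements(s)
    	b := loadRequirements(s)
    	fmt.Println(a == b) // true: the second call reuses the cached value
    }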
} else { - modfetch.GoSumFile = strings.TrimSuffix(modFilePath(LoaderState.modRoots[0]), ".mod") + ".sum" + modfetch.GoSumFile = strings.TrimSuffix(modFilePath(loaderstate.modRoots[0]), ".mod") + ".sum" } - if len(LoaderState.modRoots) == 0 { + if len(loaderstate.modRoots) == 0 { // TODO(#49228): Instead of creating a fake module with an empty modroot, // make MainModules.Len() == 0 mean that we're in module mode but not inside // any module. mainModule := module.Version{Path: "command-line-arguments"} - LoaderState.MainModules = makeMainModules([]module.Version{mainModule}, []string{""}, []*modfile.File{nil}, []*modFileIndex{nil}, nil) + loaderstate.MainModules = makeMainModules(loaderstate, []module.Version{mainModule}, []string{""}, []*modfile.File{nil}, []*modFileIndex{nil}, nil) var ( goVersion string pruning modPruning roots []module.Version direct = map[string]bool{"go": true} ) - if inWorkspaceMode() { + if inWorkspaceMode(loaderstate) { // Since we are in a workspace, the Go version for the synthetic // "command-line-arguments" module must not exceed the Go version // for the workspace. - goVersion = LoaderState.MainModules.GoVersion() + goVersion = loaderstate.MainModules.GoVersion(loaderstate) pruning = workspace roots = []module.Version{ mainModule, @@ -947,26 +947,26 @@ func loadModFile(ctx context.Context, opts *PackageOpts) (*Requirements, error) } } rawGoVersion.Store(mainModule, goVersion) - LoaderState.requirements = newRequirements(pruning, roots, direct) + loaderstate.requirements = newRequirements(loaderstate, pruning, roots, direct) if cfg.BuildMod == "vendor" { // For issue 56536: Some users may have GOFLAGS=-mod=vendor set. // Make sure it behaves as though the fake module is vendored // with no dependencies. - LoaderState.requirements.initVendor(nil) + loaderstate.requirements.initVendor(loaderstate, nil) } - return LoaderState.requirements, nil + return loaderstate.requirements, nil } var modFiles []*modfile.File var mainModules []module.Version var indices []*modFileIndex var errs []error - for _, modroot := range LoaderState.modRoots { + for _, modroot := range loaderstate.modRoots { gomod := modFilePath(modroot) var fixed bool - data, f, err := ReadModFile(gomod, fixVersion(ctx, &fixed)) + data, f, err := ReadModFile(gomod, fixVersion(loaderstate, ctx, &fixed)) if err != nil { - if inWorkspaceMode() { + if inWorkspaceMode(loaderstate) { if tooNew, ok := err.(*gover.TooNewError); ok && !strings.HasPrefix(cfg.CmdName, "work ") { // Switching to a newer toolchain won't help - the go.work has the wrong version. // Report this more specific error, unless we are a command like 'go work use' @@ -981,7 +981,7 @@ func loadModFile(ctx context.Context, opts *PackageOpts) (*Requirements, error) errs = append(errs, err) continue } - if inWorkspaceMode() && !strings.HasPrefix(cfg.CmdName, "work ") { + if inWorkspaceMode(loaderstate) && !strings.HasPrefix(cfg.CmdName, "work ") { // Refuse to use workspace if its go version is too old. // Disable this check if we are a workspace command like work use or work sync, // which will fix the problem. @@ -993,7 +993,7 @@ func loadModFile(ctx context.Context, opts *PackageOpts) (*Requirements, error) } } - if !inWorkspaceMode() { + if !inWorkspaceMode(loaderstate) { ok := true for _, g := range f.Godebug { if err := CheckGodebug("godebug", g.Key, g.Value); err != nil { @@ -1022,45 +1022,45 @@ func loadModFile(ctx context.Context, opts *PackageOpts) (*Requirements, error) return nil, errors.Join(errs...) 
} - LoaderState.MainModules = makeMainModules(mainModules, LoaderState.modRoots, modFiles, indices, workFile) - setDefaultBuildMod() // possibly enable automatic vendoring - rs := requirementsFromModFiles(ctx, workFile, modFiles, opts) + loaderstate.MainModules = makeMainModules(loaderstate, mainModules, loaderstate.modRoots, modFiles, indices, workFile) + setDefaultBuildMod(loaderstate) // possibly enable automatic vendoring + rs := requirementsFromModFiles(loaderstate, ctx, workFile, modFiles, opts) if cfg.BuildMod == "vendor" { - readVendorList(VendorDir()) - versions := LoaderState.MainModules.Versions() + readVendorList(VendorDir(loaderstate)) + versions := loaderstate.MainModules.Versions() indexes := make([]*modFileIndex, 0, len(versions)) modFiles := make([]*modfile.File, 0, len(versions)) modRoots := make([]string, 0, len(versions)) for _, m := range versions { - indexes = append(indexes, LoaderState.MainModules.Index(m)) - modFiles = append(modFiles, LoaderState.MainModules.ModFile(m)) - modRoots = append(modRoots, LoaderState.MainModules.ModRoot(m)) + indexes = append(indexes, loaderstate.MainModules.Index(m)) + modFiles = append(modFiles, loaderstate.MainModules.ModFile(m)) + modRoots = append(modRoots, loaderstate.MainModules.ModRoot(m)) } - checkVendorConsistency(indexes, modFiles, modRoots) - rs.initVendor(vendorList) + checkVendorConsistency(loaderstate, indexes, modFiles, modRoots) + rs.initVendor(loaderstate, vendorList) } - if inWorkspaceMode() { + if inWorkspaceMode(loaderstate) { // We don't need to update the mod file so return early. - LoaderState.requirements = rs + loaderstate.requirements = rs return rs, nil } - mainModule := LoaderState.MainModules.mustGetSingleMainModule() + mainModule := loaderstate.MainModules.mustGetSingleMainModule(loaderstate) - if rs.hasRedundantRoot() { + if rs.hasRedundantRoot(loaderstate) { // If any module path appears more than once in the roots, we know that the // go.mod file needs to be updated even though we have not yet loaded any // transitive dependencies. var err error - rs, err = updateRoots(ctx, rs.direct, rs, nil, nil, false) + rs, err = updateRoots(loaderstate, ctx, rs.direct, rs, nil, nil, false) if err != nil { return nil, err } } - if LoaderState.MainModules.Index(mainModule).goVersion == "" && rs.pruning != workspace { + if loaderstate.MainModules.Index(mainModule).goVersion == "" && rs.pruning != workspace { // TODO(#45551): Do something more principled instead of checking // cfg.CmdName directly here. if cfg.BuildMod == "mod" && cfg.CmdName != "mod graph" && cfg.CmdName != "mod why" { @@ -1069,8 +1069,8 @@ func loadModFile(ctx context.Context, opts *PackageOpts) (*Requirements, error) if opts != nil && opts.TidyGoVersion != "" { v = opts.TidyGoVersion } - addGoStmt(LoaderState.MainModules.ModFile(mainModule), mainModule, v) - rs = overrideRoots(ctx, rs, []module.Version{{Path: "go", Version: v}}) + addGoStmt(loaderstate.MainModules.ModFile(mainModule), mainModule, v) + rs = overrideRoots(loaderstate, ctx, rs, []module.Version{{Path: "go", Version: v}}) // We need to add a 'go' version to the go.mod file, but we must assume // that its existing contents match something between Go 1.11 and 1.16. @@ -1079,7 +1079,7 @@ func loadModFile(ctx context.Context, opts *PackageOpts) (*Requirements, error) // requirements to support pruning. 
if gover.Compare(v, gover.ExplicitIndirectVersion) >= 0 { var err error - rs, err = convertPruning(ctx, rs, pruned) + rs, err = convertPruning(loaderstate, ctx, rs, pruned) if err != nil { return nil, err } @@ -1089,8 +1089,8 @@ func loadModFile(ctx context.Context, opts *PackageOpts) (*Requirements, error) } } - LoaderState.requirements = rs - return LoaderState.requirements, nil + loaderstate.requirements = rs + return loaderstate.requirements, nil } func errWorkTooOld(gomod string, wf *modfile.WorkFile, goVers string) error { @@ -1126,7 +1126,7 @@ func CheckReservedModulePath(path string) error { func CreateModFile(ctx context.Context, modPath string) { modRoot := base.Cwd() LoaderState.modRoots = []string{modRoot} - Init() + Init(LoaderState) modFilePath := modFilePath(modRoot) if _, err := fsys.Stat(modFilePath); err == nil { base.Fatalf("go: %s already exists", modFilePath) @@ -1162,16 +1162,16 @@ func CreateModFile(ctx context.Context, modPath string) { fmt.Fprintf(os.Stderr, "go: creating new go.mod: module %s\n", modPath) modFile := new(modfile.File) modFile.AddModuleStmt(modPath) - LoaderState.MainModules = makeMainModules([]module.Version{modFile.Module.Mod}, []string{modRoot}, []*modfile.File{modFile}, []*modFileIndex{nil}, nil) + LoaderState.MainModules = makeMainModules(LoaderState, []module.Version{modFile.Module.Mod}, []string{modRoot}, []*modfile.File{modFile}, []*modFileIndex{nil}, nil) addGoStmt(modFile, modFile.Module.Mod, gover.Local()) // Add the go directive before converted module requirements. - rs := requirementsFromModFiles(ctx, nil, []*modfile.File{modFile}, nil) - rs, err := updateRoots(ctx, rs.direct, rs, nil, nil, false) + rs := requirementsFromModFiles(LoaderState, ctx, nil, []*modfile.File{modFile}, nil) + rs, err := updateRoots(LoaderState, ctx, rs.direct, rs, nil, nil, false) if err != nil { base.Fatal(err) } LoaderState.requirements = rs - if err := commitRequirements(ctx, WriteOpts{}); err != nil { + if err := commitRequirements(LoaderState, ctx, WriteOpts{}); err != nil { base.Fatal(err) } @@ -1206,7 +1206,7 @@ func CreateModFile(ctx context.Context, modPath string) { // and does nothing for versions that already appear to be canonical. // // The VersionFixer sets 'fixed' if it ever returns a non-canonical version. -func fixVersion(ctx context.Context, fixed *bool) modfile.VersionFixer { +func fixVersion(loaderstate *State, ctx context.Context, fixed *bool) modfile.VersionFixer { return func(path, vers string) (resolved string, err error) { defer func() { if err == nil && resolved != vers { @@ -1239,7 +1239,7 @@ func fixVersion(ctx context.Context, fixed *bool) modfile.VersionFixer { return vers, nil } - info, err := Query(ctx, path, vers, "", nil) + info, err := Query(loaderstate, ctx, path, vers, "", nil) if err != nil { return "", err } @@ -1254,8 +1254,8 @@ func fixVersion(ctx context.Context, fixed *bool) modfile.VersionFixer { // // This function affects the default cfg.BuildMod when outside of a module, // so it can only be called prior to Init. -func AllowMissingModuleImports() { - if LoaderState.initialized { +func AllowMissingModuleImports(loaderstate *State) { + if loaderstate.initialized { panic("AllowMissingModuleImports after Init") } allowMissingModuleImports = true @@ -1263,7 +1263,7 @@ func AllowMissingModuleImports() { // makeMainModules creates a MainModuleSet and associated variables according to // the given main modules. 
-func makeMainModules(ms []module.Version, rootDirs []string, modFiles []*modfile.File, indices []*modFileIndex, workFile *modfile.WorkFile) *MainModuleSet { +func makeMainModules(loaderstate *State, ms []module.Version, rootDirs []string, modFiles []*modfile.File, indices []*modFileIndex, workFile *modfile.WorkFile) *MainModuleSet { for _, m := range ms { if m.Version != "" { panic("mainModulesCalled with module.Version with non empty Version field: " + fmt.Sprintf("%#v", m)) @@ -1338,7 +1338,7 @@ func makeMainModules(ms []module.Version, rootDirs []string, modFiles []*modfile continue } var newV module.Version = r.New - if WorkFilePath() != "" && newV.Version == "" && !filepath.IsAbs(newV.Path) { + if WorkFilePath(loaderstate) != "" && newV.Version == "" && !filepath.IsAbs(newV.Path) { // Since we are in a workspace, we may be loading replacements from // multiple go.mod files. Relative paths in those replacement are // relative to the go.mod file, not the workspace, so the same string @@ -1380,14 +1380,14 @@ func makeMainModules(ms []module.Version, rootDirs []string, modFiles []*modfile // requirementsFromModFiles returns the set of non-excluded requirements from // the global modFile. -func requirementsFromModFiles(ctx context.Context, workFile *modfile.WorkFile, modFiles []*modfile.File, opts *PackageOpts) *Requirements { +func requirementsFromModFiles(loaderstate *State, ctx context.Context, workFile *modfile.WorkFile, modFiles []*modfile.File, opts *PackageOpts) *Requirements { var roots []module.Version direct := map[string]bool{} var pruning modPruning - if inWorkspaceMode() { + if inWorkspaceMode(loaderstate) { pruning = workspace - roots = make([]module.Version, len(LoaderState.MainModules.Versions()), 2+len(LoaderState.MainModules.Versions())) - copy(roots, LoaderState.MainModules.Versions()) + roots = make([]module.Version, len(loaderstate.MainModules.Versions()), 2+len(loaderstate.MainModules.Versions())) + copy(roots, loaderstate.MainModules.Versions()) goVersion := gover.FromGoWork(workFile) var toolchain string if workFile.Toolchain != nil { @@ -1396,16 +1396,16 @@ func requirementsFromModFiles(ctx context.Context, workFile *modfile.WorkFile, m roots = appendGoAndToolchainRoots(roots, goVersion, toolchain, direct) direct = directRequirements(modFiles) } else { - pruning = pruningForGoVersion(LoaderState.MainModules.GoVersion()) + pruning = pruningForGoVersion(loaderstate.MainModules.GoVersion(loaderstate)) if len(modFiles) != 1 { panic(fmt.Errorf("requirementsFromModFiles called with %v modfiles outside workspace mode", len(modFiles))) } modFile := modFiles[0] - roots, direct = rootsFromModFile(LoaderState.MainModules.mustGetSingleMainModule(), modFile, withToolchainRoot) + roots, direct = rootsFromModFile(loaderstate, loaderstate.MainModules.mustGetSingleMainModule(loaderstate), modFile, withToolchainRoot) } gover.ModSort(roots) - rs := newRequirements(pruning, roots, direct) + rs := newRequirements(loaderstate, pruning, roots, direct) return rs } @@ -1428,7 +1428,7 @@ func directRequirements(modFiles []*modfile.File) map[string]bool { return direct } -func rootsFromModFile(m module.Version, modFile *modfile.File, addToolchainRoot addToolchainRoot) (roots []module.Version, direct map[string]bool) { +func rootsFromModFile(loaderstate *State, m module.Version, modFile *modfile.File, addToolchainRoot addToolchainRoot) (roots []module.Version, direct map[string]bool) { direct = make(map[string]bool) padding := 2 // Add padding for the toolchain and go version, added upon 
return. if !addToolchainRoot { @@ -1436,7 +1436,7 @@ func rootsFromModFile(m module.Version, modFile *modfile.File, addToolchainRoot } roots = make([]module.Version, 0, padding+len(modFile.Require)) for _, r := range modFile.Require { - if index := LoaderState.MainModules.Index(m); index != nil && index.exclude[r.Mod] { + if index := loaderstate.MainModules.Index(m); index != nil && index.exclude[r.Mod] { if cfg.BuildMod == "mod" { fmt.Fprintf(os.Stderr, "go: dropping requirement on excluded version %s %s\n", r.Mod.Path, r.Mod.Version) } else { @@ -1477,9 +1477,9 @@ func appendGoAndToolchainRoots(roots []module.Version, goVersion, toolchain stri // setDefaultBuildMod sets a default value for cfg.BuildMod if the -mod flag // wasn't provided. setDefaultBuildMod may be called multiple times. -func setDefaultBuildMod() { +func setDefaultBuildMod(loaderstate *State) { if cfg.BuildModExplicit { - if inWorkspaceMode() && cfg.BuildMod != "readonly" && cfg.BuildMod != "vendor" { + if inWorkspaceMode(loaderstate) && cfg.BuildMod != "readonly" && cfg.BuildMod != "vendor" { switch cfg.CmdName { case "work sync", "mod graph", "mod verify", "mod why": // These commands run with BuildMod set to mod, but they don't take the @@ -1514,7 +1514,7 @@ func setDefaultBuildMod() { cfg.BuildMod = "readonly" return } - if LoaderState.modRoots == nil { + if loaderstate.modRoots == nil { if allowMissingModuleImports { cfg.BuildMod = "mod" } else { @@ -1523,29 +1523,29 @@ func setDefaultBuildMod() { return } - if len(LoaderState.modRoots) >= 1 { + if len(loaderstate.modRoots) >= 1 { var goVersion string var versionSource string - if inWorkspaceMode() { + if inWorkspaceMode(loaderstate) { versionSource = "go.work" - if wfg := LoaderState.MainModules.WorkFile().Go; wfg != nil { + if wfg := loaderstate.MainModules.WorkFile().Go; wfg != nil { goVersion = wfg.Version } } else { versionSource = "go.mod" - index := LoaderState.MainModules.GetSingleIndexOrNil() + index := loaderstate.MainModules.GetSingleIndexOrNil(loaderstate) if index != nil { goVersion = index.goVersion } } vendorDir := "" - if LoaderState.workFilePath != "" { - vendorDir = filepath.Join(filepath.Dir(LoaderState.workFilePath), "vendor") + if loaderstate.workFilePath != "" { + vendorDir = filepath.Join(filepath.Dir(loaderstate.workFilePath), "vendor") } else { - if len(LoaderState.modRoots) != 1 { - panic(fmt.Errorf("outside workspace mode, but have %v modRoots", LoaderState.modRoots)) + if len(loaderstate.modRoots) != 1 { + panic(fmt.Errorf("outside workspace mode, but have %v modRoots", loaderstate.modRoots)) } - vendorDir = filepath.Join(LoaderState.modRoots[0], "vendor") + vendorDir = filepath.Join(loaderstate.modRoots[0], "vendor") } if fi, err := fsys.Stat(vendorDir); err == nil && fi.IsDir() { if goVersion != "" { @@ -1613,8 +1613,8 @@ func modulesTextIsForWorkspace(vendorDir string) (bool, error) { return false, nil } -func mustHaveCompleteRequirements() bool { - return cfg.BuildMod != "mod" && !inWorkspaceMode() +func mustHaveCompleteRequirements(loaderstate *State) bool { + return cfg.BuildMod != "mod" && !inWorkspaceMode(loaderstate) } // addGoStmt adds a go directive to the go.mod file if it does not already @@ -1809,21 +1809,21 @@ type WriteOpts struct { // WriteGoMod writes the current build list back to go.mod. 
func WriteGoMod(ctx context.Context, opts WriteOpts) error { - LoaderState.requirements = LoadModFile(ctx) - return commitRequirements(ctx, opts) + LoaderState.requirements = LoadModFile(LoaderState, ctx) + return commitRequirements(LoaderState, ctx, opts) } var errNoChange = errors.New("no update needed") // UpdateGoModFromReqs returns a modified go.mod file using the current // requirements. It does not commit these changes to disk. -func UpdateGoModFromReqs(ctx context.Context, opts WriteOpts) (before, after []byte, modFile *modfile.File, err error) { - if LoaderState.MainModules.Len() != 1 || LoaderState.MainModules.ModRoot(LoaderState.MainModules.Versions()[0]) == "" { +func UpdateGoModFromReqs(loaderstate *State, ctx context.Context, opts WriteOpts) (before, after []byte, modFile *modfile.File, err error) { + if loaderstate.MainModules.Len() != 1 || loaderstate.MainModules.ModRoot(loaderstate.MainModules.Versions()[0]) == "" { // We aren't in a module, so we don't have anywhere to write a go.mod file. return nil, nil, nil, errNoChange } - mainModule := LoaderState.MainModules.mustGetSingleMainModule() - modFile = LoaderState.MainModules.ModFile(mainModule) + mainModule := loaderstate.MainModules.mustGetSingleMainModule(loaderstate) + modFile = loaderstate.MainModules.ModFile(mainModule) if modFile == nil { // command-line-arguments has no .mod file to write. return nil, nil, nil, errNoChange @@ -1836,7 +1836,7 @@ func UpdateGoModFromReqs(ctx context.Context, opts WriteOpts) (before, after []b var list []*modfile.Require toolchain := "" goVersion := "" - for _, m := range LoaderState.requirements.rootModules { + for _, m := range loaderstate.requirements.rootModules { if m.Path == "go" { goVersion = m.Version continue @@ -1847,7 +1847,7 @@ func UpdateGoModFromReqs(ctx context.Context, opts WriteOpts) (before, after []b } list = append(list, &modfile.Require{ Mod: m, - Indirect: !LoaderState.requirements.direct[m.Path], + Indirect: !loaderstate.requirements.direct[m.Path], }) } @@ -1917,13 +1917,13 @@ func UpdateGoModFromReqs(ctx context.Context, opts WriteOpts) (before, after []b // go.mod or go.sum are out of date in a semantically significant way. // // In workspace mode, commitRequirements only writes changes to go.work.sum. -func commitRequirements(ctx context.Context, opts WriteOpts) (err error) { - if inWorkspaceMode() { +func commitRequirements(loaderstate *State, ctx context.Context, opts WriteOpts) (err error) { + if inWorkspaceMode(loaderstate) { // go.mod files aren't updated in workspace mode, but we still want to // update the go.work.sum file. 
- return modfetch.WriteGoSum(ctx, keepSums(ctx, loaded, LoaderState.requirements, addBuildListZipSums), mustHaveCompleteRequirements()) + return modfetch.WriteGoSum(ctx, keepSums(loaderstate, ctx, loaded, loaderstate.requirements, addBuildListZipSums), mustHaveCompleteRequirements(loaderstate)) } - _, updatedGoMod, modFile, err := UpdateGoModFromReqs(ctx, opts) + _, updatedGoMod, modFile, err := UpdateGoModFromReqs(loaderstate, ctx, opts) if err != nil { if errors.Is(err, errNoChange) { return nil @@ -1931,7 +1931,7 @@ func commitRequirements(ctx context.Context, opts WriteOpts) (err error) { return err } - index := LoaderState.MainModules.GetSingleIndexOrNil() + index := loaderstate.MainModules.GetSingleIndexOrNil(loaderstate) dirty := index.modFileIsDirty(modFile) || len(opts.DropTools) > 0 || len(opts.AddTools) > 0 if dirty && cfg.BuildMod != "mod" { // If we're about to fail due to -mod=readonly, @@ -1945,15 +1945,15 @@ func commitRequirements(ctx context.Context, opts WriteOpts) (err error) { // Don't write go.mod, but write go.sum in case we added or trimmed sums. // 'go mod init' shouldn't write go.sum, since it will be incomplete. if cfg.CmdName != "mod init" { - if err := modfetch.WriteGoSum(ctx, keepSums(ctx, loaded, LoaderState.requirements, addBuildListZipSums), mustHaveCompleteRequirements()); err != nil { + if err := modfetch.WriteGoSum(ctx, keepSums(loaderstate, ctx, loaded, loaderstate.requirements, addBuildListZipSums), mustHaveCompleteRequirements(loaderstate)); err != nil { return err } } return nil } - mainModule := LoaderState.MainModules.mustGetSingleMainModule() - modFilePath := modFilePath(LoaderState.MainModules.ModRoot(mainModule)) + mainModule := loaderstate.MainModules.mustGetSingleMainModule(loaderstate) + modFilePath := modFilePath(loaderstate.MainModules.ModRoot(mainModule)) if fsys.Replaced(modFilePath) { if dirty { return errors.New("updates to go.mod needed, but go.mod is part of the overlay specified with -overlay") @@ -1962,13 +1962,13 @@ func commitRequirements(ctx context.Context, opts WriteOpts) (err error) { } defer func() { // At this point we have determined to make the go.mod file on disk equal to new. - LoaderState.MainModules.SetIndex(mainModule, indexModFile(updatedGoMod, modFile, mainModule, false)) + loaderstate.MainModules.SetIndex(mainModule, indexModFile(updatedGoMod, modFile, mainModule, false)) // Update go.sum after releasing the side lock and refreshing the index. // 'go mod init' shouldn't write go.sum, since it will be incomplete. if cfg.CmdName != "mod init" { if err == nil { - err = modfetch.WriteGoSum(ctx, keepSums(ctx, loaded, LoaderState.requirements, addBuildListZipSums), mustHaveCompleteRequirements()) + err = modfetch.WriteGoSum(ctx, keepSums(loaderstate, ctx, loaded, loaderstate.requirements, addBuildListZipSums), mustHaveCompleteRequirements(loaderstate)) } } }() @@ -2011,7 +2011,7 @@ func commitRequirements(ctx context.Context, opts WriteOpts) (err error) { // including any go.mod files needed to reconstruct the MVS result // or identify go versions, // in addition to the checksums for every module in keepMods. 
-func keepSums(ctx context.Context, ld *loader, rs *Requirements, which whichSums) map[module.Version]bool { +func keepSums(loaderstate *State, ctx context.Context, ld *loader, rs *Requirements, which whichSums) map[module.Version]bool { // Every module in the full module graph contributes its requirements, // so in order to ensure that the build list itself is reproducible, // we need sums for every go.mod in the graph (regardless of whether @@ -2024,12 +2024,12 @@ func keepSums(ctx context.Context, ld *loader, rs *Requirements, which whichSums // ambiguous import errors the next time we load the package. keepModSumsForZipSums := true if ld == nil { - if gover.Compare(LoaderState.MainModules.GoVersion(), gover.TidyGoModSumVersion) < 0 && cfg.BuildMod != "mod" { + if gover.Compare(loaderstate.MainModules.GoVersion(loaderstate), gover.TidyGoModSumVersion) < 0 && cfg.BuildMod != "mod" { keepModSumsForZipSums = false } } else { keepPkgGoModSums := true - if gover.Compare(ld.requirements.GoVersion(), gover.TidyGoModSumVersion) < 0 && (ld.Tidy || cfg.BuildMod != "mod") { + if gover.Compare(ld.requirements.GoVersion(loaderstate), gover.TidyGoModSumVersion) < 0 && (ld.Tidy || cfg.BuildMod != "mod") { keepPkgGoModSums = false keepModSumsForZipSums = false } @@ -2047,21 +2047,21 @@ func keepSums(ctx context.Context, ld *loader, rs *Requirements, which whichSums // minor, so we maintain the previous (buggy) behavior in 'go mod tidy' to // avoid introducing unnecessary churn. if keepPkgGoModSums { - r := resolveReplacement(pkg.mod) + r := resolveReplacement(loaderstate, pkg.mod) keep[modkey(r)] = true } if rs.pruning == pruned && pkg.mod.Path != "" { - if v, ok := rs.rootSelected(pkg.mod.Path); ok && v == pkg.mod.Version { + if v, ok := rs.rootSelected(loaderstate, pkg.mod.Path); ok && v == pkg.mod.Version { // pkg was loaded from a root module, and because the main module has // a pruned module graph we do not check non-root modules for // conflicts for packages that can be found in roots. So we only need // the checksums for the root modules that may contain pkg, not all // possible modules. for prefix := pkg.path; prefix != "."; prefix = path.Dir(prefix) { - if v, ok := rs.rootSelected(prefix); ok && v != "none" { + if v, ok := rs.rootSelected(loaderstate, prefix); ok && v != "none" { m := module.Version{Path: prefix, Version: v} - r := resolveReplacement(m) + r := resolveReplacement(loaderstate, m) keep[r] = true } } @@ -2069,11 +2069,11 @@ func keepSums(ctx context.Context, ld *loader, rs *Requirements, which whichSums } } - mg, _ := rs.Graph(ctx) + mg, _ := rs.Graph(loaderstate, ctx) for prefix := pkg.path; prefix != "."; prefix = path.Dir(prefix) { if v := mg.Selected(prefix); v != "none" { m := module.Version{Path: prefix, Version: v} - r := resolveReplacement(m) + r := resolveReplacement(loaderstate, m) keep[r] = true } } @@ -2085,27 +2085,27 @@ func keepSums(ctx context.Context, ld *loader, rs *Requirements, which whichSums // Save sums for the root modules (or their replacements), but don't // incur the cost of loading the graph just to find and retain the sums. 
for _, m := range rs.rootModules { - r := resolveReplacement(m) + r := resolveReplacement(loaderstate, m) keep[modkey(r)] = true if which == addBuildListZipSums { keep[r] = true } } } else { - mg, _ := rs.Graph(ctx) + mg, _ := rs.Graph(loaderstate, ctx) mg.WalkBreadthFirst(func(m module.Version) { if _, ok := mg.RequiredBy(m); ok { // The requirements from m's go.mod file are present in the module graph, // so they are relevant to the MVS result regardless of whether m was // actually selected. - r := resolveReplacement(m) + r := resolveReplacement(loaderstate, m) keep[modkey(r)] = true } }) if which == addBuildListZipSums { for _, m := range mg.BuildList() { - r := resolveReplacement(m) + r := resolveReplacement(loaderstate, m) if keepModSumsForZipSums { keep[modkey(r)] = true // we need the go version from the go.mod file to do anything useful with the zipfile } diff --git a/src/cmd/go/internal/modload/list.go b/src/cmd/go/internal/modload/list.go index 803bab49ae96bb..bd28d7596e160a 100644 --- a/src/cmd/go/internal/modload/list.go +++ b/src/cmd/go/internal/modload/list.go @@ -69,7 +69,7 @@ func ListModules(ctx context.Context, args []string, mode ListMode, reuseFile st } } - rs, mods, err := listModules(ctx, LoadModFile(ctx), args, mode, reuse) + rs, mods, err := listModules(ctx, LoadModFile(LoaderState, ctx), args, mode, reuse) type token struct{} sem := make(chan token, runtime.GOMAXPROCS(0)) @@ -88,7 +88,7 @@ func ListModules(ctx context.Context, args []string, mode ListMode, reuseFile st addVersions(ctx, m, mode&ListRetractedVersions != 0) } if mode&ListRetracted != 0 { - addRetraction(ctx, m) + addRetraction(LoaderState, ctx, m) } if mode&ListDeprecated != 0 { addDeprecation(ctx, m) @@ -117,7 +117,7 @@ func ListModules(ctx context.Context, args []string, mode ListMode, reuseFile st // but in general list -u is looking up other checksums in the checksum database // that won't be necessary later, so it makes sense not to write the go.sum back out. 
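keepSums above collects the module versions whose go.sum entries must be retained, keyed either by the module itself (its zip hash) or by a synthetic "<version>/go.mod" key (its go.mod hash), following the go.sum convention. A rough sketch of that bookkeeping; modkey here is an illustrative stand-in, not the package's helper:

    package main

    import (
        "fmt"

        "golang.org/x/mod/module"
    )

    // modkey mimics keying a go.mod-only checksum: go.sum records it under
    // "<version>/go.mod" rather than under the module version itself.
    func modkey(m module.Version) module.Version {
        return module.Version{Path: m.Path, Version: m.Version + "/go.mod"}
    }

    func main() {
        roots := []module.Version{
            {Path: "example.com/a", Version: "v1.2.3"},
            {Path: "example.com/b", Version: "v0.4.0"},
        }
        keep := map[module.Version]bool{}
        for _, m := range roots {
            keep[modkey(m)] = true // always keep a root's go.mod hash
            keep[m] = true         // and its zip hash when the module contents are needed
        }
        fmt.Println(len(keep)) // 4
    }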
if !ExplicitWriteGoMod && mode&ListU == 0 { - err = commitRequirements(ctx, WriteOpts{}) + err = commitRequirements(LoaderState, ctx, WriteOpts{}) } } return mods, err @@ -130,7 +130,7 @@ func listModules(ctx context.Context, rs *Requirements, args []string, mode List if gover.IsToolchain(m.Path) { continue } - ms = append(ms, moduleInfo(ctx, rs, m, mode, reuse)) + ms = append(ms, moduleInfo(LoaderState, ctx, rs, m, mode, reuse)) } return rs, ms, nil } @@ -145,25 +145,25 @@ func listModules(ctx context.Context, rs *Requirements, args []string, mode List } if arg == "all" || strings.Contains(arg, "...") { needFullGraph = true - if !HasModRoot() { + if !HasModRoot(LoaderState) { base.Fatalf("go: cannot match %q: %v", arg, ErrNoModRoot) } continue } if path, vers, found := strings.Cut(arg, "@"); found { if vers == "upgrade" || vers == "patch" { - if _, ok := rs.rootSelected(path); !ok || rs.pruning == unpruned { + if _, ok := rs.rootSelected(LoaderState, path); !ok || rs.pruning == unpruned { needFullGraph = true - if !HasModRoot() { + if !HasModRoot(LoaderState) { base.Fatalf("go: cannot match %q: %v", arg, ErrNoModRoot) } } } continue } - if _, ok := rs.rootSelected(arg); !ok || rs.pruning == unpruned { + if _, ok := rs.rootSelected(LoaderState, arg); !ok || rs.pruning == unpruned { needFullGraph = true - if mode&ListVersions == 0 && !HasModRoot() { + if mode&ListVersions == 0 && !HasModRoot(LoaderState) { base.Fatalf("go: cannot match %q without -versions or an explicit version: %v", arg, ErrNoModRoot) } } @@ -171,7 +171,7 @@ func listModules(ctx context.Context, rs *Requirements, args []string, mode List var mg *ModuleGraph if needFullGraph { - rs, mg, mgErr = expandGraph(ctx, rs) + rs, mg, mgErr = expandGraph(LoaderState, ctx, rs) } matchedModule := map[module.Version]bool{} @@ -179,7 +179,7 @@ func listModules(ctx context.Context, rs *Requirements, args []string, mode List if path, vers, found := strings.Cut(arg, "@"); found { var current string if mg == nil { - current, _ = rs.rootSelected(path) + current, _ = rs.rootSelected(LoaderState, path) } else { current = mg.Selected(path) } @@ -198,7 +198,7 @@ func listModules(ctx context.Context, rs *Requirements, args []string, mode List // specific revision or used 'go list -retracted'. allowed = nil } - info, err := queryReuse(ctx, path, vers, current, allowed, reuse) + info, err := queryReuse(LoaderState, ctx, path, vers, current, allowed, reuse) if err != nil { var origin *codehost.Origin if info != nil { @@ -217,7 +217,7 @@ func listModules(ctx context.Context, rs *Requirements, args []string, mode List // *Requirements instead. var noRS *Requirements - mod := moduleInfo(ctx, noRS, module.Version{Path: path, Version: info.Version}, mode, reuse) + mod := moduleInfo(LoaderState, ctx, noRS, module.Version{Path: path, Version: info.Version}, mode, reuse) if vers != mod.Version { mod.Query = vers } @@ -237,7 +237,7 @@ func listModules(ctx context.Context, rs *Requirements, args []string, mode List var v string if mg == nil { var ok bool - v, ok = rs.rootSelected(arg) + v, ok = rs.rootSelected(LoaderState, arg) if !ok { // We checked rootSelected(arg) in the earlier args loop, so if there // is no such root we should have loaded a non-nil mg. 
@@ -251,7 +251,7 @@ func listModules(ctx context.Context, rs *Requirements, args []string, mode List continue } if v != "none" { - mods = append(mods, moduleInfo(ctx, rs, module.Version{Path: arg, Version: v}, mode, reuse)) + mods = append(mods, moduleInfo(LoaderState, ctx, rs, module.Version{Path: arg, Version: v}, mode, reuse)) } else if cfg.BuildMod == "vendor" { // In vendor mode, we can't determine whether a missing module is “a // known dependency” because the module graph is incomplete. @@ -292,7 +292,7 @@ func listModules(ctx context.Context, rs *Requirements, args []string, mode List fetchedMods := make([]*modinfo.ModulePublic, len(matches)) for i, m := range matches { q.Add(func() { - fetchedMods[i] = moduleInfo(ctx, rs, m, mode, reuse) + fetchedMods[i] = moduleInfo(LoaderState, ctx, rs, m, mode, reuse) }) } <-q.Idle() diff --git a/src/cmd/go/internal/modload/load.go b/src/cmd/go/internal/modload/load.go index 0d661eb2e7b0be..ad3b80bfd954aa 100644 --- a/src/cmd/go/internal/modload/load.go +++ b/src/cmd/go/internal/modload/load.go @@ -250,7 +250,7 @@ type PackageOpts struct { // LoadPackages identifies the set of packages matching the given patterns and // loads the packages in the import graph rooted at that set. -func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (matches []*search.Match, loadedPackages []string) { +func LoadPackages(loaderstate *State, ctx context.Context, opts PackageOpts, patterns ...string) (matches []*search.Match, loadedPackages []string) { if opts.Tags == nil { opts.Tags = imports.Tags() } @@ -271,11 +271,11 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma case m.IsLocal(): // Evaluate list of file system directories on first iteration. if m.Dirs == nil { - matchModRoots := LoaderState.modRoots + matchModRoots := loaderstate.modRoots if opts.MainModule != (module.Version{}) { - matchModRoots = []string{LoaderState.MainModules.ModRoot(opts.MainModule)} + matchModRoots = []string{loaderstate.MainModules.ModRoot(opts.MainModule)} } - matchLocalDirs(ctx, matchModRoots, m, rs) + matchLocalDirs(loaderstate, ctx, matchModRoots, m, rs) } // Make a copy of the directory list and translate to import paths. @@ -286,7 +286,7 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma // the loader iterations. m.Pkgs = m.Pkgs[:0] for _, dir := range m.Dirs { - pkg, err := resolveLocalPackage(ctx, dir, rs) + pkg, err := resolveLocalPackage(loaderstate, ctx, dir, rs) if err != nil { if !m.IsLiteral() && (err == errPkgIsBuiltin || err == errPkgIsGorootSrc) { continue // Don't include "builtin" or GOROOT/src in wildcard patterns. @@ -294,8 +294,8 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma // If we're outside of a module, ensure that the failure mode // indicates that. - if !HasModRoot() { - die() + if !HasModRoot(loaderstate) { + die(loaderstate) } if ld != nil { @@ -311,7 +311,7 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma case strings.Contains(m.Pattern(), "..."): m.Errs = m.Errs[:0] - mg, err := rs.Graph(ctx) + mg, err := rs.Graph(loaderstate, ctx) if err != nil { // The module graph is (or may be) incomplete — perhaps we failed to // load the requirements of some module. This is an error in matching @@ -321,26 +321,26 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma // necessarily prevent us from loading the packages we could find. 
m.Errs = append(m.Errs, err) } - matchPackages(ctx, m, opts.Tags, includeStd, mg.BuildList()) + matchPackages(loaderstate, ctx, m, opts.Tags, includeStd, mg.BuildList()) case m.Pattern() == "work": - matchModules := LoaderState.MainModules.Versions() + matchModules := loaderstate.MainModules.Versions() if opts.MainModule != (module.Version{}) { matchModules = []module.Version{opts.MainModule} } - matchPackages(ctx, m, opts.Tags, omitStd, matchModules) + matchPackages(loaderstate, ctx, m, opts.Tags, omitStd, matchModules) case m.Pattern() == "all": if ld == nil { // The initial roots are the packages and tools in the main module. // loadFromRoots will expand that to "all". m.Errs = m.Errs[:0] - matchModules := LoaderState.MainModules.Versions() + matchModules := loaderstate.MainModules.Versions() if opts.MainModule != (module.Version{}) { matchModules = []module.Version{opts.MainModule} } - matchPackages(ctx, m, opts.Tags, omitStd, matchModules) - for tool := range LoaderState.MainModules.Tools() { + matchPackages(loaderstate, ctx, m, opts.Tags, omitStd, matchModules) + for tool := range loaderstate.MainModules.Tools() { m.Pkgs = append(m.Pkgs, tool) } } else { @@ -355,7 +355,7 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma } case m.Pattern() == "tool": - for tool := range LoaderState.MainModules.Tools() { + for tool := range loaderstate.MainModules.Tools() { m.Pkgs = append(m.Pkgs, tool) } default: @@ -364,12 +364,12 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma } } - initialRS, err := loadModFile(ctx, &opts) + initialRS, err := loadModFile(loaderstate, ctx, &opts) if err != nil { base.Fatal(err) } - ld := loadFromRoots(ctx, loaderParams{ + ld := loadFromRoots(loaderstate, ctx, loaderParams{ PackageOpts: opts, requirements: initialRS, @@ -404,7 +404,7 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma if opts.Tidy { if cfg.BuildV { - mg, _ := ld.requirements.Graph(ctx) + mg, _ := ld.requirements.Graph(loaderstate, ctx) for _, m := range initialRS.rootModules { var unused bool if ld.requirements.pruning == unpruned { @@ -416,7 +416,7 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma // m is unused if it was dropped from the roots. If it is still present // as a transitive dependency, that transitive dependency is not needed // by any package or test in the main module. 
- _, ok := ld.requirements.rootSelected(m.Path) + _, ok := ld.requirements.rootSelected(loaderstate, m.Path) unused = !ok } if unused { @@ -425,9 +425,9 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma } } - keep := keepSums(ctx, ld, ld.requirements, loadedZipSumsOnly) + keep := keepSums(loaderstate, ctx, ld, ld.requirements, loadedZipSumsOnly) compatVersion := ld.TidyCompatibleVersion - goVersion := ld.requirements.GoVersion() + goVersion := ld.requirements.GoVersion(loaderstate) if compatVersion == "" { if gover.Compare(goVersion, gover.GoStrictVersion) < 0 { compatVersion = gover.Prev(goVersion) @@ -444,10 +444,10 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma compatVersion = goVersion } if compatPruning := pruningForGoVersion(compatVersion); compatPruning != ld.requirements.pruning { - compatRS := newRequirements(compatPruning, ld.requirements.rootModules, ld.requirements.direct) - ld.checkTidyCompatibility(ctx, compatRS, compatVersion) + compatRS := newRequirements(loaderstate, compatPruning, ld.requirements.rootModules, ld.requirements.direct) + ld.checkTidyCompatibility(loaderstate, ctx, compatRS, compatVersion) - for m := range keepSums(ctx, ld, compatRS, loadedZipSumsOnly) { + for m := range keepSums(loaderstate, ctx, ld, compatRS, loadedZipSumsOnly) { keep[m] = true } } @@ -455,8 +455,8 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma if opts.TidyDiff { cfg.BuildMod = "readonly" loaded = ld - LoaderState.requirements = loaded.requirements - currentGoMod, updatedGoMod, _, err := UpdateGoModFromReqs(ctx, WriteOpts{}) + loaderstate.requirements = loaded.requirements + currentGoMod, updatedGoMod, _, err := UpdateGoModFromReqs(loaderstate, ctx, WriteOpts{}) if err != nil { base.Fatal(err) } @@ -466,7 +466,7 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma // Dropping compatibility for 1.16 may result in a strictly smaller go.sum. // Update the keep map with only the loaded.requirements. if gover.Compare(compatVersion, "1.16") > 0 { - keep = keepSums(ctx, loaded, LoaderState.requirements, addBuildListZipSums) + keep = keepSums(loaderstate, ctx, loaded, loaderstate.requirements, addBuildListZipSums) } currentGoSum, tidyGoSum := modfetch.TidyGoSum(keep) goSumDiff := diff.Diff("current/go.sum", currentGoSum, "tidy/go.sum", tidyGoSum) @@ -490,7 +490,7 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma // loaded.requirements, but here we may have also loaded (and want to // preserve checksums for) additional entities from compatRS, which are // only needed for compatibility with ld.TidyCompatibleVersion. - if err := modfetch.WriteGoSum(ctx, keep, mustHaveCompleteRequirements()); err != nil { + if err := modfetch.WriteGoSum(ctx, keep, mustHaveCompleteRequirements(loaderstate)); err != nil { base.Fatal(err) } } @@ -505,7 +505,7 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma // to call WriteGoMod itself) or if ResolveMissingImports is false (the // command wants to examine the package graph as-is). 
loaded = ld - LoaderState.requirements = loaded.requirements + loaderstate.requirements = loaded.requirements for _, pkg := range ld.pkgs { if !pkg.isTest() { @@ -515,7 +515,7 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma sort.Strings(loadedPackages) if !ExplicitWriteGoMod && opts.ResolveMissingImports { - if err := commitRequirements(ctx, WriteOpts{}); err != nil { + if err := commitRequirements(loaderstate, ctx, WriteOpts{}); err != nil { base.Fatal(err) } } @@ -525,7 +525,7 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma // matchLocalDirs is like m.MatchDirs, but tries to avoid scanning directories // outside of the standard library and active modules. -func matchLocalDirs(ctx context.Context, modRoots []string, m *search.Match, rs *Requirements) { +func matchLocalDirs(loaderstate *State, ctx context.Context, modRoots []string, m *search.Match, rs *Requirements) { if !m.IsLocal() { panic(fmt.Sprintf("internal error: resolveLocalDirs on non-local pattern %s", m.Pattern())) } @@ -543,10 +543,10 @@ func matchLocalDirs(ctx context.Context, modRoots []string, m *search.Match, rs } modRoot := findModuleRoot(absDir) - if !slices.Contains(modRoots, modRoot) && search.InDir(absDir, cfg.GOROOTsrc) == "" && pathInModuleCache(ctx, absDir, rs) == "" { + if !slices.Contains(modRoots, modRoot) && search.InDir(absDir, cfg.GOROOTsrc) == "" && pathInModuleCache(loaderstate, ctx, absDir, rs) == "" { m.Dirs = []string{} scope := "main module or its selected dependencies" - if inWorkspaceMode() { + if inWorkspaceMode(loaderstate) { scope = "modules listed in go.work or their selected dependencies" } m.AddError(fmt.Errorf("directory prefix %s does not contain %s", base.ShortPath(absDir), scope)) @@ -558,7 +558,7 @@ func matchLocalDirs(ctx context.Context, modRoots []string, m *search.Match, rs } // resolveLocalPackage resolves a filesystem path to a package path. -func resolveLocalPackage(ctx context.Context, dir string, rs *Requirements) (string, error) { +func resolveLocalPackage(loaderstate *State, ctx context.Context, dir string, rs *Requirements) (string, error) { var absDir string if filepath.IsAbs(dir) { absDir = filepath.Clean(dir) @@ -596,13 +596,13 @@ func resolveLocalPackage(ctx context.Context, dir string, rs *Requirements) (str } } - for _, mod := range LoaderState.MainModules.Versions() { - modRoot := LoaderState.MainModules.ModRoot(mod) + for _, mod := range loaderstate.MainModules.Versions() { + modRoot := loaderstate.MainModules.ModRoot(mod) if modRoot != "" && absDir == modRoot { if absDir == cfg.GOROOTsrc { return "", errPkgIsGorootSrc } - return LoaderState.MainModules.PathPrefix(mod), nil + return loaderstate.MainModules.PathPrefix(mod), nil } } @@ -611,8 +611,8 @@ func resolveLocalPackage(ctx context.Context, dir string, rs *Requirements) (str // It's not strictly necessary but helpful to keep the checks. 
var pkgNotFoundErr error pkgNotFoundLongestPrefix := "" - for _, mainModule := range LoaderState.MainModules.Versions() { - modRoot := LoaderState.MainModules.ModRoot(mainModule) + for _, mainModule := range loaderstate.MainModules.Versions() { + modRoot := loaderstate.MainModules.ModRoot(mainModule) if modRoot != "" && str.HasFilePathPrefix(absDir, modRoot) && !strings.Contains(absDir[len(modRoot):], "@") { suffix := filepath.ToSlash(str.TrimFilePathPrefix(absDir, modRoot)) if pkg, found := strings.CutPrefix(suffix, "vendor/"); found { @@ -620,14 +620,14 @@ func resolveLocalPackage(ctx context.Context, dir string, rs *Requirements) (str return "", fmt.Errorf("without -mod=vendor, directory %s has no package path", absDir) } - readVendorList(VendorDir()) + readVendorList(VendorDir(loaderstate)) if _, ok := vendorPkgModule[pkg]; !ok { return "", fmt.Errorf("directory %s is not a package listed in vendor/modules.txt", absDir) } return pkg, nil } - mainModulePrefix := LoaderState.MainModules.PathPrefix(mainModule) + mainModulePrefix := loaderstate.MainModules.PathPrefix(mainModule) if mainModulePrefix == "" { pkg := suffix if pkg == "builtin" { @@ -668,13 +668,13 @@ func resolveLocalPackage(ctx context.Context, dir string, rs *Requirements) (str return pkg, nil } - pkg := pathInModuleCache(ctx, absDir, rs) + pkg := pathInModuleCache(loaderstate, ctx, absDir, rs) if pkg == "" { dirstr := fmt.Sprintf("directory %s", base.ShortPath(absDir)) if dirstr == "directory ." { dirstr = "current directory" } - if inWorkspaceMode() { + if inWorkspaceMode(loaderstate) { if mr := findModuleRoot(absDir); mr != "" { return "", fmt.Errorf("%s is contained in a module that is not one of the workspace modules listed in go.work. You can add the module to the workspace using:\n\tgo work use %s", dirstr, base.ShortPath(mr)) } @@ -693,17 +693,17 @@ var ( // pathInModuleCache returns the import path of the directory dir, // if dir is in the module cache copy of a module in our build list. -func pathInModuleCache(ctx context.Context, dir string, rs *Requirements) string { +func pathInModuleCache(loaderstate *State, ctx context.Context, dir string, rs *Requirements) string { tryMod := func(m module.Version) (string, bool) { if gover.IsToolchain(m.Path) { return "", false } var root string var err error - if repl := Replacement(m); repl.Path != "" && repl.Version == "" { + if repl := Replacement(loaderstate, m); repl.Path != "" && repl.Version == "" { root = repl.Path if !filepath.IsAbs(root) { - root = filepath.Join(replaceRelativeTo(), root) + root = filepath.Join(replaceRelativeTo(loaderstate), root) } } else if repl.Path != "" { root, err = modfetch.DownloadDir(ctx, repl) @@ -728,7 +728,7 @@ func pathInModuleCache(ctx context.Context, dir string, rs *Requirements) string if rs.pruning == pruned { for _, m := range rs.rootModules { - if v, _ := rs.rootSelected(m.Path); v != m.Version { + if v, _ := rs.rootSelected(loaderstate, m.Path); v != m.Version { continue // m is a root, but we have a higher root for the same path. } if importPath, ok := tryMod(m); ok { @@ -747,7 +747,7 @@ func pathInModuleCache(ctx context.Context, dir string, rs *Requirements) string // versions of root modules may differ from what we already checked above. // Re-check those paths too. 
- mg, _ := rs.Graph(ctx) + mg, _ := rs.Graph(loaderstate, ctx) var importPath string for _, m := range mg.BuildList() { var found bool @@ -766,8 +766,8 @@ func pathInModuleCache(ctx context.Context, dir string, rs *Requirements) string // // TODO(bcmills): Silencing errors seems off. Take a closer look at this and // figure out what the error-reporting actually ought to be. -func ImportFromFiles(ctx context.Context, gofiles []string) { - rs := LoadModFile(ctx) +func ImportFromFiles(loaderstate *State, ctx context.Context, gofiles []string) { + rs := LoadModFile(loaderstate, ctx) tags := imports.Tags() imports, testImports, err := imports.ScanFiles(gofiles, tags) @@ -775,7 +775,7 @@ func ImportFromFiles(ctx context.Context, gofiles []string) { base.Fatal(err) } - loaded = loadFromRoots(ctx, loaderParams{ + loaded = loadFromRoots(loaderstate, ctx, loaderParams{ PackageOpts: PackageOpts{ Tags: tags, ResolveMissingImports: true, @@ -788,10 +788,10 @@ func ImportFromFiles(ctx context.Context, gofiles []string) { return roots }, }) - LoaderState.requirements = loaded.requirements + loaderstate.requirements = loaded.requirements if !ExplicitWriteGoMod { - if err := commitRequirements(ctx, WriteOpts{}); err != nil { + if err := commitRequirements(loaderstate, ctx, WriteOpts{}); err != nil { base.Fatal(err) } } @@ -799,11 +799,11 @@ func ImportFromFiles(ctx context.Context, gofiles []string) { // DirImportPath returns the effective import path for dir, // provided it is within a main module, or else returns ".". -func (mms *MainModuleSet) DirImportPath(ctx context.Context, dir string) (path string, m module.Version) { - if !HasModRoot() { +func (mms *MainModuleSet) DirImportPath(loaderstate *State, ctx context.Context, dir string) (path string, m module.Version) { + if !HasModRoot(loaderstate) { return ".", module.Version{} } - LoadModFile(ctx) // Sets targetPrefix. + LoadModFile(loaderstate, ctx) // Sets targetPrefix. if !filepath.IsAbs(dir) { dir = filepath.Join(base.Cwd(), dir) @@ -820,7 +820,7 @@ func (mms *MainModuleSet) DirImportPath(ctx context.Context, dir string) (path s return mms.PathPrefix(v), v } if str.HasFilePathPrefix(dir, modRoot) { - pathPrefix := LoaderState.MainModules.PathPrefix(v) + pathPrefix := loaderstate.MainModules.PathPrefix(v) if pathPrefix > longestPrefix { longestPrefix = pathPrefix longestPrefixVersion = v @@ -853,13 +853,13 @@ func PackageModule(path string) module.Version { // the package at path as imported from the package in parentDir. // Lookup requires that one of the Load functions in this package has already // been called. -func Lookup(parentPath string, parentIsStd bool, path string) (dir, realPath string, err error) { +func Lookup(loaderstate *State, parentPath string, parentIsStd bool, path string) (dir, realPath string, err error) { if path == "" { panic("Lookup called with empty package path") } if parentIsStd { - path = loaded.stdVendor(parentPath, path) + path = loaded.stdVendor(loaderstate, parentPath, path) } pkg, ok := loaded.pkgCache.Get(path) if !ok { @@ -957,11 +957,11 @@ func (ld *loader) exitIfErrors(ctx context.Context) { // goVersion reports the Go version that should be used for the loader's // requirements: ld.TidyGoVersion if set, or ld.requirements.GoVersion() // otherwise. 
-func (ld *loader) goVersion() string { +func (ld *loader) goVersion(loaderstate *State) string { if ld.TidyGoVersion != "" { return ld.TidyGoVersion } - return ld.requirements.GoVersion() + return ld.requirements.GoVersion(loaderstate) } // A loadPkg records information about a single loaded package. @@ -1064,11 +1064,11 @@ func (pkg *loadPkg) isTest() bool { // fromExternalModule reports whether pkg was loaded from a module other than // the main module. -func (pkg *loadPkg) fromExternalModule() bool { +func (pkg *loadPkg) fromExternalModule(loaderstate *State) bool { if pkg.mod.Path == "" { return false // loaded from the standard library, not a module } - return !LoaderState.MainModules.Contains(pkg.mod.Path) + return !loaderstate.MainModules.Contains(pkg.mod.Path) } var errMissing = errors.New("cannot find package") @@ -1079,7 +1079,7 @@ var errMissing = errors.New("cannot find package") // The set of root packages is returned by the params.listRoots function, and // expanded to the full set of packages by tracing imports (and possibly tests) // as needed. -func loadFromRoots(ctx context.Context, params loaderParams) *loader { +func loadFromRoots(loaderstate *State, ctx context.Context, params loaderParams) *loader { ld := &loader{ loaderParams: params, work: par.NewQueue(runtime.GOMAXPROCS(0)), @@ -1095,7 +1095,7 @@ func loadFromRoots(ctx context.Context, params loaderParams) *loader { // spot-checks in modules that do not maintain the expanded go.mod // requirements needed for graph pruning. var err error - ld.requirements, _, err = expandGraph(ctx, ld.requirements) + ld.requirements, _, err = expandGraph(loaderstate, ctx, ld.requirements) if err != nil { ld.error(err) } @@ -1103,11 +1103,11 @@ func loadFromRoots(ctx context.Context, params loaderParams) *loader { ld.exitIfErrors(ctx) updateGoVersion := func() { - goVersion := ld.goVersion() + goVersion := ld.goVersion(loaderstate) if ld.requirements.pruning != workspace { var err error - ld.requirements, err = convertPruning(ctx, ld.requirements, pruningForGoVersion(goVersion)) + ld.requirements, err = convertPruning(loaderstate, ctx, ld.requirements, pruningForGoVersion(goVersion)) if err != nil { ld.error(err) ld.exitIfErrors(ctx) @@ -1141,7 +1141,7 @@ func loadFromRoots(ctx context.Context, params loaderParams) *loader { // set of root packages does not change then we can select the correct // versions of all transitive imports on the first try and complete // loading in a single iteration. - changedBuildList := ld.preloadRootModules(ctx, rootPkgs) + changedBuildList := ld.preloadRootModules(loaderstate, ctx, rootPkgs) if changedBuildList { // The build list has changed, so the set of root packages may have also // changed. Start over to pick up the changes. 
(Preloading roots is much @@ -1154,7 +1154,7 @@ func loadFromRoots(ctx context.Context, params loaderParams) *loader { inRoots := map[*loadPkg]bool{} for _, path := range rootPkgs { - root := ld.pkg(ctx, path, pkgIsRoot) + root := ld.pkg(loaderstate, ctx, path, pkgIsRoot) if !inRoots[root] { ld.roots = append(ld.roots, root) inRoots[root] = true @@ -1170,7 +1170,7 @@ func loadFromRoots(ctx context.Context, params loaderParams) *loader { ld.buildStacks() - changed, err := ld.updateRequirements(ctx) + changed, err := ld.updateRequirements(loaderstate, ctx) if err != nil { ld.error(err) break @@ -1184,12 +1184,12 @@ func loadFromRoots(ctx context.Context, params loaderParams) *loader { continue } - if !ld.ResolveMissingImports || (!HasModRoot() && !allowMissingModuleImports) { + if !ld.ResolveMissingImports || (!HasModRoot(loaderstate) && !allowMissingModuleImports) { // We've loaded as much as we can without resolving missing imports. break } - modAddedBy, err := ld.resolveMissingImports(ctx) + modAddedBy, err := ld.resolveMissingImports(loaderstate, ctx) if err != nil { ld.error(err) break @@ -1216,7 +1216,7 @@ func loadFromRoots(ctx context.Context, params loaderParams) *loader { // iteration so we don't need to also update it here. (That would waste time // computing a "direct" map that we'll have to recompute later anyway.) direct := ld.requirements.direct - rs, err := updateRoots(ctx, direct, ld.requirements, noPkgs, toAdd, ld.AssumeRootsImported) + rs, err := updateRoots(loaderstate, ctx, direct, ld.requirements, noPkgs, toAdd, ld.AssumeRootsImported) if err != nil { // If an error was found in a newly added module, report the package // import stack instead of the module requirement stack. Packages @@ -1244,7 +1244,7 @@ func loadFromRoots(ctx context.Context, params loaderParams) *loader { // Tidy the build list, if applicable, before we report errors. // (The process of tidying may remove errors from irrelevant dependencies.) if ld.Tidy { - rs, err := tidyRoots(ctx, ld.requirements, ld.pkgs) + rs, err := tidyRoots(loaderstate, ctx, ld.requirements, ld.pkgs) if err != nil { ld.error(err) } else { @@ -1252,8 +1252,8 @@ func loadFromRoots(ctx context.Context, params loaderParams) *loader { // Attempt to switch to the requested Go version. We have been using its // pruning and semantics all along, but there may have been — and may // still be — requirements on higher versions in the graph. 
- tidy := overrideRoots(ctx, rs, []module.Version{{Path: "go", Version: ld.TidyGoVersion}}) - mg, err := tidy.Graph(ctx) + tidy := overrideRoots(loaderstate, ctx, rs, []module.Version{{Path: "go", Version: ld.TidyGoVersion}}) + mg, err := tidy.Graph(loaderstate, ctx) if err != nil { ld.error(err) } @@ -1285,7 +1285,7 @@ func loadFromRoots(ctx context.Context, params loaderParams) *loader { if m.Path == "go" && ld.TidyGoVersion != "" { continue } - if v, ok := ld.requirements.rootSelected(m.Path); !ok || v != m.Version { + if v, ok := ld.requirements.rootSelected(loaderstate, m.Path); !ok || v != m.Version { ld.error(fmt.Errorf("internal error: a requirement on %v is needed but was not added during package loading (selected %s)", m, v)) } } @@ -1334,7 +1334,7 @@ func loadFromRoots(ctx context.Context, params loaderParams) *loader { ld.error(fmt.Errorf("%s: %w", pkg.stackText(), pkg.err)) } - ld.checkMultiplePaths() + ld.checkMultiplePaths(loaderstate) return ld } @@ -1357,7 +1357,7 @@ func loadFromRoots(ctx context.Context, params loaderParams) *loader { // The "changed" return value reports whether the update changed the selected // version of any module that either provided a loaded package or may now // provide a package that was previously unresolved. -func (ld *loader) updateRequirements(ctx context.Context) (changed bool, err error) { +func (ld *loader) updateRequirements(loaderstate *State, ctx context.Context) (changed bool, err error) { rs := ld.requirements // direct contains the set of modules believed to provide packages directly @@ -1390,16 +1390,16 @@ func (ld *loader) updateRequirements(ctx context.Context) (changed bool, err err } } } - if pkg.mod.Version != "" || !LoaderState.MainModules.Contains(pkg.mod.Path) { + if pkg.mod.Version != "" || !loaderstate.MainModules.Contains(pkg.mod.Path) { continue } for _, dep := range pkg.imports { - if !dep.fromExternalModule() { + if !dep.fromExternalModule(loaderstate) { continue } - if inWorkspaceMode() { + if inWorkspaceMode(loaderstate) { // In workspace mode / workspace pruning mode, the roots are the main modules // rather than the main module's direct dependencies. The check below on the selected // roots does not apply. @@ -1412,7 +1412,7 @@ func (ld *loader) updateRequirements(ctx context.Context) (changed bool, err err // of the vendor directory anyway. continue } - if mg, err := rs.Graph(ctx); err != nil { + if mg, err := rs.Graph(loaderstate, ctx); err != nil { return false, err } else if _, ok := mg.RequiredBy(dep.mod); !ok { // dep.mod is not an explicit dependency, but needs to be. @@ -1424,7 +1424,7 @@ func (ld *loader) updateRequirements(ctx context.Context) (changed bool, err err } } } else if pkg.err == nil && cfg.BuildMod != "mod" { - if v, ok := rs.rootSelected(dep.mod.Path); !ok || v != dep.mod.Version { + if v, ok := rs.rootSelected(loaderstate, dep.mod.Path); !ok || v != dep.mod.Version { // dep.mod is not an explicit dependency, but needs to be. // Because we are not in "mod" mode, we will not be able to update it. // Instead, mark the importing package with an error. @@ -1490,21 +1490,21 @@ func (ld *loader) updateRequirements(ctx context.Context) (changed bool, err err // roots can only increase and the set of roots can only expand. The set // of extant root paths is finite and the set of versions of each path is // finite, so the iteration *must* reach a stable fixed-point. 
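The comment just above argues that updating the roots must terminate: roots are only ever added, and both the set of root paths and the versions of each path are finite. A toy monotone fixed-point loop in the same spirit; the requires map is hypothetical:

    package main

    import (
        "fmt"
        "sort"
    )

    // requires is a stand-in for "this root's go.mod adds these roots".
    var requires = map[string][]string{
        "a": {"b"},
        "b": {"c"},
        "c": {},
    }

    func main() {
        roots := map[string]bool{"a": true}
        for {
            changed := false
            for m := range roots {
                for _, dep := range requires[m] {
                    if !roots[dep] {
                        roots[dep] = true
                        changed = true
                    }
                }
            }
            if !changed {
                break // no new roots were added: a fixed point was reached
            }
        }
        var out []string
        for m := range roots {
            out = append(out, m)
        }
        sort.Strings(out)
        fmt.Println(out) // [a b c]
    }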
- tidy, err := tidyRoots(ctx, rs, ld.pkgs) + tidy, err := tidyRoots(loaderstate, ctx, rs, ld.pkgs) if err != nil { return false, err } addRoots = tidy.rootModules } - rs, err = updateRoots(ctx, direct, rs, ld.pkgs, addRoots, ld.AssumeRootsImported) + rs, err = updateRoots(loaderstate, ctx, direct, rs, ld.pkgs, addRoots, ld.AssumeRootsImported) if err != nil { // We don't actually know what even the root requirements are supposed to be, // so we can't proceed with loading. Return the error to the caller return false, err } - if rs.GoVersion() != ld.requirements.GoVersion() { + if rs.GoVersion(loaderstate) != ld.requirements.GoVersion(loaderstate) { // A change in the selected Go version may or may not affect the set of // loaded packages, but in some cases it can change the meaning of the "all" // pattern, the level of pruning in the module graph, and even the set of @@ -1515,12 +1515,12 @@ func (ld *loader) updateRequirements(ctx context.Context) (changed bool, err err // The roots of the module graph have changed in some way (not just the // "direct" markings). Check whether the changes affected any of the loaded // packages. - mg, err := rs.Graph(ctx) + mg, err := rs.Graph(loaderstate, ctx) if err != nil { return false, err } for _, pkg := range ld.pkgs { - if pkg.fromExternalModule() && mg.Selected(pkg.mod.Path) != pkg.mod.Version { + if pkg.fromExternalModule(loaderstate) && mg.Selected(pkg.mod.Path) != pkg.mod.Version { changed = true break } @@ -1540,7 +1540,7 @@ func (ld *loader) updateRequirements(ctx context.Context) (changed bool, err err // // In some sense, we can think of this as ‘upgraded the module providing // pkg.path from "none" to a version higher than "none"’. - if _, _, _, _, err = importFromModules(ctx, pkg.path, rs, nil, ld.skipImportModFiles); err == nil { + if _, _, _, _, err = importFromModules(loaderstate, ctx, pkg.path, rs, nil, ld.skipImportModFiles); err == nil { changed = true break } @@ -1558,7 +1558,7 @@ func (ld *loader) updateRequirements(ctx context.Context) (changed bool, err err // The newly-resolved packages are added to the addedModuleFor map, and // resolveMissingImports returns a map from each new module version to // the first missing package that module would resolve. -func (ld *loader) resolveMissingImports(ctx context.Context) (modAddedBy map[module.Version]*loadPkg, err error) { +func (ld *loader) resolveMissingImports(loaderstate *State, ctx context.Context) (modAddedBy map[module.Version]*loadPkg, err error) { type pkgMod struct { pkg *loadPkg mod *module.Version @@ -1582,11 +1582,11 @@ func (ld *loader) resolveMissingImports(ctx context.Context) (modAddedBy map[mod var mod module.Version ld.work.Add(func() { var err error - mod, err = queryImport(ctx, pkg.path, ld.requirements) + mod, err = queryImport(loaderstate, ctx, pkg.path, ld.requirements) if err != nil { if ime, ok := errors.AsType[*ImportMissingError](err); ok { for curstack := pkg.stack; curstack != nil; curstack = curstack.stack { - if LoaderState.MainModules.Contains(curstack.mod.Path) { + if loaderstate.MainModules.Contains(curstack.mod.Path) { ime.ImportingMainModule = curstack.mod break } @@ -1658,7 +1658,7 @@ func (ld *loader) resolveMissingImports(ctx context.Context) (modAddedBy map[mod // ld.work queue, and its test (if requested) will also be populated once // imports have been resolved. When ld.work goes idle, all transitive imports of // the requested package (and its test, if requested) will have been loaded. 
-func (ld *loader) pkg(ctx context.Context, path string, flags loadPkgFlags) *loadPkg { +func (ld *loader) pkg(loaderstate *State, ctx context.Context, path string, flags loadPkgFlags) *loadPkg { if flags.has(pkgImportsLoaded) { panic("internal error: (*loader).pkg called with pkgImportsLoaded flag set") } @@ -1667,20 +1667,20 @@ func (ld *loader) pkg(ctx context.Context, path string, flags loadPkgFlags) *loa pkg := &loadPkg{ path: path, } - ld.applyPkgFlags(ctx, pkg, flags) + ld.applyPkgFlags(loaderstate, ctx, pkg, flags) - ld.work.Add(func() { ld.load(ctx, pkg) }) + ld.work.Add(func() { ld.load(loaderstate, ctx, pkg) }) return pkg }) - ld.applyPkgFlags(ctx, pkg, flags) + ld.applyPkgFlags(loaderstate, ctx, pkg, flags) return pkg } // applyPkgFlags updates pkg.flags to set the given flags and propagate the // (transitive) effects of those flags, possibly loading or enqueueing further // packages as a result. -func (ld *loader) applyPkgFlags(ctx context.Context, pkg *loadPkg, flags loadPkgFlags) { +func (ld *loader) applyPkgFlags(loaderstate *State, ctx context.Context, pkg *loadPkg, flags loadPkgFlags) { if flags == 0 { return } @@ -1708,7 +1708,7 @@ func (ld *loader) applyPkgFlags(ctx context.Context, pkg *loadPkg, flags loadPkg // so it's ok if we call it more than is strictly necessary. wantTest := false switch { - case ld.allPatternIsRoot && LoaderState.MainModules.Contains(pkg.mod.Path): + case ld.allPatternIsRoot && loaderstate.MainModules.Contains(pkg.mod.Path): // We are loading the "all" pattern, which includes packages imported by // tests in the main module. This package is in the main module, so we // need to identify the imports of its test even if LoadTests is not set. @@ -1729,13 +1729,13 @@ func (ld *loader) applyPkgFlags(ctx context.Context, pkg *loadPkg, flags loadPkg if wantTest { var testFlags loadPkgFlags - if LoaderState.MainModules.Contains(pkg.mod.Path) || (ld.allClosesOverTests && new.has(pkgInAll)) { + if loaderstate.MainModules.Contains(pkg.mod.Path) || (ld.allClosesOverTests && new.has(pkgInAll)) { // Tests of packages in the main module are in "all", in the sense that // they cause the packages they import to also be in "all". So are tests // of packages in "all" if "all" closes over test dependencies. testFlags |= pkgInAll } - ld.pkgTest(ctx, pkg, testFlags) + ld.pkgTest(loaderstate, ctx, pkg, testFlags) } } @@ -1743,13 +1743,13 @@ func (ld *loader) applyPkgFlags(ctx context.Context, pkg *loadPkg, flags loadPkg // We have just marked pkg with pkgInAll, or we have just loaded its // imports, or both. Now is the time to propagate pkgInAll to the imports. for _, dep := range pkg.imports { - ld.applyPkgFlags(ctx, dep, pkgInAll) + ld.applyPkgFlags(loaderstate, ctx, dep, pkgInAll) } } if new.has(pkgFromRoot) && !old.has(pkgFromRoot|pkgImportsLoaded) { for _, dep := range pkg.imports { - ld.applyPkgFlags(ctx, dep, pkgFromRoot) + ld.applyPkgFlags(loaderstate, ctx, dep, pkgFromRoot) } } } @@ -1757,7 +1757,7 @@ func (ld *loader) applyPkgFlags(ctx context.Context, pkg *loadPkg, flags loadPkg // preloadRootModules loads the module requirements needed to identify the // selected version of each module providing a package in rootPkgs, // adding new root modules to the module graph if needed. 
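preloadRootModules (below) shares a single map of needed roots across the ld.work goroutines by parking it in a channel of capacity one, which doubles as a lock: receiving the map acquires it, sending it back releases it. A stripped-down sketch of that pattern, not the real loader code:

    package main

    import (
        "fmt"
        "sync"
    )

    func main() {
        // The 1-buffered channel holds the shared map; at most one goroutine
        // can be holding the map (and thus mutating it) at a time.
        needc := make(chan map[string]bool, 1)
        needc <- map[string]bool{}

        var wg sync.WaitGroup
        for _, path := range []string{"a", "b", "c", "a"} {
            wg.Add(1)
            go func(path string) {
                defer wg.Done()
                need := <-needc // acquire
                need[path] = true
                needc <- need // release
            }(path)
        }
        wg.Wait()

        need := <-needc
        fmt.Println(len(need)) // 3 distinct paths
    }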
-func (ld *loader) preloadRootModules(ctx context.Context, rootPkgs []string) (changedBuildList bool) { +func (ld *loader) preloadRootModules(loaderstate *State, ctx context.Context, rootPkgs []string) (changedBuildList bool) { needc := make(chan map[module.Version]bool, 1) needc <- map[module.Version]bool{} for _, path := range rootPkgs { @@ -1768,12 +1768,12 @@ func (ld *loader) preloadRootModules(ctx context.Context, rootPkgs []string) (ch // If the main module is tidy and the package is in "all" — or if we're // lucky — we can identify all of its imports without actually loading the // full module graph. - m, _, _, _, err := importFromModules(ctx, path, ld.requirements, nil, ld.skipImportModFiles) + m, _, _, _, err := importFromModules(loaderstate, ctx, path, ld.requirements, nil, ld.skipImportModFiles) if err != nil { if _, ok := errors.AsType[*ImportMissingError](err); ok && ld.ResolveMissingImports { // This package isn't provided by any selected module. // If we can find it, it will be a new root dependency. - m, err = queryImport(ctx, path, ld.requirements) + m, err = queryImport(loaderstate, ctx, path, ld.requirements) } if err != nil { // We couldn't identify the root module containing this package. @@ -1786,7 +1786,7 @@ func (ld *loader) preloadRootModules(ctx context.Context, rootPkgs []string) (ch return } - v, ok := ld.requirements.rootSelected(m.Path) + v, ok := ld.requirements.rootSelected(loaderstate, m.Path) if !ok || v != m.Version { // We found the requested package in m, but m is not a root, so // loadModGraph will not load its requirements. We need to promote the @@ -1814,7 +1814,7 @@ func (ld *loader) preloadRootModules(ctx context.Context, rootPkgs []string) (ch } gover.ModSort(toAdd) - rs, err := updateRoots(ctx, ld.requirements.direct, ld.requirements, nil, toAdd, ld.AssumeRootsImported) + rs, err := updateRoots(loaderstate, ctx, ld.requirements.direct, ld.requirements, nil, toAdd, ld.AssumeRootsImported) if err != nil { // We are missing some root dependency, and for some reason we can't load // enough of the module dependency graph to add the missing root. Package @@ -1836,11 +1836,11 @@ func (ld *loader) preloadRootModules(ctx context.Context, rootPkgs []string) (ch } // load loads an individual package. -func (ld *loader) load(ctx context.Context, pkg *loadPkg) { +func (ld *loader) load(loaderstate *State, ctx context.Context, pkg *loadPkg) { var mg *ModuleGraph if ld.requirements.pruning == unpruned { var err error - mg, err = ld.requirements.Graph(ctx) + mg, err = ld.requirements.Graph(loaderstate, ctx) if err != nil { // We already checked the error from Graph in loadFromRoots and/or // updateRequirements, so we ignored the error on purpose and we should @@ -1855,17 +1855,17 @@ func (ld *loader) load(ctx context.Context, pkg *loadPkg) { } var modroot string - pkg.mod, modroot, pkg.dir, pkg.altMods, pkg.err = importFromModules(ctx, pkg.path, ld.requirements, mg, ld.skipImportModFiles) - if LoaderState.MainModules.Tools()[pkg.path] { + pkg.mod, modroot, pkg.dir, pkg.altMods, pkg.err = importFromModules(loaderstate, ctx, pkg.path, ld.requirements, mg, ld.skipImportModFiles) + if loaderstate.MainModules.Tools()[pkg.path] { // Tools declared by main modules are always in "all". 
// We apply the package flags before returning so that missing // tool dependencies report an error https://go.dev/issue/70582 - ld.applyPkgFlags(ctx, pkg, pkgInAll) + ld.applyPkgFlags(loaderstate, ctx, pkg, pkgInAll) } if pkg.dir == "" { return } - if LoaderState.MainModules.Contains(pkg.mod.Path) { + if loaderstate.MainModules.Contains(pkg.mod.Path) { // Go ahead and mark pkg as in "all". This provides the invariant that a // package that is *only* imported by other packages in "all" is always // marked as such before loading its imports. @@ -1875,7 +1875,7 @@ func (ld *loader) load(ctx context.Context, pkg *loadPkg) { // about (by reducing churn on the flag bits of dependencies), and costs // essentially nothing (these atomic flag ops are essentially free compared // to scanning source code for imports). - ld.applyPkgFlags(ctx, pkg, pkgInAll) + ld.applyPkgFlags(loaderstate, ctx, pkg, pkgInAll) } if ld.AllowPackage != nil { if err := ld.AllowPackage(ctx, pkg.path, pkg.mod); err != nil { @@ -1907,13 +1907,13 @@ func (ld *loader) load(ctx context.Context, pkg *loadPkg) { if pkg.inStd { // Imports from packages in "std" and "cmd" should resolve using // GOROOT/src/vendor even when "std" is not the main module. - path = ld.stdVendor(pkg.path, path) + path = ld.stdVendor(loaderstate, pkg.path, path) } - pkg.imports = append(pkg.imports, ld.pkg(ctx, path, importFlags)) + pkg.imports = append(pkg.imports, ld.pkg(loaderstate, ctx, path, importFlags)) } pkg.testImports = testImports - ld.applyPkgFlags(ctx, pkg, pkgImportsLoaded) + ld.applyPkgFlags(loaderstate, ctx, pkg, pkgImportsLoaded) } // pkgTest locates the test of pkg, creating it if needed, and updates its state @@ -1921,7 +1921,7 @@ func (ld *loader) load(ctx context.Context, pkg *loadPkg) { // // pkgTest requires that the imports of pkg have already been loaded (flagged // with pkgImportsLoaded). -func (ld *loader) pkgTest(ctx context.Context, pkg *loadPkg, testFlags loadPkgFlags) *loadPkg { +func (ld *loader) pkgTest(loaderstate *State, ctx context.Context, pkg *loadPkg, testFlags loadPkgFlags) *loadPkg { if pkg.isTest() { panic("pkgTest called on a test package") } @@ -1936,7 +1936,7 @@ func (ld *loader) pkgTest(ctx context.Context, pkg *loadPkg, testFlags loadPkgFl err: pkg.err, inStd: pkg.inStd, } - ld.applyPkgFlags(ctx, pkg.test, testFlags) + ld.applyPkgFlags(loaderstate, ctx, pkg.test, testFlags) createdTest = true }) @@ -1949,14 +1949,14 @@ func (ld *loader) pkgTest(ctx context.Context, pkg *loadPkg, testFlags loadPkgFl } for _, path := range pkg.testImports { if pkg.inStd { - path = ld.stdVendor(test.path, path) + path = ld.stdVendor(loaderstate, test.path, path) } - test.imports = append(test.imports, ld.pkg(ctx, path, importFlags)) + test.imports = append(test.imports, ld.pkg(loaderstate, ctx, path, importFlags)) } pkg.testImports = nil - ld.applyPkgFlags(ctx, test, pkgImportsLoaded) + ld.applyPkgFlags(loaderstate, ctx, test, pkgImportsLoaded) } else { - ld.applyPkgFlags(ctx, test, testFlags) + ld.applyPkgFlags(loaderstate, ctx, test, testFlags) } return test @@ -1964,7 +1964,7 @@ func (ld *loader) pkgTest(ctx context.Context, pkg *loadPkg, testFlags loadPkgFl // stdVendor returns the canonical import path for the package with the given // path when imported from the standard-library package at parentPath. 
-func (ld *loader) stdVendor(parentPath, path string) string { +func (ld *loader) stdVendor(loaderstate *State, parentPath, path string) string { if p, _, ok := fips140.ResolveImport(path); ok { return p } @@ -1973,14 +1973,14 @@ func (ld *loader) stdVendor(parentPath, path string) string { } if str.HasPathPrefix(parentPath, "cmd") { - if !ld.VendorModulesInGOROOTSrc || !LoaderState.MainModules.Contains("cmd") { + if !ld.VendorModulesInGOROOTSrc || !loaderstate.MainModules.Contains("cmd") { vendorPath := pathpkg.Join("cmd", "vendor", path) if _, err := os.Stat(filepath.Join(cfg.GOROOTsrc, filepath.FromSlash(vendorPath))); err == nil { return vendorPath } } - } else if !ld.VendorModulesInGOROOTSrc || !LoaderState.MainModules.Contains("std") || str.HasPathPrefix(parentPath, "vendor") { + } else if !ld.VendorModulesInGOROOTSrc || !loaderstate.MainModules.Contains("std") || str.HasPathPrefix(parentPath, "vendor") { // If we are outside of the 'std' module, resolve imports from within 'std' // to the vendor directory. // @@ -2026,7 +2026,7 @@ func (ld *loader) computePatternAll() (all []string) { // or as a replacement for another module, but not both at the same time. // // (See https://golang.org/issue/26607 and https://golang.org/issue/34650.) -func (ld *loader) checkMultiplePaths() { +func (ld *loader) checkMultiplePaths(loaderstate *State) { mods := ld.requirements.rootModules if cached := ld.requirements.graph.Load(); cached != nil { if mg := cached.mg; mg != nil { @@ -2036,7 +2036,7 @@ func (ld *loader) checkMultiplePaths() { firstPath := map[module.Version]string{} for _, mod := range mods { - src := resolveReplacement(mod) + src := resolveReplacement(loaderstate, mod) if prev, ok := firstPath[src]; !ok { firstPath[src] = mod.Path } else if prev != mod.Path { @@ -2047,8 +2047,8 @@ func (ld *loader) checkMultiplePaths() { // checkTidyCompatibility emits an error if any package would be loaded from a // different module under rs than under ld.requirements. 
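stdVendor, converted above, rewrites an import made from inside "std" or "cmd" to the corresponding GOROOT vendor directory when one exists. A rough reduction of the "cmd" branch, with the workspace and main-module conditions left out; vendorImport and gorootSrc are names invented for this sketch:

package main

import (
	"fmt"
	"os"
	"path"
	"path/filepath"
	"strings"
)

// vendorImport is an illustration-only reduction of the stdVendor logic above:
// an import made from a package under "cmd" resolves to cmd/vendor/<path> when
// that directory exists under gorootSrc, and otherwise keeps its original path.
func vendorImport(gorootSrc, parentPath, importPath string) string {
	if parentPath == "cmd" || strings.HasPrefix(parentPath, "cmd/") {
		vendored := path.Join("cmd", "vendor", importPath)
		if _, err := os.Stat(filepath.Join(gorootSrc, filepath.FromSlash(vendored))); err == nil {
			return vendored
		}
	}
	return importPath
}

func main() {
	gorootSrc := filepath.Join(os.Getenv("GOROOT"), "src") // assumes GOROOT is set
	fmt.Println(vendorImport(gorootSrc, "cmd/go", "golang.org/x/mod/semver"))
	fmt.Println(vendorImport(gorootSrc, "fmt", "golang.org/x/mod/semver"))
}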
-func (ld *loader) checkTidyCompatibility(ctx context.Context, rs *Requirements, compatVersion string) { - goVersion := rs.GoVersion() +func (ld *loader) checkTidyCompatibility(loaderstate *State, ctx context.Context, rs *Requirements, compatVersion string) { + goVersion := rs.GoVersion(loaderstate) suggestUpgrade := false suggestEFlag := false suggestFixes := func() { @@ -2065,7 +2065,7 @@ func (ld *loader) checkTidyCompatibility(ctx context.Context, rs *Requirements, fmt.Fprintln(os.Stderr) goFlag := "" - if goVersion != LoaderState.MainModules.GoVersion() { + if goVersion != loaderstate.MainModules.GoVersion(loaderstate) { goFlag = " -go=" + goVersion } @@ -2094,7 +2094,7 @@ func (ld *loader) checkTidyCompatibility(ctx context.Context, rs *Requirements, fmt.Fprintf(os.Stderr, "For information about 'go mod tidy' compatibility, see:\n\thttps://go.dev/ref/mod#graph-pruning\n") } - mg, err := rs.Graph(ctx) + mg, err := rs.Graph(loaderstate, ctx) if err != nil { ld.error(fmt.Errorf("error loading go %s module graph: %w", compatVersion, err)) ld.switchIfErrors(ctx) @@ -2132,7 +2132,7 @@ func (ld *loader) checkTidyCompatibility(ctx context.Context, rs *Requirements, pkg := pkg ld.work.Add(func() { - mod, _, _, _, err := importFromModules(ctx, pkg.path, rs, mg, ld.skipImportModFiles) + mod, _, _, _, err := importFromModules(loaderstate, ctx, pkg.path, rs, mg, ld.skipImportModFiles) if mod != pkg.mod { mismatches := <-mismatchMu mismatches[pkg] = mismatch{mod: mod, err: err} diff --git a/src/cmd/go/internal/modload/modfile.go b/src/cmd/go/internal/modload/modfile.go index 3fdbdc7010b993..20feb8fcacc784 100644 --- a/src/cmd/go/internal/modload/modfile.go +++ b/src/cmd/go/internal/modload/modfile.go @@ -142,7 +142,7 @@ func CheckAllowed(ctx context.Context, m module.Version) error { if err := CheckExclusions(ctx, m); err != nil { return err } - if err := CheckRetractions(ctx, m); err != nil { + if err := CheckRetractions(LoaderState, ctx, m); err != nil { return err } return nil @@ -172,7 +172,7 @@ func (e *excludedError) Is(err error) bool { return err == ErrDisallowed } // CheckRetractions returns an error if module m has been retracted by // its author. -func CheckRetractions(ctx context.Context, m module.Version) (err error) { +func CheckRetractions(loaderstate *State, ctx context.Context, m module.Version) (err error) { defer func() { if err == nil { return @@ -193,7 +193,7 @@ func CheckRetractions(ctx context.Context, m module.Version) (err error) { // Cannot be retracted. return nil } - if repl := Replacement(module.Version{Path: m.Path}); repl.Path != "" { + if repl := Replacement(loaderstate, module.Version{Path: m.Path}); repl.Path != "" { // All versions of the module were replaced. // Don't load retractions, since we'd just load the replacement. return nil @@ -210,11 +210,11 @@ func CheckRetractions(ctx context.Context, m module.Version) (err error) { // We load the raw file here: the go.mod file may have a different module // path that we expect if the module or its repository was renamed. // We still want to apply retractions to other aliases of the module. 
- rm, err := queryLatestVersionIgnoringRetractions(ctx, m.Path) + rm, err := queryLatestVersionIgnoringRetractions(loaderstate, ctx, m.Path) if err != nil { return err } - summary, err := rawGoModSummary(rm) + summary, err := rawGoModSummary(loaderstate, rm) if err != nil && !errors.Is(err, gover.ErrTooNew) { return err } @@ -300,7 +300,7 @@ func ShortMessage(message, emptyDefault string) string { // // CheckDeprecation returns an error if the message can't be loaded. // CheckDeprecation returns "", nil if there is no deprecation message. -func CheckDeprecation(ctx context.Context, m module.Version) (deprecation string, err error) { +func CheckDeprecation(loaderstate *State, ctx context.Context, m module.Version) (deprecation string, err error) { defer func() { if err != nil { err = fmt.Errorf("loading deprecation for %s: %w", m.Path, err) @@ -312,17 +312,17 @@ func CheckDeprecation(ctx context.Context, m module.Version) (deprecation string // Don't look up deprecation. return "", nil } - if repl := Replacement(module.Version{Path: m.Path}); repl.Path != "" { + if repl := Replacement(loaderstate, module.Version{Path: m.Path}); repl.Path != "" { // All versions of the module were replaced. // We'll look up deprecation separately for the replacement. return "", nil } - latest, err := queryLatestVersionIgnoringRetractions(ctx, m.Path) + latest, err := queryLatestVersionIgnoringRetractions(loaderstate, ctx, m.Path) if err != nil { return "", err } - summary, err := rawGoModSummary(latest) + summary, err := rawGoModSummary(loaderstate, latest) if err != nil && !errors.Is(err, gover.ErrTooNew) { return "", err } @@ -342,28 +342,28 @@ func replacement(mod module.Version, replace map[module.Version]module.Version) // Replacement returns the replacement for mod, if any. If the path in the // module.Version is relative it's relative to the single main module outside // workspace mode, or the workspace's directory in workspace mode. -func Replacement(mod module.Version) module.Version { - r, foundModRoot, _ := replacementFrom(mod) - return canonicalizeReplacePath(r, foundModRoot) +func Replacement(loaderstate *State, mod module.Version) module.Version { + r, foundModRoot, _ := replacementFrom(loaderstate, mod) + return canonicalizeReplacePath(loaderstate, r, foundModRoot) } // replacementFrom returns the replacement for mod, if any, the modroot of the replacement if it appeared in a go.mod, // and the source of the replacement. The replacement is relative to the go.work or go.mod file it appears in. -func replacementFrom(mod module.Version) (r module.Version, modroot string, fromFile string) { +func replacementFrom(loaderstate *State, mod module.Version) (r module.Version, modroot string, fromFile string) { foundFrom, found, foundModRoot := "", module.Version{}, "" - if LoaderState.MainModules == nil { + if loaderstate.MainModules == nil { return module.Version{}, "", "" - } else if LoaderState.MainModules.Contains(mod.Path) && mod.Version == "" { + } else if loaderstate.MainModules.Contains(mod.Path) && mod.Version == "" { // Don't replace the workspace version of the main module. 
return module.Version{}, "", "" } - if _, r, ok := replacement(mod, LoaderState.MainModules.WorkFileReplaceMap()); ok { - return r, "", LoaderState.workFilePath + if _, r, ok := replacement(mod, loaderstate.MainModules.WorkFileReplaceMap()); ok { + return r, "", loaderstate.workFilePath } - for _, v := range LoaderState.MainModules.Versions() { - if index := LoaderState.MainModules.Index(v); index != nil { + for _, v := range loaderstate.MainModules.Versions() { + if index := loaderstate.MainModules.Index(v); index != nil { if from, r, ok := replacement(mod, index.replace); ok { - modRoot := LoaderState.MainModules.ModRoot(v) + modRoot := loaderstate.MainModules.ModRoot(v) if foundModRoot != "" && foundFrom != from && found != r { base.Errorf("conflicting replacements found for %v in workspace modules defined by %v and %v", mod, modFilePath(foundModRoot), modFilePath(modRoot)) @@ -376,21 +376,21 @@ func replacementFrom(mod module.Version) (r module.Version, modroot string, from return found, foundModRoot, modFilePath(foundModRoot) } -func replaceRelativeTo() string { - if workFilePath := WorkFilePath(); workFilePath != "" { +func replaceRelativeTo(loaderstate *State) string { + if workFilePath := WorkFilePath(loaderstate); workFilePath != "" { return filepath.Dir(workFilePath) } - return LoaderState.MainModules.ModRoot(LoaderState.MainModules.mustGetSingleMainModule()) + return loaderstate.MainModules.ModRoot(loaderstate.MainModules.mustGetSingleMainModule(loaderstate)) } // canonicalizeReplacePath ensures that relative, on-disk, replaced module paths // are relative to the workspace directory (in workspace mode) or to the module's // directory (in module mode, as they already are). -func canonicalizeReplacePath(r module.Version, modRoot string) module.Version { +func canonicalizeReplacePath(loaderstate *State, r module.Version, modRoot string) module.Version { if filepath.IsAbs(r.Path) || r.Version != "" || modRoot == "" { return r } - workFilePath := WorkFilePath() + workFilePath := WorkFilePath(loaderstate) if workFilePath == "" { return r } @@ -407,8 +407,8 @@ func canonicalizeReplacePath(r module.Version, modRoot string) module.Version { // for m: either m itself, or the replacement for m (iff m is replaced). // It also returns the modroot of the module providing the replacement if // one was found. -func resolveReplacement(m module.Version) module.Version { - if r := Replacement(m); r.Path != "" { +func resolveReplacement(loaderstate *State, m module.Version) module.Version { + if r := Replacement(loaderstate, m); r.Path != "" { return r } return m @@ -573,12 +573,12 @@ type retraction struct { // module versions. // // The caller must not modify the returned summary. 
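canonicalizeReplacePath, in the hunk above, rewrites a relative replacement path so it is anchored at the workspace directory instead of the go.mod file that declared it. A minimal sketch of that rebasing, assuming a simplified version type and an explicit workDir argument in place of the real WorkFilePath lookup:

package main

import (
	"fmt"
	"path/filepath"
)

// version is a stand-in for module.Version with only the fields this sketch
// needs.
type version struct{ Path, Version string }

// canonicalize re-expresses a relative on-disk replacement relative to the
// workspace directory, so every module in the workspace resolves it from the
// same base. Absolute paths and versioned replacements pass through untouched.
func canonicalize(r version, modRoot, workDir string) version {
	if filepath.IsAbs(r.Path) || r.Version != "" || modRoot == "" || workDir == "" {
		return r // nothing to do: not a relative filesystem replacement
	}
	abs := filepath.Join(modRoot, r.Path)
	if rel, err := filepath.Rel(workDir, abs); err == nil {
		return version{Path: "./" + filepath.ToSlash(rel)}
	}
	return version{Path: abs} // fall back to an absolute path
}

func main() {
	// replace example.com/tools => ../tools, written in /work/mod/go.mod,
	// viewed from a workspace rooted at /work.
	fmt.Println(canonicalize(version{Path: "../tools"}, "/work/mod", "/work"))
}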
-func goModSummary(m module.Version) (*modFileSummary, error) { - if m.Version == "" && !inWorkspaceMode() && LoaderState.MainModules.Contains(m.Path) { +func goModSummary(loaderstate *State, m module.Version) (*modFileSummary, error) { + if m.Version == "" && !inWorkspaceMode(loaderstate) && loaderstate.MainModules.Contains(m.Path) { panic("internal error: goModSummary called on a main module") } if gover.IsToolchain(m.Path) { - return rawGoModSummary(m) + return rawGoModSummary(loaderstate, m) } if cfg.BuildMod == "vendor" { @@ -586,7 +586,7 @@ func goModSummary(m module.Version) (*modFileSummary, error) { module: module.Version{Path: m.Path}, } - readVendorList(VendorDir()) + readVendorList(VendorDir(loaderstate)) if vendorVersion[m.Path] != m.Version { // This module is not vendored, so packages cannot be loaded from it and // it cannot be relevant to the build. @@ -601,15 +601,15 @@ func goModSummary(m module.Version) (*modFileSummary, error) { return summary, nil } - actual := resolveReplacement(m) - if mustHaveSums() && actual.Version != "" { + actual := resolveReplacement(loaderstate, m) + if mustHaveSums(loaderstate) && actual.Version != "" { key := module.Version{Path: actual.Path, Version: actual.Version + "/go.mod"} if !modfetch.HaveSum(key) { suggestion := fmt.Sprintf(" for go.mod file; to add it:\n\tgo mod download %s", m.Path) return nil, module.VersionError(actual, &sumMissingError{suggestion: suggestion}) } } - summary, err := rawGoModSummary(actual) + summary, err := rawGoModSummary(loaderstate, actual) if err != nil { return nil, err } @@ -641,8 +641,8 @@ func goModSummary(m module.Version) (*modFileSummary, error) { } } - for _, mainModule := range LoaderState.MainModules.Versions() { - if index := LoaderState.MainModules.Index(mainModule); index != nil && len(index.exclude) > 0 { + for _, mainModule := range loaderstate.MainModules.Versions() { + if index := loaderstate.MainModules.Index(mainModule); index != nil && len(index.exclude) > 0 { // Drop any requirements on excluded versions. // Don't modify the cached summary though, since we might need the raw // summary separately. @@ -676,7 +676,7 @@ func goModSummary(m module.Version) (*modFileSummary, error) { // rawGoModSummary cannot be used on the main module outside of workspace mode. // The modFileSummary can still be used for retractions and deprecations // even if a TooNewError is returned. -func rawGoModSummary(m module.Version) (*modFileSummary, error) { +func rawGoModSummary(loaderstate *State, m module.Version) (*modFileSummary, error) { if gover.IsToolchain(m.Path) { if m.Path == "go" && gover.Compare(m.Version, gover.GoStrictVersion) >= 0 { // Declare that go 1.21.3 requires toolchain 1.21.3, @@ -686,7 +686,7 @@ func rawGoModSummary(m module.Version) (*modFileSummary, error) { } return &modFileSummary{module: m}, nil } - if m.Version == "" && !inWorkspaceMode() && LoaderState.MainModules.Contains(m.Path) { + if m.Version == "" && !inWorkspaceMode(loaderstate) && loaderstate.MainModules.Contains(m.Path) { // Calling rawGoModSummary implies that we are treating m as a module whose // requirements aren't the roots of the module graph and can't be modified. // @@ -694,22 +694,22 @@ func rawGoModSummary(m module.Version) (*modFileSummary, error) { // are the roots of the module graph and we expect them to be kept consistent. 
panic("internal error: rawGoModSummary called on a main module") } - if m.Version == "" && inWorkspaceMode() && m.Path == "command-line-arguments" { + if m.Version == "" && inWorkspaceMode(loaderstate) && m.Path == "command-line-arguments" { // "go work sync" calls LoadModGraph to make sure the module graph is valid. // If there are no modules in the workspace, we synthesize an empty // command-line-arguments module, which rawGoModData cannot read a go.mod for. return &modFileSummary{module: m}, nil - } else if m.Version == "" && inWorkspaceMode() && LoaderState.MainModules.Contains(m.Path) { + } else if m.Version == "" && inWorkspaceMode(loaderstate) && loaderstate.MainModules.Contains(m.Path) { // When go get uses EnterWorkspace to check that the workspace loads properly, // it will update the contents of the workspace module's modfile in memory. To use the updated // contents of the modfile when doing the load, don't read from disk and instead // recompute a summary using the updated contents of the modfile. - if mf := LoaderState.MainModules.ModFile(m); mf != nil { - return summaryFromModFile(m, LoaderState.MainModules.modFiles[m]) + if mf := loaderstate.MainModules.ModFile(m); mf != nil { + return summaryFromModFile(m, loaderstate.MainModules.modFiles[m]) } } return rawGoModSummaryCache.Do(m, func() (*modFileSummary, error) { - name, data, err := rawGoModData(m) + name, data, err := rawGoModData(loaderstate, m) if err != nil { return nil, err } @@ -781,15 +781,15 @@ var rawGoModSummaryCache par.ErrCache[module.Version, *modFileSummary] // // Unlike rawGoModSummary, rawGoModData does not cache its results in memory. // Use rawGoModSummary instead unless you specifically need these bytes. -func rawGoModData(m module.Version) (name string, data []byte, err error) { +func rawGoModData(loaderstate *State, m module.Version) (name string, data []byte, err error) { if m.Version == "" { dir := m.Path if !filepath.IsAbs(dir) { - if inWorkspaceMode() && LoaderState.MainModules.Contains(m.Path) { - dir = LoaderState.MainModules.ModRoot(m) + if inWorkspaceMode(loaderstate) && loaderstate.MainModules.Contains(m.Path) { + dir = loaderstate.MainModules.ModRoot(m) } else { // m is a replacement module with only a file path. - dir = filepath.Join(replaceRelativeTo(), dir) + dir = filepath.Join(replaceRelativeTo(loaderstate), dir) } } name = filepath.Join(dir, "go.mod") @@ -825,12 +825,12 @@ func rawGoModData(m module.Version) (name string, data []byte, err error) { // // If the queried latest version is replaced, // queryLatestVersionIgnoringRetractions returns the replacement. -func queryLatestVersionIgnoringRetractions(ctx context.Context, path string) (latest module.Version, err error) { +func queryLatestVersionIgnoringRetractions(loaderstate *State, ctx context.Context, path string) (latest module.Version, err error) { return latestVersionIgnoringRetractionsCache.Do(path, func() (module.Version, error) { ctx, span := trace.StartSpan(ctx, "queryLatestVersionIgnoringRetractions "+path) defer span.Done() - if repl := Replacement(module.Version{Path: path}); repl.Path != "" { + if repl := Replacement(loaderstate, module.Version{Path: path}); repl.Path != "" { // All versions of the module were replaced. // No need to query. return repl, nil @@ -840,12 +840,12 @@ func queryLatestVersionIgnoringRetractions(ctx context.Context, path string) (la // Ignore exclusions from the main module's go.mod. 
const ignoreSelected = "" var allowAll AllowedFunc - rev, err := Query(ctx, path, "latest", ignoreSelected, allowAll) + rev, err := Query(loaderstate, ctx, path, "latest", ignoreSelected, allowAll) if err != nil { return module.Version{}, err } latest := module.Version{Path: path, Version: rev.Version} - if repl := resolveReplacement(latest); repl.Path != "" { + if repl := resolveReplacement(loaderstate, latest); repl.Path != "" { latest = repl } return latest, nil diff --git a/src/cmd/go/internal/modload/mvs.go b/src/cmd/go/internal/modload/mvs.go index 97e6fe44dd7545..32afc866fbcc14 100644 --- a/src/cmd/go/internal/modload/mvs.go +++ b/src/cmd/go/internal/modload/mvs.go @@ -53,7 +53,7 @@ func (r *mvsReqs) Required(mod module.Version) ([]module.Version, error) { return nil, nil } - summary, err := goModSummary(mod) + summary, err := goModSummary(LoaderState, mod) if err != nil { return nil, err } @@ -79,11 +79,11 @@ func (*mvsReqs) Upgrade(m module.Version) (module.Version, error) { return m, nil } -func versions(ctx context.Context, path string, allowed AllowedFunc) (versions []string, origin *codehost.Origin, err error) { +func versions(loaderstate *State, ctx context.Context, path string, allowed AllowedFunc) (versions []string, origin *codehost.Origin, err error) { // Note: modfetch.Lookup and repo.Versions are cached, // so there's no need for us to add extra caching here. err = modfetch.TryProxies(func(proxy string) error { - repo, err := lookupRepo(ctx, proxy, path) + repo, err := lookupRepo(loaderstate, ctx, proxy, path) if err != nil { return err } @@ -111,12 +111,12 @@ func versions(ctx context.Context, path string, allowed AllowedFunc) (versions [ // // Since the version of a main module is not found in the version list, // it has no previous version. -func previousVersion(ctx context.Context, m module.Version) (module.Version, error) { - if m.Version == "" && LoaderState.MainModules.Contains(m.Path) { +func previousVersion(loaderstate *State, ctx context.Context, m module.Version) (module.Version, error) { + if m.Version == "" && loaderstate.MainModules.Contains(m.Path) { return module.Version{Path: m.Path, Version: "none"}, nil } - list, _, err := versions(ctx, m.Path, CheckAllowed) + list, _, err := versions(loaderstate, ctx, m.Path, CheckAllowed) if err != nil { if errors.Is(err, os.ErrNotExist) { return module.Version{Path: m.Path, Version: "none"}, nil @@ -132,5 +132,5 @@ func previousVersion(ctx context.Context, m module.Version) (module.Version, err func (*mvsReqs) Previous(m module.Version) (module.Version, error) { // TODO(golang.org/issue/38714): thread tracing context through MVS. - return previousVersion(context.TODO(), m) + return previousVersion(LoaderState, context.TODO(), m) } diff --git a/src/cmd/go/internal/modload/query.go b/src/cmd/go/internal/modload/query.go index b37a244fbbacdd..17a0aef21ab2e9 100644 --- a/src/cmd/go/internal/modload/query.go +++ b/src/cmd/go/internal/modload/query.go @@ -80,19 +80,19 @@ import ( // // Query often returns a non-nil *RevInfo with a non-nil error, // to provide an info.Origin that can allow the error to be cached. 
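previousVersion, above, answers "which version comes just before m" by consulting the sorted version list returned by versions. A small sketch of that lookup, assuming the golang.org/x/mod/semver package for ordering where cmd/go uses its internal gover helpers:

package main

import (
	"fmt"
	"slices"

	"golang.org/x/mod/semver" // assumed dependency for this sketch
)

// previous sorts the version list in semver order and returns the entry
// immediately below the current version, reporting "none" when nothing
// precedes it (as previousVersion does for a main module).
func previous(list []string, current string) string {
	semver.Sort(list)
	i, _ := slices.BinarySearchFunc(list, current, semver.Compare)
	if i == 0 {
		return "none"
	}
	return list[i-1]
}

func main() {
	fmt.Println(previous([]string{"v1.1.0", "v1.0.0", "v1.2.0"}, "v1.2.0")) // v1.1.0
	fmt.Println(previous([]string{"v1.0.0"}, "v1.0.0"))                     // none
}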
-func Query(ctx context.Context, path, query, current string, allowed AllowedFunc) (*modfetch.RevInfo, error) { +func Query(loaderstate *State, ctx context.Context, path, query, current string, allowed AllowedFunc) (*modfetch.RevInfo, error) { ctx, span := trace.StartSpan(ctx, "modload.Query "+path) defer span.Done() - return queryReuse(ctx, path, query, current, allowed, nil) + return queryReuse(loaderstate, ctx, path, query, current, allowed, nil) } // queryReuse is like Query but also takes a map of module info that can be reused // if the validation criteria in Origin are met. -func queryReuse(ctx context.Context, path, query, current string, allowed AllowedFunc, reuse map[module.Version]*modinfo.ModulePublic) (*modfetch.RevInfo, error) { +func queryReuse(loaderstate *State, ctx context.Context, path, query, current string, allowed AllowedFunc, reuse map[module.Version]*modinfo.ModulePublic) (*modfetch.RevInfo, error) { var info *modfetch.RevInfo err := modfetch.TryProxies(func(proxy string) (err error) { - info, err = queryProxy(ctx, proxy, path, query, current, allowed, reuse) + info, err = queryProxy(loaderstate, ctx, proxy, path, query, current, allowed, reuse) return err }) return info, err @@ -100,9 +100,9 @@ func queryReuse(ctx context.Context, path, query, current string, allowed Allowe // checkReuse checks whether a revision of a given module // for a given module may be reused, according to the information in origin. -func checkReuse(ctx context.Context, m module.Version, old *codehost.Origin) error { +func checkReuse(loaderstate *State, ctx context.Context, m module.Version, old *codehost.Origin) error { return modfetch.TryProxies(func(proxy string) error { - repo, err := lookupRepo(ctx, proxy, m.Path) + repo, err := lookupRepo(loaderstate, ctx, proxy, m.Path) if err != nil { return err } @@ -197,7 +197,7 @@ func (queryDisabledError) Error() string { return fmt.Sprintf("cannot query module due to -mod=%s\n\t(%s)", cfg.BuildMod, cfg.BuildModReason) } -func queryProxy(ctx context.Context, proxy, path, query, current string, allowed AllowedFunc, reuse map[module.Version]*modinfo.ModulePublic) (*modfetch.RevInfo, error) { +func queryProxy(loaderstate *State, ctx context.Context, proxy, path, query, current string, allowed AllowedFunc, reuse map[module.Version]*modinfo.ModulePublic) (*modfetch.RevInfo, error) { ctx, span := trace.StartSpan(ctx, "modload.queryProxy "+path+" "+query) defer span.Done() @@ -211,7 +211,7 @@ func queryProxy(ctx context.Context, proxy, path, query, current string, allowed allowed = func(context.Context, module.Version) error { return nil } } - if LoaderState.MainModules.Contains(path) && (query == "upgrade" || query == "patch") { + if loaderstate.MainModules.Contains(path) && (query == "upgrade" || query == "patch") { m := module.Version{Path: path} if err := allowed(ctx, m); err != nil { return nil, fmt.Errorf("internal error: main module version is not allowed: %w", err) @@ -223,7 +223,7 @@ func queryProxy(ctx context.Context, proxy, path, query, current string, allowed return nil, fmt.Errorf("can't query specific version (%q) of standard-library module %q", query, path) } - repo, err := lookupRepo(ctx, proxy, path) + repo, err := lookupRepo(loaderstate, ctx, proxy, path) if err != nil { return nil, err } @@ -296,7 +296,7 @@ func queryProxy(ctx context.Context, proxy, path, query, current string, allowed return &clone } - releases, prereleases, err := qm.filterVersions(ctx, versions.List) + releases, prereleases, err := qm.filterVersions(loaderstate, 
ctx, versions.List) if err != nil { return revWithOrigin(nil), err } @@ -569,7 +569,7 @@ func (qm *queryMatcher) allowsVersion(ctx context.Context, v string) bool { // // If the allowed predicate returns an error not equivalent to ErrDisallowed, // filterVersions returns that error. -func (qm *queryMatcher) filterVersions(ctx context.Context, versions []string) (releases, prereleases []string, err error) { +func (qm *queryMatcher) filterVersions(loaderstate *State, ctx context.Context, versions []string) (releases, prereleases []string, err error) { needIncompatible := qm.preferIncompatible var lastCompatible string @@ -602,7 +602,7 @@ func (qm *queryMatcher) filterVersions(ctx context.Context, versions []string) ( // ignore any version with a higher (+incompatible) major version. (See // https://golang.org/issue/34165.) Note that we even prefer a // compatible pre-release over an incompatible release. - ok, err := versionHasGoMod(ctx, module.Version{Path: qm.path, Version: lastCompatible}) + ok, err := versionHasGoMod(loaderstate, ctx, module.Version{Path: qm.path, Version: lastCompatible}) if err != nil { return nil, nil, err } @@ -639,11 +639,11 @@ type QueryResult struct { // QueryPackages is like QueryPattern, but requires that the pattern match at // least one package and omits the non-package result (if any). -func QueryPackages(ctx context.Context, pattern, query string, current func(string) string, allowed AllowedFunc) ([]QueryResult, error) { - pkgMods, modOnly, err := QueryPattern(ctx, pattern, query, current, allowed) +func QueryPackages(loaderstate *State, ctx context.Context, pattern, query string, current func(string) string, allowed AllowedFunc) ([]QueryResult, error) { + pkgMods, modOnly, err := QueryPattern(loaderstate, ctx, pattern, query, current, allowed) if len(pkgMods) == 0 && err == nil { - replacement := Replacement(modOnly.Mod) + replacement := Replacement(loaderstate, modOnly.Mod) return nil, &PackageNotInModuleError{ Mod: modOnly.Mod, Replacement: replacement, @@ -670,7 +670,7 @@ func QueryPackages(ctx context.Context, pattern, query string, current func(stri // // QueryPattern always returns at least one QueryResult (which may be only // modOnly) or a non-nil error. 
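filterVersions, threaded through loaderstate above, splits the allowed versions into releases and prereleases so that release versions win whenever one is available. A simplified partition, again assuming golang.org/x/mod/semver and ignoring the +incompatible and go.mod checks the real code performs:

package main

import (
	"fmt"

	"golang.org/x/mod/semver" // assumed dependency for the prerelease check
)

// partition groups allowed versions into releases and prereleases; queries
// like "latest" prefer a release and fall back to prereleases only when no
// release qualifies.
func partition(versions []string) (releases, prereleases []string) {
	for _, v := range versions {
		if !semver.IsValid(v) {
			continue // ignore anything that is not canonical semver
		}
		if semver.Prerelease(v) != "" {
			prereleases = append(prereleases, v)
		} else {
			releases = append(releases, v)
		}
	}
	return releases, prereleases
}

func main() {
	rel, pre := partition([]string{"v1.2.0", "v1.3.0-beta.1", "v0.9.0", "bogus"})
	fmt.Println(rel, pre) // [v1.2.0 v0.9.0] [v1.3.0-beta.1]
}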
-func QueryPattern(ctx context.Context, pattern, query string, current func(string) string, allowed AllowedFunc) (pkgMods []QueryResult, modOnly *QueryResult, err error) { +func QueryPattern(loaderstate *State, ctx context.Context, pattern, query string, current func(string) string, allowed AllowedFunc) (pkgMods []QueryResult, modOnly *QueryResult, err error) { ctx, span := trace.StartSpan(ctx, "modload.QueryPattern "+pattern+" "+query) defer span.Done() @@ -693,15 +693,15 @@ func QueryPattern(ctx context.Context, pattern, query string, current func(strin } match = func(mod module.Version, roots []string, isLocal bool) *search.Match { m := search.NewMatch(pattern) - matchPackages(ctx, m, imports.AnyTags(), omitStd, []module.Version{mod}) + matchPackages(loaderstate, ctx, m, imports.AnyTags(), omitStd, []module.Version{mod}) return m } } else { match = func(mod module.Version, roots []string, isLocal bool) *search.Match { m := search.NewMatch(pattern) prefix := mod.Path - if LoaderState.MainModules.Contains(mod.Path) { - prefix = LoaderState.MainModules.PathPrefix(module.Version{Path: mod.Path}) + if loaderstate.MainModules.Contains(mod.Path) { + prefix = loaderstate.MainModules.PathPrefix(module.Version{Path: mod.Path}) } for _, root := range roots { if _, ok, err := dirInModule(pattern, prefix, root, isLocal); err != nil { @@ -715,8 +715,8 @@ func QueryPattern(ctx context.Context, pattern, query string, current func(strin } var mainModuleMatches []module.Version - for _, mainModule := range LoaderState.MainModules.Versions() { - m := match(mainModule, LoaderState.modRoots, true) + for _, mainModule := range loaderstate.MainModules.Versions() { + m := match(mainModule, loaderstate.modRoots, true) if len(m.Pkgs) > 0 { if query != "upgrade" && query != "patch" { return nil, nil, &QueryMatchesPackagesInMainModuleError{ @@ -756,7 +756,7 @@ func QueryPattern(ctx context.Context, pattern, query string, current func(strin var ( results []QueryResult - candidateModules = modulePrefixesExcludingTarget(base) + candidateModules = modulePrefixesExcludingTarget(loaderstate, base) ) if len(candidateModules) == 0 { if modOnly != nil { @@ -783,7 +783,7 @@ func QueryPattern(ctx context.Context, pattern, query string, current func(strin pathCurrent := current(path) r.Mod.Path = path - r.Rev, err = queryProxy(ctx, proxy, path, query, pathCurrent, allowed, nil) + r.Rev, err = queryProxy(loaderstate, ctx, proxy, path, query, pathCurrent, allowed, nil) if err != nil { return r, err } @@ -791,7 +791,7 @@ func QueryPattern(ctx context.Context, pattern, query string, current func(strin if gover.IsToolchain(r.Mod.Path) { return r, nil } - root, isLocal, err := fetch(ctx, r.Mod) + root, isLocal, err := fetch(loaderstate, ctx, r.Mod) if err != nil { return r, err } @@ -801,7 +801,7 @@ func QueryPattern(ctx context.Context, pattern, query string, current func(strin if err := firstError(m); err != nil { return r, err } - replacement := Replacement(r.Mod) + replacement := Replacement(loaderstate, r.Mod) return r, &PackageNotInModuleError{ Mod: r.Mod, Replacement: replacement, @@ -812,7 +812,7 @@ func QueryPattern(ctx context.Context, pattern, query string, current func(strin return r, nil } - allResults, err := queryPrefixModules(ctx, candidateModules, queryModule) + allResults, err := queryPrefixModules(loaderstate, ctx, candidateModules, queryModule) results = allResults[:0] for _, r := range allResults { if len(r.Packages) == 0 { @@ -838,11 +838,11 @@ func QueryPattern(ctx context.Context, pattern, query string, 
current func(strin // itself, sorted by descending length. Prefixes that are not valid module paths // but are valid package paths (like "m" or "example.com/.gen") are included, // since they might be replaced. -func modulePrefixesExcludingTarget(path string) []string { +func modulePrefixesExcludingTarget(loaderstate *State, path string) []string { prefixes := make([]string, 0, strings.Count(path, "/")+1) mainModulePrefixes := make(map[string]bool) - for _, m := range LoaderState.MainModules.Versions() { + for _, m := range loaderstate.MainModules.Versions() { mainModulePrefixes[m.Path] = true } @@ -863,7 +863,7 @@ func modulePrefixesExcludingTarget(path string) []string { return prefixes } -func queryPrefixModules(ctx context.Context, candidateModules []string, queryModule func(ctx context.Context, path string) (QueryResult, error)) (found []QueryResult, err error) { +func queryPrefixModules(loaderstate *State, ctx context.Context, candidateModules []string, queryModule func(ctx context.Context, path string) (QueryResult, error)) (found []QueryResult, err error) { ctx, span := trace.StartSpan(ctx, "modload.queryPrefixModules") defer span.Done() @@ -905,7 +905,7 @@ func queryPrefixModules(ctx context.Context, candidateModules []string, queryMod case *PackageNotInModuleError: // Given the option, prefer to attribute “package not in module” // to modules other than the main one. - if noPackage == nil || LoaderState.MainModules.Contains(noPackage.Mod.Path) { + if noPackage == nil || loaderstate.MainModules.Contains(noPackage.Mod.Path) { noPackage = rErr } case *NoMatchingVersionError: @@ -1096,8 +1096,8 @@ func (e *PackageNotInModuleError) ImportPath() string { // go.mod with different content. Second, if we don't fetch the .zip, then // we don't need to verify it in go.sum. This makes 'go list -m -u' faster // and simpler. 
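modulePrefixesExcludingTarget, shown above, derives the candidate module paths for a package by peeling off path components, longest prefix first. The core of that enumeration, without the main-module filtering:

package main

import (
	"fmt"
	"strings"
)

// modulePrefixes enumerates every path prefix of a package path, longest
// first, the candidate order fed into queryPrefixModules above.
func modulePrefixes(pkgPath string) []string {
	prefixes := make([]string, 0, strings.Count(pkgPath, "/")+1)
	for {
		prefixes = append(prefixes, pkgPath)
		i := strings.LastIndexByte(pkgPath, '/')
		if i < 0 {
			break
		}
		pkgPath = pkgPath[:i]
	}
	return prefixes
}

func main() {
	fmt.Println(modulePrefixes("example.com/a/b/c"))
	// [example.com/a/b/c example.com/a/b example.com/a example.com]
}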
-func versionHasGoMod(_ context.Context, m module.Version) (bool, error) { - _, data, err := rawGoModData(m) +func versionHasGoMod(loaderstate *State, _ context.Context, m module.Version) (bool, error) { + _, data, err := rawGoModData(loaderstate, m) if err != nil { return false, err } @@ -1117,7 +1117,7 @@ type versionRepo interface { var _ versionRepo = modfetch.Repo(nil) -func lookupRepo(ctx context.Context, proxy, path string) (repo versionRepo, err error) { +func lookupRepo(loaderstate *State, ctx context.Context, proxy, path string) (repo versionRepo, err error) { if path != "go" && path != "toolchain" { err = module.CheckPath(path) } @@ -1127,9 +1127,9 @@ func lookupRepo(ctx context.Context, proxy, path string) (repo versionRepo, err repo = emptyRepo{path: path, err: err} } - if LoaderState.MainModules == nil { + if loaderstate.MainModules == nil { return repo, err - } else if _, ok := LoaderState.MainModules.HighestReplaced()[path]; ok { + } else if _, ok := loaderstate.MainModules.HighestReplaced()[path]; ok { return &replacementRepo{repo: repo}, nil } @@ -1239,7 +1239,7 @@ func (rr *replacementRepo) Stat(ctx context.Context, rev string) (*modfetch.RevI } } - if r := Replacement(module.Version{Path: path, Version: v}); r.Path == "" { + if r := Replacement(LoaderState, module.Version{Path: path, Version: v}); r.Path == "" { return info, err } return rr.replacementStat(v) diff --git a/src/cmd/go/internal/modload/query_test.go b/src/cmd/go/internal/modload/query_test.go index 93f8f0d00d1c8d..b4487eebb0d9e9 100644 --- a/src/cmd/go/internal/modload/query_test.go +++ b/src/cmd/go/internal/modload/query_test.go @@ -182,7 +182,7 @@ func TestQuery(t *testing.T) { t.Run(strings.ReplaceAll(tt.path, "/", "_")+"/"+tt.query+"/"+tt.current+"/"+allow, func(t *testing.T) { t.Parallel() - info, err := Query(ctx, tt.path, tt.query, tt.current, allowed) + info, err := Query(LoaderState, ctx, tt.path, tt.query, tt.current, allowed) if tt.err != "" { if err == nil { t.Errorf("Query(_, %q, %q, %q, %v) = %v, want error %q", tt.path, tt.query, tt.current, allow, info.Version, tt.err) diff --git a/src/cmd/go/internal/modload/search.go b/src/cmd/go/internal/modload/search.go index 205db3e8f7c8db..9951e68ee8e6c9 100644 --- a/src/cmd/go/internal/modload/search.go +++ b/src/cmd/go/internal/modload/search.go @@ -41,7 +41,7 @@ const ( // matchPackages is like m.MatchPackages, but uses a local variable (rather than // a global) for tags, can include or exclude packages in the standard library, // and is restricted to the given list of modules. 
-func matchPackages(ctx context.Context, m *search.Match, tags map[string]bool, filter stdFilter, modules []module.Version) { +func matchPackages(loaderstate *State, ctx context.Context, m *search.Match, tags map[string]bool, filter stdFilter, modules []module.Version) { ctx, span := trace.StartSpan(ctx, "modload.matchPackages") defer span.Done() @@ -74,7 +74,7 @@ func matchPackages(ctx context.Context, m *search.Match, tags map[string]bool, f ) q := par.NewQueue(runtime.GOMAXPROCS(0)) - ignorePatternsMap := parseIgnorePatterns(ctx, treeCanMatch, modules) + ignorePatternsMap := parseIgnorePatterns(loaderstate, ctx, treeCanMatch, modules) walkPkgs := func(root, importPathRoot string, prune pruning) { _, span := trace.StartSpan(ctx, "walkPkgs "+root) defer span.Done() @@ -171,13 +171,13 @@ func matchPackages(ctx context.Context, m *search.Match, tags map[string]bool, f } if cfg.BuildMod == "vendor" { - for _, mod := range LoaderState.MainModules.Versions() { - if modRoot := LoaderState.MainModules.ModRoot(mod); modRoot != "" { - walkPkgs(modRoot, LoaderState.MainModules.PathPrefix(mod), pruneGoMod|pruneVendor) + for _, mod := range loaderstate.MainModules.Versions() { + if modRoot := loaderstate.MainModules.ModRoot(mod); modRoot != "" { + walkPkgs(modRoot, loaderstate.MainModules.PathPrefix(mod), pruneGoMod|pruneVendor) } } - if HasModRoot() { - walkPkgs(VendorDir(), "", pruneVendor) + if HasModRoot(loaderstate) { + walkPkgs(VendorDir(loaderstate), "", pruneVendor) } return } @@ -191,16 +191,16 @@ func matchPackages(ctx context.Context, m *search.Match, tags map[string]bool, f root, modPrefix string isLocal bool ) - if LoaderState.MainModules.Contains(mod.Path) { - if LoaderState.MainModules.ModRoot(mod) == "" { + if loaderstate.MainModules.Contains(mod.Path) { + if loaderstate.MainModules.ModRoot(mod) == "" { continue // If there is no main module, we can't search in it. } - root = LoaderState.MainModules.ModRoot(mod) - modPrefix = LoaderState.MainModules.PathPrefix(mod) + root = loaderstate.MainModules.ModRoot(mod) + modPrefix = loaderstate.MainModules.PathPrefix(mod) isLocal = true } else { var err error - root, isLocal, err = fetch(ctx, mod) + root, isLocal, err = fetch(loaderstate, ctx, mod) if err != nil { m.AddError(err) continue @@ -286,17 +286,17 @@ func walkFromIndex(index *modindex.Module, importPathRoot string, isMatch, treeC func MatchInModule(ctx context.Context, pattern string, m module.Version, tags map[string]bool) *search.Match { match := search.NewMatch(pattern) if m == (module.Version{}) { - matchPackages(ctx, match, tags, includeStd, nil) + matchPackages(LoaderState, ctx, match, tags, includeStd, nil) } - LoadModFile(ctx) // Sets Target, needed by fetch and matchPackages. + LoadModFile(LoaderState, ctx) // Sets Target, needed by fetch and matchPackages. if !match.IsLiteral() { - matchPackages(ctx, match, tags, omitStd, []module.Version{m}) + matchPackages(LoaderState, ctx, match, tags, omitStd, []module.Version{m}) return match } - root, isLocal, err := fetch(ctx, m) + root, isLocal, err := fetch(LoaderState, ctx, m) if err != nil { match.Errs = []error{err} return match @@ -322,7 +322,7 @@ func MatchInModule(ctx context.Context, pattern string, m module.Version, tags m // parseIgnorePatterns collects all ignore patterns associated with the // provided list of modules. // It returns a map of module root -> *search.IgnorePatterns. 
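matchPackages, converted above, walks each selected module's root directory and reports every directory of Go files as a package, pruning vendor and test data trees. A heavily simplified walk in that spirit; the function and argument names here are invented for the sketch, and build tags, ignore patterns, and module pruning are all omitted:

package main

import (
	"fmt"
	"io/fs"
	"path"
	"path/filepath"
	"strings"
)

// listPackages reports each directory under root containing at least one .go
// file as a package rooted at importPathRoot, skipping testdata, vendor,
// hidden, and underscore directories.
func listPackages(root, importPathRoot string) ([]string, error) {
	var pkgs []string
	err := filepath.WalkDir(root, func(p string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if d.IsDir() {
			name := d.Name()
			if p != root && (name == "testdata" || name == "vendor" ||
				strings.HasPrefix(name, ".") || strings.HasPrefix(name, "_")) {
				return filepath.SkipDir
			}
			return nil
		}
		if !strings.HasSuffix(d.Name(), ".go") {
			return nil
		}
		rel, err := filepath.Rel(root, filepath.Dir(p))
		if err != nil {
			return err
		}
		ip := path.Join(importPathRoot, filepath.ToSlash(rel))
		if len(pkgs) == 0 || pkgs[len(pkgs)-1] != ip {
			pkgs = append(pkgs, ip) // files in a directory are visited consecutively
		}
		return nil
	})
	return pkgs, err
}

func main() {
	pkgs, err := listPackages(".", "example.com/m")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(pkgs)
}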
-func parseIgnorePatterns(ctx context.Context, treeCanMatch func(string) bool, modules []module.Version) map[string]*search.IgnorePatterns { +func parseIgnorePatterns(loaderstate *State, ctx context.Context, treeCanMatch func(string) bool, modules []module.Version) map[string]*search.IgnorePatterns { ignorePatternsMap := make(map[string]*search.IgnorePatterns) for _, mod := range modules { if gover.IsToolchain(mod.Path) || !treeCanMatch(mod.Path) { @@ -330,12 +330,12 @@ func parseIgnorePatterns(ctx context.Context, treeCanMatch func(string) bool, mo } var modRoot string var ignorePatterns []string - if LoaderState.MainModules.Contains(mod.Path) { - modRoot = LoaderState.MainModules.ModRoot(mod) + if loaderstate.MainModules.Contains(mod.Path) { + modRoot = loaderstate.MainModules.ModRoot(mod) if modRoot == "" { continue } - modIndex := LoaderState.MainModules.Index(mod) + modIndex := loaderstate.MainModules.Index(mod) if modIndex == nil { continue } @@ -344,11 +344,11 @@ func parseIgnorePatterns(ctx context.Context, treeCanMatch func(string) bool, mo // Skip getting ignore patterns for vendored modules because they // do not have go.mod files. var err error - modRoot, _, err = fetch(ctx, mod) + modRoot, _, err = fetch(loaderstate, ctx, mod) if err != nil { continue } - summary, err := goModSummary(mod) + summary, err := goModSummary(loaderstate, mod) if err != nil { continue } diff --git a/src/cmd/go/internal/modload/vendor.go b/src/cmd/go/internal/modload/vendor.go index d3f055acf64f50..bf0c4e403717a6 100644 --- a/src/cmd/go/internal/modload/vendor.go +++ b/src/cmd/go/internal/modload/vendor.go @@ -140,10 +140,10 @@ func readVendorList(vendorDir string) { // checkVendorConsistency verifies that the vendor/modules.txt file matches (if // go 1.14) or at least does not contradict (go 1.13 or earlier) the // requirements and replacements listed in the main module's go.mod file. -func checkVendorConsistency(indexes []*modFileIndex, modFiles []*modfile.File, modRoots []string) { +func checkVendorConsistency(loaderstate *State, indexes []*modFileIndex, modFiles []*modfile.File, modRoots []string) { // readVendorList only needs the main module to get the directory // the vendor directory is in. - readVendorList(VendorDir()) + readVendorList(VendorDir(loaderstate)) if len(modFiles) < 1 { // We should never get here if there are zero modfiles. 
Either @@ -154,7 +154,7 @@ func checkVendorConsistency(indexes []*modFileIndex, modFiles []*modfile.File, m } pre114 := false - if !inWorkspaceMode() { // workspace mode was added after Go 1.14 + if !inWorkspaceMode(loaderstate) { // workspace mode was added after Go 1.14 if len(indexes) != 1 { panic(fmt.Errorf("not in workspace mode but number of indexes is %v, not 1", len(indexes))) } @@ -215,8 +215,8 @@ func checkVendorConsistency(indexes []*modFileIndex, modFiles []*modfile.File, m continue // Don't print the same error more than once } seenrep[r.Old] = true - rNew, modRoot, replacementSource := replacementFrom(r.Old) - rNewCanonical := canonicalizeReplacePath(rNew, modRoot) + rNew, modRoot, replacementSource := replacementFrom(loaderstate, r.Old) + rNewCanonical := canonicalizeReplacePath(loaderstate, rNew, modRoot) vr := vendorMeta[r.Old].Replacement if vr == (module.Version{}) { if rNewCanonical == (module.Version{}) { @@ -236,8 +236,8 @@ func checkVendorConsistency(indexes []*modFileIndex, modFiles []*modfile.File, m for _, modFile := range modFiles { checkReplace(modFile.Replace) } - if LoaderState.MainModules.workFile != nil { - checkReplace(LoaderState.MainModules.workFile.Replace) + if loaderstate.MainModules.workFile != nil { + checkReplace(loaderstate.MainModules.workFile.Replace) } for _, mod := range vendorList { @@ -252,7 +252,7 @@ func checkVendorConsistency(indexes []*modFileIndex, modFiles []*modfile.File, m } if !foundRequire { article := "" - if inWorkspaceMode() { + if inWorkspaceMode(loaderstate) { article = "a " } vendErrorf(mod, "is marked as explicit in vendor/modules.txt, but not explicitly required in %vgo.mod", article) @@ -262,9 +262,9 @@ func checkVendorConsistency(indexes []*modFileIndex, modFiles []*modfile.File, m } for _, mod := range vendorReplaced { - r := Replacement(mod) + r := Replacement(loaderstate, mod) replacementSource := "go.mod" - if inWorkspaceMode() { + if inWorkspaceMode(loaderstate) { replacementSource = "the workspace" } if r == (module.Version{}) { @@ -276,9 +276,9 @@ func checkVendorConsistency(indexes []*modFileIndex, modFiles []*modfile.File, m if vendErrors.Len() > 0 { subcmd := "mod" - if inWorkspaceMode() { + if inWorkspaceMode(loaderstate) { subcmd = "work" } - base.Fatalf("go: inconsistent vendoring in %s:%s\n\n\tTo ignore the vendor directory, use -mod=readonly or -mod=mod.\n\tTo sync the vendor directory, run:\n\t\tgo %s vendor", filepath.Dir(VendorDir()), vendErrors, subcmd) + base.Fatalf("go: inconsistent vendoring in %s:%s\n\n\tTo ignore the vendor directory, use -mod=readonly or -mod=mod.\n\tTo sync the vendor directory, run:\n\t\tgo %s vendor", filepath.Dir(VendorDir(loaderstate)), vendErrors, subcmd) } } diff --git a/src/cmd/go/internal/run/run.go b/src/cmd/go/internal/run/run.go index d922dcdd66a551..b6d76514b032d8 100644 --- a/src/cmd/go/internal/run/run.go +++ b/src/cmd/go/internal/run/run.go @@ -78,13 +78,13 @@ func runRun(ctx context.Context, cmd *base.Command, args []string) { // for -race and -msan. 
modload.LoaderState.ForceUseModules = true modload.LoaderState.RootMode = modload.NoRoot - modload.AllowMissingModuleImports() - modload.Init() + modload.AllowMissingModuleImports(modload.LoaderState) + modload.Init(modload.LoaderState) } else { - modload.InitWorkfile() + modload.InitWorkfile(modload.LoaderState) } - work.BuildInit() + work.BuildInit(modload.LoaderState) b := work.NewBuilder("") defer func() { if err := b.Close(); err != nil { @@ -107,18 +107,18 @@ func runRun(ctx context.Context, cmd *base.Command, args []string) { base.Fatalf("go: cannot run *_test.go files (%s)", file) } } - p = load.GoFilesPackage(ctx, pkgOpts, files) + p = load.GoFilesPackage(modload.LoaderState, ctx, pkgOpts, files) } else if len(args) > 0 && !strings.HasPrefix(args[0], "-") { arg := args[0] var pkgs []*load.Package if strings.Contains(arg, "@") && !build.IsLocalImport(arg) && !filepath.IsAbs(arg) { var err error - pkgs, err = load.PackagesAndErrorsOutsideModule(ctx, pkgOpts, args[:1]) + pkgs, err = load.PackagesAndErrorsOutsideModule(modload.LoaderState, ctx, pkgOpts, args[:1]) if err != nil { base.Fatal(err) } } else { - pkgs = load.PackagesAndErrors(ctx, pkgOpts, args[:1]) + pkgs = load.PackagesAndErrors(modload.LoaderState, ctx, pkgOpts, args[:1]) } if len(pkgs) == 0 { @@ -140,7 +140,7 @@ func runRun(ctx context.Context, cmd *base.Command, args []string) { load.CheckPackageErrors([]*load.Package{p}) if cfg.BuildCover { - load.PrepareForCoverageBuild([]*load.Package{p}) + load.PrepareForCoverageBuild(modload.LoaderState, []*load.Package{p}) } p.Internal.OmitDebug = true @@ -166,7 +166,7 @@ func runRun(ctx context.Context, cmd *base.Command, args []string) { p.Internal.ExeName = p.DefaultExecName() } - a1 := b.LinkAction(work.ModeBuild, work.ModeBuild, p) + a1 := b.LinkAction(modload.LoaderState, work.ModeBuild, work.ModeBuild, p) a1.CacheExecutable = true a := &work.Action{Mode: "go run", Actor: work.ActorFunc(buildRunProgram), Args: cmdArgs, Deps: []*work.Action{a1}} b.Do(ctx, a) diff --git a/src/cmd/go/internal/telemetrystats/telemetrystats.go b/src/cmd/go/internal/telemetrystats/telemetrystats.go index d5b642240f16b7..9586324551dfc9 100644 --- a/src/cmd/go/internal/telemetrystats/telemetrystats.go +++ b/src/cmd/go/internal/telemetrystats/telemetrystats.go @@ -24,7 +24,7 @@ func Increment() { func incrementConfig() { if !modload.WillBeEnabled() { counter.Inc("go/mode:gopath") - } else if workfile := modload.FindGoWork(base.Cwd()); workfile != "" { + } else if workfile := modload.FindGoWork(modload.LoaderState, base.Cwd()); workfile != "" { counter.Inc("go/mode:workspace") } else { counter.Inc("go/mode:module") diff --git a/src/cmd/go/internal/test/test.go b/src/cmd/go/internal/test/test.go index e225929add20c2..5e5d79a39f001f 100644 --- a/src/cmd/go/internal/test/test.go +++ b/src/cmd/go/internal/test/test.go @@ -683,7 +683,7 @@ var defaultVetFlags = []string{ func runTest(ctx context.Context, cmd *base.Command, args []string) { pkgArgs, testArgs = testFlags(args) - modload.InitWorkfile() // The test command does custom flag processing; initialize workspaces after that. + modload.InitWorkfile(modload.LoaderState) // The test command does custom flag processing; initialize workspaces after that. 
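The command-side hunks, such as the go run and go test changes above, keep modload.LoaderState as the process default but pass it explicitly into Init, InitWorkfile, BuildInit, and the load entry points. A toy version of that call-site pattern, with stand-in State and Init definitions that are assumptions of this sketch:

package main

import "fmt"

// State is a simplified stand-in for modload.State; LoaderState mirrors the
// package-level default that commands pass explicitly, while tests or other
// callers can construct their own value.
type State struct {
	ForceUseModules bool
	RootMode        string
}

var LoaderState = &State{RootMode: "NeedRoot"}

// Init is a stand-in entry point: it no longer reaches for a global but uses
// whatever state the caller hands it.
func Init(s *State) {
	fmt.Printf("init: force=%v rootMode=%s\n", s.ForceUseModules, s.RootMode)
}

func main() {
	LoaderState.ForceUseModules = true
	LoaderState.RootMode = "NoRoot"
	Init(LoaderState)                  // what the command's call sites now do
	Init(&State{RootMode: "NeedRoot"}) // an isolated state, e.g. for a test
}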
if cfg.DebugTrace != "" { var close func() error @@ -704,13 +704,13 @@ func runTest(ctx context.Context, cmd *base.Command, args []string) { work.FindExecCmd() // initialize cached result - work.BuildInit() + work.BuildInit(modload.LoaderState) work.VetFlags = testVet.flags work.VetExplicit = testVet.explicit work.VetTool = base.Tool("vet") pkgOpts := load.PackageOpts{ModResolveTests: true} - pkgs = load.PackagesAndErrors(ctx, pkgOpts, pkgArgs) + pkgs = load.PackagesAndErrors(modload.LoaderState, ctx, pkgOpts, pkgArgs) // We *don't* call load.CheckPackageErrors here because we want to report // loading errors as per-package test setup errors later. if len(pkgs) == 0 { @@ -741,7 +741,7 @@ func runTest(ctx context.Context, cmd *base.Command, args []string) { if !mainMods.Contains(m.Path) { base.Fatalf("cannot use -fuzz flag on package outside the main module") } - } else if pkgs[0].Standard && modload.Enabled() { + } else if pkgs[0].Standard && modload.Enabled(modload.LoaderState) { // Because packages in 'std' and 'cmd' are part of the standard library, // they are only treated as part of a module in 'go mod' subcommands and // 'go get'. However, we still don't want to accidentally corrupt their @@ -867,13 +867,13 @@ func runTest(ctx context.Context, cmd *base.Command, args []string) { if cfg.BuildCoverPkg != nil { match := make([]func(*load.Package) bool, len(cfg.BuildCoverPkg)) for i := range cfg.BuildCoverPkg { - match[i] = load.MatchPackage(cfg.BuildCoverPkg[i], base.Cwd()) + match[i] = load.MatchPackage(modload.LoaderState, cfg.BuildCoverPkg[i], base.Cwd()) } // Select for coverage all dependencies matching the -coverpkg // patterns. plist := load.TestPackageList(ctx, pkgOpts, pkgs) - testCoverPkgs = load.SelectCoverPackages(plist, match, "test") + testCoverPkgs = load.SelectCoverPackages(modload.LoaderState, plist, match, "test") if len(testCoverPkgs) > 0 { // create a new singleton action that will collect up the // meta-data files from all of the packages mentioned in @@ -981,7 +981,7 @@ func runTest(ctx context.Context, cmd *base.Command, args []string) { // happens we'll wind up building the Q compile action // before updating its deps to include sync/atomic). 
if cfg.BuildCoverMode == "atomic" && p.ImportPath != "sync/atomic" { - load.EnsureImport(p, "sync/atomic") + load.EnsureImport(modload.LoaderState, p, "sync/atomic") } // Tag the package for static meta-data generation if no // test files (this works only with the new coverage @@ -1221,7 +1221,7 @@ func builderTest(b *work.Builder, ctx context.Context, pkgOpts load.PackageOpts, } } - a := b.LinkAction(work.ModeBuild, work.ModeBuild, pmain) + a := b.LinkAction(modload.LoaderState, work.ModeBuild, work.ModeBuild, pmain) a.Target = testDir + testBinary + cfg.ExeSuffix if cfg.Goos == "windows" { // There are many reserved words on Windows that, diff --git a/src/cmd/go/internal/tool/tool.go b/src/cmd/go/internal/tool/tool.go index e25c06a8f046bd..e95b07d8c813aa 100644 --- a/src/cmd/go/internal/tool/tool.go +++ b/src/cmd/go/internal/tool/tool.go @@ -161,8 +161,8 @@ func listTools(ctx context.Context) { fmt.Println(name) } - modload.InitWorkfile() - modload.LoadModFile(ctx) + modload.InitWorkfile(modload.LoaderState) + modload.LoadModFile(modload.LoaderState, ctx) modTools := slices.Sorted(maps.Keys(modload.LoaderState.MainModules.Tools())) for _, tool := range modTools { fmt.Println(tool) @@ -252,8 +252,8 @@ func loadBuiltinTool(toolName string) string { } func loadModTool(ctx context.Context, name string) string { - modload.InitWorkfile() - modload.LoadModFile(ctx) + modload.InitWorkfile(modload.LoaderState) + modload.LoadModFile(modload.LoaderState, ctx) matches := []string{} for tool := range modload.LoaderState.MainModules.Tools() { @@ -336,7 +336,7 @@ func buildAndRunModtool(ctx context.Context, toolName, tool string, args []strin } func buildAndRunTool(ctx context.Context, tool string, args []string, runTool work.ActorFunc) { - work.BuildInit() + work.BuildInit(modload.LoaderState) b := work.NewBuilder("") defer func() { if err := b.Close(); err != nil { @@ -345,11 +345,11 @@ func buildAndRunTool(ctx context.Context, tool string, args []string, runTool wo }() pkgOpts := load.PackageOpts{MainOnly: true} - p := load.PackagesAndErrors(ctx, pkgOpts, []string{tool})[0] + p := load.PackagesAndErrors(modload.LoaderState, ctx, pkgOpts, []string{tool})[0] p.Internal.OmitDebug = true p.Internal.ExeName = p.DefaultExecName() - a1 := b.LinkAction(work.ModeBuild, work.ModeBuild, p) + a1 := b.LinkAction(modload.LoaderState, work.ModeBuild, work.ModeBuild, p) a1.CacheExecutable = true a := &work.Action{Mode: "go tool", Actor: runTool, Args: args, Deps: []*work.Action{a1}} b.Do(ctx, a) diff --git a/src/cmd/go/internal/toolchain/select.go b/src/cmd/go/internal/toolchain/select.go index 4f46b19c12167b..d54277ed1b4bea 100644 --- a/src/cmd/go/internal/toolchain/select.go +++ b/src/cmd/go/internal/toolchain/select.go @@ -355,7 +355,7 @@ func Exec(gotoolchain string) { modload.Reset() modload.LoaderState.ForceUseModules = true modload.LoaderState.RootMode = modload.NoRoot - modload.Init() + modload.Init(modload.LoaderState) // Download and unpack toolchain module into module cache. // Note that multiple go commands might be doing this at the same time, @@ -529,7 +529,7 @@ func raceSafeCopy(old, new string) error { // The toolchain line overrides the version line func modGoToolchain() (file, goVers, toolchain string) { wd := base.UncachedCwd() - file = modload.FindGoWork(wd) + file = modload.FindGoWork(modload.LoaderState, wd) // $GOWORK can be set to a file that does not yet exist, if we are running 'go work init'. 
// Do not try to load the file in that case if _, err := os.Stat(file); err != nil { @@ -694,7 +694,7 @@ func maybeSwitchForGoInstallVersion(minVers string) { // Set up modules without an explicit go.mod, to download go.mod. modload.LoaderState.ForceUseModules = true modload.LoaderState.RootMode = modload.NoRoot - modload.Init() + modload.Init(modload.LoaderState) defer modload.Reset() // See internal/load.PackagesAndErrorsOutsideModule @@ -705,7 +705,7 @@ func maybeSwitchForGoInstallVersion(minVers string) { allowed = nil } noneSelected := func(path string) (version string) { return "none" } - _, err := modload.QueryPackages(ctx, path, version, noneSelected, allowed) + _, err := modload.QueryPackages(modload.LoaderState, ctx, path, version, noneSelected, allowed) if errors.Is(err, gover.ErrTooNew) { // Run early switch, same one go install or go run would eventually do, // if it understood all the command-line flags. diff --git a/src/cmd/go/internal/vet/vet.go b/src/cmd/go/internal/vet/vet.go index a2625b21188cf4..e274348bd6a38e 100644 --- a/src/cmd/go/internal/vet/vet.go +++ b/src/cmd/go/internal/vet/vet.go @@ -123,7 +123,7 @@ func run(ctx context.Context, cmd *base.Command, args []string) { // The vet/fix commands do custom flag processing; // initialize workspaces after that. - modload.InitWorkfile() + modload.InitWorkfile(modload.LoaderState) if cfg.DebugTrace != "" { var close func() error @@ -142,7 +142,7 @@ func run(ctx context.Context, cmd *base.Command, args []string) { ctx, span := trace.StartSpan(ctx, fmt.Sprint("Running ", cmd.Name(), " command")) defer span.Done() - work.BuildInit() + work.BuildInit(modload.LoaderState) // Flag theory: // @@ -217,7 +217,7 @@ func run(ctx context.Context, cmd *base.Command, args []string) { work.VetFlags = toolFlags pkgOpts := load.PackageOpts{ModResolveTests: true} - pkgs := load.PackagesAndErrors(ctx, pkgOpts, pkgArgs) + pkgs := load.PackagesAndErrors(modload.LoaderState, ctx, pkgOpts, pkgArgs) load.CheckPackageErrors(pkgs) if len(pkgs) == 0 { base.Fatalf("no packages to %s", cmd.Name()) diff --git a/src/cmd/go/internal/work/action.go b/src/cmd/go/internal/work/action.go index 3636f642e26401..f1f3bcea38d9d5 100644 --- a/src/cmd/go/internal/work/action.go +++ b/src/cmd/go/internal/work/action.go @@ -28,6 +28,7 @@ import ( "cmd/go/internal/cache" "cmd/go/internal/cfg" "cmd/go/internal/load" + "cmd/go/internal/modload" "cmd/go/internal/str" "cmd/go/internal/trace" "cmd/internal/buildid" @@ -392,7 +393,7 @@ func (b *Builder) NewObjdir() string { // at shlibpath. For the native toolchain this list is stored, newline separated, in // an ELF note with name "Go\x00\x00" and type 1. For GCCGO it is extracted from the // .go_export section. 
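As the comment above describes, the gccgo layout keeps the package list as "pkgpath <import/path>;" lines inside a .go_export section, while the native toolchain uses an ELF note. A minimal, self-contained sketch of reading the gccgo-style section with debug/elf follows; the "pkgpath " prefix literal and the command-line handling are illustrative assumptions, not the go command's own code:

package main

import (
	"bytes"
	"debug/elf"
	"fmt"
	"log"
	"os"
)

func main() {
	if len(os.Args) < 2 {
		log.Fatal("usage: pkglist <shared-library>")
	}
	// Open the shared library named on the command line.
	f, err := elf.Open(os.Args[1])
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// gccgo records its export data, including "pkgpath <path>;" lines,
	// in the .go_export section.
	sect := f.Section(".go_export")
	if sect == nil {
		log.Fatal("no .go_export section")
	}
	data, err := sect.Data()
	if err != nil {
		log.Fatal(err)
	}

	// Assumed prefix literal; the real reader strips the same shape of line.
	prefix := []byte("pkgpath ")
	for _, line := range bytes.Split(data, []byte{'\n'}) {
		if path, found := bytes.CutPrefix(line, prefix); found {
			fmt.Printf("%s\n", bytes.TrimSuffix(path, []byte{';'}))
		}
	}
}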
-func readpkglist(shlibpath string) (pkgs []*load.Package) { +func readpkglist(loaderstate *modload.State, shlibpath string) (pkgs []*load.Package) { var stk load.ImportStack if cfg.BuildToolchainName == "gccgo" { f, err := elf.Open(shlibpath) @@ -412,7 +413,7 @@ func readpkglist(shlibpath string) (pkgs []*load.Package) { for _, line := range bytes.Split(data, []byte{'\n'}) { if path, found := bytes.CutPrefix(line, pkgpath); found { path = bytes.TrimSuffix(path, []byte{';'}) - pkgs = append(pkgs, load.LoadPackageWithFlags(string(path), base.Cwd(), &stk, nil, 0)) + pkgs = append(pkgs, load.LoadPackageWithFlags(loaderstate, string(path), base.Cwd(), &stk, nil, 0)) } } } else { @@ -423,7 +424,7 @@ func readpkglist(shlibpath string) (pkgs []*load.Package) { scanner := bufio.NewScanner(bytes.NewBuffer(pkglistbytes)) for scanner.Scan() { t := scanner.Text() - pkgs = append(pkgs, load.LoadPackageWithFlags(t, base.Cwd(), &stk, nil, 0)) + pkgs = append(pkgs, load.LoadPackageWithFlags(loaderstate, t, base.Cwd(), &stk, nil, 0)) } } return @@ -445,7 +446,7 @@ func (b *Builder) cacheAction(mode string, p *load.Package, f func() *Action) *A // AutoAction returns the "right" action for go build or go install of p. func (b *Builder) AutoAction(mode, depMode BuildMode, p *load.Package) *Action { if p.Name == "main" { - return b.LinkAction(mode, depMode, p) + return b.LinkAction(modload.LoaderState, mode, depMode, p) } return b.CompileAction(mode, depMode, p) } @@ -913,7 +914,7 @@ func (b *Builder) vetAction(mode, depMode BuildMode, p *load.Package) *Action { // LinkAction returns the action for linking p into an executable // and possibly installing the result (according to mode). // depMode is the action (build or install) to use when compiling dependencies. -func (b *Builder) LinkAction(mode, depMode BuildMode, p *load.Package) *Action { +func (b *Builder) LinkAction(loaderstate *modload.State, mode, depMode BuildMode, p *load.Package) *Action { // Construct link action. a := b.cacheAction("link", p, func() *Action { a := &Action{ @@ -948,7 +949,7 @@ func (b *Builder) LinkAction(mode, depMode BuildMode, p *load.Package) *Action { } a.Target = a.Objdir + filepath.Join("exe", name) + cfg.ExeSuffix a.built = a.Target - b.addTransitiveLinkDeps(a, a1, "") + b.addTransitiveLinkDeps(loaderstate, a, a1, "") // Sequence the build of the main package (a1) strictly after the build // of all other dependencies that go into the link. It is likely to be after @@ -1034,7 +1035,7 @@ func (b *Builder) installAction(a1 *Action, mode BuildMode) *Action { // makes sure those are present in a.Deps. // If shlib is non-empty, then a corresponds to the build and installation of shlib, // so any rebuild of shlib should not be added as a dependency. -func (b *Builder) addTransitiveLinkDeps(a, a1 *Action, shlib string) { +func (b *Builder) addTransitiveLinkDeps(loaderstate *modload.State, a, a1 *Action, shlib string) { // Expand Deps to include all built packages, for the linker. // Use breadth-first search to find rebuilt-for-test packages // before the standard ones. @@ -1075,7 +1076,7 @@ func (b *Builder) addTransitiveLinkDeps(a, a1 *Action, shlib string) { // we'll end up building an overall library or executable that depends at runtime // on other libraries that are out-of-date, which is clearly not good either. // We call it ModeBuggyInstall to make clear that this is not right. 
- a.Deps = append(a.Deps, b.linkSharedAction(ModeBuggyInstall, ModeBuggyInstall, p1.Shlib, nil)) + a.Deps = append(a.Deps, b.linkSharedAction(loaderstate, ModeBuggyInstall, ModeBuggyInstall, p1.Shlib, nil)) } } } @@ -1116,21 +1117,21 @@ func (b *Builder) buildmodeShared(mode, depMode BuildMode, args []string, pkgs [ if err != nil { base.Fatalf("%v", err) } - return b.linkSharedAction(mode, depMode, name, a1) + return b.linkSharedAction(modload.LoaderState, mode, depMode, name, a1) } // linkSharedAction takes a grouping action a1 corresponding to a list of built packages // and returns an action that links them together into a shared library with the name shlib. // If a1 is nil, shlib should be an absolute path to an existing shared library, // and then linkSharedAction reads that library to find out the package list. -func (b *Builder) linkSharedAction(mode, depMode BuildMode, shlib string, a1 *Action) *Action { +func (b *Builder) linkSharedAction(loaderstate *modload.State, mode, depMode BuildMode, shlib string, a1 *Action) *Action { fullShlib := shlib shlib = filepath.Base(shlib) a := b.cacheAction("build-shlib "+shlib, nil, func() *Action { if a1 == nil { // TODO(rsc): Need to find some other place to store config, // not in pkg directory. See golang.org/issue/22196. - pkgs := readpkglist(fullShlib) + pkgs := readpkglist(loaderstate, fullShlib) a1 = &Action{ Mode: "shlib packages", } @@ -1173,7 +1174,7 @@ func (b *Builder) linkSharedAction(mode, depMode BuildMode, shlib string, a1 *Ac } } var stk load.ImportStack - p := load.LoadPackageWithFlags(pkg, base.Cwd(), &stk, nil, 0) + p := load.LoadPackageWithFlags(loaderstate, pkg, base.Cwd(), &stk, nil, 0) if p.Error != nil { base.Fatalf("load %s: %v", pkg, p.Error) } @@ -1201,7 +1202,7 @@ func (b *Builder) linkSharedAction(mode, depMode BuildMode, shlib string, a1 *Ac add(a, dep, true) } } - b.addTransitiveLinkDeps(a, a1, shlib) + b.addTransitiveLinkDeps(loaderstate, a, a1, shlib) return a }) diff --git a/src/cmd/go/internal/work/build.go b/src/cmd/go/internal/work/build.go index fdb483d46f21d9..45acbe85c2a6d0 100644 --- a/src/cmd/go/internal/work/build.go +++ b/src/cmd/go/internal/work/build.go @@ -459,8 +459,8 @@ func oneMainPkg(pkgs []*load.Package) []*load.Package { var pkgsFilter = func(pkgs []*load.Package) []*load.Package { return pkgs } func runBuild(ctx context.Context, cmd *base.Command, args []string) { - modload.InitWorkfile() - BuildInit() + modload.InitWorkfile(modload.LoaderState) + BuildInit(modload.LoaderState) b := NewBuilder("") defer func() { if err := b.Close(); err != nil { @@ -468,7 +468,7 @@ func runBuild(ctx context.Context, cmd *base.Command, args []string) { } }() - pkgs := load.PackagesAndErrors(ctx, load.PackageOpts{AutoVCS: true}, args) + pkgs := load.PackagesAndErrors(modload.LoaderState, ctx, load.PackageOpts{AutoVCS: true}, args) load.CheckPackageErrors(pkgs) explicitO := len(cfg.BuildO) > 0 @@ -503,7 +503,7 @@ func runBuild(ctx context.Context, cmd *base.Command, args []string) { } if cfg.BuildCover { - load.PrepareForCoverageBuild(pkgs) + load.PrepareForCoverageBuild(modload.LoaderState, pkgs) } if cfg.BuildO != "" { @@ -694,10 +694,10 @@ func runInstall(ctx context.Context, cmd *base.Command, args []string) { } } - modload.InitWorkfile() - BuildInit() - pkgs := load.PackagesAndErrors(ctx, load.PackageOpts{AutoVCS: true}, args) - if cfg.ModulesEnabled && !modload.HasModRoot() { + modload.InitWorkfile(modload.LoaderState) + BuildInit(modload.LoaderState) + pkgs := load.PackagesAndErrors(modload.LoaderState, 
ctx, load.PackageOpts{AutoVCS: true}, args) + if cfg.ModulesEnabled && !modload.HasModRoot(modload.LoaderState) { haveErrors := false allMissingErrors := true for _, pkg := range pkgs { @@ -722,7 +722,7 @@ func runInstall(ctx context.Context, cmd *base.Command, args []string) { load.CheckPackageErrors(pkgs) if cfg.BuildCover { - load.PrepareForCoverageBuild(pkgs) + load.PrepareForCoverageBuild(modload.LoaderState, pkgs) } InstallPackages(ctx, args, pkgs) @@ -861,9 +861,9 @@ func InstallPackages(ctx context.Context, patterns []string, pkgs []*load.Packag func installOutsideModule(ctx context.Context, args []string) { modload.LoaderState.ForceUseModules = true modload.LoaderState.RootMode = modload.NoRoot - modload.AllowMissingModuleImports() - modload.Init() - BuildInit() + modload.AllowMissingModuleImports(modload.LoaderState) + modload.Init(modload.LoaderState) + BuildInit(modload.LoaderState) // Load packages. Ignore non-main packages. // Print a warning if an argument contains "..." and matches no main packages. @@ -872,7 +872,7 @@ func installOutsideModule(ctx context.Context, args []string) { // TODO(golang.org/issue/40276): don't report errors loading non-main packages // matched by a pattern. pkgOpts := load.PackageOpts{MainOnly: true} - pkgs, err := load.PackagesAndErrorsOutsideModule(ctx, pkgOpts, args) + pkgs, err := load.PackagesAndErrorsOutsideModule(modload.LoaderState, ctx, pkgOpts, args) if err != nil { base.Fatal(err) } diff --git a/src/cmd/go/internal/work/exec.go b/src/cmd/go/internal/work/exec.go index 703b4367db31ef..eb012d26109f57 100644 --- a/src/cmd/go/internal/work/exec.go +++ b/src/cmd/go/internal/work/exec.go @@ -2193,7 +2193,7 @@ func (noToolchain) cc(b *Builder, a *Action, ofile, cfile string) error { // gcc runs the gcc C compiler to create an object from a single C file. func (b *Builder) gcc(a *Action, workdir, out string, flags []string, cfile string) error { p := a.Package - return b.ccompile(a, out, flags, cfile, b.GccCmd(p.Dir, workdir)) + return b.ccompile(modload.LoaderState, a, out, flags, cfile, b.GccCmd(p.Dir, workdir)) } // gas runs the gcc c compiler to create an object file from a single C assembly file. @@ -2207,23 +2207,23 @@ func (b *Builder) gas(a *Action, workdir, out string, flags []string, sfile stri return fmt.Errorf("package using cgo has Go assembly file %s", sfile) } } - return b.ccompile(a, out, flags, sfile, b.GccCmd(p.Dir, workdir)) + return b.ccompile(modload.LoaderState, a, out, flags, sfile, b.GccCmd(p.Dir, workdir)) } // gxx runs the g++ C++ compiler to create an object from a single C++ file. func (b *Builder) gxx(a *Action, workdir, out string, flags []string, cxxfile string) error { p := a.Package - return b.ccompile(a, out, flags, cxxfile, b.GxxCmd(p.Dir, workdir)) + return b.ccompile(modload.LoaderState, a, out, flags, cxxfile, b.GxxCmd(p.Dir, workdir)) } // gfortran runs the gfortran Fortran compiler to create an object from a single Fortran file. func (b *Builder) gfortran(a *Action, workdir, out string, flags []string, ffile string) error { p := a.Package - return b.ccompile(a, out, flags, ffile, b.gfortranCmd(p.Dir, workdir)) + return b.ccompile(modload.LoaderState, a, out, flags, ffile, b.gfortranCmd(p.Dir, workdir)) } // ccompile runs the given C or C++ compiler and creates an object from a single source file. 
-func (b *Builder) ccompile(a *Action, outfile string, flags []string, file string, compiler []string) error { +func (b *Builder) ccompile(loaderstate *modload.State, a *Action, outfile string, flags []string, file string, compiler []string) error { p := a.Package sh := b.Shell(a) file = mkAbs(p.Dir, file) @@ -2260,7 +2260,7 @@ func (b *Builder) ccompile(a *Action, outfile string, flags []string, file strin } else if m.Dir == "" { // The module is in the vendor directory. Replace the entire vendor // directory path, because the module's Dir is not filled in. - from = modload.VendorDir() + from = modload.VendorDir(loaderstate) toPath = "vendor" } else { from = m.Dir @@ -2310,7 +2310,7 @@ func (b *Builder) ccompile(a *Action, outfile string, flags []string, file strin } } if len(newFlags) < len(flags) { - return b.ccompile(a, outfile, newFlags, file, compiler) + return b.ccompile(loaderstate, a, outfile, newFlags, file, compiler) } } @@ -3383,7 +3383,7 @@ func (b *Builder) swigDoIntSize(objdir string) (intsize string, err error) { } srcs := []string{src} - p := load.GoFilesPackage(context.TODO(), load.PackageOpts{}, srcs) + p := load.GoFilesPackage(modload.LoaderState, context.TODO(), load.PackageOpts{}, srcs) if _, _, e := BuildToolchain.gc(b, &Action{Mode: "swigDoIntSize", Package: p, Objdir: objdir}, "", nil, nil, "", false, "", srcs); e != nil { return "32", nil diff --git a/src/cmd/go/internal/work/init.go b/src/cmd/go/internal/work/init.go index e4e83dc8f9853e..a2954ab91ab60a 100644 --- a/src/cmd/go/internal/work/init.go +++ b/src/cmd/go/internal/work/init.go @@ -50,14 +50,14 @@ func makeCfgChangedEnv() []string { return slices.Clip(env) } -func BuildInit() { +func BuildInit(loaderstate *modload.State) { if buildInitStarted { base.Fatalf("go: internal error: work.BuildInit called more than once") } buildInitStarted = true base.AtExit(closeBuilders) - modload.Init() + modload.Init(loaderstate) instrumentInit() buildModeInit() cfgChangedEnv = makeCfgChangedEnv() diff --git a/src/cmd/go/internal/workcmd/edit.go b/src/cmd/go/internal/workcmd/edit.go index 18730436ca8217..3778e70b687aec 100644 --- a/src/cmd/go/internal/workcmd/edit.go +++ b/src/cmd/go/internal/workcmd/edit.go @@ -143,8 +143,8 @@ func runEditwork(ctx context.Context, cmd *base.Command, args []string) { if len(args) == 1 { gowork = args[0] } else { - modload.InitWorkfile() - gowork = modload.WorkFilePath() + modload.InitWorkfile(modload.LoaderState) + gowork = modload.WorkFilePath(modload.LoaderState) } if gowork == "" { base.Fatalf("go: no go.work file found\n\t(run 'go work init' first or specify path using GOWORK environment variable)") diff --git a/src/cmd/go/internal/workcmd/init.go b/src/cmd/go/internal/workcmd/init.go index 52185391c115b3..20fef91d5e953c 100644 --- a/src/cmd/go/internal/workcmd/init.go +++ b/src/cmd/go/internal/workcmd/init.go @@ -44,11 +44,11 @@ func init() { } func runInit(ctx context.Context, cmd *base.Command, args []string) { - modload.InitWorkfile() + modload.InitWorkfile(modload.LoaderState) modload.LoaderState.ForceUseModules = true - gowork := modload.WorkFilePath() + gowork := modload.WorkFilePath(modload.LoaderState) if gowork == "" { gowork = filepath.Join(base.Cwd(), "go.work") } diff --git a/src/cmd/go/internal/workcmd/sync.go b/src/cmd/go/internal/workcmd/sync.go index 640771d8f75996..c58cd55ceee392 100644 --- a/src/cmd/go/internal/workcmd/sync.go +++ b/src/cmd/go/internal/workcmd/sync.go @@ -49,8 +49,8 @@ func init() { func runSync(ctx context.Context, cmd *base.Command, args []string) 
{ modload.LoaderState.ForceUseModules = true - modload.InitWorkfile() - if modload.WorkFilePath() == "" { + modload.InitWorkfile(modload.LoaderState) + if modload.WorkFilePath(modload.LoaderState) == "" { base.Fatalf("go: no go.work file found\n\t(run 'go work init' first or specify path using GOWORK environment variable)") } @@ -73,7 +73,7 @@ func runSync(ctx context.Context, cmd *base.Command, args []string) { } for _, m := range mms.Versions() { opts.MainModule = m - _, pkgs := modload.LoadPackages(ctx, opts, "all") + _, pkgs := modload.LoadPackages(modload.LoaderState, ctx, opts, "all") opts.MainModule = module.Version{} // reset var ( @@ -91,7 +91,7 @@ func runSync(ctx context.Context, cmd *base.Command, args []string) { mustSelectFor[m] = mustSelect } - workFilePath := modload.WorkFilePath() // save go.work path because EnterModule clobbers it. + workFilePath := modload.WorkFilePath(modload.LoaderState) // save go.work path because EnterModule clobbers it. var goV string for _, m := range mms.Versions() { @@ -114,12 +114,12 @@ func runSync(ctx context.Context, cmd *base.Command, args []string) { // so we don't write some go.mods with the "before" toolchain // and others with the "after" toolchain. If nothing else, that // discrepancy could show up in auto-recorded toolchain lines. - changed, err := modload.EditBuildList(ctx, nil, mustSelectFor[m]) + changed, err := modload.EditBuildList(modload.LoaderState, ctx, nil, mustSelectFor[m]) if err != nil { continue } if changed { - modload.LoadPackages(ctx, modload.PackageOpts{ + modload.LoadPackages(modload.LoaderState, ctx, modload.PackageOpts{ Tags: imports.AnyTags(), Tidy: true, VendorModulesInGOROOTSrc: true, @@ -131,7 +131,7 @@ func runSync(ctx context.Context, cmd *base.Command, args []string) { }, "all") modload.WriteGoMod(ctx, modload.WriteOpts{}) } - goV = gover.Max(goV, modload.LoaderState.MainModules.GoVersion()) + goV = gover.Max(goV, modload.LoaderState.MainModules.GoVersion(modload.LoaderState)) } wf, err := modload.ReadWorkFile(workFilePath) diff --git a/src/cmd/go/internal/workcmd/use.go b/src/cmd/go/internal/workcmd/use.go index 2842163517892c..ca8de22cca884f 100644 --- a/src/cmd/go/internal/workcmd/use.go +++ b/src/cmd/go/internal/workcmd/use.go @@ -62,8 +62,8 @@ func init() { func runUse(ctx context.Context, cmd *base.Command, args []string) { modload.LoaderState.ForceUseModules = true - modload.InitWorkfile() - gowork := modload.WorkFilePath() + modload.InitWorkfile(modload.LoaderState) + gowork := modload.WorkFilePath(modload.LoaderState) if gowork == "" { base.Fatalf("go: no go.work file found\n\t(run 'go work init' first or specify path using GOWORK environment variable)") } diff --git a/src/cmd/go/internal/workcmd/vendor.go b/src/cmd/go/internal/workcmd/vendor.go index f9f0cc0898836f..36c1f7b522f9e7 100644 --- a/src/cmd/go/internal/workcmd/vendor.go +++ b/src/cmd/go/internal/workcmd/vendor.go @@ -46,8 +46,8 @@ func init() { } func runVendor(ctx context.Context, cmd *base.Command, args []string) { - modload.InitWorkfile() - if modload.WorkFilePath() == "" { + modload.InitWorkfile(modload.LoaderState) + if modload.WorkFilePath(modload.LoaderState) == "" { base.Fatalf("go: no go.work file found\n\t(run 'go work init' first or specify path using GOWORK environment variable)") } From 61d1ff61adb3febdbae21da7721b7cd5389efe4a Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Wed, 8 Oct 2025 15:33:19 -0700 Subject: [PATCH 149/152] cmd/compile: use block starting position for phi line number Fixes #75615 Change-Id: 
I2c7f0ea1203e8a97749c9f780c29a66050f0159d Reviewed-on: https://go-review.googlesource.com/c/go/+/710355 Reviewed-by: Keith Randall Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/ssa/stmtlines_test.go | 12 ++++++------ src/cmd/compile/internal/ssagen/phi.go | 3 ++- src/cmd/compile/internal/ssagen/ssa.go | 14 ++++++++++++++ 3 files changed, 22 insertions(+), 7 deletions(-) diff --git a/src/cmd/compile/internal/ssa/stmtlines_test.go b/src/cmd/compile/internal/ssa/stmtlines_test.go index 8cd11e9828e0f9..d0f09da86ffce2 100644 --- a/src/cmd/compile/internal/ssa/stmtlines_test.go +++ b/src/cmd/compile/internal/ssa/stmtlines_test.go @@ -137,17 +137,17 @@ func TestStmtLines(t *testing.T) { } } - var m int + var m float64 if runtime.GOARCH == "amd64" { - m = 1 // > 99% obtained on amd64, no backsliding + m = 0.011 // > 98.9% obtained on amd64, no backsliding } else if runtime.GOARCH == "riscv64" { - m = 3 // XXX temporary update threshold to 97% for regabi + m = 0.03 // XXX temporary update threshold to 97% for regabi } else { - m = 2 // expect 98% elsewhere. + m = 0.02 // expect 98% elsewhere. } - if len(nonStmtLines)*100 > m*len(lines) { - t.Errorf("Saw too many (%s, > %d%%) lines without statement marks, total=%d, nostmt=%d ('-run TestStmtLines -v' lists failing lines)\n", runtime.GOARCH, m, len(lines), len(nonStmtLines)) + if float64(len(nonStmtLines)) > m*float64(len(lines)) { + t.Errorf("Saw too many (%s, > %.1f%%) lines without statement marks, total=%d, nostmt=%d ('-run TestStmtLines -v' lists failing lines)\n", runtime.GOARCH, m*100, len(lines), len(nonStmtLines)) } t.Logf("Saw %d out of %d lines without statement marks", len(nonStmtLines), len(lines)) if testing.Verbose() { diff --git a/src/cmd/compile/internal/ssagen/phi.go b/src/cmd/compile/internal/ssagen/phi.go index 19b6920913d83c..0dcf353bf43089 100644 --- a/src/cmd/compile/internal/ssagen/phi.go +++ b/src/cmd/compile/internal/ssagen/phi.go @@ -253,7 +253,7 @@ func (s *phiState) insertVarPhis(n int, var_ ir.Node, defs []*ssa.Block, typ *ty } // Add a phi to block c for variable n. hasPhi.add(c.ID) - v := c.NewValue0I(currentRoot.Pos, ssa.OpPhi, typ, int64(n)) // TODO: line number right? + v := c.NewValue0I(s.s.blockStarts[b.ID], ssa.OpPhi, typ, int64(n)) // Note: we store the variable number in the phi's AuxInt field. Used temporarily by phi building. if var_.Op() == ir.ONAME { s.s.addNamedValue(var_.(*ir.Name), v) @@ -513,6 +513,7 @@ loop: v.Op = ssa.OpPhi v.AddArgs(args...) v.Aux = nil + v.Pos = s.s.blockStarts[b.ID] continue loop } w = a // save witness diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go index e2ef33274577cc..ae7d57566f7e0d 100644 --- a/src/cmd/compile/internal/ssagen/ssa.go +++ b/src/cmd/compile/internal/ssagen/ssa.go @@ -1088,6 +1088,9 @@ type state struct { // First argument of append calls that could be stack allocated. appendTargets map[ir.Node]bool + + // Block starting position, indexed by block id. + blockStarts []src.XPos } type funcLine struct { @@ -1146,6 +1149,9 @@ func (s *state) startBlock(b *ssa.Block) { s.curBlock = b s.vars = map[ir.Node]*ssa.Value{} clear(s.fwdVars) + for len(s.blockStarts) <= int(b.ID) { + s.blockStarts = append(s.blockStarts, src.NoXPos) + } } // endBlock marks the end of generating code for the current block. 
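The ssa.go hunks around this point add the bookkeeping that the phi.go change relies on: the first source position pushed while a block is open becomes that block's starting position, and phis take that position instead of an arbitrary variable definition's position. A stripped-down sketch of the same pattern, using hypothetical names and an integer stand-in for src.XPos:

package main

import "fmt"

type Pos int // stand-in for src.XPos; 0 plays the role of src.NoXPos

type builder struct {
	cur         int   // ID of the block being built, -1 if none
	blockStarts []Pos // first position seen per block ID
	lastPos     Pos
}

func (b *builder) startBlock(id int) {
	b.cur = id
	for len(b.blockStarts) <= id {
		b.blockStarts = append(b.blockStarts, 0)
	}
}

// pushLine records the first position seen in the current block;
// that is the position a phi in this block would be given.
func (b *builder) pushLine(p Pos) {
	b.lastPos = p
	if b.cur >= 0 && b.blockStarts[b.cur] == 0 {
		b.blockStarts[b.cur] = p
	}
}

// endBlock falls back to the last position seen if nothing was
// recorded while the block was open.
func (b *builder) endBlock() {
	if b.blockStarts[b.cur] == 0 {
		b.blockStarts[b.cur] = b.lastPos
	}
	b.cur = -1
}

func main() {
	b := &builder{cur: -1}
	b.startBlock(0)
	b.pushLine(42) // first statement of the block
	b.pushLine(43)
	b.endBlock()
	fmt.Println(b.blockStarts[0]) // 42: the position phis in block 0 would get
}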
@@ -1172,6 +1178,9 @@ func (s *state) endBlock() *ssa.Block { b.Pos = src.NoXPos } else { b.Pos = s.lastPos + if s.blockStarts[b.ID] == src.NoXPos { + s.blockStarts[b.ID] = s.lastPos + } } return b } @@ -1188,6 +1197,11 @@ func (s *state) pushLine(line src.XPos) { } else { s.lastPos = line } + // The first position we see for a new block is its starting position + // (the line number for its phis, if any). + if b := s.curBlock; b != nil && s.blockStarts[b.ID] == src.NoXPos { + s.blockStarts[b.ID] = line + } s.line = append(s.line, line) } From 5c9a26c7f882dba5bfe10036815bcb239dd9b7e8 Mon Sep 17 00:00:00 2001 From: Vasily Leonenko Date: Fri, 25 Jul 2025 23:06:33 +0300 Subject: [PATCH 150/152] cmd/compile: use arm64 neon in LoweredMemmove/LoweredMemmoveLoop MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Raspberry Pi 5 (Cortex-A76) │ base.log │ opt.log │ │ sec/op │ sec/op vs base │ MemmoveKnownSize112 3.549n ± 0% 3.652n ± 0% +2.92% (p=0.000 n=10) MemmoveKnownSize128 3.979n ± 0% 3.617n ± 0% -9.09% (p=0.000 n=10) MemmoveKnownSize192 7.566n ± 0% 5.074n ± 0% -32.94% (p=0.000 n=10) MemmoveKnownSize248 8.549n ± 0% 7.184n ± 1% -15.97% (p=0.000 n=10) MemmoveKnownSize256 10.010n ± 0% 6.827n ± 0% -31.80% (p=0.000 n=10) MemmoveKnownSize512 19.81n ± 0% 13.59n ± 0% -31.40% (p=0.000 n=10) MemmoveKnownSize1024 39.66n ± 0% 27.00n ± 0% -31.93% (p=0.000 n=10) geomean 9.538n 7.392n -22.50% Change-Id: I7b17408cd0a500ceaa80bc93ffe2f19ddeea9c0d Reviewed-on: https://go-review.googlesource.com/c/go/+/692315 Reviewed-by: Keith Randall Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/arm64/ssa.go | 89 ++++++++++++++----- src/cmd/compile/internal/ssa/_gen/ARM64Ops.go | 11 +-- src/cmd/compile/internal/ssa/opGen.go | 12 +-- 3 files changed, 79 insertions(+), 33 deletions(-) diff --git a/src/cmd/compile/internal/arm64/ssa.go b/src/cmd/compile/internal/arm64/ssa.go index 7bc0e536e941e6..43ecb6b4b715b4 100644 --- a/src/cmd/compile/internal/arm64/ssa.go +++ b/src/cmd/compile/internal/arm64/ssa.go @@ -1189,8 +1189,9 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { if dstReg == srcReg { break } - tmpReg1 := int16(arm64.REG_R24) - tmpReg2 := int16(arm64.REG_R25) + tmpReg1 := int16(arm64.REG_R25) + tmpFReg1 := int16(arm64.REG_F16) + tmpFReg2 := int16(arm64.REG_F17) n := v.AuxInt if n < 16 { v.Fatalf("Move too small %d", n) @@ -1198,10 +1199,17 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { // Generate copying instructions. var off int64 + for n >= 32 { + // FLDPQ off(srcReg), (tmpFReg1, tmpFReg2) + // FSTPQ (tmpFReg1, tmpFReg2), off(dstReg) + move32(s, srcReg, dstReg, tmpFReg1, tmpFReg2, off, false) + off += 32 + n -= 32 + } for n >= 16 { - // LDP off(srcReg), (tmpReg1, tmpReg2) - // STP (tmpReg1, tmpReg2), off(dstReg) - move16(s, srcReg, dstReg, tmpReg1, tmpReg2, off, false) + // FMOVQ off(src), tmpFReg1 + // FMOVQ tmpFReg1, off(dst) + move16(s, srcReg, dstReg, tmpFReg1, off, false) off += 16 n -= 16 } @@ -1223,9 +1231,10 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { if dstReg == srcReg { break } - countReg := int16(arm64.REG_R23) - tmpReg1 := int16(arm64.REG_R24) - tmpReg2 := int16(arm64.REG_R25) + countReg := int16(arm64.REG_R24) + tmpReg1 := int16(arm64.REG_R25) + tmpFReg1 := int16(arm64.REG_F16) + tmpFReg2 := int16(arm64.REG_F17) n := v.AuxInt loopSize := int64(64) if n < 3*loopSize { @@ -1251,10 +1260,10 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { // Move loopSize bytes starting at srcReg to dstReg. 
// Increment srcReg and destReg by loopSize as a side effect. - for range loopSize / 16 { - // LDP.P 16(srcReg), (tmpReg1, tmpReg2) - // STP.P (tmpReg1, tmpReg2), 16(dstReg) - move16(s, srcReg, dstReg, tmpReg1, tmpReg2, 0, true) + for range loopSize / 32 { + // FLDPQ.P 32(srcReg), (tmpFReg1, tmpFReg2) + // FSTPQ.P (tmpFReg1, tmpFReg2), 32(dstReg) + move32(s, srcReg, dstReg, tmpFReg1, tmpFReg2, 0, true) } // Decrement loop count. // SUB $1, countReg @@ -1276,10 +1285,17 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { // Copy any fractional portion. var off int64 + for n >= 32 { + // FLDPQ off(srcReg), (tmpFReg1, tmpFReg2) + // FSTPQ (tmpFReg1, tmpFReg2), off(dstReg) + move32(s, srcReg, dstReg, tmpFReg1, tmpFReg2, off, false) + off += 32 + n -= 32 + } for n >= 16 { - // LDP off(srcReg), (tmpReg1, tmpReg2) - // STP (tmpReg1, tmpReg2), off(dstReg) - move16(s, srcReg, dstReg, tmpReg1, tmpReg2, off, false) + // FMOVQ off(src), tmpFReg1 + // FMOVQ tmpFReg1, off(dst) + move16(s, srcReg, dstReg, tmpFReg1, off, false) off += 16 n -= 16 } @@ -1699,26 +1715,55 @@ func zero8(s *ssagen.State, reg int16, off int64) { p.To.Offset = off } -// move16 copies 16 bytes at src+off to dst+off. +// move32 copies 32 bytes at src+off to dst+off. // Uses registers tmp1 and tmp2. -// If postInc is true, increment src and dst by 16. -func move16(s *ssagen.State, src, dst, tmp1, tmp2 int16, off int64, postInc bool) { - // LDP off(src), (tmp1, tmp2) - ld := s.Prog(arm64.ALDP) +// If postInc is true, increment src and dst by 32. +func move32(s *ssagen.State, src, dst, tmp1, tmp2 int16, off int64, postInc bool) { + // FLDPQ off(src), (tmp1, tmp2) + ld := s.Prog(arm64.AFLDPQ) ld.From.Type = obj.TYPE_MEM ld.From.Reg = src ld.From.Offset = off ld.To.Type = obj.TYPE_REGREG ld.To.Reg = tmp1 ld.To.Offset = int64(tmp2) - // STP (tmp1, tmp2), off(dst) - st := s.Prog(arm64.ASTP) + // FSTPQ (tmp1, tmp2), off(dst) + st := s.Prog(arm64.AFSTPQ) st.From.Type = obj.TYPE_REGREG st.From.Reg = tmp1 st.From.Offset = int64(tmp2) st.To.Type = obj.TYPE_MEM st.To.Reg = dst st.To.Offset = off + if postInc { + if off != 0 { + panic("can't postinc with non-zero offset") + } + ld.Scond = arm64.C_XPOST + st.Scond = arm64.C_XPOST + ld.From.Offset = 32 + st.To.Offset = 32 + } +} + +// move16 copies 16 bytes at src+off to dst+off. +// Uses register tmp1 +// If postInc is true, increment src and dst by 16. 
+func move16(s *ssagen.State, src, dst, tmp1 int16, off int64, postInc bool) { + // FMOVQ off(src), tmp1 + ld := s.Prog(arm64.AFMOVQ) + ld.From.Type = obj.TYPE_MEM + ld.From.Reg = src + ld.From.Offset = off + ld.To.Type = obj.TYPE_REG + ld.To.Reg = tmp1 + // FMOVQ tmp1, off(dst) + st := s.Prog(arm64.AFMOVQ) + st.From.Type = obj.TYPE_REG + st.From.Reg = tmp1 + st.To.Type = obj.TYPE_MEM + st.To.Reg = dst + st.To.Offset = off if postInc { if off != 0 { panic("can't postinc with non-zero offset") diff --git a/src/cmd/compile/internal/ssa/_gen/ARM64Ops.go b/src/cmd/compile/internal/ssa/_gen/ARM64Ops.go index 43072ae9130ede..cc3758d10956d4 100644 --- a/src/cmd/compile/internal/ssa/_gen/ARM64Ops.go +++ b/src/cmd/compile/internal/ssa/_gen/ARM64Ops.go @@ -144,8 +144,9 @@ func init() { gpspsbg = gpspg | buildReg("SB") fp = buildReg("F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31") callerSave = gp | fp | buildReg("g") // runtime.setg (and anything calling it) may clobber g + r25 = buildReg("R25") r24to25 = buildReg("R24 R25") - r23to25 = buildReg("R23 R24 R25") + f16to17 = buildReg("F16 F17") rz = buildReg("ZERO") first16 = buildReg("R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15") ) @@ -599,8 +600,8 @@ func init() { aux: "Int64", argLength: 3, reg: regInfo{ - inputs: []regMask{gp &^ r24to25, gp &^ r24to25}, - clobbers: r24to25, // TODO: figure out needIntTemp x2 + inputs: []regMask{gp &^ r25, gp &^ r25}, + clobbers: r25 | f16to17, // TODO: figure out needIntTemp + x2 for floats }, faultOnNilArg0: true, faultOnNilArg1: true, @@ -617,8 +618,8 @@ func init() { aux: "Int64", argLength: 3, reg: regInfo{ - inputs: []regMask{gp &^ r23to25, gp &^ r23to25}, - clobbers: r23to25, // TODO: figure out needIntTemp x3 + inputs: []regMask{gp &^ r24to25, gp &^ r24to25}, + clobbers: r24to25 | f16to17, // TODO: figure out needIntTemp x2 + x2 for floats clobbersArg0: true, clobbersArg1: true, }, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 9b38e66a23f019..061f1333382af4 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -23199,10 +23199,10 @@ var opcodeTable = [...]opInfo{ faultOnNilArg1: true, reg: regInfo{ inputs: []inputInfo{ - {0, 310378495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R26 R30 - {1, 310378495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R26 R30 + {0, 318767103}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R26 R30 + {1, 318767103}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R26 R30 }, - clobbers: 25165824, // R24 R25 + clobbers: 422212481843200, // R25 F16 F17 }, }, { @@ -23213,10 +23213,10 @@ var opcodeTable = [...]opInfo{ faultOnNilArg1: true, reg: regInfo{ inputs: []inputInfo{ - {0, 306184191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R26 R30 - {1, 306184191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R26 R30 + {0, 310378495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R26 R30 + {1, 310378495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R26 R30 }, - clobbers: 29360128, // R23 R24 R25 + clobbers: 422212490231808, // R24 R25 F16 F17 clobbersArg0: true, clobbersArg1: true, }, From 
bb5eb5171535b9080055fee5996bd55398202124 Mon Sep 17 00:00:00 2001 From: matloob Date: Wed, 15 Oct 2025 12:52:47 -0400 Subject: [PATCH 151/152] runtime/pprof: fix errors in pprof_test I think the original depth-1 argument to allocDeep was correct. Reverted that, and also the change to maxSkip in mprof.go, which was also incorrect. I think before we were usually passing accidentally in the loop over matched stacks when we really should usually have been passing in the previous loop. Change-Id: I6a6a696463e2baf045b66f418d7afbfcb49258e4 Reviewed-on: https://go-review.googlesource.com/c/go/+/712100 Reviewed-by: Michael Matloob TryBot-Bypass: Michael Matloob Reviewed-by: Michael Knyszek --- src/runtime/mprof.go | 2 +- src/runtime/pprof/pprof_test.go | 6 ++---- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/src/runtime/mprof.go b/src/runtime/mprof.go index 743fa5a3fe848b..794f57cee24a7c 100644 --- a/src/runtime/mprof.go +++ b/src/runtime/mprof.go @@ -49,7 +49,7 @@ const ( // desired maximum number of frames after expansion. // This should be at least as large as the largest skip value // used for profiling; otherwise stacks may be truncated inconsistently - maxSkip = 8 + maxSkip = 6 // maxProfStackDepth is the highest valid value for debug.profstackdepth. // It's used for the bucket.stk func. diff --git a/src/runtime/pprof/pprof_test.go b/src/runtime/pprof/pprof_test.go index 23d3cf585e59f1..b816833e52285f 100644 --- a/src/runtime/pprof/pprof_test.go +++ b/src/runtime/pprof/pprof_test.go @@ -2549,9 +2549,6 @@ func TestProfilerStackDepth(t *testing.T) { for _, test := range tests { t.Run(test.profiler, func(t *testing.T) { - if test.profiler == "heap" { - testenv.SkipFlaky(t, 74029) - } var buf bytes.Buffer if err := Lookup(test.profiler).WriteTo(&buf, 0); err != nil { t.Fatalf("failed to write heap profile: %v", err) @@ -2586,6 +2583,7 @@ func TestProfilerStackDepth(t *testing.T) { t.Logf("matched stack=%s", stk) if len(stk) != depth { t.Errorf("want stack depth = %d, got %d", depth, len(stk)) + continue } if rootFn, wantFn := stk[depth-1], "runtime/pprof.allocDeep"; rootFn != wantFn { @@ -2663,7 +2661,7 @@ func goroutineDeep(t *testing.T, n int) { // guaranteed to have exactly the desired depth with produceProfileEvents as // their root frame which is expected by TestProfilerStackDepth. func produceProfileEvents(t *testing.T, depth int) { - allocDeep(depth + 1) // +1 for produceProfileEvents, ** + allocDeep(depth - 1) // -1 for produceProfileEvents, ** blockChanDeep(t, depth-2) // -2 for produceProfileEvents, **, chanrecv1 blockMutexDeep(t, depth-2) // -2 for produceProfileEvents, **, Unlock memSink = nil From ff360c2f1b51b1e725d10c0864a6b698d3a5ffc3 Mon Sep 17 00:00:00 2001 From: Julien Cretel Date: Wed, 15 Oct 2025 15:48:41 +0200 Subject: [PATCH 152/152] net/url: speed up escape and unescape MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This change adds a generated 8-bit bitmask for use in functions shouldEscape and ishex. Function shouldEscape is now inlineable. Function escape is now much faster; function unescape is a bit faster. 
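The change amounts to a single 256-entry table in which each bit records, per encoding mode, whether a byte may appear unescaped, with one spare bit marking hexadecimal digits, so both shouldEscape and ishex reduce to one lookup. A self-contained sketch of that idea, cut down to two illustrative modes (the names and mode set here are not the package's own; the generated table in the diff below is the real thing):

package main

import "fmt"

type mode uint8

const (
	modePath mode = 1 << iota
	modeQuery
	hexDigit // spare bit reused to mark hexadecimal digits
)

// allowed[c] has a mode's bit set when byte c may appear unescaped in that mode.
var allowed = func() [256]mode {
	var t [256]mode
	for c := 0; c < 256; c++ {
		b := byte(c)
		if 'a' <= b && b <= 'z' || 'A' <= b && b <= 'Z' || '0' <= b && b <= '9' {
			t[c] |= modePath | modeQuery
		}
		if b == '/' || b == '-' || b == '.' || b == '_' || b == '~' {
			t[c] |= modePath
		}
		if '0' <= b && b <= '9' || 'a' <= b && b <= 'f' || 'A' <= b && b <= 'F' {
			t[c] |= hexDigit
		}
	}
	return t
}()

// shouldEscape collapses the old switch-heavy logic into one lookup,
// which also makes it trivially inlineable.
func shouldEscape(c byte, m mode) bool { return allowed[c]&m == 0 }

func isHex(c byte) bool { return allowed[c]&hexDigit != 0 }

func main() {
	fmt.Println(shouldEscape('/', modePath), shouldEscape('/', modeQuery)) // false true
	fmt.Println(isHex('F'), isHex('g'))                                    // true false
}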
Here are some benchmark results (no change to allocations): goos: darwin goarch: amd64 pkg: net/url cpu: Intel(R) Core(TM) i7-6700HQ CPU @ 2.60GHz │ old │ new │ │ sec/op │ sec/op vs base │ QueryEscape/#00-8 58.38n ± 1% 35.98n ± 1% -38.38% (p=0.000 n=20) QueryEscape/#01-8 303.50n ± 0% 94.77n ± 0% -68.77% (p=0.000 n=20) QueryEscape/#02-8 202.90n ± 0% 78.66n ± 1% -61.23% (p=0.000 n=20) QueryEscape/#03-8 444.5n ± 0% 145.9n ± 0% -67.17% (p=0.000 n=20) QueryEscape/#04-8 2678.0n ± 0% 913.7n ± 0% -65.88% (p=0.000 n=20) PathEscape/#00-8 81.34n ± 0% 44.64n ± 1% -45.12% (p=0.000 n=20) PathEscape/#01-8 307.65n ± 0% 96.71n ± 1% -68.56% (p=0.000 n=20) PathEscape/#02-8 200.80n ± 1% 78.25n ± 0% -61.03% (p=0.000 n=20) PathEscape/#03-8 450.1n ± 1% 145.5n ± 0% -67.67% (p=0.000 n=20) PathEscape/#04-8 2663.5n ± 0% 876.5n ± 0% -67.09% (p=0.000 n=20) QueryUnescape/#00-8 53.32n ± 1% 51.67n ± 1% -3.09% (p=0.000 n=20) QueryUnescape/#01-8 161.0n ± 1% 136.2n ± 1% -15.40% (p=0.000 n=20) QueryUnescape/#02-8 126.1n ± 1% 118.3n ± 1% -6.23% (p=0.000 n=20) QueryUnescape/#03-8 294.6n ± 0% 273.1n ± 0% -7.30% (p=0.000 n=20) QueryUnescape/#04-8 1.511µ ± 0% 1.411µ ± 0% -6.62% (p=0.000 n=20) PathUnescape/#00-8 63.84n ± 1% 53.59n ± 1% -16.05% (p=0.000 n=20) PathUnescape/#01-8 163.6n ± 3% 137.9n ± 1% -15.71% (p=0.000 n=20) PathUnescape/#02-8 126.4n ± 1% 119.1n ± 1% -5.78% (p=0.000 n=20) PathUnescape/#03-8 294.2n ± 0% 273.3n ± 0% -7.12% (p=0.000 n=20) PathUnescape/#04-8 1.554µ ± 0% 1.417µ ± 0% -8.78% (p=0.000 n=20) geomean 277.8n 162.7n -41.44% This change draws heavy inspiration from CL 174998, which showed promise but stalled years ago. Updates #17860 --- src/net/url/encoding_table.go | 114 +++++++++++++++ src/net/url/gen_encoding_table.go | 234 ++++++++++++++++++++++++++++++ src/net/url/url.go | 106 +------------- 3 files changed, 354 insertions(+), 100 deletions(-) create mode 100644 src/net/url/encoding_table.go create mode 100644 src/net/url/gen_encoding_table.go diff --git a/src/net/url/encoding_table.go b/src/net/url/encoding_table.go new file mode 100644 index 00000000000000..60b3564948e3ce --- /dev/null +++ b/src/net/url/encoding_table.go @@ -0,0 +1,114 @@ +// Code generated from gen_encoding_table.go using 'go generate'; DO NOT EDIT. + +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package url + +type encoding uint8 + +const ( + encodePath encoding = 1 << iota + encodePathSegment + encodeHost + encodeZone + encodeUserPassword + encodeQueryComponent + encodeFragment + + // hexChar is actually NOT an encoding mode, but there are only seven + // encoding modes. We might as well abuse the otherwise unused most + // significant bit in uint8 to indicate whether a character is + // hexadecimal. 
+ hexChar +) + +var table = [256]encoding{ + '!': encodeFragment | encodeZone | encodeHost, + '"': encodeZone | encodeHost, + '$': encodeFragment | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + '&': encodeFragment | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + '\'': encodeZone | encodeHost, + '(': encodeFragment | encodeZone | encodeHost, + ')': encodeFragment | encodeZone | encodeHost, + '*': encodeFragment | encodeZone | encodeHost, + '+': encodeFragment | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + ',': encodeFragment | encodeUserPassword | encodeZone | encodeHost | encodePath, + '-': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + '.': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + '/': encodeFragment | encodePath, + '0': hexChar | encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + '1': hexChar | encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + '2': hexChar | encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + '3': hexChar | encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + '4': hexChar | encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + '5': hexChar | encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + '6': hexChar | encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + '7': hexChar | encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + '8': hexChar | encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + '9': hexChar | encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + ':': encodeFragment | encodeZone | encodeHost | encodePathSegment | encodePath, + ';': encodeFragment | encodeUserPassword | encodeZone | encodeHost | encodePath, + '<': encodeZone | encodeHost, + '=': encodeFragment | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + '>': encodeZone | encodeHost, + '?': encodeFragment, + '@': encodeFragment | encodePathSegment | encodePath, + 'A': hexChar | encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'B': hexChar | encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'C': hexChar | encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'D': hexChar | encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'E': hexChar | encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'F': hexChar | encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | 
encodePathSegment | encodePath, + 'G': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'H': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'I': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'J': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'K': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'L': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'M': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'N': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'O': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'P': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'Q': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'R': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'S': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'T': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'U': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'V': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'W': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'X': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'Y': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'Z': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + '[': encodeZone | encodeHost, + ']': encodeZone | encodeHost, + '_': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'a': hexChar | encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'b': hexChar | encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'c': hexChar | encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'd': hexChar | encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'e': hexChar | encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'f': hexChar | encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | 
encodePath, + 'g': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'h': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'i': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'j': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'k': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'l': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'm': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'n': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'o': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'p': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'q': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'r': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 's': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 't': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'u': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'v': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'w': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'x': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'y': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + 'z': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, + '~': encodeFragment | encodeQueryComponent | encodeUserPassword | encodeZone | encodeHost | encodePathSegment | encodePath, +} diff --git a/src/net/url/gen_encoding_table.go b/src/net/url/gen_encoding_table.go new file mode 100644 index 00000000000000..5defe5046bb292 --- /dev/null +++ b/src/net/url/gen_encoding_table.go @@ -0,0 +1,234 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ignore + +package main + +import ( + "bytes" + _ "embed" + "fmt" + "go/format" + "io" + "log" + "maps" + "os" + "slices" + "strconv" + "strings" +) + +// We embed this source file in the resulting code-generation program in order +// to extract the definitions of the encoding type and constants from it and +// include them in the generated file. 
+// +//go:embed gen_encoding_table.go +var genSource string + +const filename = "encoding_table.go" + +func main() { + var out bytes.Buffer + fmt.Fprintln(&out, "// Code generated from gen_encoding_table.go using 'go generate'; DO NOT EDIT.") + fmt.Fprintln(&out) + fmt.Fprintln(&out, "// Copyright 2025 The Go Authors. All rights reserved.") + fmt.Fprintln(&out, "// Use of this source code is governed by a BSD-style") + fmt.Fprintln(&out, "// license that can be found in the LICENSE file.") + fmt.Fprintln(&out) + fmt.Fprintln(&out, "package url") + fmt.Fprintln(&out) + generateEnc(&out, genSource) + generateTable(&out) + + formatted, err := format.Source(out.Bytes()) + if err != nil { + log.Fatal("format:", err) + } + + err = os.WriteFile(filename, formatted, 0644) + if err != nil { + log.Fatal("WriteFile:", err) + } +} + +func generateEnc(w io.Writer, src string) { + var writeLine bool + for line := range strings.Lines(src) { + if strings.HasPrefix(line, "// START encoding") { + writeLine = true + continue + } + if strings.HasPrefix(line, "// END encoding") { + return + } + if writeLine { + fmt.Fprint(w, line) + } + } +} + +func generateTable(w io.Writer) { + fmt.Fprintln(w, "var table = [256]encoding{") + + // Sort the encodings (in decreasing order) to guarantee a stable output. + sortedEncs := slices.Sorted(maps.Keys(encNames)) + slices.Reverse(sortedEncs) + + for i := range 256 { + c := byte(i) + var lineBuf bytes.Buffer + + // Write key to line buffer. + lineBuf.WriteString(strconv.QuoteRune(rune(c))) + + lineBuf.WriteByte(':') + + // Write value to line buffer. + blankVal := true + if ishex(c) { + // Set the hexChar bit if this char is hexadecimal. + lineBuf.WriteString("hexChar") + blankVal = false + } + for _, enc := range sortedEncs { + if !shouldEscape(c, enc) { + if !blankVal { + lineBuf.WriteByte('|') + } + // Set this encoding mode's bit if this char should NOT be + // escaped. + name := encNames[enc] + lineBuf.WriteString(name) + blankVal = false + } + } + + if !blankVal { + lineBuf.WriteString(",\n") + w.Write(lineBuf.Bytes()) + } + } + fmt.Fprintln(w, "}") +} + +// START encoding (keep this marker comment in sync with genEnc) +type encoding uint8 + +const ( + encodePath encoding = 1 << iota + encodePathSegment + encodeHost + encodeZone + encodeUserPassword + encodeQueryComponent + encodeFragment + + // hexChar is actually NOT an encoding mode, but there are only seven + // encoding modes. We might as well abuse the otherwise unused most + // significant bit in uint8 to indicate whether a character is + // hexadecimal. + hexChar +) + +// END encoding (keep this marker comment in sync with genEnc) + +// Keep this in sync with the definitions of encoding mode constants. +var encNames = map[encoding]string{ + encodePath: "encodePath", + encodePathSegment: "encodePathSegment", + encodeHost: "encodeHost", + encodeZone: "encodeZone", + encodeUserPassword: "encodeUserPassword", + encodeQueryComponent: "encodeQueryComponent", + encodeFragment: "encodeFragment", +} + +// Return true if the specified character should be escaped when +// appearing in a URL string, according to RFC 3986. +// +// Please be informed that for now shouldEscape does not check all +// reserved characters correctly. See golang.org/issue/5684. 
+func shouldEscape(c byte, mode encoding) bool { + // §2.3 Unreserved characters (alphanum) + if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' { + return false + } + + if mode == encodeHost || mode == encodeZone { + // §3.2.2 Host allows + // sub-delims = "!" / "$" / "&" / "'" / "(" / ")" / "*" / "+" / "," / ";" / "=" + // as part of reg-name. + // We add : because we include :port as part of host. + // We add [ ] because we include [ipv6]:port as part of host. + // We add < > because they're the only characters left that + // we could possibly allow, and Parse will reject them if we + // escape them (because hosts can't use %-encoding for + // ASCII bytes). + switch c { + case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=', ':', '[', ']', '<', '>', '"': + return false + } + } + + switch c { + case '-', '_', '.', '~': // §2.3 Unreserved characters (mark) + return false + + case '$', '&', '+', ',', '/', ':', ';', '=', '?', '@': // §2.2 Reserved characters (reserved) + // Different sections of the URL allow a few of + // the reserved characters to appear unescaped. + switch mode { + case encodePath: // §3.3 + // The RFC allows : @ & = + $ but saves / ; , for assigning + // meaning to individual path segments. This package + // only manipulates the path as a whole, so we allow those + // last three as well. That leaves only ? to escape. + return c == '?' + + case encodePathSegment: // §3.3 + // The RFC allows : @ & = + $ but saves / ; , for assigning + // meaning to individual path segments. + return c == '/' || c == ';' || c == ',' || c == '?' + + case encodeUserPassword: // §3.2.1 + // The RFC allows ';', ':', '&', '=', '+', '$', and ',' in + // userinfo, so we must escape only '@', '/', and '?'. + // The parsing of userinfo treats ':' as special so we must escape + // that too. + return c == '@' || c == '/' || c == '?' || c == ':' + + case encodeQueryComponent: // §3.4 + // The RFC reserves (so we must escape) everything. + return true + + case encodeFragment: // §4.1 + // The RFC text is silent but the grammar allows + // everything, so escape nothing. + return false + } + } + + if mode == encodeFragment { + // RFC 3986 §2.2 allows not escaping sub-delims. A subset of sub-delims are + // included in reserved from RFC 2396 §2.2. The remaining sub-delims do not + // need to be escaped. To minimize potential breakage, we apply two restrictions: + // (1) we always escape sub-delims outside of the fragment, and (2) we always + // escape single quote to avoid breaking callers that had previously assumed that + // single quotes would be escaped. See issue #19917. + switch c { + case '!', '(', ')', '*': + return false + } + } + + // Everything else must be escaped. + return true +} + +func ishex(c byte) bool { + return '0' <= c && c <= '9' || + 'a' <= c && c <= 'f' || + 'A' <= c && c <= 'F' +} diff --git a/src/net/url/url.go b/src/net/url/url.go index 6afa30f162bd25..078910e266f0a5 100644 --- a/src/net/url/url.go +++ b/src/net/url/url.go @@ -7,6 +7,9 @@ // See RFC 3986. This package generally follows RFC 3986, except where // it deviates for compatibility reasons. // RFC 6874 followed for IPv6 zone literals. + +//go:generate go run gen_encoding_table.go + package url // When sending changes, first search old issues for history on decisions. 
@@ -51,15 +54,7 @@ func (e *Error) Temporary() bool { const upperhex = "0123456789ABCDEF" func ishex(c byte) bool { - switch { - case '0' <= c && c <= '9': - return true - case 'a' <= c && c <= 'f': - return true - case 'A' <= c && c <= 'F': - return true - } - return false + return table[c]&hexChar != 0 } func unhex(c byte) byte { @@ -75,18 +70,6 @@ func unhex(c byte) byte { } } -type encoding int - -const ( - encodePath encoding = 1 + iota - encodePathSegment - encodeHost - encodeZone - encodeUserPassword - encodeQueryComponent - encodeFragment -) - type EscapeError string func (e EscapeError) Error() string { @@ -99,86 +82,9 @@ func (e InvalidHostError) Error() string { return "invalid character " + strconv.Quote(string(e)) + " in host name" } -// Return true if the specified character should be escaped when -// appearing in a URL string, according to RFC 3986. -// -// Please be informed that for now shouldEscape does not check all -// reserved characters correctly. See golang.org/issue/5684. +// See the reference implementation in gen_encoding_table.go. func shouldEscape(c byte, mode encoding) bool { - // §2.3 Unreserved characters (alphanum) - if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' { - return false - } - - if mode == encodeHost || mode == encodeZone { - // §3.2.2 Host allows - // sub-delims = "!" / "$" / "&" / "'" / "(" / ")" / "*" / "+" / "," / ";" / "=" - // as part of reg-name. - // We add : because we include :port as part of host. - // We add [ ] because we include [ipv6]:port as part of host. - // We add < > because they're the only characters left that - // we could possibly allow, and Parse will reject them if we - // escape them (because hosts can't use %-encoding for - // ASCII bytes). - switch c { - case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=', ':', '[', ']', '<', '>', '"': - return false - } - } - - switch c { - case '-', '_', '.', '~': // §2.3 Unreserved characters (mark) - return false - - case '$', '&', '+', ',', '/', ':', ';', '=', '?', '@': // §2.2 Reserved characters (reserved) - // Different sections of the URL allow a few of - // the reserved characters to appear unescaped. - switch mode { - case encodePath: // §3.3 - // The RFC allows : @ & = + $ but saves / ; , for assigning - // meaning to individual path segments. This package - // only manipulates the path as a whole, so we allow those - // last three as well. That leaves only ? to escape. - return c == '?' - - case encodePathSegment: // §3.3 - // The RFC allows : @ & = + $ but saves / ; , for assigning - // meaning to individual path segments. - return c == '/' || c == ';' || c == ',' || c == '?' - - case encodeUserPassword: // §3.2.1 - // The RFC allows ';', ':', '&', '=', '+', '$', and ',' in - // userinfo, so we must escape only '@', '/', and '?'. - // The parsing of userinfo treats ':' as special so we must escape - // that too. - return c == '@' || c == '/' || c == '?' || c == ':' - - case encodeQueryComponent: // §3.4 - // The RFC reserves (so we must escape) everything. - return true - - case encodeFragment: // §4.1 - // The RFC text is silent but the grammar allows - // everything, so escape nothing. - return false - } - } - - if mode == encodeFragment { - // RFC 3986 §2.2 allows not escaping sub-delims. A subset of sub-delims are - // included in reserved from RFC 2396 §2.2. The remaining sub-delims do not - // need to be escaped. 
To minimize potential breakage, we apply two restrictions: - // (1) we always escape sub-delims outside of the fragment, and (2) we always - // escape single quote to avoid breaking callers that had previously assumed that - // single quotes would be escaped. See issue #19917. - switch c { - case '!', '(', ')', '*': - return false - } - } - - // Everything else must be escaped. - return true + return table[c]&mode == 0 } // QueryUnescape does the inverse transformation of [QueryEscape],