@@ -14,8 +14,8 @@ import "fmt"
// the nearest tree ancestor of a given node such that the
// ancestor is also in the set.
//
// Given a set of blocks {B1, B2, B3} within the dominator tree, established by
// stm.Insert()ing B1, B2, B3, etc, a query at block B
// Given a set of blocks {B1, B2, B3} within the dominator tree, established
// by stm.Insert()ing B1, B2, B3, etc, a query at block B
// (performed with stm.Find(stm, B, adjust, helper))
// will return the member of the set that is the nearest strict
// ancestor of B within the dominator tree, or nil if none exists.
@@ -49,9 +49,9 @@ type SparseTreeMap RBTint32
// packages, such as gc.
type SparseTreeHelper struct {
Sdom []SparseTreeNode // indexed by block.ID
Po []*Block // exported data
Dom []*Block // exported data
Ponums []int32 // exported data
Po []*Block // exported data; the blocks, in a post-order
Dom []*Block // exported data; the dominator of this block.
Ponums []int32 // exported data; Po[Ponums[b.ID]] == b; the index of b in Po
}
// NewSparseTreeHelper returns a SparseTreeHelper for use
@@ -79,11 +79,19 @@ func makeSparseTreeHelper(sdom SparseTree, dom, po []*Block, ponums []int32) *Sp
// A sparseTreeMapEntry contains the data stored in a binary search
// data structure indexed by (dominator tree walk) entry and exit numbers.
// Each entry is added twice, once keyed by entry-1/entry/entry+1 and
// once keyed by exit+1/exit/exit-1. (there are three choices of paired indices, not 9, and they properly nest)
// once keyed by exit+1/exit/exit-1.
//
// Within a sparse tree, the two entries added bracket all their descendant
// entries within the tree; the first insertion is keyed by entry number,
// which comes before all the entry and exit numbers of descendants, and
// the second insertion is keyed by exit number, which comes after all the
// entry and exit numbers of the descendants.
type sparseTreeMapEntry struct {
index *SparseTreeNode
block *Block // TODO: store this in a separate index.
data interface{}
index *SparseTreeNode // references the entry and exit numbers for a block in the sparse tree
block *Block // TODO: store this in a separate index.
data interface{}
sparseParent *sparseTreeMapEntry // references the nearest ancestor of this block in the sparse tree.
adjust int32 // at what adjustment was this node entered into the sparse tree? The same block may be entered more than once, but at different adjustments.
}
// Insert creates a definition within b with data x.
@@ -98,12 +106,25 @@ func (m *SparseTreeMap) Insert(b *Block, adjust int32, x interface{}, helper *Sp
// assert unreachable
return
}
entry := &sparseTreeMapEntry{index: blockIndex, data: x}
// sp will be the sparse parent in this sparse tree (nearest ancestor in the larger tree that is also in this sparse tree)
sp := m.findEntry(b, adjust, helper)
entry := &sparseTreeMapEntry{index: blockIndex, block: b, data: x, sparseParent: sp, adjust: adjust}
right := blockIndex.exit - adjust
_ = rbtree.Insert(right, entry)
left := blockIndex.entry + adjust
_ = rbtree.Insert(left, entry)
// This newly inserted block may now be the sparse parent of some existing nodes (the new sparse children of this block)
// Iterate over nodes bracketed by this new node to correct their parent, but not over the proper sparse descendants of those nodes.
_, d := rbtree.Lub(left) // Lub (not EQ) of left is either right or a sparse child
for tme := d.(*sparseTreeMapEntry); tme != entry; tme = d.(*sparseTreeMapEntry) {
tme.sparseParent = entry
// all descendants of tme are unchanged;
// next sparse sibling (or right-bracketing sparse parent == entry) is first node after tme.index.exit - tme.adjust
_, d = rbtree.Lub(tme.index.exit - tme.adjust)
}
}
// Find returns the definition visible from block b, or nil if none can be found.
@@ -118,45 +139,41 @@ func (m *SparseTreeMap) Insert(b *Block, adjust int32, x interface{}, helper *Sp
//
// Another way to think of this is that Find searches for inputs, Insert defines outputs.
func (m *SparseTreeMap) Find(b *Block, adjust int32, helper *SparseTreeHelper) interface{} {
v := m.findEntry(b, adjust, helper)
if v == nil {
return nil
}
return v.data
}
func (m *SparseTreeMap) findEntry(b *Block, adjust int32, helper *SparseTreeHelper) *sparseTreeMapEntry {
rbtree := (*RBTint32)(m)
if rbtree == nil {
return nil
}
blockIndex := &helper.Sdom[b.ID]
// The Glb (not EQ) of this probe is either the entry-indexed end of a sparse parent
// or the exit-indexed end of a sparse sibling
_, v := rbtree.Glb(blockIndex.entry + adjust)
for v != nil {
otherEntry := v.(*sparseTreeMapEntry)
otherIndex := otherEntry.index
// Two cases -- either otherIndex brackets blockIndex,
// or it doesn't.
//
// Note that if otherIndex and blockIndex are
// the same block, then the glb test only passed
// because the definition is "before",
// i.e., k == blockIndex.entry-1
// allowing equality is okay on the blocks check.
if otherIndex.exit >= blockIndex.exit {
// bracketed.
return otherEntry.data
if v == nil {
return nil
}
otherEntry := v.(*sparseTreeMapEntry)
if otherEntry.index.exit >= blockIndex.exit { // otherEntry exit after blockIndex exit; therefore, brackets
return otherEntry
}
// otherEntry is a sparse Sibling, and shares the same sparse parent (nearest ancestor within larger tree)
sp := otherEntry.sparseParent
if sp != nil {
if sp.index.exit < blockIndex.exit { // no ancestor found
return nil
}
// In the not-bracketed case, we could memoize the results of
// walking up the tree, but for now we won't.
// Memoize plan is to take the gap (inclusive)
// from otherIndex.exit+1 to blockIndex.entry-1
// and insert it into this or a second tree.
// Said tree would then need adjusting whenever
// an insertion occurred.
// Expectation is that per-variable tree is sparse,
// therefore probe siblings instead of climbing up.
// Note that each sibling encountered in this walk
// to find a defining ancestor shares that ancestor
// because the walk skips over the interior -- each
// Glb will be an exit, and the iteration is to the
// Glb of the entry.
_, v = rbtree.Glb(otherIndex.entry - 1)
return sp
}
return nil // nothing found
return nil
}
func (m *SparseTreeMap) String() string {
@@ -165,5 +182,8 @@ func (m *SparseTreeMap) String() string {
}
func (e *sparseTreeMapEntry) String() string {
return fmt.Sprintf("index=%v, data=%v", e.index, e.data)
if e == nil {
return "nil"
}
return fmt.Sprintf("(index=%v, block=%v, data=%v)->%v", e.index, e.block, e.data, e.sparseParent)
}
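A minimal, self-contained sketch of the entry/exit bracketing idea above. The names node, number, and nearestMarked are illustrative only, not part of this change; the sketch demonstrates the invariant the comments rely on: a node's [entry, exit] interval strictly contains the intervals of all its descendants, so the nearest marked ancestor of a query is the marked node with the largest entry that still brackets it.
package main
import "fmt"
// node stands in for an ssa block in a dominator tree.
type node struct {
	name        string
	children    []*node
	entry, exit int32
}
// number assigns entry/exit values in a DFS, stepping by 2 so the
// entry-1/entry+1 and exit-1/exit+1 keys used by Insert never collide.
func number(n *node, c int32) int32 {
	n.entry = c
	c += 2
	for _, k := range n.children {
		c = number(k, c)
	}
	n.exit = c
	return c + 2
}
// nearestMarked returns the member of marked that most tightly brackets
// q, i.e. q's nearest strict ancestor within the marked set.
func nearestMarked(q *node, marked []*node) *node {
	var best *node
	for _, m := range marked {
		if m.entry < q.entry && q.exit < m.exit {
			if best == nil || m.entry > best.entry {
				best = m
			}
		}
	}
	return best
}
func main() {
	c := &node{name: "C"}
	b := &node{name: "B", children: []*node{c}}
	d := &node{name: "D"}
	a := &node{name: "A", children: []*node{b, d}}
	number(a, 1)
	fmt.Println(nearestMarked(c, []*node{a, b}).name) // B: tighter bracket than A
	fmt.Println(nearestMarked(d, []*node{a, b}).name) // A: B does not bracket D
}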
@@ -86,3 +86,26 @@ func tighten(f *Func) {
}
}
}
// phiTighten moves constants closer to phi users.
// This pass avoids having lots of constants live for lots of the program.
// See issue 16407.
func phiTighten(f *Func) {
for _, b := range f.Blocks {
for _, v := range b.Values {
if v.Op != OpPhi {
continue
}
for i, a := range v.Args {
if !a.rematerializeable() {
continue // not a constant we can move around
}
if a.Block == b.Preds[i].b {
continue // already in the right place
}
// Make a copy of a, put in predecessor block.
v.SetArg(i, a.copyInto(b.Preds[i].b))
}
}
}
}
@@ -673,11 +673,6 @@ func init() {
goarch = buildContext.GOARCH
goos = buildContext.GOOS
if _, ok := osArchSupportsCgo[goos+"/"+goarch]; !ok {
fmt.Fprintf(os.Stderr, "cmd/go: unsupported GOOS/GOARCH pair %s/%s\n", goos, goarch)
os.Exit(2)
}
if goos == "windows" {
exeSuffix = ".exe"
}
@@ -1226,6 +1221,11 @@ func allArchiveActions(root *action) []*action {
// do runs the action graph rooted at root.
func (b *builder) do(root *action) {
if _, ok := osArchSupportsCgo[goos+"/"+goarch]; !ok && buildContext.Compiler == "gc" {
fmt.Fprintf(os.Stderr, "cmd/go: unsupported GOOS/GOARCH pair %s/%s\n", goos, goarch)
os.Exit(2)
}
// Build list of all actions, assigning depth-first post-order priority.
// The original implementation here was a true queue
// (using a channel) but it had the effect of getting
@@ -55,7 +55,6 @@ func report(err error) {
func usage() {
fmt.Fprintf(os.Stderr, "usage: gofmt [flags] [path ...]\n")
flag.PrintDefaults()
os.Exit(2)
}
func initParserMode() {
@@ -325,9 +325,9 @@ func (r *readRune) readByte() (b byte, err error) {
r.pending--
return
}
_, err = r.reader.Read(r.pendBuf[:1])
if err != nil {
return
n, err := io.ReadFull(r.reader, r.pendBuf[:1])
if n != 1 {
return 0, err
}
return r.pendBuf[0], err
}
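A sketch of why the io.ReadFull change matters: the io.Reader contract permits a call to return (0, nil), which a bare one-byte Read would misreport as success. flakyReader below is a hypothetical reader, not part of this change; io.ReadFull keeps calling Read until the buffer is filled or a real error arrives.
package main
import (
	"fmt"
	"io"
	"strings"
)
// flakyReader returns (0, nil) on every other call, which io.Reader allows.
type flakyReader struct {
	r    io.Reader
	skip bool
}
func (f *flakyReader) Read(p []byte) (int, error) {
	f.skip = !f.skip
	if f.skip {
		return 0, nil // legal per io.Reader, but no data delivered
	}
	return f.r.Read(p)
}
func main() {
	var buf [1]byte
	// ReadFull retries through the (0, nil) results until the byte arrives.
	n, err := io.ReadFull(&flakyReader{r: strings.NewReader("x")}, buf[:])
	fmt.Println(n, err, string(buf[:])) // 1 <nil> x
}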
@@ -15,6 +15,7 @@ import (
"regexp"
"strings"
"testing"
"testing/iotest"
"unicode/utf8"
)
@@ -118,20 +119,6 @@ func (s *IntString) Scan(state ScanState, verb rune) error {
var intStringVal IntString
// myStringReader implements Read but not ReadRune, allowing us to test our readRune wrapper
// type that creates something that can read runes given only Read().
type myStringReader struct {
r *strings.Reader
}
func (s *myStringReader) Read(p []byte) (n int, err error) {
return s.r.Read(p)
}
func newReader(s string) *myStringReader {
return &myStringReader{strings.NewReader(s)}
}
var scanTests = []ScanTest{
// Basic types
{"T\n", &boolVal, true}, // boolean test vals toggle to be sure they are written
@@ -363,25 +350,38 @@ var multiTests = []ScanfMultiTest{
{"%v%v", "FALSE23", args(&truth, &i), args(false, 23), ""},
}
func testScan(name string, t *testing.T, scan func(r io.Reader, a ...interface{}) (int, error)) {
var readers = []struct {
name string
f func(string) io.Reader
}{
{"StringReader", func(s string) io.Reader {
return strings.NewReader(s)
}},
{"ReaderOnly", func(s string) io.Reader {
return struct{ io.Reader }{strings.NewReader(s)}
}},
{"OneByteReader", func(s string) io.Reader {
return iotest.OneByteReader(strings.NewReader(s))
}},
{"DataErrReader", func(s string) io.Reader {
return iotest.DataErrReader(strings.NewReader(s))
}},
}
func testScan(t *testing.T, f func(string) io.Reader, scan func(r io.Reader, a ...interface{}) (int, error)) {
for _, test := range scanTests {
var r io.Reader
if name == "StringReader" {
r = strings.NewReader(test.text)
} else {
r = newReader(test.text)
}
r := f(test.text)
n, err := scan(r, test.in)
if err != nil {
m := ""
if n > 0 {
m = Sprintf(" (%d fields ok)", n)
}
t.Errorf("%s got error scanning %q: %s%s", name, test.text, err, m)
t.Errorf("got error scanning %q: %s%s", test.text, err, m)
continue
}
if n != 1 {
t.Errorf("%s count error on entry %q: got %d", name, test.text, n)
t.Errorf("count error on entry %q: got %d", test.text, n)
continue
}
// The incoming value may be a pointer
@@ -391,25 +391,25 @@ func testScan(name string, t *testing.T, scan func(r io.Reader, a ...interface{}
}
val := v.Interface()
if !reflect.DeepEqual(val, test.out) {
t.Errorf("%s scanning %q: expected %#v got %#v, type %T", name, test.text, test.out, val, val)
t.Errorf("scanning %q: expected %#v got %#v, type %T", test.text, test.out, val, val)
}
}
}
func TestScan(t *testing.T) {
testScan("StringReader", t, Fscan)
}
func TestMyReaderScan(t *testing.T) {
testScan("myStringReader", t, Fscan)
for _, r := range readers {
t.Run(r.name, func(t *testing.T) {
testScan(t, r.f, Fscan)
})
}
}
func TestScanln(t *testing.T) {
testScan("StringReader", t, Fscanln)
}
func TestMyReaderScanln(t *testing.T) {
testScan("myStringReader", t, Fscanln)
for _, r := range readers {
t.Run(r.name, func(t *testing.T) {
testScan(t, r.f, Fscanln)
})
}
}
func TestScanf(t *testing.T) {
@@ -500,15 +500,10 @@ func TestInf(t *testing.T) {
}
}
func testScanfMulti(name string, t *testing.T) {
func testScanfMulti(t *testing.T, f func(string) io.Reader) {
sliceType := reflect.TypeOf(make([]interface{}, 1))
for _, test := range multiTests {
var r io.Reader
if name == "StringReader" {
r = strings.NewReader(test.text)
} else {
r = newReader(test.text)
}
r := f(test.text)
n, err := Fscanf(r, test.format, test.in...)
if err != nil {
if test.err == "" {
@@ -539,11 +534,11 @@ func testScanfMulti(name string, t *testing.T) {
}
func TestScanfMulti(t *testing.T) {
testScanfMulti("StringReader", t)
}
func TestMyReaderScanfMulti(t *testing.T) {
testScanfMulti("myStringReader", t)
for _, r := range readers {
t.Run(r.name, func(t *testing.T) {
testScanfMulti(t, r.f)
})
}
}
func TestScanMultiple(t *testing.T) {
@@ -818,20 +813,10 @@ func TestMultiLine(t *testing.T) {
}
}
// simpleReader is a strings.Reader that implements only Read, not ReadRune.
// Good for testing readahead.
type simpleReader struct {
sr *strings.Reader
}
func (s *simpleReader) Read(b []byte) (n int, err error) {
return s.sr.Read(b)
}
// TestLineByLineFscanf tests that Fscanf does not read past newline. Issue
// 3481.
func TestLineByLineFscanf(t *testing.T) {
r := &simpleReader{strings.NewReader("1\n2\n")}
r := struct{ io.Reader }{strings.NewReader("1\n2\n")}
var i, j int
n, err := Fscanf(r, "%v\n", &i)
if n != 1 || err != nil {
@@ -1000,7 +985,7 @@ func BenchmarkScanRecursiveIntReaderWrapper(b *testing.B) {
ints := makeInts(intCount)
var r RecursiveInt
for i := b.N - 1; i >= 0; i-- {
buf := newReader(string(ints))
buf := struct{ io.Reader }{strings.NewReader(string(ints))}
b.StartTimer()
Fscan(buf, &r)
b.StopTimer()
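A sketch of the method-stripping trick behind the struct{ io.Reader } wrappers that replace myStringReader and simpleReader: embedding promotes only the methods of the named interface, so optional interfaces such as io.RuneReader stop being satisfied and the readRune fallback path is exercised. Hypothetical example, not part of this change.
package main
import (
	"fmt"
	"io"
	"strings"
)
func main() {
	full := strings.NewReader("hi")   // has ReadRune, ReadByte, Seek, ...
	bare := struct{ io.Reader }{full} // only Read is promoted
	_, ok := interface{}(full).(io.RuneReader)
	fmt.Println(ok) // true
	_, ok = interface{}(bare).(io.RuneReader)
	fmt.Println(ok) // false: scanners must fall back to plain Read
}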
@@ -303,11 +303,11 @@ func TestImportVendor(t *testing.T) {
testenv.MustHaveGoBuild(t) // really must just have source
ctxt := Default
ctxt.GOPATH = ""
p, err := ctxt.Import("golang.org/x/net/http2/hpack", filepath.Join(ctxt.GOROOT, "src/net/http"), 0)
p, err := ctxt.Import("golang_org/x/net/http2/hpack", filepath.Join(ctxt.GOROOT, "src/net/http"), 0)
if err != nil {
t.Fatalf("cannot find vendored golang.org/x/net/http2/hpack from net/http directory: %v", err)
t.Fatalf("cannot find vendored golang_org/x/net/http2/hpack from net/http directory: %v", err)
}
want := "vendor/golang.org/x/net/http2/hpack"
want := "vendor/golang_org/x/net/http2/hpack"
if p.ImportPath != want {
t.Fatalf("Import succeeded but found %q, want %q", p.ImportPath, want)
}
@@ -333,7 +333,7 @@ func TestImportVendorParentFailure(t *testing.T) {
ctxt := Default
ctxt.GOPATH = ""
// This import should fail because the vendor/golang.org/x/net/http2 directory has no source code.
p, err := ctxt.Import("golang.org/x/net/http2", filepath.Join(ctxt.GOROOT, "src/net/http"), 0)
p, err := ctxt.Import("golang_org/x/net/http2", filepath.Join(ctxt.GOROOT, "src/net/http"), 0)
if err == nil {
t.Fatalf("found empty parent in %s", p.Dir)
}
@@ -297,7 +297,7 @@ var pkgDeps = map[string][]string{
"context", "math/rand", "os", "sort", "syscall", "time",
"internal/nettrace",
"internal/syscall/windows", "internal/singleflight", "internal/race",
"golang.org/x/net/route",
"golang_org/x/net/route",
},
// NET enables use of basic network-related packages.
@@ -378,8 +378,8 @@ var pkgDeps = map[string][]string{
"context", "compress/gzip", "container/list", "crypto/tls",
"mime/multipart", "runtime/debug",
"net/http/internal",
"golang.org/x/net/http2/hpack",
"golang.org/x/net/lex/httplex",
"golang_org/x/net/http2/hpack",
"golang_org/x/net/lex/httplex",
"internal/nettrace",
"net/http/httptrace",
},
@@ -443,7 +443,7 @@ func listStdPkgs(goroot string) ([]string, error) {
}
name := filepath.ToSlash(path[len(src):])
if name == "builtin" || name == "cmd" || strings.Contains(name, ".") {
if name == "builtin" || name == "cmd" || strings.Contains(name, "golang_org") {
return filepath.SkipDir
}
@@ -0,0 +1,47 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package rand
import (
"sync"
"testing"
)
// TestConcurrent exercises the rand API concurrently, triggering situations
// where the race detector is likely to detect issues.
func TestConcurrent(t *testing.T) {
const (
numRoutines = 10
numCycles = 10
)
var wg sync.WaitGroup
defer wg.Wait()
wg.Add(numRoutines)
for i := 0; i < numRoutines; i++ {
go func(i int) {
defer wg.Done()
buf := make([]byte, 997)
for j := 0; j < numCycles; j++ {
var seed int64
seed += int64(ExpFloat64())
seed += int64(Float32())
seed += int64(Float64())
seed += int64(Intn(Int()))
seed += int64(Int31n(Int31()))
seed += int64(Int63n(Int63()))
seed += int64(NormFloat64())
seed += int64(Uint32())
for _, p := range Perm(10) {
seed += int64(p)
}
Read(buf)
for _, b := range buf {
seed += int64(b)
}
Seed(int64(i*j) * seed)
}
}(i)
}
}
@@ -49,7 +49,13 @@ type Rand struct {
func New(src Source) *Rand { return &Rand{src: src} }
// Seed uses the provided seed value to initialize the generator to a deterministic state.
// Seed should not be called concurrently with any other Rand method.
func (r *Rand) Seed(seed int64) {
if lk, ok := r.src.(*lockedSource); ok {
lk.seedPos(seed, &r.readPos)
return
}
r.src.Seed(seed)
r.readPos = 0
}
@@ -172,20 +178,28 @@ func (r *Rand) Perm(n int) []int {
// Read generates len(p) random bytes and writes them into p. It
// always returns len(p) and a nil error.
// Read should not be called concurrently with any other Rand method.
func (r *Rand) Read(p []byte) (n int, err error) {
pos := r.readPos
val := r.readVal
if lk, ok := r.src.(*lockedSource); ok {
return lk.read(p, &r.readVal, &r.readPos)
}
return read(p, r.Int63, &r.readVal, &r.readPos)
}
func read(p []byte, int63 func() int64, readVal *int64, readPos *int8) (n int, err error) {
pos := *readPos
val := *readVal
for n = 0; n < len(p); n++ {
if pos == 0 {
val = r.Int63()
val = int63()
pos = 7
}
p[n] = byte(val)
val >>= 8
pos--
}
r.readPos = pos
r.readVal = val
*readPos = pos
*readVal = val
return
}
@@ -199,6 +213,7 @@ var globalRand = New(&lockedSource{src: NewSource(1)})
// deterministic state. If Seed is not called, the generator behaves as
// if seeded by Seed(1). Seed values that have the same remainder when
// divided by 2^31-1 generate the same pseudo-random sequence.
// Seed, unlike the Rand.Seed method, is safe for concurrent use.
func Seed(seed int64) { globalRand.Seed(seed) }
// Int63 returns a non-negative pseudo-random 63-bit integer as an int64
@@ -245,6 +260,7 @@ func Perm(n int) []int { return globalRand.Perm(n) }
// Read generates len(p) random bytes from the default Source and
// writes them into p. It always returns len(p) and a nil error.
// Read, unlike the Rand.Read method, is safe for concurrent use.
func Read(p []byte) (n int, err error) { return globalRand.Read(p) }
// NormFloat64 returns a normally distributed float64 in the range
@@ -285,3 +301,19 @@ func (r *lockedSource) Seed(seed int64) {
r.src.Seed(seed)
r.lk.Unlock()
}
// seedPos implements Seed for a lockedSource without a race condition.
func (r *lockedSource) seedPos(seed int64, readPos *int8) {
r.lk.Lock()
r.src.Seed(seed)
*readPos = 0
r.lk.Unlock()
}
// read implements Read for a lockedSource without a race condition.
func (r *lockedSource) read(p []byte, readVal *int64, readPos *int8) (n int, err error) {
r.lk.Lock()
n, err = read(p, r.src.Int63, readVal, readPos)
r.lk.Unlock()
return
}
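A minimal sketch, assuming only the package-level math/rand API, of the usage this change makes race-free: mixing Seed and Read across goroutines previously raced on readVal/readPos, and both now go through the lockedSource helpers above.
package main
import (
	"math/rand"
	"sync"
)
func main() {
	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			buf := make([]byte, 64)
			rand.Seed(int64(i)) // routed through lockedSource.seedPos
			rand.Read(buf)      // routed through lockedSource.read
		}(i)
	}
	wg.Wait()
}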
@@ -695,6 +695,11 @@ func TestDialerLocalAddr(t *testing.T) {
}
func TestDialerDualStack(t *testing.T) {
// This test is known to be flaky. Don't frighten regular
// users about it; only fail on the build dashboard.
if testenv.Builder() == "" {
testenv.SkipFlaky(t, 13324)
}
if !supportsIPv4 || !supportsIPv6 {
t.Skip("both IPv4 and IPv6 are required")
}
@@ -7,7 +7,7 @@ package http
import (
"strings"
"golang.org/x/net/lex/httplex"
"golang_org/x/net/lex/httplex"
)
// maxInt64 is the effective "infinite" value for the Server and
@@ -28,7 +28,7 @@ import (
"sync/atomic"
"time"
"golang.org/x/net/lex/httplex"
"golang_org/x/net/lex/httplex"
)
// Errors used by the HTTP server.
@@ -775,9 +775,6 @@ func (c *conn) readRequest(ctx context.Context) (w *response, err error) {
return nil, badRequestError("unsupported protocol version")
}
ctx, cancelCtx := context.WithCancel(ctx)
req.ctx = ctx
c.lastMethod = req.Method
c.r.setInfiniteReadLimit()
@@ -804,6 +801,8 @@ func (c *conn) readRequest(ctx context.Context) (w *response, err error) {
}
delete(req.Header, "Host")
ctx, cancelCtx := context.WithCancel(ctx)
req.ctx = ctx
req.RemoteAddr = c.remoteAddr
req.TLS = c.tlsState
if body, ok := req.Body.(*body); ok {
@@ -18,7 +18,7 @@ import (
"strings"
"sync"
"golang.org/x/net/lex/httplex"
"golang_org/x/net/lex/httplex"
)
// ErrLineTooLong is returned when reading request or response bodies
@@ -27,7 +27,7 @@ import (
"sync"
"time"
"golang.org/x/net/lex/httplex"
"golang_org/x/net/lex/httplex"
)
// DefaultTransport is the default implementation of Transport and is
@@ -9,7 +9,7 @@ package net
import (
"syscall"
"golang.org/x/net/route"
"golang_org/x/net/route"
)
// If the ifindex is zero, interfaceTable returns mappings of all
@@ -9,7 +9,7 @@ package net
import (
"syscall"
"golang.org/x/net/route"
"golang_org/x/net/route"
)
func interfaceMessages(ifindex int) ([]route.Message, error) {
@@ -7,7 +7,7 @@ package net
import (
"syscall"
"golang.org/x/net/route"
"golang_org/x/net/route"
)
func interfaceMessages(ifindex int) ([]route.Message, error) {
@@ -7,7 +7,7 @@ package net
import (
"syscall"
"golang.org/x/net/route"
"golang_org/x/net/route"
)
func interfaceMessages(ifindex int) ([]route.Message, error) {
@@ -8,6 +8,11 @@
// AUTH RFC 2554
// STARTTLS RFC 3207
// Additional extensions may be handled by clients.
//
// The smtp package is frozen and is not accepting new features.
// Some external packages provide more functionality. See:
//
// https://godoc.org/?q=smtp
package smtp
import (
@@ -2261,6 +2261,8 @@ func TestImportPath(t *testing.T) {
{TypeOf((*int64)(nil)), ""},
{TypeOf(map[string]int{}), ""},
{TypeOf((*error)(nil)).Elem(), ""},
{TypeOf((*Point)(nil)), ""},
{TypeOf((*Point)(nil)).Elem(), "reflect_test"},
}
for _, test := range tests {
if path := test.t.PkgPath(); path != test.path {
@@ -876,6 +876,9 @@ func (t *rtype) MethodByName(name string) (m Method, ok bool) {
}
func (t *rtype) PkgPath() string {
if t.tflag&tflagNamed == 0 {
return ""
}
ut := t.uncommon()
if ut == nil {
return ""
@@ -44,7 +44,7 @@
// call arbitrary Go code directly and must be careful not to allocate
// memory or use up m->g0's stack.
//
// _cgoexp_GoF calls runtime.cgocallback(p.GoF, frame, framesize).
// _cgoexp_GoF calls runtime.cgocallback(p.GoF, frame, framesize, ctxt).
// (The reason for having _cgoexp_GoF instead of writing a crosscall3
// to make this call directly is that _cgoexp_GoF, because it is compiled
// with 6c instead of gcc, can refer to dotted names like
@@ -80,6 +80,7 @@
package runtime
import (
"runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
)
@@ -176,7 +177,7 @@ func cgocallbackg(ctxt uintptr) {
func cgocallbackg1(ctxt uintptr) {
gp := getg()
if gp.m.needextram {
if gp.m.needextram || atomic.Load(&extraMWaiters) > 0 {
gp.m.needextram = false
systemstack(newextram)
}
@@ -32,13 +32,13 @@ TEXT runtime∕internal∕atomic·Loaduint(SB), NOSPLIT, $0-8
TEXT runtime∕internal∕atomic·Storeuintptr(SB), NOSPLIT, $0-8
JMP runtime∕internal∕atomic·Store(SB)
TEXT runtime∕internal∕atomic·Xadduintptr(SB), NOSPLIT, $0-8
TEXT runtime∕internal∕atomic·Xadduintptr(SB), NOSPLIT, $0-12
JMP runtime∕internal∕atomic·Xadd(SB)
TEXT runtime∕internal∕atomic·Loadint64(SB), NOSPLIT, $0-16
TEXT runtime∕internal∕atomic·Loadint64(SB), NOSPLIT, $0-12
JMP runtime∕internal∕atomic·Load64(SB)
TEXT runtime∕internal∕atomic·Xaddint64(SB), NOSPLIT, $0-16
TEXT runtime∕internal∕atomic·Xaddint64(SB), NOSPLIT, $0-20
JMP runtime∕internal∕atomic·Xadd64(SB)
@@ -52,7 +52,7 @@ TEXT runtime∕internal∕atomic·Storeuintptr(SB), NOSPLIT, $0-16
TEXT runtime∕internal∕atomic·Loadint64(SB), NOSPLIT, $0-16
JMP runtime∕internal∕atomic·Load64(SB)
TEXT runtime∕internal∕atomic·Xaddint64(SB), NOSPLIT, $0-16
TEXT runtime∕internal∕atomic·Xaddint64(SB), NOSPLIT, $0-24
JMP runtime∕internal∕atomic·Xadd64(SB)
// bool Casp(void **val, void *old, void *new)
@@ -29,10 +29,10 @@ TEXT runtime∕internal∕atomic·Loaduintptr(SB), NOSPLIT, $0-12
TEXT runtime∕internal∕atomic·Loaduint(SB), NOSPLIT, $0-12
JMP runtime∕internal∕atomic·Load(SB)
TEXT runtime∕internal∕atomic·Storeuintptr(SB), NOSPLIT, $0-12
TEXT runtime∕internal∕atomic·Storeuintptr(SB), NOSPLIT, $0-8
JMP runtime∕internal∕atomic·Store(SB)
TEXT runtime∕internal∕atomic·Loadint64(SB), NOSPLIT, $0-24
TEXT runtime∕internal∕atomic·Loadint64(SB), NOSPLIT, $0-16
JMP runtime∕internal∕atomic·Load64(SB)
TEXT runtime∕internal∕atomic·Xaddint64(SB), NOSPLIT, $0-24
@@ -61,11 +61,11 @@ TEXT runtime∕internal∕atomic·Loaduint(SB),NOSPLIT,$0-8
TEXT runtime∕internal∕atomic·Storeuintptr(SB),NOSPLIT,$0-8
B runtime∕internal∕atomic·Store(SB)
TEXT runtime∕internal∕atomic·Xadduintptr(SB),NOSPLIT,$0-8
TEXT runtime∕internal∕atomic·Xadduintptr(SB),NOSPLIT,$0-12
B runtime∕internal∕atomic·Xadd(SB)
TEXT runtime∕internal∕atomic·Loadint64(SB),NOSPLIT,$0-16
TEXT runtime∕internal∕atomic·Loadint64(SB),NOSPLIT,$0-12
B runtime∕internal∕atomic·Load64(SB)
TEXT runtime∕internal∕atomic·Xaddint64(SB),NOSPLIT,$0-16
TEXT runtime∕internal∕atomic·Xaddint64(SB),NOSPLIT,$0-20
B runtime∕internal∕atomic·Xadd64(SB)
@@ -38,13 +38,13 @@ TEXT runtime∕internal∕atomic·Loaduint(SB), NOSPLIT, $-8-16
TEXT runtime∕internal∕atomic·Storeuintptr(SB), NOSPLIT, $0-16
B runtime∕internal∕atomic·Store64(SB)
TEXT runtime∕internal∕atomic·Xadduintptr(SB), NOSPLIT, $0-16
TEXT runtime∕internal∕atomic·Xadduintptr(SB), NOSPLIT, $0-24
B runtime∕internal∕atomic·Xadd64(SB)
TEXT runtime∕internal∕atomic·Loadint64(SB), NOSPLIT, $0-16
B runtime∕internal∕atomic·Load64(SB)
TEXT runtime∕internal∕atomic·Xaddint64(SB), NOSPLIT, $0-16
TEXT runtime∕internal∕atomic·Xaddint64(SB), NOSPLIT, $0-24
B runtime∕internal∕atomic·Xadd64(SB)
// bool Casp(void **val, void *old, void *new)
@@ -77,7 +77,7 @@ TEXT runtime∕internal∕atomic·Xadduintptr(SB), NOSPLIT, $0-24
TEXT runtime∕internal∕atomic·Loadint64(SB), NOSPLIT, $0-16
BR runtime∕internal∕atomic·Load64(SB)
TEXT runtime∕internal∕atomic·Xaddint64(SB), NOSPLIT, $0-16
TEXT runtime∕internal∕atomic·Xaddint64(SB), NOSPLIT, $0-24
BR runtime∕internal∕atomic·Xadd64(SB)
// bool casp(void **val, void *old, void *new)
@@ -145,7 +145,7 @@ func writebarrierptr(dst *uintptr, src uintptr) {
if !writeBarrier.needed {
return
}
if src != 0 && src < sys.PhysPageSize {
if src != 0 && src < minPhysPageSize {
systemstack(func() {
print("runtime: writebarrierptr *", dst, " = ", hex(src), "\n")
throw("bad pointer in write barrier")
@@ -164,7 +164,7 @@ func writebarrierptr_nostore(dst *uintptr, src uintptr) {
if !writeBarrier.needed {
return
}
if src != 0 && src < sys.PhysPageSize {
if src != 0 && src < minPhysPageSize {
systemstack(func() { throw("bad pointer in write barrier") })
}
writebarrierptr_nostore1(dst, src)
@@ -10,8 +10,8 @@ import (
)
const (
_PAGE_SIZE = sys.PhysPageSize
_EACCES = 13
_EACCES = 13
_EINVAL = 22
)
// NOTE: vec must be just 1 byte long here.
@@ -22,13 +22,19 @@ const (
var addrspace_vec [1]byte
func addrspace_free(v unsafe.Pointer, n uintptr) bool {
var chunk uintptr
for off := uintptr(0); off < n; off += chunk {
chunk = _PAGE_SIZE * uintptr(len(addrspace_vec))
if chunk > (n - off) {
chunk = n - off
// Step by the minimum possible physical page size. This is
// safe even if we have the wrong physical page size; mincore
// will just return EINVAL for unaligned addresses.
for off := uintptr(0); off < n; off += minPhysPageSize {
// Use a length of 1 byte, which the kernel will round
// up to one physical page regardless of the true
// physical page size.
errval := mincore(unsafe.Pointer(uintptr(v)+off), 1, &addrspace_vec[0])
if errval == -_EINVAL {
// Address is not a multiple of the physical
// page size. That's fine.
continue
}
errval := mincore(unsafe.Pointer(uintptr(v)+off), chunk, &addrspace_vec[0])
// ENOMEM means unmapped, which is what we want.
// Anything else we assume means the pages are mapped.
if errval != -_ENOMEM {
@@ -450,7 +450,7 @@ func findObject(v unsafe.Pointer) (s *mspan, x unsafe.Pointer, n uintptr) {
// type File struct { d int }
// d, err := syscall.Open("/file/path", syscall.O_RDONLY, 0)
// // ... do something if err != nil ...
// p := &FILE{d}
// p := &File{d}
// runtime.SetFinalizer(p, func(p *File) { syscall.Close(p.d) })
// var buf [10]byte
// n, err := syscall.Read(p.d, buf[:])
@@ -14,6 +14,11 @@ import (
"unsafe"
)
// minPhysPageSize is a lower-bound on the physical page size. The
// true physical page size may be larger than this. In contrast,
// sys.PhysPageSize is an upper-bound on the physical page size.
const minPhysPageSize = 4096
// Main malloc heap.
// The heap itself is the "free[]" and "large" arrays,
// but all the other global data is here too.
@@ -246,7 +246,7 @@ func memlimit() uintptr {
//go:norace
//go:nowritebarrierrec
func badsignal(sig uintptr) {
cgocallback(unsafe.Pointer(funcPC(badsignalgo)), noescape(unsafe.Pointer(&sig)), unsafe.Sizeof(sig))
cgocallback(unsafe.Pointer(funcPC(badsignalgo)), noescape(unsafe.Pointer(&sig)), unsafe.Sizeof(sig), 0)
}
func badsignalgo(sig uintptr) {
@@ -353,12 +353,9 @@ func printStackRecord(w io.Writer, stk []uintptr, allFrames bool) {
if name == "" {
show = true
fmt.Fprintf(w, "#\t%#x\n", frame.PC)
} else {
} else if name != "runtime.goexit" && (show || !strings.HasPrefix(name, "runtime.")) {
// Hide runtime.goexit and any runtime functions at the beginning.
// This is useful mainly for allocation traces.
if name == "runtime.goexit" || !show && strings.HasPrefix(name, "runtime.") {
continue
}
show = true
fmt.Fprintf(w, "#\t%#x\t%s+%#x\t%s:%d\n", frame.PC, name, frame.PC-frame.Entry, frame.File, frame.Line)
}
@@ -497,6 +497,10 @@ func TestBlockProfile(t *testing.T) {
t.Fatalf("Bad profile header:\n%v", prof)
}
if strings.HasSuffix(prof, "#\t0x0\n\n") {
t.Errorf("Useless 0 suffix:\n%v", prof)
}
for _, test := range tests {
if !regexp.MustCompile(strings.Replace(test.re, "\t", "\t+", -1)).MatchString(prof) {
t.Fatalf("Bad %v entry, expect:\n%v\ngot:\n%v", test.name, test.re, prof)
@@ -1389,10 +1389,27 @@ func needm(x byte) {
var earlycgocallback = []byte("fatal error: cgo callback before cgo call\n")
// newextram allocates an m and puts it on the extra list.
// newextram allocates m's and puts them on the extra list.
// It is called with a working local m, so that it can do things
// like call schedlock and allocate.
func newextram() {
c := atomic.Xchg(&extraMWaiters, 0)
if c > 0 {
for i := uint32(0); i < c; i++ {
oneNewExtraM()
}
} else {
// Make sure there is at least one extra M.
mp := lockextra(true)
unlockextra(mp)
if mp == nil {
oneNewExtraM()
}
}
}
// oneNewExtraM allocates an m and puts it on the extra list.
func oneNewExtraM() {
// Create extra goroutine locked to extra m.
// The goroutine is the context in which the cgo callback will run.
// The sched.pc will never be returned to, but setting it to
@@ -1485,6 +1502,7 @@ func getm() uintptr {
}
var extram uintptr
var extraMWaiters uint32
// lockextra locks the extra list and returns the list head.
// The caller must unlock the list by storing a new list head
@@ -1495,6 +1513,7 @@ var extram uintptr
func lockextra(nilokay bool) *m {
const locked = 1
incr := false
for {
old := atomic.Loaduintptr(&extram)
if old == locked {
@@ -1503,6 +1522,13 @@ func lockextra(nilokay bool) *m {
continue
}
if old == 0 && !nilokay {
if !incr {
// Add 1 to the number of threads
// waiting for an M.
// This is cleared by newextram.
atomic.Xadd(&extraMWaiters, 1)
incr = true
}
usleep(1)
continue
}
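The handoff between lockextra and newextram generalizes to a common pattern; below is a sketch re-creating it with sync/atomic outside the runtime (all names are stand-ins, not runtime code): waiters record themselves while they spin, and the producer swaps the counter to zero and creates exactly that many items, otherwise it just guarantees one exists.
package main
import (
	"fmt"
	"sync/atomic"
)
var waiters uint32
func produceOne() { fmt.Println("allocated one extra item") }
func replenish(listEmpty bool) {
	// Swap clears the counter and tells us how many waits were recorded.
	if c := atomic.SwapUint32(&waiters, 0); c > 0 {
		for i := uint32(0); i < c; i++ {
			produceOne()
		}
	} else if listEmpty {
		produceOne() // make sure there is at least one
	}
}
func main() {
	atomic.AddUint32(&waiters, 1) // a spinning consumer records its wait
	atomic.AddUint32(&waiters, 1) // so does another
	replenish(false)              // produces two
	replenish(true)               // none recorded: produce the minimum one
}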
@@ -4,4 +4,4 @@ the LLVM project (http://llvm.org/git/compiler-rt.git).
To update the .syso files use golang.org/x/build/cmd/racebuild.
Current runtime is built on rev 9d79ea3416bfbe3acac50e47802ee9621bf53254.
Current runtime is built on rev e35e7c00b5c7e7ee5e24d537b80cb0d34cebb038.
@@ -221,3 +221,21 @@ func BenchmarkSyncLeak(b *testing.B) {
}
wg.Wait()
}
func BenchmarkStackLeak(b *testing.B) {
done := make(chan bool, 1)
for i := 0; i < b.N; i++ {
go func() {
growStack(rand.Intn(100))
done <- true
}()
<-done
}
}
func growStack(i int) {
if i == 0 {
return
}
growStack(i - 1)
}
@@ -338,7 +338,7 @@ func sigNotOnStack(sig uint32) {
//go:norace
//go:nowritebarrierrec
func badsignal(sig uintptr, c *sigctxt) {
cgocallback(unsafe.Pointer(funcPC(badsignalgo)), noescape(unsafe.Pointer(&sig)), unsafe.Sizeof(sig)+unsafe.Sizeof(c))
cgocallback(unsafe.Pointer(funcPC(badsignalgo)), noescape(unsafe.Pointer(&sig)), unsafe.Sizeof(sig)+unsafe.Sizeof(c), 0)
}
func badsignalgo(sig uintptr, c *sigctxt) {
@@ -70,12 +70,12 @@ func sigtrampgo(fn uintptr, infostyle, sig uint32, info *siginfo, ctx unsafe.Poi
sigaltstack(nil, &st)
if st.ss_flags&_SS_DISABLE != 0 {
setg(nil)
cgocallback(unsafe.Pointer(funcPC(noSignalStack)), noescape(unsafe.Pointer(&sig)), unsafe.Sizeof(sig))
cgocallback(unsafe.Pointer(funcPC(noSignalStack)), noescape(unsafe.Pointer(&sig)), unsafe.Sizeof(sig), 0)
}
stsp := uintptr(unsafe.Pointer(st.ss_sp))
if sp < stsp || sp >= stsp+st.ss_size {
setg(nil)
cgocallback(unsafe.Pointer(funcPC(sigNotOnStack)), noescape(unsafe.Pointer(&sig)), unsafe.Sizeof(sig))
cgocallback(unsafe.Pointer(funcPC(sigNotOnStack)), noescape(unsafe.Pointer(&sig)), unsafe.Sizeof(sig), 0)
}
g.m.gsignal.stack.lo = stsp
g.m.gsignal.stack.hi = stsp + st.ss_size
@@ -66,12 +66,12 @@ func sigtrampgo(sig uint32, info *siginfo, ctx unsafe.Pointer) {
sigaltstack(nil, &st)
if st.ss_flags&_SS_DISABLE != 0 {
setg(nil)
cgocallback(unsafe.Pointer(funcPC(noSignalStack)), noescape(unsafe.Pointer(&sig)), unsafe.Sizeof(sig))
cgocallback(unsafe.Pointer(funcPC(noSignalStack)), noescape(unsafe.Pointer(&sig)), unsafe.Sizeof(sig), 0)
}
stsp := uintptr(unsafe.Pointer(st.ss_sp))
if sp < stsp || sp >= stsp+st.ss_size {
setg(nil)
cgocallback(unsafe.Pointer(funcPC(sigNotOnStack)), noescape(unsafe.Pointer(&sig)), unsafe.Sizeof(sig))
cgocallback(unsafe.Pointer(funcPC(sigNotOnStack)), noescape(unsafe.Pointer(&sig)), unsafe.Sizeof(sig), 0)
}
g.m.gsignal.stack.lo = stsp
g.m.gsignal.stack.hi = stsp + st.ss_size
@@ -66,12 +66,12 @@ func sigtrampgo(sig uint32, info *siginfo, ctx unsafe.Pointer) {
sigaltstack(nil, &st)
if st.ss_flags&_SS_DISABLE != 0 {
setg(nil)
cgocallback(unsafe.Pointer(funcPC(noSignalStack)), noescape(unsafe.Pointer(&sig)), unsafe.Sizeof(sig))
cgocallback(unsafe.Pointer(funcPC(noSignalStack)), noescape(unsafe.Pointer(&sig)), unsafe.Sizeof(sig), 0)
}
stsp := uintptr(unsafe.Pointer(st.ss_sp))
if sp < stsp || sp >= stsp+st.ss_size {
setg(nil)
cgocallback(unsafe.Pointer(funcPC(sigNotOnStack)), noescape(unsafe.Pointer(&sig)), unsafe.Sizeof(sig))
cgocallback(unsafe.Pointer(funcPC(sigNotOnStack)), noescape(unsafe.Pointer(&sig)), unsafe.Sizeof(sig), 0)
}
g.m.gsignal.stack.lo = stsp
g.m.gsignal.stack.hi = stsp + st.ss_size
@@ -37,12 +37,12 @@ func sigtrampgo(sig uint32, info *siginfo, ctx unsafe.Pointer) {
sigaltstack(nil, &st)
if st.ss_flags&_SS_DISABLE != 0 {
setg(nil)
cgocallback(unsafe.Pointer(funcPC(noSignalStack)), noescape(unsafe.Pointer(&sig)), unsafe.Sizeof(sig))
cgocallback(unsafe.Pointer(funcPC(noSignalStack)), noescape(unsafe.Pointer(&sig)), unsafe.Sizeof(sig), 0)
}
stsp := uintptr(unsafe.Pointer(st.ss_sp))
if sp < stsp || sp >= stsp+st.ss_size {
setg(nil)
cgocallback(unsafe.Pointer(funcPC(sigNotOnStack)), noescape(unsafe.Pointer(&sig)), unsafe.Sizeof(sig))
cgocallback(unsafe.Pointer(funcPC(sigNotOnStack)), noescape(unsafe.Pointer(&sig)), unsafe.Sizeof(sig), 0)
}
g.m.gsignal.stack.lo = stsp
g.m.gsignal.stack.hi = stsp + st.ss_size
@@ -98,7 +98,7 @@ func noescape(p unsafe.Pointer) unsafe.Pointer {
return unsafe.Pointer(x ^ 0)
}
func cgocallback(fn, frame unsafe.Pointer, framesize uintptr)
func cgocallback(fn, frame unsafe.Pointer, framesize, ctxt uintptr)
func gogo(buf *gobuf)
func gosave(buf *gobuf)
func mincore(addr unsafe.Pointer, n uintptr, dst *byte) int32
@@ -143,7 +143,7 @@ func goexit(neverCallThisFunction)
// cgocallback_gofunc is not called from go, only from cgocallback,
// so the arguments will be found via cgocallback's pointer-declared arguments.
// See the assembly implementations for more details.
func cgocallback_gofunc(fv uintptr, frame uintptr, framesize uintptr)
func cgocallback_gofunc(fv uintptr, frame uintptr, framesize, ctxt uintptr)
// publicationBarrier performs a store/store barrier (a "publication"
// or "export" barrier). Some form of synchronization is required
@@ -244,6 +244,7 @@ TEXT runtime·sigtramp(SB),NOSPLIT,$32
MOVQ R8, 24(SP) // ctx
MOVQ $runtime·sigtrampgo(SB), AX
CALL AX
INT $3 // not reached (see issue 16453)
TEXT runtime·mmap(SB),NOSPLIT,$0
MOVQ addr+0(FP), DI // arg 1 addr
@@ -238,6 +238,7 @@ func TestGroupCleanupUserNamespace(t *testing.T) {
"uid=0(root) gid=0(root) groups=0(root)",
"uid=0(root) gid=0(root) groups=0(root),65534(nobody)",
"uid=0(root) gid=0(root) groups=0(root),65534(nogroup)",
"uid=0(root) gid=0(root) groups=0(root),65534",
}
for _, e := range expected {
if strOut == e {