diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 1018652d0..fe36b76d1 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -7,7 +7,7 @@ jobs:
     strategy:
       matrix:
         os: ["windows-latest", "ubuntu-latest", "macOS-latest"]
-        go: ["1.12.x", "1.13.x"]
+        go: ["1.13.x", "1.14.x"]
     runs-on: ${{ matrix.os }}
     steps:
     - uses: actions/checkout@v1
diff --git a/cmd/staticcheck/staticcheck.go b/cmd/staticcheck/staticcheck.go
index 4f504dc39..87bed55c0 100644
--- a/cmd/staticcheck/staticcheck.go
+++ b/cmd/staticcheck/staticcheck.go
@@ -6,7 +6,6 @@ import (
 	"os"
 
 	"golang.org/x/tools/go/analysis"
-	"honnef.co/go/tools/lint"
 	"honnef.co/go/tools/lint/lintutil"
 	"honnef.co/go/tools/simple"
 	"honnef.co/go/tools/staticcheck"
@@ -16,7 +15,6 @@ import (
 
 func main() {
 	fs := lintutil.FlagSet("staticcheck")
-	wholeProgram := fs.Bool("unused.whole-program", false, "Run unused in whole program mode")
 	debug := fs.String("debug.unused-graph", "", "Write unused's object graph to `file`")
 	fs.Parse(os.Args[1:])
 
@@ -31,14 +29,14 @@ func main() {
 		cs = append(cs, v)
 	}
 
-	u := unused.NewChecker(*wholeProgram)
+	cs = append(cs, unused.Analyzer)
 	if *debug != "" {
 		f, err := os.OpenFile(*debug, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
 		if err != nil {
 			log.Fatal(err)
 		}
-		u.Debug = f
+		unused.Debug = f
 	}
-	cums := []lint.CumulativeChecker{u}
-	lintutil.ProcessFlagSet(cs, cums, fs)
+
+	lintutil.ProcessFlagSet(cs, fs)
 }
diff --git a/code/code.go b/code/code.go
index 6f4df8b9a..5ef7aef4d 100644
--- a/code/code.go
+++ b/code/code.go
@@ -2,6 +2,7 @@
 package code
 
 import (
+	"bytes"
 	"flag"
 	"fmt"
 	"go/ast"
@@ -9,6 +10,7 @@ import (
 	"go/token"
 	"go/types"
 	"strings"
+	"sync"
 
 	"golang.org/x/tools/go/analysis"
 	"golang.org/x/tools/go/analysis/passes/inspect"
@@ -17,9 +19,55 @@ import (
 	"honnef.co/go/tools/facts"
 	"honnef.co/go/tools/go/types/typeutil"
 	"honnef.co/go/tools/ir"
-	"honnef.co/go/tools/lint"
 )
 
+var bufferPool = &sync.Pool{
+	New: func() interface{} {
+		buf := bytes.NewBuffer(nil)
+		buf.Grow(64)
+		return buf
+	},
+}
+
+func FuncName(f *types.Func) string {
+	buf := bufferPool.Get().(*bytes.Buffer)
+	buf.Reset()
+	if f.Type() != nil {
+		sig := f.Type().(*types.Signature)
+		if recv := sig.Recv(); recv != nil {
+			buf.WriteByte('(')
+			if _, ok := recv.Type().(*types.Interface); ok {
+				// gcimporter creates abstract methods of
+				// named interfaces using the interface type
+				// (not the named type) as the receiver.
+				// Don't print it in full.
+				buf.WriteString("interface")
+			} else {
+				types.WriteType(buf, recv.Type(), nil)
+			}
+			buf.WriteByte(')')
+			buf.WriteByte('.')
+		} else if f.Pkg() != nil {
+			writePackage(buf, f.Pkg())
+		}
+	}
+	buf.WriteString(f.Name())
+	s := buf.String()
+	bufferPool.Put(buf)
+	return s
+}
+
+func writePackage(buf *bytes.Buffer, pkg *types.Package) {
+	if pkg == nil {
+		return
+	}
+	s := pkg.Path()
+	if s != "" {
+		buf.WriteString(s)
+		buf.WriteByte('.')
+	}
+}
+
 type Positioner interface {
 	Pos() token.Pos
 }
@@ -34,7 +82,7 @@ func CallName(call *ir.CallCommon) string {
 		if !ok {
 			return ""
 		}
-		return lint.FuncName(fn)
+		return FuncName(fn)
 	case *ir.Builtin:
 		return v.Name()
 	}
@@ -244,12 +292,12 @@ func CallNameAST(pass *analysis.Pass, call *ast.CallExpr) string {
 		if !ok {
 			return ""
 		}
-		return lint.FuncName(fn)
+		return FuncName(fn)
 	case *ast.Ident:
 		obj := pass.TypesInfo.ObjectOf(fun)
 		switch obj := obj.(type) {
 		case *types.Func:
-			return lint.FuncName(obj)
+			return FuncName(obj)
 		case *types.Builtin:
 			return obj.Name()
 		default:
@@ -472,7 +520,11 @@ func MayHaveSideEffects(pass *analysis.Pass, expr ast.Expr, purity facts.PurityR
 }
 
 func IsGoVersion(pass *analysis.Pass, minor int) bool {
-	version := pass.Analyzer.Flags.Lookup("go").Value.(flag.Getter).Get().(int)
+	f, ok := pass.Analyzer.Flags.Lookup("go").Value.(flag.Getter)
+	if !ok {
+		panic("requested Go version, but analyzer has no version flag")
+	}
+	version := f.Get().(int)
 	return version >= minor
 }
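FuncName moves from lint to code unchanged. As a quick illustration of what it produces (a minimal sketch, not part of the change; the package example.com/demo is invented):

package main

import (
	"fmt"
	"go/token"
	"go/types"

	"honnef.co/go/tools/code"
)

func main() {
	// A package-level function renders as "importpath.Name".
	pkg := types.NewPackage("example.com/demo", "demo")
	sig := types.NewSignature(nil, nil, nil, false)
	fn := types.NewFunc(token.NoPos, pkg, "Run", sig)
	fmt.Println(code.FuncName(fn)) // example.com/demo.Run
}

Methods render with a parenthesized receiver instead, e.g. "(*example.com/demo.T).Run".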
diff --git a/facts/directives.go b/facts/directives.go
new file mode 100644
index 000000000..04cee52aa
--- /dev/null
+++ b/facts/directives.go
@@ -0,0 +1,107 @@
+package facts
+
+import (
+	"go/ast"
+	"go/token"
+	"path/filepath"
+	"reflect"
+	"strings"
+
+	"golang.org/x/tools/go/analysis"
+)
+
+// A directive is a comment of the form '//lint:<command>
+// [arguments...]'. It represents instructions to the static analysis
+// tool.
+type Directive struct {
+	Command   string
+	Arguments []string
+	Directive *ast.Comment
+	Node      ast.Node
+}
+
+type SerializedDirective struct {
+	Command   string
+	Arguments []string
+	// The position of the comment
+	DirectivePosition token.Position
+	// The position of the node that the comment is attached to
+	NodePosition token.Position
+}
+
+func parseDirective(s string) (cmd string, args []string) {
+	if !strings.HasPrefix(s, "//lint:") {
+		return "", nil
+	}
+	s = strings.TrimPrefix(s, "//lint:")
+	fields := strings.Split(s, " ")
+	return fields[0], fields[1:]
+}
+
+func directives(pass *analysis.Pass) (interface{}, error) {
+	return ParseDirectives(pass.Files, pass.Fset), nil
+}
+
+func ParseDirectives(files []*ast.File, fset *token.FileSet) []Directive {
+	var dirs []Directive
+	for _, f := range files {
+		// OPT(dh): in our old code, we skip all the commentmap work if we
+		// couldn't find any directives, benchmark if that's actually
+		// worth doing
+		cm := ast.NewCommentMap(fset, f, f.Comments)
+		for node, cgs := range cm {
+			for _, cg := range cgs {
+				for _, c := range cg.List {
+					if !strings.HasPrefix(c.Text, "//lint:") {
+						continue
+					}
+					cmd, args := parseDirective(c.Text)
+					d := Directive{
+						Command:   cmd,
+						Arguments: args,
+						Directive: c,
+						Node:      node,
+					}
+					dirs = append(dirs, d)
+				}
+			}
+		}
+	}
+	return dirs
+}
+
+// duplicated from report.DisplayPosition to break import cycle
+func displayPosition(fset *token.FileSet, p token.Pos) token.Position {
+	if p == token.NoPos {
+		return token.Position{}
+	}
+
+	// Only use the adjusted position if it points to another Go file.
+	// This means we'll point to the original file for cgo files, but
+	// we won't point to a YACC grammar file.
+	pos := fset.PositionFor(p, false)
+	adjPos := fset.PositionFor(p, true)
+
+	if filepath.Ext(adjPos.Filename) == ".go" {
+		return adjPos
+	}
+
+	return pos
+}
+
+var Directives = &analysis.Analyzer{
+	Name:             "directives",
+	Doc:              "extracts linter directives",
+	Run:              directives,
+	RunDespiteErrors: true,
+	ResultType:       reflect.TypeOf([]Directive{}),
+}
+
+func SerializeDirective(dir Directive, fset *token.FileSet) SerializedDirective {
+	return SerializedDirective{
+		Command:           dir.Command,
+		Arguments:         dir.Arguments,
+		DirectivePosition: displayPosition(fset, dir.Directive.Pos()),
+		NodePosition:      displayPosition(fset, dir.Node.Pos()),
+	}
+}
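To make the directive extraction concrete, here is a small sketch driving the new exported ParseDirectives directly (the file name and directive text are invented):

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"

	"honnef.co/go/tools/facts"
)

const src = `package demo

//lint:ignore SA4006 this value is used by cgo
var x = 1
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "demo.go", src, parser.ParseComments)
	if err != nil {
		panic(err)
	}
	// Each directive records its command, its arguments, and the node
	// the comment is attached to.
	for _, dir := range facts.ParseDirectives([]*ast.File{f}, fset) {
		fmt.Println(dir.Command, dir.Arguments) // ignore [SA4006 this value is used by cgo]
	}
}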
diff --git a/go/types/typeutil/identical.go b/go/types/typeutil/identical.go
index c0ca441c3..0cd82e8c0 100644
--- a/go/types/typeutil/identical.go
+++ b/go/types/typeutil/identical.go
@@ -4,23 +4,80 @@ import (
 	"go/types"
 )
 
-// Identical reports whether x and y are identical types.
 // Unlike types.Identical, receivers of Signature types are not ignored.
 // Unlike types.Identical, interfaces are compared via pointer equality (except for the empty interface, which gets deduplicated).
 // Unlike types.Identical, structs are compared via pointer equality.
-func Identical(x, y types.Type) (ret bool) {
-	if !types.Identical(x, y) {
-		return false
+func identical0(x, y types.Type) bool {
+	if x == y {
+		return true
 	}
+
 	switch x := x.(type) {
+	case *types.Basic:
+		// Basic types are singletons except for the rune and byte
+		// aliases, thus we cannot solely rely on the x == y check
+		// above. See also comment in TypeName.IsAlias.
+		if y, ok := y.(*types.Basic); ok {
+			return x.Kind() == y.Kind()
+		}
+
+	case *types.Array:
+		// Two array types are identical if they have identical element types
+		// and the same array length.
+		if y, ok := y.(*types.Array); ok {
+			// If one or both array lengths are unknown (< 0) due to some error,
+			// assume they are the same to avoid spurious follow-on errors.
+			return (x.Len() < 0 || y.Len() < 0 || x.Len() == y.Len()) && identical0(x.Elem(), y.Elem())
+		}
+
+	case *types.Slice:
+		// Two slice types are identical if they have identical element types.
+		if y, ok := y.(*types.Slice); ok {
+			return identical0(x.Elem(), y.Elem())
+		}
+
 	case *types.Struct:
-		y, ok := y.(*types.Struct)
-		if !ok {
-			// should be impossible
-			return true
+		if y, ok := y.(*types.Struct); ok {
+			return x == y
+		}
+
+	case *types.Pointer:
+		// Two pointer types are identical if they have identical base types.
+		if y, ok := y.(*types.Pointer); ok {
+			return identical0(x.Elem(), y.Elem())
+		}
+
+	case *types.Tuple:
+		// Two tuple types are identical if they have the same number of elements
+		// and corresponding elements have identical types.
+		if y, ok := y.(*types.Tuple); ok {
+			if x.Len() == y.Len() {
+				if x != nil {
+					for i := 0; i < x.Len(); i++ {
+						v := x.At(i)
+						w := y.At(i)
+						if !identical0(v.Type(), w.Type()) {
+							return false
+						}
+					}
+				}
+				return true
+			}
+		}
+
+	case *types.Signature:
+		// Two function types are identical if they have the same number of parameters
+		// and result values, corresponding parameter and result types are identical,
+		// and either both functions are variadic or neither is. Parameter and result
+		// names are not required to match.
+		if y, ok := y.(*types.Signature); ok {
+			return x.Variadic() == y.Variadic() &&
+				identical0(x.Params(), y.Params()) &&
+				identical0(x.Results(), y.Results()) &&
+				(x.Recv() != nil && y.Recv() != nil && identical0(x.Recv().Type(), y.Recv().Type()) || x.Recv() == nil && y.Recv() == nil)
 		}
-		return x == y
+
 	case *types.Interface:
 		// The issue with interfaces, typeutil.Map and types.Identical
 		//
@@ -43,33 +100,50 @@ func Identical(x, y types.Type) (ret bool) {
 		// pointers. This will obviously miss identical interfaces,
 		// but this only has a runtime cost, it doesn't affect
 		// correctness.
-		y, ok := y.(*types.Interface)
-		if !ok {
-			// should be impossible
-			return true
-		}
-		if x.NumEmbeddeds() == 0 &&
-			y.NumEmbeddeds() == 0 &&
-			x.NumMethods() == 0 &&
-			y.NumMethods() == 0 {
-			// all truly empty interfaces are the same
-			return true
+		if y, ok := y.(*types.Interface); ok {
+			if x.NumEmbeddeds() == 0 &&
+				y.NumEmbeddeds() == 0 &&
+				x.NumMethods() == 0 &&
+				y.NumMethods() == 0 {
+				// all truly empty interfaces are the same
+				return true
+			}
+			return x == y
 		}
-		return x == y
-	case *types.Signature:
-		y, ok := y.(*types.Signature)
-		if !ok {
-			// should be impossible
-			return true
+
+	case *types.Map:
+		// Two map types are identical if they have identical key and value types.
+		if y, ok := y.(*types.Map); ok {
+			return identical0(x.Key(), y.Key()) && identical0(x.Elem(), y.Elem())
 		}
-		if x.Recv() == y.Recv() {
-			return true
+
+	case *types.Chan:
+		// Two channel types are identical if they have identical value types
+		// and the same direction.
+		if y, ok := y.(*types.Chan); ok {
+			return x.Dir() == y.Dir() && identical0(x.Elem(), y.Elem())
 		}
-		if x.Recv() == nil || y.Recv() == nil {
-			return false
+
+	case *types.Named:
+		// Two named types are identical if their type names originate
+		// in the same type declaration.
+		if y, ok := y.(*types.Named); ok {
+			return x.Obj() == y.Obj()
 		}
-		return Identical(x.Recv().Type(), y.Recv().Type())
+
+	case nil:
+
 	default:
-		return true
+		panic("unreachable")
 	}
+
+	return false
+}
+
+// Identical reports whether x and y are identical types.
+// Unlike types.Identical, receivers of Signature types are not ignored.
+// Unlike types.Identical, interfaces are compared via pointer equality (except for the empty interface, which gets deduplicated).
+// Unlike types.Identical, structs are compared via pointer equality.
+func Identical(x, y types.Type) (ret bool) {
+	return identical0(x, y)
+}
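The practical difference from types.Identical is easiest to see with structs, which the rewrite still compares by pointer identity (a minimal sketch):

package main

import (
	"fmt"
	"go/token"
	"go/types"

	"honnef.co/go/tools/go/types/typeutil"
)

func main() {
	mk := func() *types.Struct {
		f := types.NewVar(token.NoPos, nil, "X", types.Typ[types.Int])
		return types.NewStruct([]*types.Var{f}, nil)
	}
	a, b := mk(), mk()
	fmt.Println(types.Identical(a, b))    // true: structurally identical
	fmt.Println(typeutil.Identical(a, b)) // false: distinct *types.Struct values
}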
diff --git a/internal/cache/cache.go b/internal/cache/cache.go
index 6b41811cf..cfd4241f9 100644
--- a/internal/cache/cache.go
+++ b/internal/cache/cache.go
@@ -77,7 +77,7 @@ func (c *Cache) fileName(id [HashSize]byte, key string) string {
 	return filepath.Join(c.dir, fmt.Sprintf("%02x", id[0]), fmt.Sprintf("%x", id)+"-"+key)
 }
 
-var errMissing = errors.New("cache entry not found")
+var ErrMissing = errors.New("cache entry not found")
 
 const (
 	// action entry file is "v1 <hex id> <hex out> <decimal size space-padded> <unixnano space-padded>\n"
@@ -124,7 +124,7 @@ func initEnv() {
 // saved file for that output ID is still available.
 func (c *Cache) Get(id ActionID) (Entry, error) {
 	if verify {
-		return Entry{}, errMissing
+		return Entry{}, ErrMissing
 	}
 	return c.get(id)
 }
@@ -138,7 +138,7 @@ type Entry struct {
 
 // get is Get but does not respect verify mode, so that Put can use it.
 func (c *Cache) get(id ActionID) (Entry, error) {
 	missing := func() (Entry, error) {
-		return Entry{}, errMissing
+		return Entry{}, ErrMissing
 	}
 	f, err := os.Open(c.fileName(id, "a"))
 	if err != nil {
@@ -196,7 +196,7 @@ func (c *Cache) GetFile(id ActionID) (file string, entry Entry, err error) {
 	file = c.OutputFile(entry.OutputID)
 	info, err := os.Stat(file)
 	if err != nil || info.Size() != entry.Size {
-		return "", Entry{}, errMissing
+		return "", Entry{}, ErrMissing
 	}
 	return file, entry, nil
 }
@@ -211,7 +211,7 @@ func (c *Cache) GetBytes(id ActionID) ([]byte, Entry, error) {
 	}
 	data, _ := ioutil.ReadFile(c.OutputFile(entry.OutputID))
 	if sha256.Sum256(data) != entry.OutputID {
-		return nil, entry, ErrMissing
+		return nil, entry, ErrMissing
 	}
 	return data, entry, nil
 }
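Exporting ErrMissing lets callers distinguish a cache miss from an unexpected failure. A sketch of the intended use (hypothetical helper; note the package is internal, so this only compiles from within this module):

package main

import (
	"honnef.co/go/tools/internal/cache"
)

// cachedOrCompute returns the cached bytes for id, computing and
// storing them on a miss.
func cachedOrCompute(c *cache.Cache, id cache.ActionID, compute func() []byte) ([]byte, error) {
	data, _, err := c.GetBytes(id)
	if err == nil {
		return data, nil
	}
	if err != cache.ErrMissing {
		return nil, err // a real failure, not just an absent entry
	}
	data = compute()
	if err := c.PutBytes(id, data); err != nil {
		return nil, err
	}
	return data, nil
}

func main() {}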
diff --git a/internal/passes/buildir/buildir.go b/internal/passes/buildir/buildir.go
index 394697702..884884f55 100644
--- a/internal/passes/buildir/buildir.go
+++ b/internal/passes/buildir/buildir.go
@@ -25,6 +25,9 @@ type willUnwind struct{}
 func (*willExit) AFact()   {}
 func (*willUnwind) AFact() {}
 
+func (*willExit) String() string   { return "will exit" }
+func (*willUnwind) String() string { return "will unwind" }
+
 var Analyzer = &analysis.Analyzer{
 	Name: "buildir",
 	Doc:  "build IR for later passes",
diff --git a/internal/sync/sync.go b/internal/sync/sync.go
new file mode 100644
index 000000000..e78ad5072
--- /dev/null
+++ b/internal/sync/sync.go
@@ -0,0 +1,36 @@
+package sync
+
+type Semaphore struct {
+	ch chan struct{}
+}
+
+func NewSemaphore(size int) Semaphore {
+	return Semaphore{
+		ch: make(chan struct{}, size),
+	}
+}
+
+func (sem Semaphore) Acquire() {
+	sem.ch <- struct{}{}
+}
+
+func (sem Semaphore) AcquireMaybe() bool {
+	select {
+	case sem.ch <- struct{}{}:
+		return true
+	default:
+		return false
+	}
+}
+
+func (sem Semaphore) Release() {
+	<-sem.ch
+}
+
+func (sem Semaphore) Len() int {
+	return len(sem.ch)
+}
+
+func (sem Semaphore) Cap() int {
+	return cap(sem.ch)
+}
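internal/sync is a thin channel-based counting semaphore. A minimal usage sketch (aliasing the stdlib sync package to avoid the name clash; again, importable only from inside this module):

package main

import (
	"fmt"
	gosync "sync"

	"honnef.co/go/tools/internal/sync"
)

func main() {
	sem := sync.NewSemaphore(4) // at most 4 goroutines proceed at once
	var wg gosync.WaitGroup
	for i := 0; i < 16; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			sem.Acquire()
			defer sem.Release()
			fmt.Println("processing", i)
		}(i)
	}
	wg.Wait()
}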
"sync" - "sync/atomic" "unicode" + "honnef.co/go/tools/config" + "honnef.co/go/tools/runner" + "honnef.co/go/tools/unused" + "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/packages" - "honnef.co/go/tools/config" - "honnef.co/go/tools/internal/cache" ) type Documentation struct { @@ -55,11 +55,11 @@ func (doc *Documentation) String() string { return b.String() } -type Ignore interface { +type ignore interface { Match(p Problem) bool } -type LineIgnore struct { +type lineIgnore struct { File string Line int Checks []string @@ -67,13 +67,13 @@ type LineIgnore struct { Pos token.Position } -func (li *LineIgnore) Match(p Problem) bool { - pos := p.Pos +func (li *lineIgnore) Match(p Problem) bool { + pos := p.Position if pos.Filename != li.File || pos.Line != li.Line { return false } for _, c := range li.Checks { - if m, _ := filepath.Match(c, p.Check); m { + if m, _ := filepath.Match(c, p.Category); m { li.Matched = true return true } @@ -81,7 +81,7 @@ func (li *LineIgnore) Match(p Problem) bool { return false } -func (li *LineIgnore) String() string { +func (li *lineIgnore) String() string { matched := "not matched" if li.Matched { matched = "matched" @@ -89,17 +89,17 @@ func (li *LineIgnore) String() string { return fmt.Sprintf("%s:%d %s (%s)", li.File, li.Line, strings.Join(li.Checks, ", "), matched) } -type FileIgnore struct { +type fileIgnore struct { File string Checks []string } -func (fi *FileIgnore) Match(p Problem) bool { - if p.Pos.Filename != fi.File { +func (fi *fileIgnore) Match(p Problem) bool { + if p.Position.Filename != fi.File { return false } for _, c := range fi.Checks { - if m, _ := filepath.Match(c, p.Check); m { + if m, _ := filepath.Match(c, p.Category); m { return true } } @@ -116,286 +116,262 @@ const ( // Problem represents a problem in some source code. type Problem struct { - Pos token.Position - End token.Position - Message string - Check string + runner.Diagnostic Severity Severity - Related []Related -} - -type Related struct { - Pos token.Position - End token.Position - Message string } -func (p Problem) Equal(o Problem) bool { - return p.Pos == o.Pos && +func (p Problem) equal(o Problem) bool { + return p.Position == o.Position && p.End == o.End && p.Message == o.Message && - p.Check == o.Check && + p.Category == o.Category && p.Severity == o.Severity } func (p *Problem) String() string { - return fmt.Sprintf("%s (%s)", p.Message, p.Check) + return fmt.Sprintf("%s (%s)", p.Message, p.Category) } // A Linter lints Go source code. type Linter struct { - Checkers []*analysis.Analyzer - CumulativeCheckers []CumulativeChecker - GoVersion int - Config config.Config - Stats Stats - RepeatAnalyzers uint + Checkers []*analysis.Analyzer + GoVersion int + Config config.Config + Runner *runner.Runner } -type CumulativeChecker interface { - Analyzer() *analysis.Analyzer - Result() []types.Object - ProblemObject(*token.FileSet, types.Object) Problem -} +func failed(res runner.Result) []Problem { + var problems []Problem -func (l *Linter) Lint(cfg *packages.Config, patterns []string) ([]Problem, error) { - var allAnalyzers []*analysis.Analyzer - allAnalyzers = append(allAnalyzers, l.Checkers...) 
diff --git a/lint/lint.go b/lint/lint.go
index 1a70e0c29..b25680087 100644
--- a/lint/lint.go
+++ b/lint/lint.go
@@ -2,23 +2,23 @@
 package lint // import "honnef.co/go/tools/lint"
 
 import (
-	"bytes"
-	"encoding/gob"
 	"fmt"
-	"go/scanner"
 	"go/token"
-	"go/types"
+	"log"
 	"path/filepath"
+	"reflect"
+	"regexp"
 	"sort"
+	"strconv"
 	"strings"
-	"sync"
-	"sync/atomic"
 	"unicode"
 
+	"honnef.co/go/tools/config"
+	"honnef.co/go/tools/runner"
+	"honnef.co/go/tools/unused"
+
 	"golang.org/x/tools/go/analysis"
 	"golang.org/x/tools/go/packages"
-	"honnef.co/go/tools/config"
-	"honnef.co/go/tools/internal/cache"
 )
 
 type Documentation struct {
@@ -55,11 +55,11 @@ func (doc *Documentation) String() string {
 	return b.String()
 }
 
-type Ignore interface {
+type ignore interface {
 	Match(p Problem) bool
 }
 
-type LineIgnore struct {
+type lineIgnore struct {
 	File    string
 	Line    int
 	Checks  []string
@@ -67,13 +67,13 @@ type LineIgnore struct {
 	Pos     token.Position
 }
 
-func (li *LineIgnore) Match(p Problem) bool {
-	pos := p.Pos
+func (li *lineIgnore) Match(p Problem) bool {
+	pos := p.Position
 	if pos.Filename != li.File || pos.Line != li.Line {
 		return false
 	}
 	for _, c := range li.Checks {
-		if m, _ := filepath.Match(c, p.Check); m {
+		if m, _ := filepath.Match(c, p.Category); m {
 			li.Matched = true
 			return true
 		}
@@ -81,7 +81,7 @@ func (li *LineIgnore) Match(p Problem) bool {
 	return false
 }
 
-func (li *LineIgnore) String() string {
+func (li *lineIgnore) String() string {
 	matched := "not matched"
 	if li.Matched {
 		matched = "matched"
@@ -89,17 +89,17 @@ func (li *LineIgnore) String() string {
 	return fmt.Sprintf("%s:%d %s (%s)", li.File, li.Line, strings.Join(li.Checks, ", "), matched)
 }
 
-type FileIgnore struct {
+type fileIgnore struct {
 	File   string
 	Checks []string
 }
 
-func (fi *FileIgnore) Match(p Problem) bool {
-	if p.Pos.Filename != fi.File {
+func (fi *fileIgnore) Match(p Problem) bool {
+	if p.Position.Filename != fi.File {
 		return false
 	}
 	for _, c := range fi.Checks {
-		if m, _ := filepath.Match(c, p.Check); m {
+		if m, _ := filepath.Match(c, p.Category); m {
 			return true
 		}
 	}
@@ -116,286 +116,262 @@ const (
 
 // Problem represents a problem in some source code.
 type Problem struct {
-	Pos      token.Position
-	End      token.Position
-	Message  string
-	Check    string
+	runner.Diagnostic
 	Severity Severity
-	Related  []Related
-}
-
-type Related struct {
-	Pos     token.Position
-	End     token.Position
-	Message string
 }
 
-func (p Problem) Equal(o Problem) bool {
-	return p.Pos == o.Pos &&
+func (p Problem) equal(o Problem) bool {
+	return p.Position == o.Position &&
 		p.End == o.End &&
 		p.Message == o.Message &&
-		p.Check == o.Check &&
+		p.Category == o.Category &&
 		p.Severity == o.Severity
 }
 
 func (p *Problem) String() string {
-	return fmt.Sprintf("%s (%s)", p.Message, p.Check)
+	return fmt.Sprintf("%s (%s)", p.Message, p.Category)
 }
 
 // A Linter lints Go source code.
 type Linter struct {
-	Checkers           []*analysis.Analyzer
-	CumulativeCheckers []CumulativeChecker
-	GoVersion          int
-	Config             config.Config
-	Stats              Stats
-	RepeatAnalyzers    uint
+	Checkers  []*analysis.Analyzer
+	GoVersion int
+	Config    config.Config
+	Runner    *runner.Runner
 }
 
-type CumulativeChecker interface {
-	Analyzer() *analysis.Analyzer
-	Result() []types.Object
-	ProblemObject(*token.FileSet, types.Object) Problem
-}
+func failed(res runner.Result) []Problem {
+	var problems []Problem
 
-func (l *Linter) Lint(cfg *packages.Config, patterns []string) ([]Problem, error) {
-	var allAnalyzers []*analysis.Analyzer
-	allAnalyzers = append(allAnalyzers, l.Checkers...)
-	for _, cum := range l.CumulativeCheckers {
-		allAnalyzers = append(allAnalyzers, cum.Analyzer())
-	}
+	log.Println(reflect.TypeOf(res.Errors[0]))
+	for _, e := range res.Errors {
+		switch e := e.(type) {
+		case packages.Error:
+			msg := e.Msg
+			if len(msg) != 0 && msg[0] == '\n' {
+				// TODO(dh): See https://github.com/golang/go/issues/32363
+				msg = msg[1:]
+			}
 
-	// The -checks command line flag overrules all configuration
-	// files, which means that for `-checks="foo"`, no check other
-	// than foo can ever be reported to the user. Make use of this
-	// fact to cull the list of analyses we need to run.
-
-	// replace "inherit" with "all", as we don't want to base the
-	// list of all checks on the default configuration, which
-	// disables certain checks.
-	checks := make([]string, len(l.Config.Checks))
-	copy(checks, l.Config.Checks)
-	for i, c := range checks {
-		if c == "inherit" {
-			checks[i] = "all"
+			var posn token.Position
+			if e.Pos == "" {
+				// Under certain conditions (malformed package
+				// declarations, multiple packages in the same
+				// directory), go list emits an error on stderr
+				// instead of JSON. Those errors do not have
+				// associated position information in
+				// go/packages.Error, even though the output on
+				// stderr may contain it.
+				if p, n, err := parsePos(msg); err == nil {
+					if abs, err := filepath.Abs(p.Filename); err == nil {
+						p.Filename = abs
+					}
+					posn = p
+					msg = msg[n+2:]
+				}
+			} else {
+				var err error
+				posn, _, err = parsePos(e.Pos)
+				if err != nil {
+					panic(fmt.Sprintf("internal error: %s", e))
+				}
+			}
+			p := Problem{
+				Diagnostic: runner.Diagnostic{
+					Position: posn,
+					Message:  msg,
+					Category: "compile",
+				},
+				Severity: Error,
+			}
+			problems = append(problems, p)
+		case error:
+			p := Problem{
+				Diagnostic: runner.Diagnostic{
+					Position: token.Position{},
+					Message:  e.Error(),
+					Category: "compile",
+				},
+				Severity: Error,
+			}
+			problems = append(problems, p)
 		}
 	}
 
-	allowed := FilterChecks(allAnalyzers, checks)
-	var allowedAnalyzers []*analysis.Analyzer
-	for _, c := range l.Checkers {
-		if allowed[c.Name] {
-			allowedAnalyzers = append(allowedAnalyzers, c)
-		}
+	return problems
+}
+
+type unusedKey struct {
+	pkgPath string
+	base    string
+	line    int
+	name    string
+}
+
+type unusedPair struct {
+	key unusedKey
+	obj unused.SerializedObject
+}
+
+func success(allowedChecks map[string]bool, res runner.Result) ([]Problem, unused.SerializedResult, error) {
+	diags, err := res.Diagnostics()
+	if err != nil {
+		return nil, unused.SerializedResult{}, err
 	}
-	hasCumulative := false
-	for _, cum := range l.CumulativeCheckers {
-		a := cum.Analyzer()
-		if allowed[a.Name] {
-			hasCumulative = true
-			allowedAnalyzers = append(allowedAnalyzers, a)
+
+	var problems []Problem
+
+	for _, diag := range diags {
+		if !allowedChecks[diag.Category] {
+			continue
 		}
+		problems = append(problems, Problem{Diagnostic: diag})
 	}
 
-	r, err := NewRunner(&l.Stats)
-	if err != nil {
-		return nil, err
+	u, err := res.Unused()
+	return problems, u, err
+}
+
+func filterIgnored(problems []Problem, res runner.Result) []Problem {
+	couldveMatched := func(ig *lineIgnore) bool {
+		for _, c := range ig.Checks {
+			if c == "U1000" {
+				// We never want to flag ignores for U1000,
+				// because U1000 isn't local to a single
+				// package. For example, an identifier may
+				// only be used by tests, in which case an
+				// ignore would only fire when not analyzing
+				// tests. To avoid spurious "useless ignore"
+				// warnings, just never flag U1000.
+				return false
+			}
+
+			// XXX see if check was possible
+
+			// XXX we need the runner to give us the list of analyzers
+			// it ran. we can't look at Config.Checks, because that
+			// one hasn't been expanded yet.
+		}
+
+		return false
 	}
-	r.goVersion = l.GoVersion
-	r.repeatAnalyzers = l.RepeatAnalyzers
 
-	pkgs, err := r.Run(cfg, patterns, allowedAnalyzers, hasCumulative)
+	dirs, err := res.Directives()
 	if err != nil {
-		return nil, err
+		// XXX
+		panic(err)
 	}
 
-	tpkgToPkg := map[*types.Package]*Package{}
-	for _, pkg := range pkgs {
-		tpkgToPkg[pkg.Types] = pkg
-
-		for _, e := range pkg.errs {
-			switch e := e.(type) {
-			case types.Error:
-				p := Problem{
-					Pos:      e.Fset.PositionFor(e.Pos, false),
-					Message:  e.Msg,
-					Severity: Error,
-					Check:    "compile",
-				}
-				pkg.problems = append(pkg.problems, p)
-			case packages.Error:
-				msg := e.Msg
-				if len(msg) != 0 && msg[0] == '\n' {
-					// TODO(dh): See https://github.com/golang/go/issues/32363
-					msg = msg[1:]
-				}
+	ignores, moreProblems := parseDirectives(dirs)
 
-				var pos token.Position
-				if e.Pos == "" {
-					// Under certain conditions (malformed package
-					// declarations, multiple packages in the same
-					// directory), go list emits an error on stderr
-					// instead of JSON. Those errors do not have
-					// associated position information in
-					// go/packages.Error, even though the output on
-					// stderr may contain it.
-					if p, n, err := parsePos(msg); err == nil {
-						if abs, err := filepath.Abs(p.Filename); err == nil {
-							p.Filename = abs
-						}
-						pos = p
-						msg = msg[n+2:]
-					}
-				} else {
-					var err error
-					pos, _, err = parsePos(e.Pos)
-					if err != nil {
-						panic(fmt.Sprintf("internal error: %s", e))
-					}
-				}
-				p := Problem{
-					Pos:      pos,
-					Message:  msg,
-					Severity: Error,
-					Check:    "compile",
-				}
-				pkg.problems = append(pkg.problems, p)
-			case scanner.ErrorList:
-				for _, e := range e {
-					p := Problem{
-						Pos:      e.Pos,
-						Message:  e.Msg,
-						Severity: Error,
-						Check:    "compile",
-					}
-					pkg.problems = append(pkg.problems, p)
-				}
-			case error:
-				p := Problem{
-					Pos:      token.Position{},
-					Message:  e.Error(),
-					Severity: Error,
-					Check:    "compile",
-				}
-				pkg.problems = append(pkg.problems, p)
+	for _, ig := range ignores {
+		for i := range problems {
+			p := &problems[i]
+			if ig.Match(*p) {
+				p.Severity = Ignored
 			}
 		}
-	}
 
-	atomic.StoreUint32(&r.stats.State, StateCumulative)
-	for _, cum := range l.CumulativeCheckers {
-		for _, res := range cum.Result() {
-			pkg := tpkgToPkg[res.Pkg()]
-			if pkg == nil {
-				panic(fmt.Sprintf("analyzer %s flagged object %s in package %s, a package that we aren't tracking", cum.Analyzer(), res, res.Pkg()))
-			}
-			allowedChecks := FilterChecks(allowedAnalyzers, pkg.cfg.Merge(l.Config).Checks)
-			if allowedChecks[cum.Analyzer().Name] {
-				pos := DisplayPosition(pkg.Fset, res.Pos())
-				// FIXME(dh): why are we ignoring generated files
-				// here? Surely this is specific to 'unused', not all
-				// cumulative checkers
-				if _, ok := pkg.gen[pos.Filename]; ok {
-					continue
-				}
-				p := cum.ProblemObject(pkg.Fset, res)
-				pkg.problems = append(pkg.problems, p)
+		if ig, ok := ig.(*lineIgnore); ok && !ig.Matched && couldveMatched(ig) {
+			p := Problem{
+				Diagnostic: runner.Diagnostic{
+					Position: ig.Pos,
+					Message:  "this linter directive didn't match anything; should it be removed?",
+					Category: "",
+				},
 			}
+			moreProblems = append(moreProblems, p)
 		}
 	}
 
-	for _, pkg := range pkgs {
-		if !pkg.fromSource {
-			// Don't cache packages that we loaded from the cache
-			continue
-		}
-		cpkg := cachedPackage{
-			Problems: pkg.problems,
-			Ignores:  pkg.ignores,
-			Config:   pkg.cfg,
-		}
-		buf := &bytes.Buffer{}
-		if err := gob.NewEncoder(buf).Encode(cpkg); err != nil {
-			return nil, err
-		}
-		id := cache.Subkey(pkg.actionID, "data "+r.problemsCacheKey)
-		if err := r.cache.PutBytes(id, buf.Bytes()); err != nil {
-			return nil, err
-		}
+	return append(problems, moreProblems...)
+}
+
+func NewLinter(cfg config.Config) (*Linter, error) {
+	r, err := runner.New(cfg)
+	if err != nil {
+		return nil, err
+	}
+	return &Linter{
+		Config: cfg,
+		Runner: r,
+	}, nil
+}
+
+func (l *Linter) Lint(cfg *packages.Config, patterns []string) ([]Problem, error) {
+	// XXX
+	// r.goVersion = l.GoVersion
+
+	results, err := l.Runner.Run(cfg, l.Checkers, patterns)
+	if err != nil {
+		return nil, err
+	}
+
+	analyzerNames := make([]string, len(l.Checkers))
+	for i, a := range l.Checkers {
+		analyzerNames[i] = a.Name
 	}
 
 	var problems []Problem
-	// Deduplicate line ignores. When U1000 processes a package and
-	// its test variant, it will only emit a single problem for an
-	// unused object, not two problems. We will, however, have two
-	// line ignores, one per package. Without deduplication, one line
-	// ignore will be marked as matched, while the other one won't,
-	// subsequently reporting a "this linter directive didn't match
-	// anything" error.
-	ignores := map[token.Position]Ignore{}
-	for _, pkg := range pkgs {
-		for _, ig := range pkg.ignores {
-			if lig, ok := ig.(*LineIgnore); ok {
-				ig = ignores[lig.Pos]
-				if ig == nil {
-					ignores[lig.Pos] = lig
-					ig = lig
-				}
-			}
-			for i := range pkg.problems {
-				p := &pkg.problems[i]
-				if ig.Match(*p) {
-					p.Severity = Ignored
-				}
-			}
+	used := map[unusedKey]bool{}
+	var unuseds []unusedPair
+	for _, res := range results {
+		if len(res.Errors) > 0 && !res.Failed {
+			panic("package has errors but isn't marked as failed")
 		}
-
-		if pkg.cfg == nil {
-			// The package failed to load, otherwise we would have a
-			// valid config. Pass through all errors.
-			problems = append(problems, pkg.problems...)
+		if res.Failed {
+			problems = append(problems, failed(res)...)
 		} else {
-			for _, p := range pkg.problems {
-				allowedChecks := FilterChecks(allowedAnalyzers, pkg.cfg.Merge(l.Config).Checks)
-				allowedChecks["compile"] = true
-				if allowedChecks[p.Check] {
-					problems = append(problems, p)
-				}
+			allowedAnalyzers := FilterAnalyzerNames(analyzerNames, res.Config.Checks)
+			ps, u, err := success(allowedAnalyzers, res)
+			if err != nil {
+				// XXX
+				panic(err)
 			}
-		}
-
-		for _, ig := range pkg.ignores {
-			ig, ok := ig.(*LineIgnore)
-			if !ok {
-				continue
-			}
-			ig = ignores[ig.Pos].(*LineIgnore)
-			if ig.Matched {
-				continue
+			problems = append(problems, filterIgnored(ps, res)...)
+
+			for _, obj := range u.Used {
+				// FIXME(dh): pick the object whose filename does not include $GOROOT
+				key := unusedKey{
+					pkgPath: obj.PkgPath,
+					base:    filepath.Base(obj.Position.Filename),
+					line:    obj.Position.Line,
+					name:    obj.Name,
+				}
+				used[key] = true
 			}
-
-			couldveMatched := false
-			allowedChecks := FilterChecks(allowedAnalyzers, pkg.cfg.Merge(l.Config).Checks)
-			for _, c := range ig.Checks {
-				if !allowedChecks[c] {
-					continue
+			if allowedAnalyzers["U1000"] {
+				for _, obj := range u.Unused {
+					key := unusedKey{
+						pkgPath: obj.PkgPath,
+						base:    filepath.Base(obj.Position.Filename),
+						line:    obj.Position.Line,
+						name:    obj.Name,
+					}
+					unuseds = append(unuseds, unusedPair{key, obj})
+					if _, ok := used[key]; !ok {
+						used[key] = false
+					}
 				}
-				couldveMatched = true
-				break
 			}
+		}
+	}
 
-			if !couldveMatched {
-				// The ignored checks were disabled for the containing package.
-				// Don't flag the ignore for not having matched.
-				continue
-			}
-			p := Problem{
-				Pos:     ig.Pos,
-				Message: "this linter directive didn't match anything; should it be removed?",
-				Check:   "",
-			}
-			problems = append(problems, p)
+	for _, uo := range unuseds {
+		if used[uo.key] {
+			continue
+		}
+		if uo.obj.InGenerated {
+			continue
 		}
+		problems = append(problems, Problem{
+			Diagnostic: runner.Diagnostic{
+				Position: uo.obj.DisplayPosition,
+				Message:  fmt.Sprintf("%s %s is unused", uo.obj.Kind, uo.obj.Name),
+				Category: "U1000",
+			},
+		})
 	}
 
 	if len(problems) == 0 {
@@ -403,8 +379,8 @@ func (l *Linter) Lint(cfg *packages.Config, patterns []string) ([]Problem, error
 	}
 
 	sort.Slice(problems, func(i, j int) bool {
-		pi := problems[i].Pos
-		pj := problems[j].Pos
+		pi := problems[i].Position
+		pj := problems[j].Position
 
 		if pi.Filename != pj.Filename {
 			return pi.Filename < pj.Filename
@@ -424,15 +400,14 @@ func (l *Linter) Lint(cfg *packages.Config, patterns []string) ([]Problem, error
 	for i, p := range problems[1:] {
 		// We may encounter duplicate problems because one file
 		// can be part of many packages.
-		if !problems[i].Equal(p) {
+		if !problems[i].equal(p) {
 			out = append(out, p)
 		}
 	}
 	return out, nil
 }
 
-func FilterChecks(allChecks []*analysis.Analyzer, checks []string) map[string]bool {
-	// OPT(dh): this entire computation could be cached per package
+func FilterAnalyzerNames(analyzers []string, checks []string) map[string]bool {
 	allowedChecks := map[string]bool{}
 
 	for _, check := range checks {
@@ -443,26 +418,26 @@ func FilterChecks(allChecks []*analysis.Analyzer, checks []string) map[string]bo
 		}
 		if check == "*" || check == "all" {
 			// Match all
-			for _, c := range allChecks {
-				allowedChecks[c.Name] = b
+			for _, c := range analyzers {
+				allowedChecks[c] = b
 			}
 		} else if strings.HasSuffix(check, "*") {
 			// Glob
 			prefix := check[:len(check)-1]
 			isCat := strings.IndexFunc(prefix, func(r rune) bool { return unicode.IsNumber(r) }) == -1
 
-			for _, c := range allChecks {
-				idx := strings.IndexFunc(c.Name, func(r rune) bool { return unicode.IsNumber(r) })
+			for _, a := range analyzers {
+				idx := strings.IndexFunc(a, func(r rune) bool { return unicode.IsNumber(r) })
 				if isCat {
 					// Glob is S*, which should match S1000 but not SA1000
-					cat := c.Name[:idx]
+					cat := a[:idx]
 					if prefix == cat {
-						allowedChecks[c.Name] = b
+						allowedChecks[a] = b
 					}
 				} else {
 					// Glob is S1*
-					if strings.HasPrefix(c.Name, prefix) {
-						allowedChecks[c.Name] = b
+					if strings.HasPrefix(a, prefix) {
+						allowedChecks[a] = b
 					}
 				}
 			}
@@ -474,66 +449,22 @@ func FilterChecks(allChecks []*analysis.Analyzer, checks []string) map[string]bo
 	return allowedChecks
 }
 
-func DisplayPosition(fset *token.FileSet, p token.Pos) token.Position {
-	if p == token.NoPos {
-		return token.Position{}
-	}
-
-	// Only use the adjusted position if it points to another Go file.
-	// This means we'll point to the original file for cgo files, but
-	// we won't point to a YACC grammar file.
-	pos := fset.PositionFor(p, false)
-	adjPos := fset.PositionFor(p, true)
-
-	if filepath.Ext(adjPos.Filename) == ".go" {
-		return adjPos
-	}
-	return pos
-}
-
-var bufferPool = &sync.Pool{
-	New: func() interface{} {
-		buf := bytes.NewBuffer(nil)
-		buf.Grow(64)
-		return buf
-	},
-}
-
-func FuncName(f *types.Func) string {
-	buf := bufferPool.Get().(*bytes.Buffer)
-	buf.Reset()
-	if f.Type() != nil {
-		sig := f.Type().(*types.Signature)
-		if recv := sig.Recv(); recv != nil {
-			buf.WriteByte('(')
-			if _, ok := recv.Type().(*types.Interface); ok {
-				// gcimporter creates abstract methods of
-				// named interfaces using the interface type
-				// (not the named type) as the receiver.
-				// Don't print it in full.
-				buf.WriteString("interface")
-			} else {
-				types.WriteType(buf, recv.Type(), nil)
-			}
-			buf.WriteByte(')')
-			buf.WriteByte('.')
-		} else if f.Pkg() != nil {
-			writePackage(buf, f.Pkg())
-		}
-	}
-	buf.WriteString(f.Name())
-	s := buf.String()
-	bufferPool.Put(buf)
-	return s
-}
+var posRe = regexp.MustCompile(`^(.+?):(\d+)(?::(\d+)?)?`)
 
-func writePackage(buf *bytes.Buffer, pkg *types.Package) {
-	if pkg == nil {
-		return
+func parsePos(pos string) (token.Position, int, error) {
+	if pos == "-" || pos == "" {
+		return token.Position{}, 0, nil
 	}
-	s := pkg.Path()
-	if s != "" {
-		buf.WriteString(s)
-		buf.WriteByte('.')
+	parts := posRe.FindStringSubmatch(pos)
+	if parts == nil {
+		return token.Position{}, 0, fmt.Errorf("malformed position %q", pos)
 	}
+	file := parts[1]
+	line, _ := strconv.Atoi(parts[2])
+	col, _ := strconv.Atoi(parts[3])
+	return token.Position{
+		Filename: file,
+		Line:     line,
+		Column:   col,
+	}, len(parts[0]), nil
 }
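FilterAnalyzerNames now operates on plain analyzer names rather than *analysis.Analyzer values, which makes its glob expansion easy to demonstrate in isolation (a sketch; the name list is made up, and plain names like SA4006 are handled by the unchanged exact-match branch elided from this hunk):

package main

import (
	"fmt"

	"honnef.co/go/tools/lint"
)

func main() {
	analyzers := []string{"S1000", "SA1000", "SA4006", "ST1003"}
	// "S*" is a category glob: it matches S1000 but not SA1000.
	allowed := lint.FilterAnalyzerNames(analyzers, []string{"S*", "SA4006"})
	fmt.Println(allowed) // map[S1000:true SA4006:true]
}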
diff --git a/lint/lint_test.go b/lint/lint_test.go
index 07e7623cd..e9bf7ad6d 100644
--- a/lint/lint_test.go
+++ b/lint/lint_test.go
@@ -10,6 +10,8 @@ import (
 	"testing"
 
 	"golang.org/x/tools/go/packages"
+	"honnef.co/go/tools/config"
+	"honnef.co/go/tools/runner"
 )
 
 func testdata() string {
@@ -21,7 +23,10 @@ func testdata() string {
 }
 
 func lintPackage(t *testing.T, name string) []Problem {
-	l := Linter{}
+	l, err := NewLinter(config.Config{})
+	if err != nil {
+		t.Fatal(err)
+	}
 	cfg := &packages.Config{
 		Env: append(os.Environ(), "GOPATH="+testdata(), "GO111MODULE=off"),
 	}
@@ -48,7 +53,7 @@ func TestErrors(t *testing.T) {
 		if want := "expected 'package', found pckage"; ps[0].Message != want {
 			t.Errorf("got message %q, want %q", ps[0].Message, want)
 		}
-		if ps[0].Pos.Filename == "" {
+		if ps[0].Position.Filename == "" {
 			t.Errorf("didn't get useful position")
 		}
 	})
@@ -61,19 +66,21 @@ func TestErrors(t *testing.T) {
 		if len(ps) != 1 {
 			t.Fatalf("got %d problems, want 1", len(ps))
 		}
-		trimPosition(&ps[0].Pos)
+		trimPosition(&ps[0].Position)
 		want := Problem{
-			Pos: token.Position{
-				Filename: "broken_typeerror/pkg.go",
-				Offset:   42,
-				Line:     5,
-				Column:   10,
+			Diagnostic: runner.Diagnostic{
+				Position: token.Position{
+					Filename: "broken_typeerror/pkg.go",
+					Offset:   0,
+					Line:     5,
+					Column:   10,
+				},
+				Message:  "cannot convert \"\" (untyped string constant) to int",
+				Category: "compile",
 			},
-			Message:  "cannot convert \"\" (untyped string constant) to int",
-			Check:    "compile",
 			Severity: 0,
 		}
-		if !ps[0].Equal(want) {
+		if !ps[0].equal(want) {
 			t.Errorf("got %#v, want %#v", ps[0], want)
 		}
 	})
@@ -91,19 +98,21 @@ func TestErrors(t *testing.T) {
 			t.Fatalf("got %d problems, want 1", len(ps))
 		}
 
-		trimPosition(&ps[0].Pos)
+		trimPosition(&ps[0].Position)
 		want := Problem{
-			Pos: token.Position{
-				Filename: "broken_parse/pkg.go",
-				Offset:   13,
-				Line:     3,
-				Column:   1,
+			Diagnostic: runner.Diagnostic{
+				Position: token.Position{
+					Filename: "broken_parse/pkg.go",
+					Offset:   0,
+					Line:     3,
+					Column:   1,
+				},
+				Message:  "expected declaration, found asd",
+				Category: "compile",
 			},
-			Message:  "expected declaration, found asd",
-			Check:    "compile",
 			Severity: 0,
 		}
-		if !ps[0].Equal(want) {
+		if !ps[0].equal(want) {
 			t.Errorf("got %#v, want %#v", ps[0], want)
 		}
 	})
diff --git a/lint/lintutil/format/format.go b/lint/lintutil/format/format.go
index b28f8885b..ef75a75d4 100644
--- a/lint/lintutil/format/format.go
+++ b/lint/lintutil/format/format.go
@@ -51,9 +51,9 @@ type Text struct {
 }
 
 func (o Text) Format(p lint.Problem) {
-	fmt.Fprintf(o.W, "%s: %s\n", relativePositionString(p.Pos), p.String())
+	fmt.Fprintf(o.W, "%s: %s\n", relativePositionString(p.Position), p.String())
 	for _, r := range p.Related {
-		fmt.Fprintf(o.W, "\t%s: %s\n", relativePositionString(r.Pos), r.Message)
+		fmt.Fprintf(o.W, "\t%s: %s\n", relativePositionString(r.Position), r.Message)
 	}
 }
 
@@ -92,12 +92,12 @@ func (o JSON) Format(p lint.Problem) {
 		Message  string    `json:"message"`
 		Related  []related `json:"related,omitempty"`
 	}{
-		Code:     p.Check,
+		Code:     p.Category,
 		Severity: severity(p.Severity),
 		Location: location{
-			File:   p.Pos.Filename,
-			Line:   p.Pos.Line,
-			Column: p.Pos.Column,
+			File:   p.Position.Filename,
+			Line:   p.Position.Line,
+			Column: p.Position.Column,
 		},
 		End: location{
 			File:   p.End.Filename,
@@ -109,9 +109,9 @@ func (o JSON) Format(p lint.Problem) {
 	for _, r := range p.Related {
 		jp.Related = append(jp.Related, related{
 			Location: location{
-				File:   r.Pos.Filename,
-				Line:   r.Pos.Line,
-				Column: r.Pos.Column,
+				File:   r.Position.Filename,
+				Line:   r.Position.Line,
+				Column: r.Position.Column,
 			},
 			End: location{
 				File:   r.End.Filename,
@@ -132,7 +132,7 @@ type Stylish struct {
 }
 
 func (o *Stylish) Format(p lint.Problem) {
-	pos := p.Pos
+	pos := p.Position
 	if pos.Filename == "" {
 		pos.Filename = "-"
 	}
@@ -146,9 +146,9 @@ func (o *Stylish) Format(p lint.Problem) {
 		o.prevFile = pos.Filename
 		o.tw = tabwriter.NewWriter(o.W, 0, 4, 2, ' ', 0)
 	}
-	fmt.Fprintf(o.tw, "  (%d, %d)\t%s\t%s\n", pos.Line, pos.Column, p.Check, p.Message)
+	fmt.Fprintf(o.tw, "  (%d, %d)\t%s\t%s\n", pos.Line, pos.Column, p.Category, p.Message)
 	for _, r := range p.Related {
-		fmt.Fprintf(o.tw, "    (%d, %d)\t\t  %s\n", r.Pos.Line, r.Pos.Column, r.Message)
+		fmt.Fprintf(o.tw, "    (%d, %d)\t\t  %s\n", r.Position.Line, r.Position.Column, r.Message)
 	}
}
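Since Problem now embeds runner.Diagnostic, formatters read p.Position and p.Category where they used to read p.Pos and p.Check. A sketch of feeding a hand-built Problem through the Text formatter (the diagnostic values are invented):

package main

import (
	"go/token"
	"os"

	"honnef.co/go/tools/lint"
	"honnef.co/go/tools/lint/lintutil/format"
	"honnef.co/go/tools/runner"
)

func main() {
	p := lint.Problem{
		Diagnostic: runner.Diagnostic{
			Position: token.Position{Filename: "main.go", Line: 5, Column: 2},
			Message:  "this value of err is never used",
			Category: "SA4006",
		},
	}
	f := format.Text{W: os.Stdout}
	f.Format(p) // main.go:5:2: this value of err is never used (SA4006)
}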
diff --git a/lint/lintutil/util.go b/lint/lintutil/util.go
index 278cd267b..48805d1e2 100644
--- a/lint/lintutil/util.go
+++ b/lint/lintutil/util.go
@@ -24,13 +24,14 @@ import (
 	"strconv"
 	"strings"
 	"sync"
-	"sync/atomic"
 	"time"
 
 	"honnef.co/go/tools/config"
 	"honnef.co/go/tools/internal/cache"
 	"honnef.co/go/tools/lint"
 	"honnef.co/go/tools/lint/lintutil/format"
+	"honnef.co/go/tools/loader"
+	"honnef.co/go/tools/runner"
 	"honnef.co/go/tools/version"
 
 	"golang.org/x/tools/go/analysis"
@@ -38,24 +39,24 @@ import (
 	"golang.org/x/tools/go/packages"
 )
 
-func NewVersionFlag() flag.Getter {
+func newVersionFlag() flag.Getter {
 	tags := build.Default.ReleaseTags
 	v := tags[len(tags)-1][2:]
 
-	version := new(VersionFlag)
+	version := new(versionFlag)
 	if err := version.Set(v); err != nil {
 		panic(fmt.Sprintf("internal error: %s", err))
 	}
 	return version
 }
 
-type VersionFlag int
+type versionFlag int
 
-func (v *VersionFlag) String() string {
+func (v *versionFlag) String() string {
 	return fmt.Sprintf("1.%d", *v)
 }
 
-func (v *VersionFlag) Set(s string) error {
+func (v *versionFlag) Set(s string) error {
 	if len(s) < 3 {
 		return errors.New("invalid Go version")
 	}
@@ -66,11 +67,11 @@ func (v *VersionFlag) Set(s string) error {
 		return errors.New("invalid Go version")
 	}
 	i, err := strconv.Atoi(s[2:])
-	*v = VersionFlag(i)
+	*v = versionFlag(i)
 	return err
 }
 
-func (v *VersionFlag) Get() interface{} {
+func (v *versionFlag) Get() interface{} {
 	return int(*v)
 }
 
@@ -117,7 +118,6 @@ func FlagSet(name string) *flag.FlagSet {
 	flags.Bool("debug.version", false, "Print detailed version information about this program")
 	flags.Bool("debug.no-compile-errors", false, "Don't print compile errors")
 	flags.String("debug.measure-analyzers", "", "Write analysis measurements to `file`. `file` will be opened for appending if it already exists.")
-	flags.Uint("debug.repeat-analyzers", 0, "Run analyzers `num` times")
 
 	checks := list{"inherit"}
 	fail := list{"all"}
@@ -126,7 +126,7 @@ func FlagSet(name string) *flag.FlagSet {
 
 	tags := build.Default.ReleaseTags
 	v := tags[len(tags)-1][2:]
-	version := new(VersionFlag)
+	version := new(versionFlag)
 	if err := version.Set(v); err != nil {
 		panic(fmt.Sprintf("internal error: %s", err))
 	}
@@ -144,7 +144,7 @@ func findCheck(cs []*analysis.Analyzer, check string) (*analysis.Analyzer, bool)
 	return nil, false
 }
 
-func ProcessFlagSet(cs []*analysis.Analyzer, cums []lint.CumulativeChecker, fs *flag.FlagSet) {
+func ProcessFlagSet(cs []*analysis.Analyzer, fs *flag.FlagSet) {
 	tags := fs.Lookup("tags").Value.(flag.Getter).Get().(string)
 	tests := fs.Lookup("tests").Value.(flag.Getter).Get().(bool)
 	goVersion := fs.Lookup("go").Value.(flag.Getter).Get().(int)
@@ -157,9 +157,8 @@ func ProcessFlagSet(cs []*analysis.Analyzer, cums []lint.CumulativeChecker, fs *
 	memProfile := fs.Lookup("debug.memprofile").Value.(flag.Getter).Get().(string)
 	debugVersion := fs.Lookup("debug.version").Value.(flag.Getter).Get().(bool)
 	debugNoCompile := fs.Lookup("debug.no-compile-errors").Value.(flag.Getter).Get().(bool)
-	debugRepeat := fs.Lookup("debug.repeat-analyzers").Value.(flag.Getter).Get().(uint)
 
-	var measureAnalyzers func(analysis *analysis.Analyzer, pkg *lint.Package, d time.Duration)
+	var measureAnalyzers func(analysis *analysis.Analyzer, pkg *loader.PackageSpec, d time.Duration)
 	if path := fs.Lookup("debug.measure-analyzers").Value.(flag.Getter).Get().(string); path != "" {
 		f, err := os.OpenFile(path, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)
 		if err != nil {
@@ -167,10 +166,11 @@ func ProcessFlagSet(cs []*analysis.Analyzer, cums []lint.CumulativeChecker, fs *
 		}
 
 		mu := &sync.Mutex{}
-		measureAnalyzers = func(analysis *analysis.Analyzer, pkg *lint.Package, d time.Duration) {
+		measureAnalyzers = func(analysis *analysis.Analyzer, pkg *loader.PackageSpec, d time.Duration) {
 			mu.Lock()
 			defer mu.Unlock()
-			if _, err := fmt.Fprintf(f, "%s\t%s\t%d\n", analysis.Name, pkg.ID, d.Nanoseconds()); err != nil {
+			// XXX pkg.ID
+			if _, err := fmt.Fprintf(f, "%s\t%s\t%d\n", analysis.Name, pkg, d.Nanoseconds()); err != nil {
 				log.Println("error writing analysis measurements:", err)
 			}
 		}
@@ -223,9 +223,6 @@ func ProcessFlagSet(cs []*analysis.Analyzer, cums []lint.CumulativeChecker, fs *
 	if explain != "" {
 		var haystack []*analysis.Analyzer
 		haystack = append(haystack, cs...)
-		for _, cum := range cums {
-			haystack = append(haystack, cum.Analyzer())
-		}
 		check, ok := findCheck(haystack, explain)
 		if !ok {
 			fmt.Fprintln(os.Stderr, "Couldn't find check", explain)
@@ -252,13 +249,12 @@ func ProcessFlagSet(cs []*analysis.Analyzer, cums []lint.CumulativeChecker, fs *
 		exit(2)
 	}
 
-	ps, err := Lint(cs, cums, fs.Args(), &Options{
+	ps, err := doLint(cs, fs.Args(), &Options{
 		Tags:                     tags,
 		LintTests:                tests,
 		GoVersion:                goVersion,
 		Config:                   cfg,
 		PrintAnalyzerMeasurement: measureAnalyzers,
-		RepeatAnalyzers:          debugRepeat,
 	})
 	if err != nil {
 		fmt.Fprintln(os.Stderr, err)
@@ -273,24 +269,23 @@ func ProcessFlagSet(cs []*analysis.Analyzer, cums []lint.CumulativeChecker, fs *
 	)
 
 	fail := *fs.Lookup("fail").Value.(*list)
-	analyzers := make([]*analysis.Analyzer, len(cs), len(cs)+len(cums))
-	copy(analyzers, cs)
-	for _, cum := range cums {
-		analyzers = append(analyzers, cum.Analyzer())
+	analyzerNames := make([]string, len(cs))
+	for i, a := range cs {
+		analyzerNames[i] = a.Name
 	}
-	shouldExit := lint.FilterChecks(analyzers, fail)
+	shouldExit := lint.FilterAnalyzerNames(analyzerNames, fail)
 	shouldExit["compile"] = true
 
 	total = len(ps)
 	for _, p := range ps {
-		if p.Check == "compile" && debugNoCompile {
+		if p.Category == "compile" && debugNoCompile {
 			continue
 		}
 		if p.Severity == lint.Ignored && !showIgnored {
 			ignored++
 			continue
 		}
-		if shouldExit[p.Check] {
+		if shouldExit[p.Category] {
 			errors++
 		} else {
 			p.Severity = lint.Warning
@@ -313,8 +308,7 @@ type Options struct {
 	Tags                     string
 	LintTests                bool
 	GoVersion                int
-	PrintAnalyzerMeasurement func(analysis *analysis.Analyzer, pkg *lint.Package, d time.Duration)
-	RepeatAnalyzers          uint
+	PrintAnalyzerMeasurement func(analysis *analysis.Analyzer, pkg *loader.PackageSpec, d time.Duration)
 }
 
 func computeSalt() ([]byte, error) {
@@ -337,7 +331,7 @@ func computeSalt() ([]byte, error) {
 	return h.Sum(nil), nil
 }
 
-func Lint(cs []*analysis.Analyzer, cums []lint.CumulativeChecker, paths []string, opt *Options) ([]lint.Problem, error) {
+func doLint(cs []*analysis.Analyzer, paths []string, opt *Options) ([]lint.Problem, error) {
 	salt, err := computeSalt()
 	if err != nil {
 		return nil, fmt.Errorf("could not compute salt for cache: %s", err)
@@ -348,14 +342,14 @@ func Lint(cs []*analysis.Analyzer, cums []lint.CumulativeChecker, paths []string
 		opt = &Options{}
 	}
 
-	l := &lint.Linter{
-		Checkers:           cs,
-		CumulativeCheckers: cums,
-		GoVersion:          opt.GoVersion,
-		Config:             opt.Config,
-		RepeatAnalyzers:    opt.RepeatAnalyzers,
+	l, err := lint.NewLinter(opt.Config)
+	if err != nil {
+		return nil, err
 	}
-	l.Stats.PrintAnalyzerMeasurement = opt.PrintAnalyzerMeasurement
+	l.Checkers = cs
+	l.GoVersion = opt.GoVersion
+	l.Runner.Stats.PrintAnalyzerMeasurement = opt.PrintAnalyzerMeasurement
+
 	cfg := &packages.Config{}
 	if opt.LintTests {
 		cfg.Tests = true
@@ -368,23 +362,24 @@ func Lint(cs []*analysis.Analyzer, cums []lint.CumulativeChecker, paths []string
 			// Individual stats are read atomically, but overall there
 			// is no synchronisation. For printing rough progress
 			// information, this doesn't matter.
-			switch atomic.LoadUint32(&l.Stats.State) {
-			case lint.StateInitializing:
+			switch l.Runner.Stats.State() {
+			case runner.StateInitializing:
 				fmt.Fprintln(os.Stderr, "Status: initializing")
-			case lint.StateGraph:
+			case runner.StateLoadPackageGraph:
 				fmt.Fprintln(os.Stderr, "Status: loading package graph")
-			case lint.StateProcessing:
-				fmt.Fprintf(os.Stderr, "Packages: %d/%d initial, %d/%d total; Workers: %d/%d; Problems: %d\n",
-					atomic.LoadUint32(&l.Stats.ProcessedInitialPackages),
-					atomic.LoadUint32(&l.Stats.InitialPackages),
-					atomic.LoadUint32(&l.Stats.ProcessedPackages),
-					atomic.LoadUint32(&l.Stats.TotalPackages),
-					atomic.LoadUint32(&l.Stats.ActiveWorkers),
-					atomic.LoadUint32(&l.Stats.TotalWorkers),
-					atomic.LoadUint32(&l.Stats.Problems),
+			case runner.StateBuildActionGraph:
+				fmt.Fprintln(os.Stderr, "Status: building action graph")
+			case runner.StateProcessing:
+				fmt.Fprintf(os.Stderr, "Packages: %d/%d initial, %d/%d total; Workers: %d/%d\n",
+					l.Runner.Stats.ProcessedInitialPackages(),
+					l.Runner.Stats.InitialPackages(),
+					l.Runner.Stats.ProcessedPackages(),
+					l.Runner.Stats.TotalPackages(),
+					l.Runner.ActiveWorkers(),
+					l.Runner.TotalWorkers(),
 				)
-			case lint.StateCumulative:
-				fmt.Fprintln(os.Stderr, "Status: processing cumulative checkers")
+			case runner.StateFinalizing:
+				fmt.Fprintln(os.Stderr, "Status: finalizing")
 			}
 		}
 		if len(infoSignals) > 0 {
@@ -397,7 +392,6 @@ func Lint(cs []*analysis.Analyzer, cums []lint.CumulativeChecker, paths []string
 			}
 		}()
 	}
-
 	ps, err := l.Lint(cfg, paths)
 	return ps, err
 }
@@ -436,7 +430,7 @@ func InitializeAnalyzers(docs map[string]*lint.Documentation, analyzers map[stri
 		vc.Doc = doc.String()
 		if vc.Flags.Usage == nil {
 			fs := flag.NewFlagSet("", flag.PanicOnError)
-			fs.Var(NewVersionFlag(), "go", "Target Go version")
+			fs.Var(newVersionFlag(), "go", "Target Go version")
 			vc.Flags = *fs
 		}
 	}
diff --git a/lint/runner.go b/lint/runner.go
deleted file mode 100644
index 74106ced8..000000000
--- a/lint/runner.go
+++ /dev/null
@@ -1,1114 +0,0 @@
-package lint
-
-/*
-Package loading
-
-Conceptually, package loading in the runner can be imagined as a
-graph-shaped work list. We iteratively pop off leaf nodes (packages
-that have no unloaded dependencies) and load data from export data,
-our cache, or source.
-
-Specifically, non-initial packages are loaded from export data and the
-fact cache if possible, otherwise from source. Initial packages are
-loaded from export data, the fact cache and the (problems, ignores,
-config) cache if possible, otherwise from source.
-
-The appeal of this approach is that it is both simple to implement and
-easily parallelizable. Each leaf node can be processed independently,
-and new leaf nodes appear as their dependencies are being processed.
-
-The downside of this approach, however, is that we're doing more work
-than necessary. Imagine an initial package A, which has the following
-dependency chain: A->B->C->D – in the current implementation, we will
-load all 4 packages. However, if package A can be loaded fully from
-cached information, then none of its dependencies are necessary, and
-we could avoid loading them.
-
-
-Parallelism
-
-Runner implements parallel processing of packages by spawning one
-goroutine per package in the dependency graph, without any semaphores.
-Each goroutine initially waits on the completion of all of its
-dependencies, thus establishing correct order of processing. Once all
Once all -dependencies finish processing, the goroutine will load the package -from export data or source – this loading is guarded by a semaphore, -sized according to the number of CPU cores. This way, we only have as -many packages occupying memory and CPU resources as there are actual -cores to process them. - -This combination of unbounded goroutines but bounded package loading -means that if we have many parallel, independent subgraphs, they will -all execute in parallel, while not wasting resources for long linear -chains or trying to process more subgraphs in parallel than the system -can handle. - - -Caching - -We make use of several caches. These caches are Go's export data, our -facts cache, and our (problems, ignores, config) cache. - -Initial packages will either be loaded from a combination of all three -caches, or from source. Non-initial packages will either be loaded -from a combination of export data and facts cache, or from source. - -The facts cache is separate from the (problems, ignores, config) cache -because when we process non-initial packages, we generate facts, but -we discard problems and ignores. - -The facts cache is keyed by (package, analyzer), whereas the -(problems, ignores, config) cache is keyed by (package, list of -analyzes). The difference between the two exists because there are -only a handful of analyses that produce facts, but hundreds of -analyses that don't. Creating one cache entry per fact-generating -analysis is feasible, creating one cache entry per normal analysis has -significant performance and storage overheads. - -The downside of keying by the list of analyzes is, naturally, that a -change in list of analyzes changes the cache key. `staticcheck -checks -A` and `staticcheck -checks A,B` will therefore need their own cache -entries and not reuse each other's work. This problem does not affect -the facts cache. - -*/ - -import ( - "bytes" - "encoding/gob" - "encoding/hex" - "fmt" - "go/ast" - "go/token" - "go/types" - "reflect" - "regexp" - "runtime" - "sort" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" - - "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/packages" - "golang.org/x/tools/go/types/objectpath" - "honnef.co/go/tools/config" - "honnef.co/go/tools/facts" - "honnef.co/go/tools/internal/cache" - "honnef.co/go/tools/loader" -) - -func init() { - gob.Register(&FileIgnore{}) - gob.Register(&LineIgnore{}) -} - -// If enabled, abuse of the go/analysis API will lead to panics -const sanityCheck = true - -// OPT(dh): for a dependency tree A->B->C->D, if we have cached data -// for B, there should be no need to load C and D individually. Go's -// export data for B contains all the data we need on types, and our -// fact cache could store the union of B, C and D in B. -// -// This may change unused's behavior, however, as it may observe fewer -// interfaces from transitive dependencies. - -// OPT(dh): every single package will have the same value for -// canClearTypes. We could move the Package.decUse method to runner to -// eliminate this field. This is probably not worth it, though. There -// are only thousands of packages, so the field only takes up -// kilobytes of memory. - -// OPT(dh): do we really need the Package.gen field? it's based -// trivially on pkg.results and merely caches the result of a type -// assertion. How often do we actually use the field? - -type Package struct { - // dependents is initially set to 1 plus the number of packages - // that directly import this package. 
It is atomically decreased - // by 1 every time a dependent has been processed or when the - // package itself has been processed. Once the value reaches zero, - // the package is no longer needed. - dependents uint64 - - *packages.Package - Imports []*Package - initial bool - // fromSource is set to true for packages that have been loaded - // from source. This is the case for initial packages, packages - // with missing export data, and packages with no cached facts. - fromSource bool - // hash stores the package hash, as computed by packageHash - hash string - actionID cache.ActionID - done chan struct{} - - resultsMu sync.Mutex - // results maps analyzer IDs to analyzer results. it is - // implemented as a deduplicating concurrent cache. - results []*result - - cfg *config.Config - // gen maps file names to the code generator that created them - gen map[string]facts.Generator - problems []Problem - ignores []Ignore - errs []error - - // these slices are indexed by analysis - facts []map[types.Object][]analysis.Fact - pkgFacts [][]analysis.Fact - - // canClearTypes is set to true if we can discard type - // information after the package and its dependents have been - // processed. This is the case when no cumulative checkers are - // being run. - canClearTypes bool -} - -type cachedPackage struct { - Problems []Problem - Ignores []Ignore - Config *config.Config -} - -func (pkg *Package) decUse() { - ret := atomic.AddUint64(&pkg.dependents, ^uint64(0)) - if ret == 0 { - // nobody depends on this package anymore - if pkg.canClearTypes { - pkg.Types = nil - } - pkg.facts = nil - pkg.pkgFacts = nil - - for _, imp := range pkg.Imports { - imp.decUse() - } - } -} - -type result struct { - v interface{} - err error - ready chan struct{} -} - -type Runner struct { - cache *cache.Cache - goVersion int - stats *Stats - repeatAnalyzers uint - - analyzerIDs analyzerIDs - problemsCacheKey string - - // limits parallelism of loading packages - loadSem chan struct{} -} - -type analyzerIDs struct { - m map[*analysis.Analyzer]int -} - -func (ids analyzerIDs) get(a *analysis.Analyzer) int { - id, ok := ids.m[a] - if !ok { - panic(fmt.Sprintf("no analyzer ID for %s", a.Name)) - } - return id -} - -type Fact struct { - Path string - Fact analysis.Fact -} - -type analysisAction struct { - analyzer *analysis.Analyzer - analyzerID int - pkg *Package - newPackageFacts []analysis.Fact - problems []Problem - - pkgFacts map[*types.Package][]analysis.Fact -} - -func (ac *analysisAction) String() string { - return fmt.Sprintf("%s @ %s", ac.analyzer, ac.pkg) -} - -func (ac *analysisAction) allObjectFacts() []analysis.ObjectFact { - out := make([]analysis.ObjectFact, 0, len(ac.pkg.facts[ac.analyzerID])) - for obj, facts := range ac.pkg.facts[ac.analyzerID] { - for _, fact := range facts { - out = append(out, analysis.ObjectFact{ - Object: obj, - Fact: fact, - }) - } - } - return out -} - -func (ac *analysisAction) allPackageFacts() []analysis.PackageFact { - out := make([]analysis.PackageFact, 0, len(ac.pkgFacts)) - for pkg, facts := range ac.pkgFacts { - for _, fact := range facts { - out = append(out, analysis.PackageFact{ - Package: pkg, - Fact: fact, - }) - } - } - return out -} - -func (ac *analysisAction) importObjectFact(obj types.Object, fact analysis.Fact) bool { - if sanityCheck && len(ac.analyzer.FactTypes) == 0 { - panic("analysis doesn't export any facts") - } - for _, f := range ac.pkg.facts[ac.analyzerID][obj] { - if reflect.TypeOf(f) == reflect.TypeOf(fact) { - 
reflect.ValueOf(fact).Elem().Set(reflect.ValueOf(f).Elem()) - return true - } - } - return false -} - -func (ac *analysisAction) importPackageFact(pkg *types.Package, fact analysis.Fact) bool { - if sanityCheck && len(ac.analyzer.FactTypes) == 0 { - panic("analysis doesn't export any facts") - } - for _, f := range ac.pkgFacts[pkg] { - if reflect.TypeOf(f) == reflect.TypeOf(fact) { - reflect.ValueOf(fact).Elem().Set(reflect.ValueOf(f).Elem()) - return true - } - } - return false -} - -func (ac *analysisAction) exportObjectFact(obj types.Object, fact analysis.Fact) { - if sanityCheck && len(ac.analyzer.FactTypes) == 0 { - panic("analysis doesn't export any facts") - } - ac.pkg.facts[ac.analyzerID][obj] = append(ac.pkg.facts[ac.analyzerID][obj], fact) -} - -func (ac *analysisAction) exportPackageFact(fact analysis.Fact) { - if sanityCheck && len(ac.analyzer.FactTypes) == 0 { - panic("analysis doesn't export any facts") - } - ac.pkgFacts[ac.pkg.Types] = append(ac.pkgFacts[ac.pkg.Types], fact) - ac.newPackageFacts = append(ac.newPackageFacts, fact) -} - -func (ac *analysisAction) report(pass *analysis.Pass, d analysis.Diagnostic) { - p := Problem{ - Pos: DisplayPosition(pass.Fset, d.Pos), - End: DisplayPosition(pass.Fset, d.End), - Message: d.Message, - Check: pass.Analyzer.Name, - } - for _, r := range d.Related { - p.Related = append(p.Related, Related{ - Pos: DisplayPosition(pass.Fset, r.Pos), - End: DisplayPosition(pass.Fset, r.End), - Message: r.Message, - }) - } - ac.problems = append(ac.problems, p) -} - -func (r *Runner) runAnalysis(ac *analysisAction) (ret interface{}, err error) { - ac.pkg.resultsMu.Lock() - res := ac.pkg.results[r.analyzerIDs.get(ac.analyzer)] - if res != nil { - ac.pkg.resultsMu.Unlock() - <-res.ready - return res.v, res.err - } else { - res = &result{ - ready: make(chan struct{}), - } - ac.pkg.results[r.analyzerIDs.get(ac.analyzer)] = res - ac.pkg.resultsMu.Unlock() - - defer func() { - res.v = ret - res.err = err - close(res.ready) - }() - - pass := new(analysis.Pass) - *pass = analysis.Pass{ - Analyzer: ac.analyzer, - Fset: ac.pkg.Fset, - Files: ac.pkg.Syntax, - // type information may be nil or may be populated. if it is - // nil, it will get populated later. 
- Pkg: ac.pkg.Types, - TypesInfo: ac.pkg.TypesInfo, - TypesSizes: ac.pkg.TypesSizes, - ResultOf: map[*analysis.Analyzer]interface{}{}, - ImportObjectFact: ac.importObjectFact, - ImportPackageFact: ac.importPackageFact, - ExportObjectFact: ac.exportObjectFact, - ExportPackageFact: ac.exportPackageFact, - Report: func(d analysis.Diagnostic) { - ac.report(pass, d) - }, - AllObjectFacts: ac.allObjectFacts, - AllPackageFacts: ac.allPackageFacts, - } - - if !ac.pkg.initial { - // Don't report problems in dependencies - pass.Report = func(analysis.Diagnostic) {} - } - return r.runAnalysisUser(pass, ac) - } -} - -func (r *Runner) loadCachedPackage(pkg *Package, analyzers []*analysis.Analyzer) (cachedPackage, bool) { - // OPT(dh): we can cache this computation, it'll be the same for all packages - id := cache.Subkey(pkg.actionID, "data "+r.problemsCacheKey) - - b, _, err := r.cache.GetBytes(id) - if err != nil { - return cachedPackage{}, false - } - var cpkg cachedPackage - if err := gob.NewDecoder(bytes.NewReader(b)).Decode(&cpkg); err != nil { - return cachedPackage{}, false - } - return cpkg, true -} - -func (r *Runner) loadCachedFacts(a *analysis.Analyzer, pkg *Package) ([]Fact, bool) { - if len(a.FactTypes) == 0 { - return nil, true - } - - var facts []Fact - // Look in the cache for facts - aID := passActionID(pkg, a) - aID = cache.Subkey(aID, "facts") - b, _, err := r.cache.GetBytes(aID) - if err != nil { - // No cached facts, analyse this package like a user-provided one, but ignore diagnostics - return nil, false - } - - if err := gob.NewDecoder(bytes.NewReader(b)).Decode(&facts); err != nil { - // Cached facts are broken, analyse this package like a user-provided one, but ignore diagnostics - return nil, false - } - return facts, true -} - -type dependencyError struct { - dep string - err error -} - -func (err dependencyError) nested() dependencyError { - if o, ok := err.err.(dependencyError); ok { - return o.nested() - } - return err -} - -func (err dependencyError) Error() string { - if o, ok := err.err.(dependencyError); ok { - return o.Error() - } - return fmt.Sprintf("error running dependency %s: %s", err.dep, err.err) -} - -func (r *Runner) makeAnalysisAction(a *analysis.Analyzer, pkg *Package) *analysisAction { - aid := r.analyzerIDs.get(a) - ac := &analysisAction{ - analyzer: a, - analyzerID: aid, - pkg: pkg, - } - - if len(a.FactTypes) == 0 { - return ac - } - - // Merge all package facts of dependencies - ac.pkgFacts = map[*types.Package][]analysis.Fact{} - seen := map[*Package]struct{}{} - var dfs func(*Package) - dfs = func(pkg *Package) { - if _, ok := seen[pkg]; ok { - return - } - seen[pkg] = struct{}{} - s := pkg.pkgFacts[aid] - ac.pkgFacts[pkg.Types] = s[0:len(s):len(s)] - for _, imp := range pkg.Imports { - dfs(imp) - } - } - dfs(pkg) - - return ac -} - -// analyzes that we always want to run, even if they're not being run -// explicitly or as dependencies. these are necessary for the inner -// workings of the runner. 
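-//
-// For example, processPkg reads the results of config.Analyzer and
-// facts.Generated for every initial package, so both must have run
-// even when no requested analyzer depends on them.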
-var injectedAnalyses = []*analysis.Analyzer{facts.Generated, config.Analyzer} - -func (r *Runner) runAnalysisUser(pass *analysis.Pass, ac *analysisAction) (interface{}, error) { - if !ac.pkg.fromSource { - panic(fmt.Sprintf("internal error: %s was not loaded from source", ac.pkg)) - } - - // User-provided package, analyse it - // First analyze it with dependencies - for _, req := range ac.analyzer.Requires { - acReq := r.makeAnalysisAction(req, ac.pkg) - ret, err := r.runAnalysis(acReq) - if err != nil { - // We couldn't run a dependency, no point in going on - return nil, dependencyError{req.Name, err} - } - - pass.ResultOf[req] = ret - } - - // Then with this analyzer - var ret interface{} - for i := uint(0); i < r.repeatAnalyzers+1; i++ { - var err error - t := time.Now() - ret, err = ac.analyzer.Run(pass) - r.stats.MeasureAnalyzer(ac.analyzer, ac.pkg, time.Since(t)) - if err != nil { - return nil, err - } - } - - if len(ac.analyzer.FactTypes) > 0 { - // Merge new facts into the package and persist them. - var facts []Fact - for _, fact := range ac.newPackageFacts { - id := r.analyzerIDs.get(ac.analyzer) - ac.pkg.pkgFacts[id] = append(ac.pkg.pkgFacts[id], fact) - facts = append(facts, Fact{"", fact}) - } - for obj, afacts := range ac.pkg.facts[ac.analyzerID] { - if obj.Pkg() != ac.pkg.Package.Types { - continue - } - path, err := objectpath.For(obj) - if err != nil { - continue - } - for _, fact := range afacts { - facts = append(facts, Fact{string(path), fact}) - } - } - - if err := r.cacheData(facts, ac.pkg, ac.analyzer, "facts"); err != nil { - return nil, err - } - } - - return ret, nil -} - -func (r *Runner) cacheData(v interface{}, pkg *Package, a *analysis.Analyzer, subkey string) error { - buf := &bytes.Buffer{} - if err := gob.NewEncoder(buf).Encode(v); err != nil { - return err - } - aID := passActionID(pkg, a) - aID = cache.Subkey(aID, subkey) - if err := r.cache.PutBytes(aID, buf.Bytes()); err != nil { - return err - } - return nil -} - -func NewRunner(stats *Stats) (*Runner, error) { - cache, err := cache.Default() - if err != nil { - return nil, err - } - - return &Runner{ - cache: cache, - stats: stats, - }, nil -} - -// Run loads packages corresponding to patterns and analyses them with -// analyzers. It returns the loaded packages, which contain reported -// diagnostics as well as extracted ignore directives. -// -// Note that diagnostics have not been filtered at this point yet, to -// accommodate cumulative analyzes that require additional steps to -// produce diagnostics. 
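-//
-// A hypothetical driver would call it along these lines (error
-// handling elided; cs is the list of analyzers to run):
-//
-//	pkgs, _ := r.Run(&packages.Config{}, []string{"./..."}, cs, false)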
-func (r *Runner) Run(cfg *packages.Config, patterns []string, analyzers []*analysis.Analyzer, hasCumulative bool) ([]*Package, error) { - checkerNames := make([]string, len(analyzers)) - for i, a := range analyzers { - checkerNames[i] = a.Name - } - sort.Strings(checkerNames) - r.problemsCacheKey = strings.Join(checkerNames, " ") - - var allAnalyzers []*analysis.Analyzer - r.analyzerIDs = analyzerIDs{m: map[*analysis.Analyzer]int{}} - id := 0 - seen := map[*analysis.Analyzer]struct{}{} - var dfs func(a *analysis.Analyzer) - dfs = func(a *analysis.Analyzer) { - if _, ok := seen[a]; ok { - return - } - seen[a] = struct{}{} - allAnalyzers = append(allAnalyzers, a) - r.analyzerIDs.m[a] = id - id++ - for _, f := range a.FactTypes { - gob.Register(f) - } - for _, req := range a.Requires { - dfs(req) - } - } - for _, a := range analyzers { - if v := a.Flags.Lookup("go"); v != nil { - v.Value.Set(fmt.Sprintf("1.%d", r.goVersion)) - } - dfs(a) - } - for _, a := range injectedAnalyses { - dfs(a) - } - // Run all analyzers on all packages (subject to further - // restrictions enforced later). This guarantees that if analyzer - // A1 depends on A2, and A2 has facts, that A2 will run on the - // dependencies of user-provided packages, even though A1 won't. - analyzers = allAnalyzers - - var dcfg packages.Config - if cfg != nil { - dcfg = *cfg - } - - atomic.StoreUint32(&r.stats.State, StateGraph) - initialPkgs, err := loader.Graph(dcfg, patterns...) - if err != nil { - return nil, err - } - defer r.cache.Trim() - - var allPkgs []*Package - m := map[*packages.Package]*Package{} - packages.Visit(initialPkgs, nil, func(l *packages.Package) { - m[l] = &Package{ - Package: l, - results: make([]*result, len(r.analyzerIDs.m)), - facts: make([]map[types.Object][]analysis.Fact, len(r.analyzerIDs.m)), - pkgFacts: make([][]analysis.Fact, len(r.analyzerIDs.m)), - done: make(chan struct{}), - // every package needs itself - dependents: 1, - canClearTypes: !hasCumulative, - } - allPkgs = append(allPkgs, m[l]) - for i := range m[l].facts { - m[l].facts[i] = map[types.Object][]analysis.Fact{} - } - for _, err := range l.Errors { - m[l].errs = append(m[l].errs, err) - } - for _, v := range l.Imports { - m[v].dependents++ - m[l].Imports = append(m[l].Imports, m[v]) - } - - m[l].hash, err = r.packageHash(m[l]) - m[l].actionID = packageActionID(m[l]) - if err != nil { - m[l].errs = append(m[l].errs, err) - } - }) - - pkgs := make([]*Package, len(initialPkgs)) - for i, l := range initialPkgs { - pkgs[i] = m[l] - pkgs[i].initial = true - } - - atomic.StoreUint32(&r.stats.InitialPackages, uint32(len(initialPkgs))) - atomic.StoreUint32(&r.stats.TotalPackages, uint32(len(allPkgs))) - atomic.StoreUint32(&r.stats.State, StateProcessing) - - var wg sync.WaitGroup - wg.Add(len(allPkgs)) - r.loadSem = make(chan struct{}, runtime.GOMAXPROCS(-1)) - atomic.StoreUint32(&r.stats.TotalWorkers, uint32(cap(r.loadSem))) - for _, pkg := range allPkgs { - pkg := pkg - go func() { - r.processPkg(pkg, analyzers) - - if pkg.initial { - atomic.AddUint32(&r.stats.ProcessedInitialPackages, 1) - } - atomic.AddUint32(&r.stats.Problems, uint32(len(pkg.problems))) - wg.Done() - }() - } - wg.Wait() - - return pkgs, nil -} - -var posRe = regexp.MustCompile(`^(.+?):(\d+)(?::(\d+)?)?`) - -func parsePos(pos string) (token.Position, int, error) { - if pos == "-" || pos == "" { - return token.Position{}, 0, nil - } - parts := posRe.FindStringSubmatch(pos) - if parts == nil { - return token.Position{}, 0, fmt.Errorf("malformed position %q", pos) - } - file := 
parts[1] - line, _ := strconv.Atoi(parts[2]) - col, _ := strconv.Atoi(parts[3]) - return token.Position{ - Filename: file, - Line: line, - Column: col, - }, len(parts[0]), nil -} - -// loadPkg loads a Go package. It may be loaded from a combination of -// caches, or from source. -func (r *Runner) loadPkg(pkg *Package, analyzers []*analysis.Analyzer) error { - if pkg.Types != nil { - panic(fmt.Sprintf("internal error: %s has already been loaded", pkg.Package)) - } - - if pkg.initial { - // Try to load cached package - cpkg, ok := r.loadCachedPackage(pkg, analyzers) - if ok { - pkg.problems = cpkg.Problems - pkg.ignores = cpkg.Ignores - pkg.cfg = cpkg.Config - } else { - pkg.fromSource = true - return loader.LoadFromSource(pkg.Package) - } - } - - // At this point we're either working with a non-initial package, - // or we managed to load cached problems for the package. We still - // need export data and facts. - - // OPT(dh): we don't need type information for this package if no - // other package depends on it. this may be the case for initial - // packages. - - // Load package from export data - if err := loader.LoadFromExport(pkg.Package); err != nil { - // We asked Go to give us up to date export data, yet - // we can't load it. There must be something wrong. - // - // Attempt loading from source. This should fail (because - // otherwise there would be export data); we just want to - // get the compile errors. If loading from source succeeds - // we discard the result, anyway. Otherwise we'll fail - // when trying to reload from export data later. - // - // FIXME(dh): we no longer reload from export data, so - // theoretically we should be able to continue - pkg.fromSource = true - if err := loader.LoadFromSource(pkg.Package); err != nil { - return err - } - // Make sure this package can't be imported successfully - pkg.Package.Errors = append(pkg.Package.Errors, packages.Error{ - Pos: "-", - Msg: fmt.Sprintf("could not load export data: %s", err), - Kind: packages.ParseError, - }) - return fmt.Errorf("could not load export data: %s", err) - } - - failed := false - seen := make([]bool, len(r.analyzerIDs.m)) - var dfs func(*analysis.Analyzer) - dfs = func(a *analysis.Analyzer) { - if seen[r.analyzerIDs.get(a)] { - return - } - seen[r.analyzerIDs.get(a)] = true - - if len(a.FactTypes) > 0 { - facts, ok := r.loadCachedFacts(a, pkg) - if !ok { - failed = true - return - } - - for _, f := range facts { - if f.Path == "" { - // This is a package fact - pkg.pkgFacts[r.analyzerIDs.get(a)] = append(pkg.pkgFacts[r.analyzerIDs.get(a)], f.Fact) - continue - } - obj, err := objectpath.Object(pkg.Types, objectpath.Path(f.Path)) - if err != nil { - // Be lenient about these errors. For example, when - // analysing io/ioutil from source, we may get a fact - // for methods on the devNull type, and objectpath - // will happily create a path for them. However, when - // we later load io/ioutil from export data, the path - // no longer resolves. - // - // If an exported type embeds the unexported type, - // then (part of) the unexported type will become part - // of the type information and our path will resolve - // again. 
- continue - } - pkg.facts[r.analyzerIDs.get(a)][obj] = append(pkg.facts[r.analyzerIDs.get(a)][obj], f.Fact) - } - } - - for _, req := range a.Requires { - dfs(req) - } - } - for _, a := range analyzers { - dfs(a) - } - - if !failed { - return nil - } - - // We failed to load some cached facts - pkg.fromSource = true - // XXX we added facts to the maps, we need to get rid of those - return loader.LoadFromSource(pkg.Package) -} - -type analysisError struct { - analyzer *analysis.Analyzer - pkg *Package - err error -} - -func (err analysisError) Error() string { - return fmt.Sprintf("error running analyzer %s on %s: %s", err.analyzer, err.pkg, err.err) -} - -// processPkg processes a package. This involves loading the package, -// either from export data or from source. For packages loaded from -// source, the provides analyzers will be run on the package. -func (r *Runner) processPkg(pkg *Package, analyzers []*analysis.Analyzer) { - defer func() { - // Clear information we no longer need. Make sure to do this - // when returning from processPkg so that we clear - // dependencies, not just initial packages. - pkg.TypesInfo = nil - pkg.Syntax = nil - pkg.results = nil - - atomic.AddUint32(&r.stats.ProcessedPackages, 1) - pkg.decUse() - close(pkg.done) - }() - - // Ensure all packages have the generated map and config. This is - // required by internals of the runner. Analyses that themselves - // make use of either have an explicit dependency so that other - // runners work correctly, too. - analyzers = append(analyzers[0:len(analyzers):len(analyzers)], injectedAnalyses...) - - if len(pkg.errs) != 0 { - return - } - - for _, imp := range pkg.Imports { - <-imp.done - if len(imp.errs) > 0 { - if imp.initial { - // Don't print the error of the dependency since it's - // an initial package and we're already printing the - // error. - pkg.errs = append(pkg.errs, fmt.Errorf("could not analyze dependency %s of %s", imp, pkg)) - } else { - var s string - for _, err := range imp.errs { - s += "\n\t" + err.Error() - } - pkg.errs = append(pkg.errs, fmt.Errorf("could not analyze dependency %s of %s: %s", imp, pkg, s)) - } - return - } - } - if pkg.PkgPath == "unsafe" { - pkg.Types = types.Unsafe - return - } - - r.loadSem <- struct{}{} - atomic.AddUint32(&r.stats.ActiveWorkers, 1) - defer func() { - <-r.loadSem - atomic.AddUint32(&r.stats.ActiveWorkers, ^uint32(0)) - }() - if err := r.loadPkg(pkg, analyzers); err != nil { - pkg.errs = append(pkg.errs, err) - return - } - - // A package's object facts is the union of all of its dependencies. - for _, imp := range pkg.Imports { - for ai, m := range imp.facts { - for obj, facts := range m { - pkg.facts[ai][obj] = facts[0:len(facts):len(facts)] - } - } - } - - if !pkg.fromSource { - // Nothing left to do for the package. - return - } - - // Run analyses on initial packages and those missing facts - var wg sync.WaitGroup - wg.Add(len(analyzers)) - errs := make([]error, len(analyzers)) - var acs []*analysisAction - for i, a := range analyzers { - i := i - a := a - ac := r.makeAnalysisAction(a, pkg) - acs = append(acs, ac) - go func() { - defer wg.Done() - // Only initial packages and packages with missing - // facts will have been loaded from source. 
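- // The condition below mirrors that: on a package that is not
- // initial, only fact-producing analyzers need to run.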
- if pkg.initial || len(a.FactTypes) > 0 { - if _, err := r.runAnalysis(ac); err != nil { - errs[i] = analysisError{a, pkg, err} - return - } - } - }() - } - wg.Wait() - - depErrors := map[dependencyError]int{} - for _, err := range errs { - if err == nil { - continue - } - switch err := err.(type) { - case analysisError: - switch err := err.err.(type) { - case dependencyError: - depErrors[err.nested()]++ - default: - pkg.errs = append(pkg.errs, err) - } - default: - pkg.errs = append(pkg.errs, err) - } - } - for err, count := range depErrors { - pkg.errs = append(pkg.errs, - fmt.Errorf("could not run %s@%s, preventing %d analyzers from running: %s", err.dep, pkg, count, err.err)) - } - - // We can't process ignores at this point because `unused` needs - // to see more than one package to make its decision. - // - // OPT(dh): can't we guard this block of code by pkg.initial? - ignores, problems := parseDirectives(pkg.Package) - pkg.ignores = append(pkg.ignores, ignores...) - pkg.problems = append(pkg.problems, problems...) - for _, ac := range acs { - pkg.problems = append(pkg.problems, ac.problems...) - } - - if pkg.initial { - // Only initial packages have these analyzers run, and only - // initial packages need these. - if pkg.results[r.analyzerIDs.get(config.Analyzer)].v != nil { - pkg.cfg = pkg.results[r.analyzerIDs.get(config.Analyzer)].v.(*config.Config) - } - pkg.gen = pkg.results[r.analyzerIDs.get(facts.Generated)].v.(map[string]facts.Generator) - } - - // In a previous version of the code, we would throw away all type - // information and reload it from export data. That was - // nonsensical. The *types.Package doesn't keep any information - // live that export data wouldn't also. We only need to discard - // the AST and the TypesInfo maps; that happens after we return - // from processPkg. -} - -func parseDirective(s string) (cmd string, args []string) { - if !strings.HasPrefix(s, "//lint:") { - return "", nil - } - s = strings.TrimPrefix(s, "//lint:") - fields := strings.Split(s, " ") - return fields[0], fields[1:] -} - -// parseDirectives extracts all linter directives from the source -// files of the package. Malformed directives are returned as problems. 
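-//
-// Illustrative examples of the directive syntax recognized below
-// (the reason following the comma-separated list of checks is
-// required):
-//
-//	//lint:ignore SA4006 this value is used by cgo
-//	//lint:file-ignore U1000 this file is generated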
-func parseDirectives(pkg *packages.Package) ([]Ignore, []Problem) { - var ignores []Ignore - var problems []Problem - - for _, f := range pkg.Syntax { - found := false - commentLoop: - for _, cg := range f.Comments { - for _, c := range cg.List { - if strings.Contains(c.Text, "//lint:") { - found = true - break commentLoop - } - } - } - if !found { - continue - } - cm := ast.NewCommentMap(pkg.Fset, f, f.Comments) - for node, cgs := range cm { - for _, cg := range cgs { - for _, c := range cg.List { - if !strings.HasPrefix(c.Text, "//lint:") { - continue - } - cmd, args := parseDirective(c.Text) - switch cmd { - case "ignore", "file-ignore": - if len(args) < 2 { - p := Problem{ - Pos: DisplayPosition(pkg.Fset, c.Pos()), - Message: "malformed linter directive; missing the required reason field?", - Severity: Error, - Check: "compile", - } - problems = append(problems, p) - continue - } - default: - // unknown directive, ignore - continue - } - checks := strings.Split(args[0], ",") - pos := DisplayPosition(pkg.Fset, node.Pos()) - var ig Ignore - switch cmd { - case "ignore": - ig = &LineIgnore{ - File: pos.Filename, - Line: pos.Line, - Checks: checks, - Pos: DisplayPosition(pkg.Fset, c.Pos()), - } - case "file-ignore": - ig = &FileIgnore{ - File: pos.Filename, - Checks: checks, - } - } - ignores = append(ignores, ig) - } - } - } - } - - return ignores, problems -} - -// packageHash computes a package's hash. The hash is based on all Go -// files that make up the package, as well as the hashes of imported -// packages. -func (r *Runner) packageHash(pkg *Package) (string, error) { - key := cache.NewHash("package hash") - fmt.Fprintf(key, "pkgpath %s\n", pkg.PkgPath) - fmt.Fprintf(key, "go %d\n", r.goVersion) - for _, f := range pkg.CompiledGoFiles { - h, err := cache.FileHash(f) - if err != nil { - return "", err - } - fmt.Fprintf(key, "file %s %x\n", f, h) - } - - // Actually load the configuration to calculate its hash. This - // will take into consideration inheritance of configuration - // files, as well as the default configuration. - // - // OPT(dh): doing this means we'll load the config twice: once for - // computing the hash, and once when analyzing the package from - // source. - cdir := config.Dir(pkg.GoFiles) - if cdir == "" { - fmt.Fprintf(key, "file %s %x\n", config.ConfigName, [cache.HashSize]byte{}) - } else { - cfg, err := config.Load(cdir) - if err != nil { - return "", err - } - h := cache.NewHash(config.ConfigName) - if _, err := h.Write([]byte(cfg.String())); err != nil { - return "", err - } - fmt.Fprintf(key, "file %s %x\n", config.ConfigName, h.Sum()) - } - - imps := make([]*Package, len(pkg.Imports)) - copy(imps, pkg.Imports) - sort.Slice(imps, func(i, j int) bool { - return imps[i].PkgPath < imps[j].PkgPath - }) - for _, dep := range imps { - if dep.PkgPath == "unsafe" { - continue - } - - fmt.Fprintf(key, "import %s %s\n", dep.PkgPath, dep.hash) - } - h := key.Sum() - return hex.EncodeToString(h[:]), nil -} - -func packageActionID(pkg *Package) cache.ActionID { - key := cache.NewHash("package ID") - fmt.Fprintf(key, "pkgpath %s\n", pkg.PkgPath) - fmt.Fprintf(key, "pkghash %s\n", pkg.hash) - return key.Sum() -} - -// passActionID computes an ActionID for an analysis pass. 
-func passActionID(pkg *Package, analyzer *analysis.Analyzer) cache.ActionID {
- return cache.Subkey(pkg.actionID, fmt.Sprintf("analyzer %s", analyzer.Name))
-}
diff --git a/lint/stats.go b/lint/stats.go
deleted file mode 100644
index 85eb97844..000000000
--- a/lint/stats.go
+++ /dev/null
@@ -1,38 +0,0 @@
-package lint
-
-import (
- "time"
-
- "golang.org/x/tools/go/analysis"
-)
-
-const (
- StateInitializing = 0
- StateGraph = 1
- StateProcessing = 2
- StateCumulative = 3
-)
-
-type Stats struct {
- State uint32
-
- InitialPackages uint32
- TotalPackages uint32
- ProcessedPackages uint32
- ProcessedInitialPackages uint32
- Problems uint32
- ActiveWorkers uint32
- TotalWorkers uint32
- PrintAnalyzerMeasurement func(*analysis.Analyzer, *Package, time.Duration)
-}
-
-type AnalysisMeasurementKey struct {
- Analysis string
- Pkg string
-}
-
-func (s *Stats) MeasureAnalyzer(analysis *analysis.Analyzer, pkg *Package, d time.Duration) {
- if s.PrintAnalyzerMeasurement != nil {
- s.PrintAnalyzerMeasurement(analysis, pkg, d)
- }
-}
diff --git a/loader/buildid.go b/loader/buildid.go
new file mode 100644
index 000000000..32c026b7d
--- /dev/null
+++ b/loader/buildid.go
@@ -0,0 +1,238 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package loader
+
+import (
+ "bytes"
+ "debug/elf"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "strconv"
+ "strings"
+)
+
+var errBuildIDMalformed = fmt.Errorf("malformed object file")
+
+var (
+ bangArch = []byte("!<arch>")
+ pkgdef = []byte("__.PKGDEF")
+ goobject = []byte("go object ")
+ buildid = []byte("build id ")
+)
+
+// ReadFile reads the build ID from an archive or executable file.
+func ReadFile(name string) (id string, err error) {
+ f, err := os.Open(name)
+ if err != nil {
+ return "", err
+ }
+ defer f.Close()
+
+ buf := make([]byte, 8)
+ if _, err := f.ReadAt(buf, 0); err != nil {
+ return "", err
+ }
+ if string(buf) != "!<arch>\n" {
+ if string(buf) == "<bigaf>\n" {
+ return "", errors.New("unsupported")
+ }
+ return readBinary(name, f)
+ }
+
+ // Read just enough of the target to fetch the build ID.
+ // The archive is expected to look like:
+ //
+ // !<arch>
+ // __.PKGDEF 0 0 0 644 7955 `
+ // go object darwin amd64 devel X:none
+ // build id "b41e5c45250e25c9fd5e9f9a1de7857ea0d41224"
+ //
+ // The variable-sized strings are GOOS, GOARCH, and the experiment list (X:none).
+ // Reading the first 1024 bytes should be plenty.
+ data := make([]byte, 1024)
+ n, err := io.ReadFull(f, data)
+ if err != nil && n == 0 {
+ return "", err
+ }
+
+ tryGccgo := func() (string, error) {
+ return readGccgoArchive(name, f)
+ }
+
+ // Archive header.
+ for i := 0; ; i++ { // returns during i==3
+ j := bytes.IndexByte(data, '\n')
+ if j < 0 {
+ return tryGccgo()
+ }
+ line := data[:j]
+ data = data[j+1:]
+ switch i {
+ case 0:
+ if !bytes.Equal(line, bangArch) {
+ return tryGccgo()
+ }
+ case 1:
+ if !bytes.HasPrefix(line, pkgdef) {
+ return tryGccgo()
+ }
+ case 2:
+ if !bytes.HasPrefix(line, goobject) {
+ return tryGccgo()
+ }
+ case 3:
+ if !bytes.HasPrefix(line, buildid) {
+ // Found the object header, just doesn't have a build id line.
+ // Treat as successful, with empty build id.
+ return "", nil
+ }
+ id, err := strconv.Unquote(string(line[len(buildid):]))
+ if err != nil {
+ return tryGccgo()
+ }
+ return id, nil
+ }
+ }
+}
+
+// readGccgoArchive tries to parse the archive as a standard Unix
+// archive file, and fetch the build ID from the _buildid.o entry. 
+// The _buildid.o entry is written by (*Builder).gccgoBuildIDELFFile +// in cmd/go/internal/work/exec.go. +func readGccgoArchive(name string, f *os.File) (string, error) { + bad := func() (string, error) { + return "", &os.PathError{Op: "parse", Path: name, Err: errBuildIDMalformed} + } + + off := int64(8) + for { + if _, err := f.Seek(off, io.SeekStart); err != nil { + return "", err + } + + // TODO(iant): Make a debug/ar package, and use it + // here and in cmd/link. + var hdr [60]byte + if _, err := io.ReadFull(f, hdr[:]); err != nil { + if err == io.EOF { + // No more entries, no build ID. + return "", nil + } + return "", err + } + off += 60 + + sizeStr := strings.TrimSpace(string(hdr[48:58])) + size, err := strconv.ParseInt(sizeStr, 0, 64) + if err != nil { + return bad() + } + + name := strings.TrimSpace(string(hdr[:16])) + if name == "_buildid.o/" { + sr := io.NewSectionReader(f, off, size) + e, err := elf.NewFile(sr) + if err != nil { + return bad() + } + s := e.Section(".go.buildid") + if s == nil { + return bad() + } + data, err := s.Data() + if err != nil { + return bad() + } + return string(data), nil + } + + off += size + if off&1 != 0 { + off++ + } + } +} + +var ( + goBuildPrefix = []byte("\xff Go build ID: \"") + goBuildEnd = []byte("\"\n \xff") + + elfPrefix = []byte("\x7fELF") + + machoPrefixes = [][]byte{ + {0xfe, 0xed, 0xfa, 0xce}, + {0xfe, 0xed, 0xfa, 0xcf}, + {0xce, 0xfa, 0xed, 0xfe}, + {0xcf, 0xfa, 0xed, 0xfe}, + } +) + +var readSize = 32 * 1024 // changed for testing + +// readBinary reads the build ID from a binary. +// +// ELF binaries store the build ID in a proper PT_NOTE section. +// +// Other binary formats are not so flexible. For those, the linker +// stores the build ID as non-instruction bytes at the very beginning +// of the text segment, which should appear near the beginning +// of the file. This is clumsy but fairly portable. Custom locations +// can be added for other binary types as needed, like we did for ELF. +func readBinary(name string, f *os.File) (id string, err error) { + // Read the first 32 kB of the binary file. + // That should be enough to find the build ID. + // In ELF files, the build ID is in the leading headers, + // which are typically less than 4 kB, not to mention 32 kB. + // In Mach-O files, there's no limit, so we have to parse the file. + // On other systems, we're trying to read enough that + // we get the beginning of the text segment in the read. + // The offset where the text segment begins in a hello + // world compiled for each different object format today: + // + // Plan 9: 0x20 + // Windows: 0x600 + // + data := make([]byte, readSize) + _, err = io.ReadFull(f, data) + if err == io.ErrUnexpectedEOF { + err = nil + } + if err != nil { + return "", err + } + + if bytes.HasPrefix(data, elfPrefix) { + return readELF(name, f, data) + } + for _, m := range machoPrefixes { + if bytes.HasPrefix(data, m) { + return readMacho(name, f, data) + } + } + return readRaw(name, data) +} + +// readRaw finds the raw build ID stored in text segment data. +func readRaw(name string, data []byte) (id string, err error) { + i := bytes.Index(data, goBuildPrefix) + if i < 0 { + // Missing. Treat as successful but build ID empty. 
+ return "", nil
+ }
+
+ j := bytes.Index(data[i+len(goBuildPrefix):], goBuildEnd)
+ if j < 0 {
+ return "", &os.PathError{Op: "parse", Path: name, Err: errBuildIDMalformed}
+ }
+
+ quoted := data[i+len(goBuildPrefix)-1 : i+len(goBuildPrefix)+j+1]
+ id, err = strconv.Unquote(string(quoted))
+ if err != nil {
+ return "", &os.PathError{Op: "parse", Path: name, Err: errBuildIDMalformed}
+ }
+ return id, nil
+}
diff --git a/loader/hash.go b/loader/hash.go
new file mode 100644
index 000000000..30ef5a96b
--- /dev/null
+++ b/loader/hash.go
@@ -0,0 +1,70 @@
+package loader
+
+import (
+ "fmt"
+ "runtime"
+ "sort"
+
+ "honnef.co/go/tools/internal/cache"
+)
+
+// OPT(dh): our current hashing algorithm means that _any_ change to a
+// dependency will change a dependent's hash, even if the change
+// didn't affect the exported API. Should we include dependencies via
+// their export data hash instead?
+
+// computeHash computes a package's hash. The hash is based on all Go
+// files that make up the package, as well as the hashes of imported
+// packages.
+func computeHash(pkg *PackageSpec) (cache.ActionID, error) {
+ key := cache.NewHash("package " + pkg.PkgPath)
+ fmt.Fprintf(key, "goos %s goarch %s", runtime.GOOS, runtime.GOARCH)
+ fmt.Fprintf(key, "import %q\n", pkg.PkgPath)
+ for _, f := range pkg.CompiledGoFiles {
+ h, err := cache.FileHash(f)
+ if err != nil {
+ return cache.ActionID{}, err
+ }
+ fmt.Fprintf(key, "file %s %x\n", f, h)
+ }
+
+ imps := make([]*PackageSpec, 0, len(pkg.Imports))
+ for _, v := range pkg.Imports {
+ imps = append(imps, v)
+ }
+ sort.Slice(imps, func(i, j int) bool {
+ return imps[i].PkgPath < imps[j].PkgPath
+ })
+
+ for _, dep := range imps {
+ if dep.ExportFile == "" {
+ fmt.Fprintf(key, "import %s \n", dep.PkgPath)
+ } else {
+ id, err := getBuildid(dep.ExportFile)
+ if err == nil {
+ fmt.Fprintf(key, "import %s %s\n", dep.PkgPath, id)
+ } else {
+ fh, err := cache.FileHash(dep.ExportFile)
+ if err != nil {
+ return cache.ActionID{}, err
+ }
+ fmt.Fprintf(key, "import %s %x\n", dep.PkgPath, fh)
+ }
+ }
+ }
+ return key.Sum(), nil
+}
+
+var buildidCache = map[string]string{}
+
+func getBuildid(f string) (string, error) {
+ if h, ok := buildidCache[f]; ok {
+ return h, nil
+ }
+ h, err := ReadFile(f)
+ if err != nil {
+ return "", err
+ }
+ buildidCache[f] = h
+ return h, nil
+}
diff --git a/loader/loader.go b/loader/loader.go
index a14f274d2..19b9995cd 100644
--- a/loader/loader.go
+++ b/loader/loader.go
@@ -8,30 +8,107 @@ import (
 "go/scanner"
 "go/token"
 "go/types"
- "log"
 "os"
+ "time"
+
+ "honnef.co/go/tools/config"
+ "honnef.co/go/tools/internal/cache"
+ "honnef.co/go/tools/internal/go/gcexportdata"
- "golang.org/x/tools/go/gcexportdata"
 "golang.org/x/tools/go/packages"
)
+type PackageSpec struct {
+ ID string
+ Name string
+ PkgPath string
+ // Errors that occurred while building the import graph. These will
+ // primarily be parse errors or failure to resolve imports, but
+ // may also be other errors.
+ Errors []packages.Error
+ GoFiles []string
+ CompiledGoFiles []string
+ OtherFiles []string
+ ExportFile string
+ Imports map[string]*PackageSpec
+ TypesSizes types.Sizes
+ Hash cache.ActionID
+
+ Config config.Config
+}
+
+func (spec *PackageSpec) String() string {
+ return spec.ID
+}
+
+type Package struct {
+ *PackageSpec
+
+ // Errors that occurred while loading the package. These will
+ // primarily be parse or type errors, but may also be lower-level
+ // failures such as file-system ones. 
+ Errors []packages.Error + Types *types.Package + Fset *token.FileSet + Syntax []*ast.File + TypesInfo *types.Info +} + // Graph resolves patterns and returns packages with all the // information required to later load type information, and optionally // syntax trees. // // The provided config can set any setting with the exception of Mode. -func Graph(cfg packages.Config, patterns ...string) ([]*packages.Package, error) { - cfg.Mode = packages.NeedName | packages.NeedImports | packages.NeedDeps | packages.NeedExportsFile | packages.NeedFiles | packages.NeedCompiledGoFiles | packages.NeedTypesSizes - pkgs, err := packages.Load(&cfg, patterns...) +func Graph(cfg *packages.Config, patterns ...string) ([]*PackageSpec, error) { + var dcfg packages.Config + if cfg != nil { + dcfg = *cfg + } + dcfg.Mode = packages.NeedName | + packages.NeedImports | + packages.NeedDeps | + packages.NeedExportsFile | + packages.NeedFiles | + packages.NeedCompiledGoFiles | + packages.NeedTypesSizes + pkgs, err := packages.Load(&dcfg, patterns...) if err != nil { return nil, err } - fset := token.NewFileSet() + + m := map[*packages.Package]*PackageSpec{} packages.Visit(pkgs, nil, func(pkg *packages.Package) { - pkg.Fset = fset + spec := &PackageSpec{ + ID: pkg.ID, + Name: pkg.Name, + PkgPath: pkg.PkgPath, + Errors: pkg.Errors, + GoFiles: pkg.GoFiles, + CompiledGoFiles: pkg.CompiledGoFiles, + OtherFiles: pkg.OtherFiles, + ExportFile: pkg.ExportFile, + Imports: map[string]*PackageSpec{}, + TypesSizes: pkg.TypesSizes, + } + for path, imp := range pkg.Imports { + spec.Imports[path] = m[imp] + } + if cdir := config.Dir(pkg.GoFiles); cdir != "" { + cfg, err := config.Load(cdir) + if err != nil { + spec.Errors = append(spec.Errors, convertError(err)...) + } + spec.Config = cfg + } else { + spec.Config = config.DefaultConfig + } + spec.Hash, err = computeHash(spec) + if err != nil { + spec.Errors = append(spec.Errors, convertError(err)...) + } + m[pkg] = spec }) - - n := 0 + out := make([]*PackageSpec, 0, len(pkgs)) for _, pkg := range pkgs { if len(pkg.CompiledGoFiles) == 0 && len(pkg.Errors) == 0 && pkg.PkgPath != "unsafe" { // If a package consists only of test files, then @@ -44,86 +121,125 @@ func Graph(cfg packages.Config, patterns ...string) ([]*packages.Package, error) // errors. continue } - pkgs[n] = pkg - n++ + out = append(out, m[pkg]) } - return pkgs[:n], nil + + return out, nil +} + +type program struct { + fset *token.FileSet + packages map[string]*types.Package } -// LoadFromExport loads a package from export data. All of its -// dependencies must have been loaded already. -func LoadFromExport(pkg *packages.Package) error { - pkg.IllTyped = true - for path, pkg := range pkg.Imports { - if pkg.Types == nil { - return fmt.Errorf("dependency %q hasn't been loaded yet", path) +type Stats struct { + Source time.Duration + Export map[*PackageSpec]time.Duration +} + +// Load loads the package described in spec. Imports will be loaded +// from export data, while the package itself will be loaded from +// source. +// +// An error will only be returned for system failures, such as failure +// to read export data from disk. Syntax and type errors, among +// others, will only populate the returned package's Errors field. 
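+//
+// A minimal sketch of the intended use (hypothetical driver; error
+// handling elided, and up-to-date export data for all dependencies
+// is assumed to exist):
+//
+//	specs, _ := Graph(nil, "./...")
+//	for _, spec := range specs {
+//		pkg, _, _ := Load(spec)
+//		_ = pkg.Errors // syntax and type errors accumulate here
+//	}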
+func Load(spec *PackageSpec) (*Package, Stats, error) { + prog := &program{ + fset: token.NewFileSet(), + packages: map[string]*types.Package{}, + } + + stats := Stats{ + Export: map[*PackageSpec]time.Duration{}, + } + for _, imp := range spec.Imports { + if imp.PkgPath == "unsafe" { + continue + } + t := time.Now() + _, err := prog.LoadFromExport(imp) + stats.Export[imp] = time.Since(t) + if err != nil { + return nil, stats, err } } - if pkg.ExportFile == "" { - return fmt.Errorf("no export data for %q", pkg.ID) + t := time.Now() + pkg := prog.LoadFromSource(spec) + stats.Source = time.Since(t) + return pkg, stats, nil +} + +// LoadFromExport loads a package from export data. +func (prog *program) LoadFromExport(spec *PackageSpec) (*Package, error) { + // log.Printf("Loading package %s from export", spec) + if spec.ExportFile == "" { + return nil, fmt.Errorf("no export data for %q", spec.ID) } - f, err := os.Open(pkg.ExportFile) + f, err := os.Open(spec.ExportFile) if err != nil { - return err + return nil, err } defer f.Close() r, err := gcexportdata.NewReader(f) if err != nil { - return err - } - - view := make(map[string]*types.Package) // view seen by gcexportdata - seen := make(map[*packages.Package]bool) // all visited packages - var visit func(pkgs map[string]*packages.Package) - visit = func(pkgs map[string]*packages.Package) { - for _, pkg := range pkgs { - if !seen[pkg] { - seen[pkg] = true - view[pkg.PkgPath] = pkg.Types - visit(pkg.Imports) - } - } + return nil, err } - visit(pkg.Imports) - tpkg, err := gcexportdata.Read(r, pkg.Fset, view, pkg.PkgPath) + tpkg, err := gcexportdata.Read(r, prog.fset, prog.packages, spec.PkgPath) if err != nil { - return err + return nil, err + } + pkg := &Package{ + PackageSpec: spec, + Types: tpkg, + Fset: prog.fset, } - pkg.Types = tpkg - pkg.IllTyped = false - return nil + // runtime.SetFinalizer(pkg, func(pkg *Package) { + // log.Println("Unloading package", pkg.PkgPath) + // }) + return pkg, nil } // LoadFromSource loads a package from source. All of its dependencies // must have been loaded already. -func LoadFromSource(pkg *packages.Package) error { - pkg.IllTyped = true - pkg.Types = types.NewPackage(pkg.PkgPath, pkg.Name) +func (prog *program) LoadFromSource(spec *PackageSpec) *Package { + if len(spec.Errors) > 0 { + panic("LoadFromSource called on package with errors") + } + + pkg := &Package{ + PackageSpec: spec, + Types: types.NewPackage(spec.PkgPath, spec.Name), + Syntax: make([]*ast.File, len(spec.CompiledGoFiles)), + Fset: prog.fset, + TypesInfo: &types.Info{ + Types: make(map[ast.Expr]types.TypeAndValue), + Defs: make(map[*ast.Ident]types.Object), + Uses: make(map[*ast.Ident]types.Object), + Implicits: make(map[ast.Node]types.Object), + Scopes: make(map[ast.Node]*types.Scope), + Selections: make(map[*ast.SelectorExpr]*types.Selection), + }, + } + // runtime.SetFinalizer(pkg, func(pkg *Package) { + // log.Println("Unloading package", pkg.PkgPath) + // }) // OPT(dh): many packages have few files, much fewer than there // are CPU cores. Additionally, parsing each individual file is // very fast. A naive parallel implementation of this loop won't // be faster, and tends to be slower due to extra scheduling, // bookkeeping and potentially false sharing of cache lines. 
- pkg.Syntax = make([]*ast.File, len(pkg.CompiledGoFiles)) - for i, file := range pkg.CompiledGoFiles { - f, err := parser.ParseFile(pkg.Fset, file, nil, parser.ParseComments) + for i, file := range spec.CompiledGoFiles { + // XXX some errors should be returned as actual errors, e.g. when a source file is missing. + f, err := parser.ParseFile(prog.fset, file, nil, parser.ParseComments) if err != nil { pkg.Errors = append(pkg.Errors, convertError(err)...) - return err + return pkg } pkg.Syntax[i] = f } - pkg.TypesInfo = &types.Info{ - Types: make(map[ast.Expr]types.TypeAndValue), - Defs: make(map[*ast.Ident]types.Object), - Uses: make(map[*ast.Ident]types.Object), - Implicits: make(map[ast.Node]types.Object), - Scopes: make(map[ast.Node]*types.Scope), - Selections: make(map[*ast.SelectorExpr]*types.Selection), - } - importer := func(path string) (*types.Package, error) { if path == "unsafe" { return types.Unsafe, nil @@ -134,14 +250,15 @@ func LoadFromSource(pkg *packages.Package) error { // we'll encounter the raw C import. return nil, errors.New("cgo preprocessing failed") } - imp := pkg.Imports[path] - if imp == nil { - return nil, nil + ispecpkg := spec.Imports[path] + if ispecpkg == nil { + return nil, fmt.Errorf("trying to import %q in the context of %q returned nil PackageSpec", path, spec) } - if len(imp.Errors) > 0 { - return nil, imp.Errors[0] + ipkg := prog.packages[ispecpkg.PkgPath] + if ipkg == nil { + return nil, fmt.Errorf("trying to import %q (%q) in the context of %q returned nil PackageSpec", ispecpkg.PkgPath, path, spec) } - return imp.Types, nil + return ipkg, nil } tc := &types.Config{ Importer: importerFunc(importer), @@ -149,12 +266,8 @@ func LoadFromSource(pkg *packages.Package) error { pkg.Errors = append(pkg.Errors, convertError(err)...) }, } - err := types.NewChecker(tc, pkg.Fset, pkg.Types, pkg.TypesInfo).Files(pkg.Syntax) - if err != nil { - return err - } - pkg.IllTyped = false - return nil + types.NewChecker(tc, pkg.Fset, pkg.Types, pkg.TypesInfo).Files(pkg.Syntax) + return pkg } func convertError(err error) []packages.Error { @@ -192,15 +305,11 @@ func convertError(err error) []packages.Error { }) default: - // unexpected impoverished error from parser? errs = append(errs, packages.Error{ Pos: "-", Msg: err.Error(), Kind: packages.UnknownError, }) - - // If you see this error message, please file a bug. - log.Printf("internal error: error %q (%T) without position", err, err) } return errs } diff --git a/loader/note.go b/loader/note.go new file mode 100644 index 000000000..a56ee7005 --- /dev/null +++ b/loader/note.go @@ -0,0 +1,207 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package loader + +import ( + "bytes" + "debug/elf" + "debug/macho" + "encoding/binary" + "fmt" + "io" + "os" +) + +func readAligned4(r io.Reader, sz int32) ([]byte, error) { + full := (sz + 3) &^ 3 + data := make([]byte, full) + _, err := io.ReadFull(r, data) + if err != nil { + return nil, err + } + data = data[:sz] + return data, nil +} + +func ReadELFNote(filename, name string, typ int32) ([]byte, error) { + f, err := elf.Open(filename) + if err != nil { + return nil, err + } + defer f.Close() + for _, sect := range f.Sections { + if sect.Type != elf.SHT_NOTE { + continue + } + r := sect.Open() + for { + var namesize, descsize, noteType int32 + err = binary.Read(r, f.ByteOrder, &namesize) + if err != nil { + if err == io.EOF { + break + } + return nil, fmt.Errorf("read namesize failed: %v", err) + } + err = binary.Read(r, f.ByteOrder, &descsize) + if err != nil { + return nil, fmt.Errorf("read descsize failed: %v", err) + } + err = binary.Read(r, f.ByteOrder, ¬eType) + if err != nil { + return nil, fmt.Errorf("read type failed: %v", err) + } + noteName, err := readAligned4(r, namesize) + if err != nil { + return nil, fmt.Errorf("read name failed: %v", err) + } + desc, err := readAligned4(r, descsize) + if err != nil { + return nil, fmt.Errorf("read desc failed: %v", err) + } + if name == string(noteName) && typ == noteType { + return desc, nil + } + } + } + return nil, nil +} + +var elfGoNote = []byte("Go\x00\x00") +var elfGNUNote = []byte("GNU\x00") + +// The Go build ID is stored in a note described by an ELF PT_NOTE prog +// header. The caller has already opened filename, to get f, and read +// at least 4 kB out, in data. +func readELF(name string, f *os.File, data []byte) (buildid string, err error) { + // Assume the note content is in the data, already read. + // Rewrite the ELF header to set shnum to 0, so that we can pass + // the data to elf.NewFile and it will decode the Prog list but not + // try to read the section headers and the string table from disk. + // That's a waste of I/O when all we care about is the Prog list + // and the one ELF note. + switch elf.Class(data[elf.EI_CLASS]) { + case elf.ELFCLASS32: + data[48] = 0 + data[49] = 0 + case elf.ELFCLASS64: + data[60] = 0 + data[61] = 0 + } + + const elfGoBuildIDTag = 4 + const gnuBuildIDTag = 3 + + ef, err := elf.NewFile(bytes.NewReader(data)) + if err != nil { + return "", &os.PathError{Path: name, Op: "parse", Err: err} + } + var gnu string + for _, p := range ef.Progs { + if p.Type != elf.PT_NOTE || p.Filesz < 16 { + continue + } + + var note []byte + if p.Off+p.Filesz < uint64(len(data)) { + note = data[p.Off : p.Off+p.Filesz] + } else { + // For some linkers, such as the Solaris linker, + // the buildid may not be found in data (which + // likely contains the first 16kB of the file) + // or even the first few megabytes of the file + // due to differences in note segment placement; + // in that case, extract the note data manually. 
+ _, err = f.Seek(int64(p.Off), io.SeekStart) + if err != nil { + return "", err + } + + note = make([]byte, p.Filesz) + _, err = io.ReadFull(f, note) + if err != nil { + return "", err + } + } + + filesz := p.Filesz + off := p.Off + for filesz >= 16 { + nameSize := ef.ByteOrder.Uint32(note) + valSize := ef.ByteOrder.Uint32(note[4:]) + tag := ef.ByteOrder.Uint32(note[8:]) + nname := note[12:16] + if nameSize == 4 && 16+valSize <= uint32(len(note)) && tag == elfGoBuildIDTag && bytes.Equal(nname, elfGoNote) { + return string(note[16 : 16+valSize]), nil + } + + if nameSize == 4 && 16+valSize <= uint32(len(note)) && tag == gnuBuildIDTag && bytes.Equal(nname, elfGNUNote) { + gnu = string(note[16 : 16+valSize]) + } + + nameSize = (nameSize + 3) &^ 3 + valSize = (valSize + 3) &^ 3 + notesz := uint64(12 + nameSize + valSize) + if filesz <= notesz { + break + } + off += notesz + align := p.Align + alignedOff := (off + align - 1) &^ (align - 1) + notesz += alignedOff - off + off = alignedOff + filesz -= notesz + note = note[notesz:] + } + } + + // If we didn't find a Go note, use a GNU note if available. + // This is what gccgo uses. + if gnu != "" { + return gnu, nil + } + + // No note. Treat as successful but build ID empty. + return "", nil +} + +// The Go build ID is stored at the beginning of the Mach-O __text segment. +// The caller has already opened filename, to get f, and read a few kB out, in data. +// Sadly, that's not guaranteed to hold the note, because there is an arbitrary amount +// of other junk placed in the file ahead of the main text. +func readMacho(name string, f *os.File, data []byte) (buildid string, err error) { + // If the data we want has already been read, don't worry about Mach-O parsing. + // This is both an optimization and a hedge against the Mach-O parsing failing + // in the future due to, for example, the name of the __text section changing. + if b, err := readRaw(name, data); b != "" && err == nil { + return b, err + } + + mf, err := macho.NewFile(f) + if err != nil { + return "", &os.PathError{Path: name, Op: "parse", Err: err} + } + + sect := mf.Section("__text") + if sect == nil { + // Every binary has a __text section. Something is wrong. + return "", &os.PathError{Path: name, Op: "parse", Err: fmt.Errorf("cannot find __text section")} + } + + // It should be in the first few bytes, but read a lot just in case, + // especially given our past problems on OS X with the build ID moving. + // There shouldn't be much difference between reading 4kB and 32kB: + // the hard part is getting to the data, not transferring it. 
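+ // Clamp the read to readSize (32 kB), the same limit readBinary uses.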
+ n := sect.Size + if n > uint64(readSize) { + n = uint64(readSize) + } + buf := make([]byte, n) + if _, err := f.ReadAt(buf, int64(sect.Offset)); err != nil { + return "", err + } + + return readRaw(name, buf) +} diff --git a/pattern/match.go b/pattern/match.go index ff039baa7..0c42178f7 100644 --- a/pattern/match.go +++ b/pattern/match.go @@ -7,7 +7,7 @@ import ( "go/types" "reflect" - "honnef.co/go/tools/lint" + "honnef.co/go/tools/code" ) var tokensByString = map[string]Token{ @@ -452,7 +452,7 @@ func (fn Function) Match(m *Matcher, node interface{}) (interface{}, bool) { obj = m.TypesInfo.ObjectOf(node) switch obj := obj.(type) { case *types.Func: - name = lint.FuncName(obj) + name = code.FuncName(obj) case *types.Builtin: name = obj.Name() default: @@ -464,7 +464,7 @@ func (fn Function) Match(m *Matcher, node interface{}) (interface{}, bool) { if !ok { return nil, false } - name = lint.FuncName(obj.(*types.Func)) + name = code.FuncName(obj.(*types.Func)) default: return nil, false } diff --git a/report/report.go b/report/report.go index 9b8b6ee74..5b5343617 100644 --- a/report/report.go +++ b/report/report.go @@ -5,12 +5,12 @@ import ( "go/ast" "go/printer" "go/token" + "path/filepath" "strings" "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/ast/astutil" "honnef.co/go/tools/facts" - "honnef.co/go/tools/lint" ) type Options struct { @@ -148,7 +148,7 @@ func Report(pass *analysis.Pass, node Positioner, message string, opts ...Option opt(cfg) } - file := lint.DisplayPosition(pass.Fset, node.Pos()).Filename + file := DisplayPosition(pass.Fset, node.Pos()).Filename if cfg.FilterGenerated { m := pass.ResultOf[facts.Generated].(map[string]facts.Generator) if _, ok := m[file]; ok { @@ -182,3 +182,21 @@ func RenderArgs(pass *analysis.Pass, args []ast.Expr) string { } return strings.Join(ss, ", ") } + +func DisplayPosition(fset *token.FileSet, p token.Pos) token.Position { + if p == token.NoPos { + return token.Position{} + } + + // Only use the adjusted position if it points to another Go file. + // This means we'll point to the original file for cgo files, but + // we won't point to a YACC grammar file. + pos := fset.PositionFor(p, false) + adjPos := fset.PositionFor(p, true) + + if filepath.Ext(adjPos.Filename) == ".go" { + return adjPos + } + + return pos +} diff --git a/runner/runner.go b/runner/runner.go new file mode 100644 index 000000000..fde6713a2 --- /dev/null +++ b/runner/runner.go @@ -0,0 +1,1175 @@ +// Package runner implements a go/analysis runner. It makes heavy use +// of on-disk caching to reduce overall memory usage and to speed up +// repeat runs. +// +// Public API +// +// A Runner maps a list of analyzers and package patterns to a list of +// results. Results provide access to diagnostics, directives, errors +// encountered, and information about packages. Results explicitly do +// not contain ASTs or type information. All position information is +// returned in the form of token.Position, not token.Pos. All work +// that requires access to the loaded representation of a package has +// to occur inside analyzers. +// +// Planning and execution +// +// Analyzing packages is split into two phases: planning and +// execution. +// +// During planning, a directed acyclic graph of package dependencies +// is computed. We materialize the full graph so that we can execute +// the graph from the bottom up, without keeping unnecessary data in +// memory during a DFS and with simplified parallel execution. 
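+//
+// The dependent-notification step at the heart of execution can be
+// sketched as follows (a simplified sketch; the real implementation
+// uses the action types defined below, and process is a stand-in for
+// the actual scheduling):
+//
+//	for _, t := range act.Triggers() {
+//		if t.DecrementPending() {
+//			// act was t's last outstanding dependency;
+//			// t is now a leaf and can be scheduled.
+//			go process(t)
+//		}
+//	}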
+// +// During execution, leaf nodes (nodes with no outstanding +// dependencies) get executed in parallel, bounded by a semaphore +// sized according to the number of CPUs. Conceptually, this happens +// in a loop, processing new leaf nodes as they appear, until no more +// nodes are left. In the actual implementation, nodes know their +// dependents, and the last dependency of a node to be processed is +// responsible for scheduling its dependent. +// +// The graph is rooted at a synthetic root node. Upon execution of the +// root node, the algorithm terminates. +// +// Analyzing a package repeats the same planning + execution steps, +// but this time on a graph of analyzers for the package. Parallel +// execution of individual analyzers is bounded by the same semaphore +// as executing packages. +// +// Parallelism +// +// Actions are executed in parallel where the dependency graph allows. +// Overall parallelism is bounded by a semaphore, sized according to +// runtime.NumCPU(). Each concurrently processed package takes up a +// token, as does each analyzer – but a package can always execute at +// least one analyzer, using the package's token. +// +// Depending on the overall shape of the graph, there may be NumCPU +// packages running a single analyzer each, a single package running +// NumCPU analyzers, or anything in between. +// +// Total memory consumption grows roughly linearly with the number of +// CPUs, while total execution time is inversely proportional to the +// number of CPUs. Overall, parallelism is affected by the shape of +// the dependency graph. A lot of inter-connected packages will see +// less parallelism than a lot of independent packages. +// +// Caching +// +// The runner caches facts, directives and diagnostics in a +// content-addressable cache that is designed after Go's own cache. +// Additionally, it makes use of Go's export data. +// +// This cache not only speeds up repeat runs, it also reduces peak +// memory usage. When we've analyzed a package, we cache the results +// and drop them from memory. When a dependent needs any of this +// information, or when analysis is complete and we wish to render the +// results, the data gets loaded from disk again. +// +// Data only exists in memory when it is immediately needed, not +// retained for possible future uses. This trades increased CPU usage +// for reduced memory usage. A single dependency may be loaded many +// times over, but it greatly reduces peak memory usage, as an +// arbitrary amount of time may pass between analyzing a dependency +// and its dependent, during which other packages will be processed. +package runner + +// OPT(dh): we could reduce disk storage usage of cached data by +// compressing it, either directly at the cache layer, or by feeding +// compressed data to the cache. Of course doing so may negatively +// affect CPU usage, and there are lower hanging fruit, such as +// needing to cache less data in the first place. + +// OPT(dh): right now, each package is analyzed completely +// independently. Each package loads all of its dependencies from +// export data and cached facts. If we have two packages A and B, +// which both depend on C, and which both get analyzed in parallel, +// then C will be loaded twice. This wastes CPU time and memory. It +// would be nice if we could reuse a single C for the analysis of both +// A and B. +// +// We can't reuse the actual types.Package or facts, because each +// package gets its own token.FileSet. 
Sharing a global FileSet has +// several drawbacks, including increased memory usage and running the +// risk of running out of FileSet address space. +// +// We could however avoid loading the same raw export data from disk +// twice, as well as deserializing gob data twice. One possible +// solution would be a duplicate-suppressing in-memory cache that +// caches data for a limited amount of time. When the same package +// needs to be loaded twice in close succession, we can reuse work, +// without holding unnecessary data in memory for an extended period +// of time. +// +// We would likely need to do extensive benchmarking to figure out how +// long to keep data around to find a sweetspot where we reduce CPU +// load without increasing memory usage. +// +// We can probably populate the cache after we've analyzed a package, +// on the assumption that it will have to be loaded again in the near +// future. + +// XXX are we accidentally returning the root node in Run? + +// XXX restore live stats functionality + +import ( + "bytes" + "encoding/gob" + "fmt" + "go/token" + "go/types" + "io" + "os" + "reflect" + "runtime" + "sort" + "strings" + "sync/atomic" + "time" + + "honnef.co/go/tools/config" + "honnef.co/go/tools/facts" + "honnef.co/go/tools/internal/cache" + tsync "honnef.co/go/tools/internal/sync" + "honnef.co/go/tools/loader" + "honnef.co/go/tools/report" + "honnef.co/go/tools/unused" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/packages" + "golang.org/x/tools/go/types/objectpath" +) + +type Diagnostic struct { + Position token.Position + End token.Position + Category string + Message string + + SuggestedFixed []SuggestedFix + Related []RelatedInformation +} + +// RelatedInformation provides additional context for a diagnostic. +type RelatedInformation struct { + Position token.Position + End token.Position + Message string +} + +type SuggestedFix struct { + Message string + TextEdits []TextEdit +} + +type TextEdit struct { + Position token.Position + End token.Position + NewText []byte +} + +// A Result describes the result of analyzing a single package. +// +// It holds references to cached diagnostics and directives. They can +// be loaded on demand with Diagnostics and Directives respectively. +type Result struct { + Package *loader.PackageSpec + Config config.Config + + Failed bool + Errors []error + // Action results, paths to files + diagnostics string + directives string + unused string +} + +// Diagnostics loads and returns the diagnostics found while analyzing +// the package. +func (r Result) Diagnostics() ([]Diagnostic, error) { + if r.Failed { + panic("Diagnostics called on failed Result") + } + if r.diagnostics == "" { + // this package was only a dependency + return nil, nil + } + var diags []Diagnostic + f, err := os.Open(r.diagnostics) + if err != nil { + return nil, fmt.Errorf("failed loading diagnostics: %w", err) + } + defer f.Close() + dec := gob.NewDecoder(f) + for { + var diag Diagnostic + err := dec.Decode(&diag) + if err != nil { + if err == io.EOF { + break + } + return nil, fmt.Errorf("failed loading diagnostics: %w", err) + } + diags = append(diags, diag) + } + return diags, nil +} + +// Directives loads and returns the directives found while analyzing +// the package. 
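+//
+// For example (hypothetical; res is the Result of a completed run,
+// and error handling is elided):
+//
+//	dirs, _ := res.Directives()
+//	for _, dir := range dirs {
+//		fmt.Println(dir.Command, dir.Arguments)
+//	}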
+func (r Result) Directives() ([]facts.SerializedDirective, error) { + if r.Failed { + panic("Directives called on failed Result") + } + if r.directives == "" { + // this package was only a dependency + return nil, nil + } + var dirs []facts.SerializedDirective + f, err := os.Open(r.directives) + if err != nil { + return nil, fmt.Errorf("failed loading directives: %w", err) + } + defer f.Close() + if err := gob.NewDecoder(f).Decode(&dirs); err != nil { + return nil, fmt.Errorf("failed loading directives: %w", err) + } + return dirs, nil +} + +func (r Result) Unused() (unused.SerializedResult, error) { + if r.Failed { + panic("Unused called on failed Result") + } + if r.unused == "" { + // this package was only a dependency + return unused.SerializedResult{}, nil + } + var res unused.SerializedResult + f, err := os.Open(r.unused) + if err != nil { + return unused.SerializedResult{}, fmt.Errorf("failed loading unused: %w", err) + } + defer f.Close() + if err := gob.NewDecoder(f).Decode(&res); err != nil { + return unused.SerializedResult{}, fmt.Errorf("failed loading unused: %w", err) + } + return res, nil +} + +type action interface { + Deps() []action + Triggers() []action + DecrementPending() bool + MarkFailed() + IsFailed() bool + AddError(error) +} + +type baseAction struct { + // Action description + + deps []action + triggers []action + pending uint32 + + // Action results + + // failed is set to true if the action couldn't be processed. This + // may either be due to an error specific to this action, in + // which case the errors field will be populated, or due to a + // dependency being marked as failed, in which case errors will be + // empty. + failed bool + errors []error +} + +func (act *baseAction) Deps() []action { return act.deps } +func (act *baseAction) Triggers() []action { return act.triggers } +func (act *baseAction) DecrementPending() bool { + return atomic.AddUint32(&act.pending, ^uint32(0)) == 0 +} +func (act *baseAction) MarkFailed() { act.failed = true } +func (act *baseAction) IsFailed() bool { return act.failed } +func (act *baseAction) AddError(err error) { act.errors = append(act.errors, err) } + +// packageAction describes the act of loading a package, fully +// analyzing it, and storing the results. +type packageAction struct { + baseAction + + // Action description + + Package *loader.PackageSpec + factsOnly bool + hash cache.ActionID + + // Action results + + cfg config.Config + vetx string + directives string + diagnostics string + unused string +} + +func (act *packageAction) String() string { + return fmt.Sprintf("packageAction(%s)", act.Package) +} + +type objectFactKey struct { + Obj types.Object + Type reflect.Type +} + +type packageFactKey struct { + Pkg *types.Package + Type reflect.Type +} + +type gobFact struct { + PkgPath string + ObjPath string + Fact analysis.Fact +} + +// analyzerAction describes the act of analyzing a package with a +// single analyzer. +type analyzerAction struct { + baseAction + + // Action description + + Analyzer *analysis.Analyzer + + // Action results + + // We can store actual results here without worrying about memory + // consumption because analyzer actions get garbage collected once + // a package has been fully analyzed. 
+ Result interface{} + Diagnostics []analysis.Diagnostic + ObjectFacts map[objectFactKey]analysis.Fact + PackageFacts map[packageFactKey]analysis.Fact + Pass *analysis.Pass +} + +func (act *analyzerAction) String() string { + return fmt.Sprintf("analyzerAction(%s)", act.Analyzer) +} + +// A Runner executes analyzers on packages. +type Runner struct { + Stats Stats + + // Config that gets merged with per-package configs + cfg config.Config + cache *cache.Cache + semaphore tsync.Semaphore +} + +type subrunner struct { + *Runner + analyzers []*analysis.Analyzer + analyzerNames string +} + +// New returns a new Runner. +func New(cfg config.Config) (*Runner, error) { + cache, err := cache.Default() + if err != nil { + return nil, err + } + + return &Runner{ + cfg: cfg, + cache: cache, + semaphore: tsync.NewSemaphore(runtime.NumCPU()), + }, nil +} + +func newSubrunner(r *Runner, analyzers []*analysis.Analyzer) *subrunner { + analyzerNames := make([]string, len(analyzers)) + for i, a := range analyzers { + analyzerNames[i] = a.Name + } + sort.Strings(analyzerNames) + return &subrunner{ + Runner: r, + analyzers: analyzers, + analyzerNames: strings.Join(analyzerNames, ","), + } +} + +func newPackageActionRoot(pkg *loader.PackageSpec, cache map[*loader.PackageSpec]*packageAction) *packageAction { + a := newPackageAction(pkg, cache) + a.factsOnly = false + return a +} + +func newPackageAction(pkg *loader.PackageSpec, cache map[*loader.PackageSpec]*packageAction) *packageAction { + if a, ok := cache[pkg]; ok { + return a + } + + a := &packageAction{ + Package: pkg, + factsOnly: true, // will be overwritten by any call to Action + } + cache[pkg] = a + + if len(pkg.Errors) > 0 { + for _, err := range pkg.Errors { + a.errors = append(a.errors, err) + } + a.failed = true + + // We don't need to process our imports if this package is + // already broken. + return a + } + + for _, dep := range pkg.Imports { + if dep.PkgPath == "unsafe" { + continue + } + depa := newPackageAction(dep, cache) + depa.triggers = append(depa.triggers, a) + a.deps = append(a.deps, depa) + + if depa.failed { + a.failed = true + } + } + // sort dependencies because the list of dependencies is part of + // the cache key + sort.Slice(a.deps, func(i, j int) bool { + return a.deps[i].(*packageAction).Package.ID < a.deps[j].(*packageAction).Package.ID + }) + + a.pending = uint32(len(a.deps)) + + return a +} + +func newAnalyzerAction(an *analysis.Analyzer, cache map[*analysis.Analyzer]*analyzerAction) *analyzerAction { + if a, ok := cache[an]; ok { + return a + } + + a := &analyzerAction{ + Analyzer: an, + ObjectFacts: map[objectFactKey]analysis.Fact{}, + PackageFacts: map[packageFactKey]analysis.Fact{}, + } + cache[an] = a + for _, dep := range an.Requires { + depa := newAnalyzerAction(dep, cache) + depa.triggers = append(depa.triggers, a) + a.deps = append(a.deps, depa) + } + a.pending = uint32(len(a.deps)) + return a +} + +func (r *subrunner) do(act action) error { + a := act.(*packageAction) + defer func() { + r.Stats.finishPackage() + if !a.factsOnly { + r.Stats.finishInitialPackage() + } + }() + + // compute hash of action + a.cfg = a.Package.Config.Merge(r.cfg) + h := cache.NewHash("staticcheck " + a.Package.PkgPath) + + // Note that we do not filter the list of analyzers by the + // package's configuration. We don't allow configuration to + // accidentally break dependencies between analyzers, and it's + // easier to always run all checks and filter the output. This + // also makes cached data more reusable. 
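// NOTE: the Requires graph is why filtering has to happen on output
// rather than here: a check that is disabled in the configuration may
// still be a prerequisite of an enabled one. A sketch with
// hypothetical analyzers:
//
//	var produceFacts = &analysis.Analyzer{Name: "SA9001", Doc: "...", Run: runA}
//	var useFacts = &analysis.Analyzer{
//		Name:     "SA9002",
//		Doc:      "...",
//		Run:      runB,
//		Requires: []*analysis.Analyzer{produceFacts},
//	}
//
// Even with SA9001 disabled, it must still execute so that SA9002 can
// read its result; only SA9001's own diagnostics get filtered away.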
+
+	// OPT(dh): not all changes in configuration invalidate cached
+	// data. Specifically, when a.factsOnly == true, we only care
+	// about checks that produce facts, and settings that affect
+	// those checks.
+
+	// Config used for constructing the hash; this config doesn't
+	// have Checks populated, because we always run all checks.
+	hashCfg := a.cfg
+	hashCfg.Checks = nil
+	// note that we don't hash staticcheck's version; the main
+	// package sets it as the cache salt.
+	fmt.Fprintf(h, "cfg %#v\n", hashCfg)
+	fmt.Fprintf(h, "pkg %x\n", a.Package.Hash)
+	fmt.Fprintf(h, "analyzers %s\n", r.analyzerNames)
+	for _, dep := range a.deps {
+		dep := dep.(*packageAction)
+		vetxHash, err := cache.FileHash(dep.vetx)
+		if err != nil {
+			return fmt.Errorf("failed computing hash: %w", err)
+		}
+		fmt.Fprintf(h, "vetout %q %x\n", dep.Package.PkgPath, vetxHash)
+	}
+	a.hash = cache.ActionID(h.Sum())
+
+	// try to fetch hashed data (vetx, directives, diagnostics, unused)
+	var err1, err2, err3, err4 error
+	var f1, f2, f3, f4 string
+	f1, _, err1 = r.cache.GetFile(cache.Subkey(a.hash, "vetx"))
+	if !a.factsOnly {
+		// OPT(dh): is there any reason to store these in different
+		// files? wouldn't it be simpler to store all of these in a
+		// single file?
+		f2, _, err2 = r.cache.GetFile(cache.Subkey(a.hash, "directives"))
+		f3, _, err3 = r.cache.GetFile(cache.Subkey(a.hash, "diagnostics"))
+		f4, _, err4 = r.cache.GetFile(cache.Subkey(a.hash, "unused"))
+	}
+	// OPT(dh): only load "unused" data if we're running the U1000 analyzer
+	if err1 != nil || err2 != nil || err3 != nil || err4 != nil {
+		result, err := r.doUncached(a)
+		if err != nil {
+			return err
+		}
+		if a.failed {
+			return nil
+		}
+
+		// OPT(dh): doUncached returns facts in one format, only for
+		// us to immediately convert them to another format.
+
+		// OPT(dh): instead of collecting all object facts and
+		// encoding them after analysis finishes, we could encode
+		// them as we go. However, that would require some locking.
+		gobFacts := &bytes.Buffer{}
+		enc := gob.NewEncoder(gobFacts)
+		for _, f := range result.objFacts {
+			objPath, err := objectpath.For(f.Object)
+			if err != nil {
+				continue
+			}
+			gf := gobFact{
+				PkgPath: f.Object.Pkg().Path(),
+				ObjPath: string(objPath),
+				Fact:    f.Fact,
+			}
+			if err := enc.Encode(gf); err != nil {
+				return fmt.Errorf("failed gob encoding data: %w", err)
+			}
+		}
+		for _, f := range result.pkgFacts {
+			gf := gobFact{
+				PkgPath: f.Package.Path(),
+				Fact:    f.Fact,
+			}
+			if err := enc.Encode(gf); err != nil {
+				return fmt.Errorf("failed gob encoding data: %w", err)
+			}
+		}
+
+		// OPT(dh): We could sort gobFacts for more consistent output,
+		// but it doesn't matter. The hash of a package includes all
+		// of its files, so whether the vetx hash changes or not, a
+		// change to a package requires re-analyzing all dependents,
+		// even if the vetx data stayed the same. See also the note at
+		// the top of loader/hash.go.
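// NOTE: objectpath (used when encoding gobFacts above and again in
// loadFacts below) is what makes object facts portable across
// processes: objectpath.For maps a types.Object to a stable path
// relative to its package's export data, and objectpath.Object
// resolves such a path against a freshly type-checked copy of the
// package. A minimal round trip:
//
//	path, err := objectpath.For(obj)
//	if err == nil { // For fails for objects without a stable path, e.g. locals
//		obj2, err := objectpath.Object(pkg, path)
//		// obj2 now denotes the same object in this process's
//		// *types.Package; err reports unresolvable paths
//		_, _ = obj2, err
//	}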
+
+		f1, err = r.writeCache(a, "vetx", gobFacts.Bytes())
+		if err != nil {
+			return err
+		}
+
+		dirs := make([]facts.SerializedDirective, len(result.dirs))
+		for i, dir := range result.dirs {
+			dirs[i] = facts.SerializeDirective(dir, result.lpkg.Fset)
+		}
+		f2, err = r.writeCacheGob(a, "directives", dirs)
+		if err != nil {
+			return err
+		}
+
+		gobDiags := &bytes.Buffer{}
+		enc = gob.NewEncoder(gobDiags)
+		for _, diag := range result.diags {
+			d := Diagnostic{
+				Position: report.DisplayPosition(result.lpkg.Fset, diag.Pos),
+				End:      report.DisplayPosition(result.lpkg.Fset, diag.End),
+				Category: diag.Category,
+				Message:  diag.Message,
+			}
+			for _, sugg := range diag.SuggestedFixes {
+				s := SuggestedFix{
+					Message: sugg.Message,
+				}
+				for _, edit := range sugg.TextEdits {
+					s.TextEdits = append(s.TextEdits, TextEdit{
+						Position: report.DisplayPosition(result.lpkg.Fset, edit.Pos),
+						End:      report.DisplayPosition(result.lpkg.Fset, edit.End),
+						NewText:  edit.NewText,
+					})
+				}
+				d.SuggestedFixes = append(d.SuggestedFixes, s)
+			}
+			for _, rel := range diag.Related {
+				d.Related = append(d.Related, RelatedInformation{
+					Position: report.DisplayPosition(result.lpkg.Fset, rel.Pos),
+					End:      report.DisplayPosition(result.lpkg.Fset, rel.End),
+					Message:  rel.Message,
+				})
+			}
+			if err := enc.Encode(d); err != nil {
+				return fmt.Errorf("failed gob encoding data: %w", err)
+			}
+		}
+		f3, err = r.writeCache(a, "diagnostics", gobDiags.Bytes())
+		if err != nil {
+			return err
+		}
+
+		f4, err = r.writeCacheGob(a, "unused", result.unused)
+		if err != nil {
+			return err
+		}
+	}
+	a.vetx = f1
+	a.directives = f2
+	a.diagnostics = f3
+	a.unused = f4
+	return nil
+}
+
+// ActiveWorkers returns the number of currently running workers.
+func (r *Runner) ActiveWorkers() int {
+	return r.semaphore.Len()
+}
+
+// TotalWorkers returns the maximum number of possible workers.
+func (r *Runner) TotalWorkers() int {
+	return r.semaphore.Cap()
+}
+
+func (r *Runner) writeCache(a *packageAction, kind string, data []byte) (string, error) {
+	h := cache.Subkey(a.hash, kind)
+	if err := r.cache.PutBytes(h, data); err != nil {
+		return "", fmt.Errorf("failed caching data: %w", err)
+	}
+	// OPT(dh): change PutBytes signature so we get the file name right away, not requiring a call to GetFile
+	f, _, err := r.cache.GetFile(h)
+	if err != nil {
+		return "", fmt.Errorf("failed finding cache entry: %w", err)
+	}
+	return f, nil
+}
+
+func (r *Runner) writeCacheGob(a *packageAction, kind string, data interface{}) (string, error) {
+	buf := bytes.NewBuffer(nil)
+	if err := gob.NewEncoder(buf).Encode(data); err != nil {
+		return "", fmt.Errorf("failed gob encoding data: %w", err)
+	}
+	return r.writeCache(a, kind, buf.Bytes())
+}
+
+type packageActionResult struct {
+	objFacts []analysis.ObjectFact
+	pkgFacts []analysis.PackageFact
+	diags    []analysis.Diagnostic
+	unused   unused.SerializedResult
+	dirs     []facts.Directive
+	lpkg     *loader.Package
+}
+
+func (r *subrunner) doUncached(a *packageAction) (packageActionResult, error) {
+	// OPT(dh): for a -> b; c -> b; if both a and c are being
+	// processed concurrently, we shouldn't load b's export data
+	// twice.
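// NOTE: one conventional fix for the OPT above is duplicate
// suppression keyed by package ID, e.g. with
// golang.org/x/sync/singleflight, so that concurrent dependents share
// a single load. A rough sketch, not wired into the runner
// (loadShared is a hypothetical helper):
//
//	var group singleflight.Group
//
//	func loadShared(spec *loader.PackageSpec) (*loader.Package, error) {
//		v, err, _ := group.Do(spec.ID, func() (interface{}, error) {
//			pkg, _, err := loader.Load(spec)
//			return pkg, err
//		})
//		if err != nil {
//			return nil, err
//		}
//		return v.(*loader.Package), nil
//	}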
+
+	pkg, _, err := loader.Load(a.Package)
+	if err != nil {
+		return packageActionResult{}, err
+	}
+
+	if len(pkg.Errors) > 0 {
+		// this handles errors that occurred during type-checking the
+		// package in loader.Load
+		for _, err := range pkg.Errors {
+			a.errors = append(a.errors, err)
+		}
+		a.failed = true
+		return packageActionResult{}, nil
+	}
+
+	// OPT(dh): instead of parsing directives twice (twice because
+	// U1000 depends on the facts.Directives analyzer), reuse the
+	// existing result
+	dirs := facts.ParseDirectives(pkg.Syntax, pkg.Fset)
+	res, err := r.runAnalyzers(a, pkg)
+
+	return packageActionResult{
+		objFacts: res.objFacts,
+		pkgFacts: res.pkgFacts,
+		diags:    res.diagnostics,
+		unused:   res.unused,
+		dirs:     dirs,
+		lpkg:     pkg,
+	}, err
+}
+
+func pkgPaths(root *types.Package) map[string]*types.Package {
+	out := map[string]*types.Package{}
+	var dfs func(*types.Package)
+	dfs = func(pkg *types.Package) {
+		if _, ok := out[pkg.Path()]; ok {
+			return
+		}
+		out[pkg.Path()] = pkg
+		for _, imp := range pkg.Imports() {
+			dfs(imp)
+		}
+	}
+	dfs(root)
+	return out
+}
+
+func (r *Runner) loadFacts(root *types.Package, dep *packageAction, objFacts map[objectFactKey]analysis.Fact, pkgFacts map[packageFactKey]analysis.Fact) error {
+	// Load facts of all imported packages
+	vetx, err := os.Open(dep.vetx)
+	if err != nil {
+		return fmt.Errorf("failed loading cached facts: %w", err)
+	}
+	defer vetx.Close()
+
+	pathToPkg := pkgPaths(root)
+	dec := gob.NewDecoder(vetx)
+	for {
+		var gf gobFact
+		err := dec.Decode(&gf)
+		if err != nil {
+			if err == io.EOF {
+				break
+			}
+			return fmt.Errorf("failed loading cached facts: %w", err)
+		}
+
+		pkg, ok := pathToPkg[gf.PkgPath]
+		if !ok {
+			continue
+		}
+		if gf.ObjPath == "" {
+			pkgFacts[packageFactKey{
+				Pkg:  pkg,
+				Type: reflect.TypeOf(gf.Fact),
+			}] = gf.Fact
+		} else {
+			obj, err := objectpath.Object(pkg, objectpath.Path(gf.ObjPath))
+			if err != nil {
+				continue
+			}
+			objFacts[objectFactKey{
+				Obj:  obj,
+				Type: reflect.TypeOf(gf.Fact),
+			}] = gf.Fact
+		}
+	}
+	return nil
+}
+
+func genericHandle(a action, root action, queue chan action, sem *tsync.Semaphore, exec func(a action) error) {
+	if a == root {
+		close(queue)
+		if sem != nil {
+			sem.Release()
+		}
+		return
+	}
+	if !a.IsFailed() {
+		// the action may have already been marked as failed during
+		// construction of the action graph, for example because of
+		// unresolved imports.
+
+		for _, dep := range a.Deps() {
+			if dep.IsFailed() {
+				// One of our dependencies failed, so mark this package as
+				// failed and bail. We don't need to record an error for
+				// this package; the relevant error will have been
+				// reported by the first package in the chain that failed.
+				a.MarkFailed()
+				break
+			}
+		}
+	}
+
+	if !a.IsFailed() {
+		if err := exec(a); err != nil {
+			a.MarkFailed()
+			a.AddError(err)
+		}
+	}
+	if sem != nil {
+		sem.Release()
+	}
+
+	for _, t := range a.Triggers() {
+		if t.DecrementPending() {
+			queue <- t
+		}
+	}
+}
+
+type analyzerRunner struct {
+	pkg *loader.Package
+	// object facts of our dependencies; may contain facts of
+	// analyzers other than the current one
+	depObjFacts map[objectFactKey]analysis.Fact
+	// package facts of our dependencies; may contain facts of
+	// analyzers other than the current one
+	depPkgFacts map[packageFactKey]analysis.Fact
+	factsOnly   bool
+
+	stats *Stats
+}
+
+func (ar *analyzerRunner) do(act action) error {
+	a := act.(*analyzerAction)
+	results := map[*analysis.Analyzer]interface{}{}
+	// XXX does this have to be recursive?
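// NOTE: per the go/analysis contract it does not need to be:
// Pass.ResultOf only exposes the results of an analyzer's *direct*
// Requires, so collecting a single level of dependency results here
// is sufficient. For comparison, the consumer side of this map in a
// typical analyzer looks like:
//
//	func run(pass *analysis.Pass) (interface{}, error) {
//		ins := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
//		// ...
//		return nil, nil
//	}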
+ for _, dep := range a.deps { + dep := dep.(*analyzerAction) + results[dep.Analyzer] = dep.Result + } + factTypes := map[reflect.Type]struct{}{} + for _, typ := range a.Analyzer.FactTypes { + factTypes[reflect.TypeOf(typ)] = struct{}{} + } + filterFactType := func(typ reflect.Type) bool { + _, ok := factTypes[typ] + return ok + } + a.Pass = &analysis.Pass{ + Analyzer: a.Analyzer, + Fset: ar.pkg.Fset, + Files: ar.pkg.Syntax, + OtherFiles: ar.pkg.OtherFiles, + Pkg: ar.pkg.Types, + TypesInfo: ar.pkg.TypesInfo, + TypesSizes: ar.pkg.TypesSizes, + Report: func(d analysis.Diagnostic) { + if !ar.factsOnly { + if d.Category == "" { + d.Category = a.Analyzer.Name + } + a.Diagnostics = append(a.Diagnostics, d) + } + }, + ResultOf: results, + ImportObjectFact: func(obj types.Object, fact analysis.Fact) bool { + key := objectFactKey{ + Obj: obj, + Type: reflect.TypeOf(fact), + } + if f, ok := ar.depObjFacts[key]; ok { + reflect.ValueOf(fact).Elem().Set(reflect.ValueOf(f).Elem()) + return true + } else if f, ok := a.ObjectFacts[key]; ok { + reflect.ValueOf(fact).Elem().Set(reflect.ValueOf(f).Elem()) + return true + } + return false + }, + ImportPackageFact: func(pkg *types.Package, fact analysis.Fact) bool { + key := packageFactKey{ + Pkg: pkg, + Type: reflect.TypeOf(fact), + } + if f, ok := ar.depPkgFacts[key]; ok { + reflect.ValueOf(fact).Elem().Set(reflect.ValueOf(f).Elem()) + return true + } else if f, ok := a.PackageFacts[key]; ok { + reflect.ValueOf(fact).Elem().Set(reflect.ValueOf(f).Elem()) + return true + } + return false + }, + ExportObjectFact: func(obj types.Object, fact analysis.Fact) { + key := objectFactKey{ + Obj: obj, + Type: reflect.TypeOf(fact), + } + a.ObjectFacts[key] = fact + }, + ExportPackageFact: func(fact analysis.Fact) { + key := packageFactKey{ + Pkg: ar.pkg.Types, + Type: reflect.TypeOf(fact), + } + a.PackageFacts[key] = fact + }, + AllPackageFacts: func() []analysis.PackageFact { + out := make([]analysis.PackageFact, 0, len(ar.depPkgFacts)+len(a.PackageFacts)) + for key, fact := range ar.depPkgFacts { + out = append(out, analysis.PackageFact{ + Package: key.Pkg, + Fact: fact, + }) + } + for key, fact := range a.PackageFacts { + out = append(out, analysis.PackageFact{ + Package: key.Pkg, + Fact: fact, + }) + } + return out + }, + AllObjectFacts: func() []analysis.ObjectFact { + out := make([]analysis.ObjectFact, 0, len(ar.depObjFacts)+len(a.ObjectFacts)) + for key, fact := range ar.depObjFacts { + if filterFactType(key.Type) { + out = append(out, analysis.ObjectFact{ + Object: key.Obj, + Fact: fact, + }) + } + } + for key, fact := range a.ObjectFacts { + if filterFactType(key.Type) { + out = append(out, analysis.ObjectFact{ + Object: key.Obj, + Fact: fact, + }) + } + } + return out + }, + } + + t := time.Now() + res, err := a.Analyzer.Run(a.Pass) + ar.stats.measureAnalyzer(a.Analyzer, ar.pkg.PackageSpec, time.Since(t)) + if err != nil { + return err + } + a.Result = res + return nil +} + +type analysisResult struct { + objFacts []analysis.ObjectFact + pkgFacts []analysis.PackageFact + diagnostics []analysis.Diagnostic + unused unused.SerializedResult +} + +func (r *subrunner) runAnalyzers(pkgAct *packageAction, pkg *loader.Package) (analysisResult, error) { + depObjFacts := map[objectFactKey]analysis.Fact{} + depPkgFacts := map[packageFactKey]analysis.Fact{} + + for _, dep := range pkgAct.deps { + if err := r.loadFacts(pkg.Types, dep.(*packageAction), depObjFacts, depPkgFacts); err != nil { + return analysisResult{}, err + } + } + + // OPT(dh): this computation is the same 
for all packages + // (depending on act.factsOnly), we should cache it in the runner. + analyzerActionCache := map[*analysis.Analyzer]*analyzerAction{} + root := &analyzerAction{} + for _, a := range r.analyzers { + // When analyzing non-initial packages, we only care about + // analyzers that produce facts. + if !pkgAct.factsOnly || len(a.FactTypes) > 0 { + a := newAnalyzerAction(a, analyzerActionCache) + root.deps = append(root.deps, a) + a.triggers = append(a.triggers, root) + } + } + root.pending = uint32(len(root.deps)) + all := actionList(root) + + ar := &analyzerRunner{ + pkg: pkg, + factsOnly: pkgAct.factsOnly, + depObjFacts: depObjFacts, + depPkgFacts: depPkgFacts, + stats: &r.Stats, + } + queue := make(chan action, len(all)) + for _, a := range all { + if len(a.Deps()) == 0 { + queue <- a + } + } + + for item := range queue { + b := r.semaphore.AcquireMaybe() + if b { + go genericHandle(item, root, queue, &r.semaphore, ar.do) + } else { + // the semaphore is exhausted; run the analysis under the + // token we've acquired for analyzing the package. + genericHandle(item, root, queue, nil, ar.do) + } + } + + var unusedResult unused.SerializedResult + for _, a := range all { + a := a.(*analyzerAction) + + if a != root && a.Analyzer.Name == "U1000" { + // TODO(dh): figure out a clean abstraction, instead of + // special-casing U1000. + unusedResult = unused.Serialize(a.Pass, a.Result.(unused.Result), pkg.Fset) + } + + for key, fact := range a.ObjectFacts { + depObjFacts[key] = fact + } + for key, fact := range a.PackageFacts { + depPkgFacts[key] = fact + } + } + + // OPT(dh): cull objects not reachable via the exported closure + objFacts := make([]analysis.ObjectFact, 0, len(depObjFacts)) + pkgFacts := make([]analysis.PackageFact, 0, len(depPkgFacts)) + for key, fact := range depObjFacts { + objFacts = append(objFacts, analysis.ObjectFact{Object: key.Obj, Fact: fact}) + } + for key, fact := range depPkgFacts { + pkgFacts = append(pkgFacts, analysis.PackageFact{Package: key.Pkg, Fact: fact}) + } + + var diags []analysis.Diagnostic + for _, a := range root.deps { + a := a.(*analyzerAction) + diags = append(diags, a.Diagnostics...) + } + return analysisResult{ + objFacts: objFacts, + pkgFacts: pkgFacts, + diagnostics: diags, + unused: unusedResult, + }, nil +} + +func actionList(root action) []action { + seen := map[action]struct{}{} + all := []action{} + var walk func(action) + walk = func(a action) { + if _, ok := seen[a]; ok { + return + } + seen[a] = struct{}{} + for _, a1 := range a.Deps() { + walk(a1) + } + all = append(all, a) + } + walk(root) + return all +} + +func registerGobTypes(analyzers []*analysis.Analyzer) { + for _, a := range analyzers { + for _, typ := range a.FactTypes { + // XXX use RegisterName so we can work around collisions + // in names. For pointer-types, gob incorrectly qualifies + // type names with the package name, not the import path. + gob.Register(typ) + } + } +} + +func allAnalyzers(analyzers []*analysis.Analyzer) []*analysis.Analyzer { + seen := map[*analysis.Analyzer]struct{}{} + out := make([]*analysis.Analyzer, 0, len(analyzers)) + var dfs func(*analysis.Analyzer) + dfs = func(a *analysis.Analyzer) { + if _, ok := seen[a]; ok { + return + } + seen[a] = struct{}{} + out = append(out, a) + for _, dep := range a.Requires { + dfs(dep) + } + } + for _, a := range analyzers { + dfs(a) + } + return out +} + +// Run loads the packages specified by patterns, runs analyzers on +// them and returns the results. Each result corresponds to a single +// package. 
Results will be returned for all packages, including +// dependencies. Errors specific to packages will be reported in the +// respective results. +// +// If cfg is nil, a default config will be used. Otherwise, cfg will +// be used, with the exception of the Mode field. +// +// Run can be called multiple times on the same Runner and it is safe +// for concurrent use. All runs will share the same semaphore. +func (r *Runner) Run(cfg *packages.Config, analyzers []*analysis.Analyzer, patterns []string) ([]Result, error) { + analyzers = allAnalyzers(analyzers) + registerGobTypes(analyzers) + + r.Stats.setState(StateLoadPackageGraph) + lpkgs, err := loader.Graph(cfg, patterns...) + if err != nil { + return nil, err + } + r.Stats.setInitialPackages(len(lpkgs)) + + r.Stats.setState(StateBuildActionGraph) + packageActionCache := map[*loader.PackageSpec]*packageAction{} + root := &packageAction{} + for _, lpkg := range lpkgs { + a := newPackageActionRoot(lpkg, packageActionCache) + root.deps = append(root.deps, a) + a.triggers = append(a.triggers, root) + } + root.pending = uint32(len(root.deps)) + + all := actionList(root) + queue := make(chan action) + r.Stats.setTotalPackages(len(all) - 1) + + r.Stats.setState(StateProcessing) + go func() { + for _, a := range all { + if len(a.Deps()) == 0 { + queue <- a + } + } + }() + + sr := newSubrunner(r, analyzers) + for item := range queue { + r.semaphore.Acquire() + go genericHandle(item, root, queue, &r.semaphore, func(act action) error { + return sr.do(act) + }) + } + + r.Stats.setState(StateFinalizing) + out := make([]Result, len(all)) + for i, item := range all { + item := item.(*packageAction) + out[i] = Result{ + Package: item.Package, + Config: item.cfg, + Failed: item.failed, + Errors: item.errors, + diagnostics: item.diagnostics, + directives: item.directives, + unused: item.unused, + } + } + return out, nil +} diff --git a/runner/stats.go b/runner/stats.go new file mode 100644 index 000000000..ce3d589df --- /dev/null +++ b/runner/stats.go @@ -0,0 +1,48 @@ +package runner + +import ( + "sync/atomic" + "time" + + "golang.org/x/tools/go/analysis" + "honnef.co/go/tools/loader" +) + +const ( + StateInitializing = iota + StateLoadPackageGraph + StateBuildActionGraph + StateProcessing + StateFinalizing +) + +type Stats struct { + state uint32 + initialPackages uint32 + totalPackages uint32 + processedPackages uint32 + processedInitialPackages uint32 + + // optional function to call every time an analyzer has finished analyzing a package. 
+ PrintAnalyzerMeasurement func(*analysis.Analyzer, *loader.PackageSpec, time.Duration) +} + +func (s *Stats) setState(state uint32) { atomic.StoreUint32(&s.state, state) } +func (s *Stats) State() int { return int(atomic.LoadUint32(&s.state)) } +func (s *Stats) setInitialPackages(n int) { atomic.StoreUint32(&s.initialPackages, uint32(n)) } +func (s *Stats) InitialPackages() int { return int(atomic.LoadUint32(&s.initialPackages)) } +func (s *Stats) setTotalPackages(n int) { atomic.StoreUint32(&s.totalPackages, uint32(n)) } +func (s *Stats) TotalPackages() int { return int(atomic.LoadUint32(&s.totalPackages)) } + +func (s *Stats) finishPackage() { atomic.AddUint32(&s.processedPackages, 1) } +func (s *Stats) finishInitialPackage() { atomic.AddUint32(&s.processedInitialPackages, 1) } +func (s *Stats) ProcessedPackages() int { return int(atomic.LoadUint32(&s.processedPackages)) } +func (s *Stats) ProcessedInitialPackages() int { + return int(atomic.LoadUint32(&s.processedInitialPackages)) +} + +func (s *Stats) measureAnalyzer(analysis *analysis.Analyzer, pkg *loader.PackageSpec, d time.Duration) { + if s.PrintAnalyzerMeasurement != nil { + s.PrintAnalyzerMeasurement(analysis, pkg, d) + } +} diff --git a/staticcheck/lint.go b/staticcheck/lint.go index 7d7c5021d..1e9f5fcb3 100644 --- a/staticcheck/lint.go +++ b/staticcheck/lint.go @@ -28,7 +28,6 @@ import ( "honnef.co/go/tools/internal/sharedcheck" "honnef.co/go/tools/ir" "honnef.co/go/tools/ir/irutil" - "honnef.co/go/tools/lint" . "honnef.co/go/tools/lint/lintdsl" "honnef.co/go/tools/pattern" "honnef.co/go/tools/printf" @@ -2396,7 +2395,7 @@ func CheckCyclicFinalizer(pass *analysis.Pass) (interface{}, error) { } for _, b := range mc.Bindings { if b == v { - pos := lint.DisplayPosition(pass.Fset, mc.Fn.Pos()) + pos := report.DisplayPosition(pass.Fset, mc.Fn.Pos()) report.Report(pass, site, fmt.Sprintf("the finalizer closes over the object, preventing the finalizer from ever running (at %s)", pos)) } } @@ -2957,7 +2956,7 @@ func checkCalls(pass *analysis.Pass, rules map[string]CallCheck) (interface{}, e return } - r, ok := rules[lint.FuncName(obj)] + r, ok := rules[code.FuncName(obj)] if !ok { return } diff --git a/stylecheck/analysis.go b/stylecheck/analysis.go index 0f93f4436..7d8e6f3e0 100644 --- a/stylecheck/analysis.go +++ b/stylecheck/analysis.go @@ -1,12 +1,13 @@ package stylecheck import ( - "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/analysis/passes/inspect" "honnef.co/go/tools/config" "honnef.co/go/tools/facts" "honnef.co/go/tools/internal/passes/buildir" "honnef.co/go/tools/lint/lintutil" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" ) var Analyzers = lintutil.InitializeAnalyzers(Docs, map[string]*analysis.Analyzer{ @@ -38,8 +39,7 @@ var Analyzers = lintutil.InitializeAnalyzers(Docs, map[string]*analysis.Analyzer Requires: []*analysis.Analyzer{inspect.Analyzer}, }, "ST1012": { - Run: CheckErrorVarNames, - Requires: []*analysis.Analyzer{config.Analyzer}, + Run: CheckErrorVarNames, }, "ST1013": { Run: CheckHTTPStatusCodes, @@ -64,7 +64,7 @@ var Analyzers = lintutil.InitializeAnalyzers(Docs, map[string]*analysis.Analyzer }, "ST1019": { Run: CheckDuplicatedImports, - Requires: []*analysis.Analyzer{facts.Generated, config.Analyzer}, + Requires: []*analysis.Analyzer{facts.Generated}, }, "ST1020": { Run: CheckExportedFunctionDocs, diff --git a/unused/edge.go b/unused/edge.go index 02e0d09cf..5fcf1465a 100644 --- a/unused/edge.go +++ b/unused/edge.go @@ -51,4 +51,5 @@ const ( 
edgeUnsafeConversion edgeUsedConstant edgeVarDecl + edgeIgnored ) diff --git a/unused/implements.go b/unused/implements.go index 835baac69..f62018572 100644 --- a/unused/implements.go +++ b/unused/implements.go @@ -37,7 +37,7 @@ func sameId(obj types.Object, pkg *types.Package, name string) bool { return pkg.Path() == obj.Pkg().Path() } -func (g *Graph) implements(V types.Type, T *types.Interface, msV *types.MethodSet) ([]*types.Selection, bool) { +func (g *graph) implements(V types.Type, T *types.Interface, msV *types.MethodSet) ([]*types.Selection, bool) { // fast path for common case if T.Empty() { return nil, true diff --git a/unused/testdata/src/alias/alias.go b/unused/testdata/src/alias/alias.go index 911501e59..5d05524db 100644 --- a/unused/testdata/src/alias/alias.go +++ b/unused/testdata/src/alias/alias.go @@ -1,15 +1,15 @@ package main -type t1 struct{} -type t2 struct{} // want `t2` -type t3 struct{} +type t1 struct{} // used +type t2 struct{} // unused +type t3 struct{} // used -type alias1 = t1 -type alias2 = t2 // want `alias2` -type alias3 = t3 -type alias4 = int +type alias1 = t1 // used +type alias2 = t2 // unused +type alias3 = t3 // used +type alias4 = int // used -func main() { +func main() { // used var _ alias1 var _ t3 } diff --git a/unused/testdata/src/anonymous/anonymous.go b/unused/testdata/src/anonymous/anonymous.go index d0577a737..70ac16184 100644 --- a/unused/testdata/src/anonymous/anonymous.go +++ b/unused/testdata/src/anonymous/anonymous.go @@ -2,17 +2,17 @@ package pkg import "fmt" -type Node interface { - position() int +type Node interface { // used + position() int // used } -type noder struct{} +type noder struct{} // used -func (noder) position() int { panic("unreachable") } +func (noder) position() int { panic("unreachable") } // used -func Fn() { +func Fn() { // used nodes := []Node{struct { - noder + noder // used }{}} fmt.Println(nodes) } diff --git a/unused/testdata/src/blank/blank.go b/unused/testdata/src/blank/blank.go index ee707b626..fe61bd2f7 100644 --- a/unused/testdata/src/blank/blank.go +++ b/unused/testdata/src/blank/blank.go @@ -2,28 +2,28 @@ package pkg import _ "fmt" -type t1 struct{} // want `t1` -type t2 struct { - _ int +type t1 struct{} // unused +type t2 struct { // used + _ int // used } -type t3 struct{} -type t4 struct{} -type t5 struct{} +type t3 struct{} // used +type t4 struct{} // used +type t5 struct{} // used var _ = t2{} -func fn1() { // want `fn1` +func fn1() { // unused _ = t1{} var _ = t1{} } -func fn2() { +func fn2() { // used _ = t3{} var _ t4 var _ *t5 = nil } -func init() { +func init() { // used fn2() } diff --git a/unused/testdata/src/cgo/cgo.go b/unused/testdata/src/cgo/cgo.go index 4b852d173..8506e9e05 100644 --- a/unused/testdata/src/cgo/cgo.go +++ b/unused/testdata/src/cgo/cgo.go @@ -1,6 +1,6 @@ package pkg //go:cgo_export_dynamic -func foo() {} +func foo() {} // used -func bar() {} // want `bar` +func bar() {} // unused diff --git a/unused/testdata/src/consts/consts.go b/unused/testdata/src/consts/consts.go index 1cab7ddde..7d3636e90 100644 --- a/unused/testdata/src/consts/consts.go +++ b/unused/testdata/src/consts/consts.go @@ -1,35 +1,35 @@ package pkg -const c1 = 1 +const c1 = 1 // used -const c2 = 1 -const c3 = 1 -const c4 = 1 -const C5 = 1 +const c2 = 1 // used +const c3 = 1 // used +const c4 = 1 // used +const C5 = 1 // used const ( - c6 = 0 - c7 - c8 + c6 = 0 // used + c7 // used + c8 // used - c9 // want `c9` - c10 // want `c10` - c11 // want `c11` + c9 // unused + c10 // unused + c11 // unused ) 
var _ = []int{c3: 1} -type T1 struct { - F1 [c1]int +type T1 struct { // used + F1 [c1]int // used } -func init() { +func init() { // used _ = []int{c2: 1} var _ [c4]int _ = c7 } -func Fn() { - const X = 1 // want `X` +func Fn() { // used + const X = 1 // unused } diff --git a/unused/testdata/src/conversion/conversion.go b/unused/testdata/src/conversion/conversion.go index 0821c67da..c487a93fa 100644 --- a/unused/testdata/src/conversion/conversion.go +++ b/unused/testdata/src/conversion/conversion.go @@ -5,57 +5,57 @@ import ( "unsafe" ) -type t1 struct { - a int - b int +type t1 struct { // used + a int // used + b int // used } -type t2 struct { - a int - b int +type t2 struct { // used + a int // used + b int // used } -type t3 struct { - a int - b int // want `b` +type t3 struct { // used + a int // used + b int // unused } -type t4 struct { - a int - b int // want `b` +type t4 struct { // used + a int // used + b int // unused } -type t5 struct { - a int - b int +type t5 struct { // used + a int // used + b int // used } -type t6 struct { - a int - b int +type t6 struct { // used + a int // used + b int // used } -type t7 struct { - a int - b int +type t7 struct { // used + a int // used + b int // used } -type t8 struct { - a int - b int +type t8 struct { // used + a int // used + b int // used } -type t9 struct { - Offset int64 - Err error +type t9 struct { // used + Offset int64 // used + Err error // used } -type t10 struct { - a int - b int +type t10 struct { // used + a int // used + b int // used } -func fn() { +func fn() { // used // All fields in t2 used because they're initialised in t1 v1 := t1{0, 1} v2 := t2(v1) @@ -89,4 +89,4 @@ func fn() { _ = v10 } -func init() { fn() } +func init() { fn() } // used diff --git a/unused/testdata/src/cyclic/cyclic.go b/unused/testdata/src/cyclic/cyclic.go index b9dfc952d..0e259575a 100644 --- a/unused/testdata/src/cyclic/cyclic.go +++ b/unused/testdata/src/cyclic/cyclic.go @@ -1,9 +1,9 @@ package pkg -func a() { // want `a` +func a() { // unused b() } -func b() { // want `b` +func b() { // unused a() } diff --git a/unused/testdata/src/defer/defer.go b/unused/testdata/src/defer/defer.go index a6cfdee73..e24592bbe 100644 --- a/unused/testdata/src/defer/defer.go +++ b/unused/testdata/src/defer/defer.go @@ -1,13 +1,13 @@ package pkg -type t struct{} +type t struct{} // used -func (t) fn1() {} -func (t) fn2() {} -func fn1() {} -func fn2() {} +func (t) fn1() {} // used +func (t) fn2() {} // used +func fn1() {} // used +func fn2() {} // used -func Fn() { +func Fn() { // used var v t defer fn1() defer v.fn1() diff --git a/unused/testdata/src/elem/elem.go b/unused/testdata/src/elem/elem.go index 24cbf03cc..a0e5ecbd9 100644 --- a/unused/testdata/src/elem/elem.go +++ b/unused/testdata/src/elem/elem.go @@ -2,15 +2,17 @@ package pkg -type t15 struct{ f151 int } -type a2 [1]t15 +type t15 struct { // used + f151 int // used +} +type a2 [1]t15 // used -type t16 struct{} -type a3 [1][1]t16 +type t16 struct{} // used +type a3 [1][1]t16 // used -func foo() { - _ = a2{0: {1}} - _ = a3{{{}}} +func foo() { // used + _ = a2{0: {1}} // used + _ = a3{{{}}} // used } -func init() { foo() } +func init() { foo() } // used diff --git a/unused/testdata/src/embedded_call/embedded_call.go b/unused/testdata/src/embedded_call/embedded_call.go index 196ac0dec..a66cba503 100644 --- a/unused/testdata/src/embedded_call/embedded_call.go +++ b/unused/testdata/src/embedded_call/embedded_call.go @@ -1,20 +1,22 @@ package pkg -var t1 struct { - t2 - t3 - t4 +var t1 struct { // 
used + t2 // used + t3 // used + t4 // used } -type t2 struct{} -type t3 struct{} -type t4 struct{ t5 } -type t5 struct{} +type t2 struct{} // used +type t3 struct{} // used +type t4 struct { // used + t5 // used +} +type t5 struct{} // used -func (t2) foo() {} -func (t3) bar() {} -func (t5) baz() {} -func init() { +func (t2) foo() {} // used +func (t3) bar() {} // used +func (t5) baz() {} // used +func init() { // used t1.foo() _ = t1.bar t1.baz() diff --git a/unused/testdata/src/embedding/embedding.go b/unused/testdata/src/embedding/embedding.go index b907e2918..aefbe32df 100644 --- a/unused/testdata/src/embedding/embedding.go +++ b/unused/testdata/src/embedding/embedding.go @@ -1,77 +1,87 @@ package pkg -type I interface { - f1() - f2() +type I interface { // used + f1() // used + f2() // used } -func init() { +func init() { // used var _ I } -type t1 struct{} -type T2 struct{ t1 } +type t1 struct{} // used +type T2 struct { // used + t1 // used +} -func (t1) f1() {} -func (T2) f2() {} +func (t1) f1() {} // used +func (T2) f2() {} // used -func Fn() { +func Fn() { // used var v T2 _ = v.t1 } -type I2 interface { - f3() - f4() +type I2 interface { // used + f3() // used + f4() // used } -type t3 struct{} -type t4 struct { - x int // want `x` - y int // want `y` - t3 +type t3 struct{} // used +type t4 struct { // used + x int // unused + y int // unused + t3 // used } -func (*t3) f3() {} -func (*t4) f4() {} +func (*t3) f3() {} // used +func (*t4) f4() {} // used -func init() { +func init() { // used var i I2 = &t4{} i.f3() i.f4() } -type i3 interface { - F() +type i3 interface { // used + F() // used } -type I4 interface { - i3 +type I4 interface { // used + i3 // used } -type T5 struct { - t6 +type T5 struct { // used + t6 // used } -type t6 struct { - F int +type t6 struct { // used + F int // used } -type t7 struct{ X int } -type t8 struct{ t7 } -type t9 struct{ t8 } +type t7 struct { // used + X int // used +} +type t8 struct { // used + t7 // used +} +type t9 struct { // used + t8 // used +} var _ = t9{} -type t10 struct{} +type t10 struct{} // used -func (*t10) Foo() {} +func (*t10) Foo() {} // used -type t11 struct{ t10 } +type t11 struct { // used + t10 // used +} var _ = t11{} -type i5 interface{} -type I6 interface { - i5 +type i5 interface{} // used +type I6 interface { // used + i5 // used } diff --git a/unused/testdata/src/embedding2/embedding2.go b/unused/testdata/src/embedding2/embedding2.go index 7efb67429..b151e72fa 100644 --- a/unused/testdata/src/embedding2/embedding2.go +++ b/unused/testdata/src/embedding2/embedding2.go @@ -1,28 +1,28 @@ package main -type AA interface { - A() +type AA interface { // used + A() // used } -type BB interface { - AA +type BB interface { // used + AA // used } -type CC interface { - BB - C() +type CC interface { // used + BB // used + C() // used } -func c(cc CC) { +func c(cc CC) { // used cc.A() } -type z struct{} +type z struct{} // used -func (z) A() {} -func (z) B() {} -func (z) C() {} +func (z) A() {} // used +func (z) B() {} // used +func (z) C() {} // used -func main() { +func main() { // used c(z{}) } diff --git a/unused/testdata/src/exported_fields/exported_fields.go b/unused/testdata/src/exported_fields/exported_fields.go index 2c909a8c4..1c102e174 100644 --- a/unused/testdata/src/exported_fields/exported_fields.go +++ b/unused/testdata/src/exported_fields/exported_fields.go @@ -1,36 +1,36 @@ package pkg -type t1 struct { - F1 int +type t1 struct { // used + F1 int // used } -type T2 struct { - F2 int +type T2 struct { // used + 
F2 int // used } -var v struct { - T3 +var v struct { // used + T3 // used } -type T3 struct{} +type T3 struct{} // used -func (T3) Foo() {} +func (T3) Foo() {} // used -func init() { +func init() { // used v.Foo() } -func init() { +func init() { // used _ = t1{} } -type codeResponse struct { - Tree *codeNode `json:"tree"` +type codeResponse struct { // used + Tree *codeNode `json:"tree"` // used } -type codeNode struct { +type codeNode struct { // used } -func init() { +func init() { // used _ = codeResponse{} } diff --git a/unused/testdata/src/exported_fields_main/exported_fields_main.go b/unused/testdata/src/exported_fields_main/exported_fields_main.go index ffb99d990..0a5f85757 100644 --- a/unused/testdata/src/exported_fields_main/exported_fields_main.go +++ b/unused/testdata/src/exported_fields_main/exported_fields_main.go @@ -1,14 +1,14 @@ package main -type t1 struct { - F1 int +type t1 struct { // used + F1 int // used } -type T2 struct { - F2 int +type T2 struct { // used + F2 int // used } -func init() { +func init() { // used _ = t1{} _ = T2{} } diff --git a/unused/testdata/src/exported_method_test/exported_method_test.go b/unused/testdata/src/exported_method_test/exported_method_test.go index 346056d68..698383a69 100644 --- a/unused/testdata/src/exported_method_test/exported_method_test.go +++ b/unused/testdata/src/exported_method_test/exported_method_test.go @@ -7,18 +7,18 @@ import ( "testing" ) -type countReadSeeker struct { - io.ReadSeeker - N int64 +type countReadSeeker struct { // used_test + io.ReadSeeker // used_test + N int64 // used_test } -func (rs *countReadSeeker) Read(buf []byte) (int, error) { +func (rs *countReadSeeker) Read(buf []byte) (int, error) { // used_test n, err := rs.ReadSeeker.Read(buf) rs.N += int64(n) return n, err } -func TestFoo(t *testing.T) { +func TestFoo(t *testing.T) { // used_test r := bytes.NewReader([]byte("Hello, world!")) cr := &countReadSeeker{ReadSeeker: r} ioutil.ReadAll(cr) @@ -27,12 +27,12 @@ func TestFoo(t *testing.T) { } } -var sink int +var sink int // used_test -func BenchmarkFoo(b *testing.B) { +func BenchmarkFoo(b *testing.B) { // used_test for i := 0; i < b.N; i++ { sink = fn() } } -func fn() int { return 0 } +func fn() int { return 0 } // used_test diff --git a/unused/testdata/src/fields/fields.go b/unused/testdata/src/fields/fields.go index 8b07c8152..c56e5fb90 100644 --- a/unused/testdata/src/fields/fields.go +++ b/unused/testdata/src/fields/fields.go @@ -2,44 +2,94 @@ package pkg -type t1 struct{ f11, f12 int } -type t2 struct{ f21, f22 int } -type t3 struct{ f31 t4 } -type t4 struct{ f41 int } -type t5 struct{ f51 int } -type t6 struct{ f61 int } -type t7 struct{ f71 int } -type m1 map[string]t7 -type t8 struct{ f81 int } -type t9 struct{ f91 int } -type t10 struct{ f101 int } -type t11 struct{ f111 int } -type s1 []t11 -type t12 struct{ f121 int } -type s2 []t12 -type t13 struct{ f131 int } -type t14 struct{ f141 int } -type a1 [1]t14 -type t15 struct{ f151 int } -type a2 [1]t15 -type t16 struct{ f161 int } -type t17 struct{ f171, f172 int } // want `t17` -type t18 struct{ f181, f182, f183 int } // want `f182` `f183` +type t1 struct { // used + f11 int // used + f12 int // used +} +type t2 struct { // used + f21 int // used + f22 int // used +} +type t3 struct { // used + f31 t4 // used +} +type t4 struct { // used + f41 int // used +} +type t5 struct { // used + f51 int // used +} +type t6 struct { // used + f61 int // used +} +type t7 struct { // used + f71 int // used +} +type m1 map[string]t7 // used +type t8 struct 
{ // used + f81 int // used +} +type t9 struct { // used + f91 int // used +} +type t10 struct { // used + f101 int // used +} +type t11 struct { // used + f111 int // used +} +type s1 []t11 // used +type t12 struct { // used + f121 int // used +} +type s2 []t12 // used +type t13 struct { // used + f131 int // used +} +type t14 struct { // used + f141 int // used +} +type a1 [1]t14 // used +type t15 struct { // used + f151 int // used +} +type a2 [1]t15 // used +type t16 struct { // used + f161 int // used +} +type t17 struct { // unused + f171 int // unused + f172 int // unused +} +type t18 struct { // used + f181 int // used + f182 int // unused + f183 int // unused +} -type t19 struct{ f191 int } -type m2 map[string]t19 +type t19 struct { // used + f191 int // used +} +type m2 map[string]t19 // used -type t20 struct{ f201 int } -type m3 map[string]t20 +type t20 struct { // used + f201 int // used +} +type m3 map[string]t20 // used -type t21 struct{ f211, f212 int } // want `f211` +type t21 struct { // used + f211 int // unused + f212 int // used +} -func foo() { +func foo() { // used _ = t10{1} _ = t21{f212: 1} _ = []t1{{1, 2}} _ = t2{1, 2} - _ = []struct{ a int }{{1}} + _ = []struct { + a int // used + }{{1}} // XXX // _ = []struct{ foo struct{ bar int } }{{struct{ bar int }{1}}} @@ -57,11 +107,13 @@ func foo() { _ = a1{{1}} _ = a2{0: {1}} _ = map[[1]t16]int{{{1}}: 1} - y := struct{ x int }{} + y := struct { + x int // used + }{} _ = y _ = t18{f181: 1} _ = []m2{{"a": {1}}} _ = [][]m3{{{"a": {1}}}} } -func init() { foo() } +func init() { foo() } // used diff --git a/unused/testdata/src/functions/functions.go b/unused/testdata/src/functions/functions.go index cb74a895f..56f6c15da 100644 --- a/unused/testdata/src/functions/functions.go +++ b/unused/testdata/src/functions/functions.go @@ -1,35 +1,35 @@ package main -type state func() state +type state func() state // used -func a() state { +func a() state { // used return a } -func main() { +func main() { // used st := a _ = st() } -type t1 struct{} // want `t1` -type t2 struct{} -type t3 struct{} +type t1 struct{} // unused +type t2 struct{} // used +type t3 struct{} // used -func fn1() t1 { return t1{} } // want `fn1` -func fn2() (x t2) { return } -func fn3() *t3 { return nil } +func fn1() t1 { return t1{} } // unused +func fn2() (x t2) { return } // used +func fn3() *t3 { return nil } // used -func fn4() { - const x = 1 - const y = 2 // want `y` - type foo int // want `foo` - type bar int +func fn4() { // used + const x = 1 // used + const y = 2 // unused + type foo int // unused + type bar int // used _ = x _ = bar(0) } -func init() { +func init() { // used fn2() fn3() fn4() diff --git a/unused/testdata/src/ignored/ignored.go b/unused/testdata/src/ignored/ignored.go new file mode 100644 index 000000000..5f4ae188f --- /dev/null +++ b/unused/testdata/src/ignored/ignored.go @@ -0,0 +1,47 @@ +package pkg + +//lint:ignore U1000 consider yourself used +type t1 struct{} // used +type t2 struct{} // used +type t3 struct{} // used + +func (t1) fn1() {} // used +func (t1) fn2() {} // used +func (t1) fn3() {} // used + +//lint:ignore U1000 be gone +func (t2) fn1() {} // used +func (t2) fn2() {} // unused +func (t2) fn3() {} // unused + +func (t3) fn1() {} // unused +func (t3) fn2() {} // unused +func (t3) fn3() {} // unused + +//lint:ignore U1000 consider yourself used +func fn() { // used + var _ t2 + var _ t3 +} + +//lint:ignore U1000 bye +type t4 struct { // used + x int // used +} + +func (t4) bar() {} // used + +//lint:ignore U1000 hai +type 
t5 map[int]struct { // used + y int // used +} + +//lint:ignore U1000 hi +type t6 interface { // used + foo() // used +} + +//lint:ignore U1000 lol +type t7 = struct { // used + z int // used +} diff --git a/unused/testdata/src/interfaces/interfaces.go b/unused/testdata/src/interfaces/interfaces.go index 59b1be73e..48490c21b 100644 --- a/unused/testdata/src/interfaces/interfaces.go +++ b/unused/testdata/src/interfaces/interfaces.go @@ -1,39 +1,41 @@ package pkg -type I interface { - fn1() +type I interface { // used + fn1() // used } -type t struct{} +type t struct{} // used -func (t) fn1() {} -func (t) fn2() {} // want `fn2` +func (t) fn1() {} // used +func (t) fn2() {} // unused -func init() { +func init() { // used _ = t{} } -type I1 interface { - Foo() +type I1 interface { // used + Foo() // used } -type I2 interface { - Foo() - bar() +type I2 interface { // used + Foo() // used + bar() // used } -type t1 struct{} -type t2 struct{} -type t3 struct{} -type t4 struct{ t3 } +type t1 struct{} // used +type t2 struct{} // used +type t3 struct{} // used +type t4 struct { // used + t3 // used +} -func (t1) Foo() {} -func (t2) Foo() {} -func (t2) bar() {} -func (t3) Foo() {} -func (t3) bar() {} +func (t1) Foo() {} // used +func (t2) Foo() {} // used +func (t2) bar() {} // used +func (t3) Foo() {} // used +func (t3) bar() {} // used -func Fn() { +func Fn() { // used var v1 t1 var v2 t2 var v3 t3 diff --git a/unused/testdata/src/interfaces2/interfaces.go b/unused/testdata/src/interfaces2/interfaces.go index d038ef699..15407b6dc 100644 --- a/unused/testdata/src/interfaces2/interfaces.go +++ b/unused/testdata/src/interfaces2/interfaces.go @@ -1,12 +1,14 @@ package pkg -type I interface { - foo() +type I interface { // used + foo() // used } -type T struct{} +type T struct{} // used -func (T) foo() {} -func (T) bar() {} // want `bar` +func (T) foo() {} // used +func (T) bar() {} // unused -var _ struct{ T } +var _ struct { + T // used +} diff --git a/unused/testdata/src/linkname/linkname.go b/unused/testdata/src/linkname/linkname.go index 1423a2148..eb33d6ae7 100644 --- a/unused/testdata/src/linkname/linkname.go +++ b/unused/testdata/src/linkname/linkname.go @@ -6,22 +6,22 @@ import _ "unsafe" //go:linkname ol other4 //go:linkname foo other1 -func foo() {} +func foo() {} // used //go:linkname bar other2 -var bar int +var bar int // used var ( - baz int // want `baz` + baz int // unused //go:linkname qux other3 - qux int + qux int // used ) //go:linkname fisk other3 var ( - fisk int + fisk int // used ) -var ol int +var ol int // used //go:linkname doesnotexist other5 diff --git a/unused/testdata/src/main/main.go b/unused/testdata/src/main/main.go index ae5c913ae..c688829e1 100644 --- a/unused/testdata/src/main/main.go +++ b/unused/testdata/src/main/main.go @@ -1,14 +1,15 @@ package main -func Fn1() {} -func Fn2() {} // want `Fn2` +func Fn1() {} // used +func Fn2() {} // used +func fn3() {} // unused -const X = 1 // want `X` +const X = 1 // used -var Y = 2 // want `Y` +var Y = 2 // used -type Z struct{} // want `Z` +type Z struct{} // used -func main() { +func main() { // used Fn1() } diff --git a/unused/testdata/src/mapslice/mapslice.go b/unused/testdata/src/mapslice/mapslice.go index 2769b2c21..f19c64ee9 100644 --- a/unused/testdata/src/mapslice/mapslice.go +++ b/unused/testdata/src/mapslice/mapslice.go @@ -1,8 +1,8 @@ package pkg -type M map[int]int +type M map[int]int // used -func Fn() { +func Fn() { // used var n M _ = []M{n} } diff --git a/unused/testdata/src/methods/methods.go 
b/unused/testdata/src/methods/methods.go index 0eaf6ee7f..f590584de 100644 --- a/unused/testdata/src/methods/methods.go +++ b/unused/testdata/src/methods/methods.go @@ -1,14 +1,16 @@ package pkg -type t1 struct{} -type t2 struct{ t3 } -type t3 struct{} +type t1 struct{} // used +type t2 struct { // used + t3 // used +} +type t3 struct{} // used -func (t1) Foo() {} -func (t3) Foo() {} -func (t3) foo() {} // want `foo` +func (t1) Foo() {} // used +func (t3) Foo() {} // used +func (t3) foo() {} // unused -func init() { +func init() { // used _ = t1{} _ = t2{} } diff --git a/unused/testdata/src/named/named.go b/unused/testdata/src/named/named.go index 7105f0a0e..dbaf30c37 100644 --- a/unused/testdata/src/named/named.go +++ b/unused/testdata/src/named/named.go @@ -1,4 +1,4 @@ package pkg -type t1 struct{} -type T2 t1 +type t1 struct{} // used +type T2 t1 // used diff --git a/unused/testdata/src/nested/nested.go b/unused/testdata/src/nested/nested.go index 7e108a28c..5fb6c4669 100644 --- a/unused/testdata/src/nested/nested.go +++ b/unused/testdata/src/nested/nested.go @@ -1,14 +1,29 @@ package pkg -type t struct{} // want `t` +type t1 struct{} // unused -func (t) fragment() {} +func (t1) fragment() {} // unused -func fn() bool { // want `fn` - var v interface{} = t{} +func fn1() bool { // unused + var v interface{} = t1{} switch obj := v.(type) { case interface { - fragment() + fragment() // unused + }: + obj.fragment() + } + return false +} + +type t2 struct{} // used + +func (t2) fragment() {} // used + +func Fn() bool { // used + var v interface{} = t2{} + switch obj := v.(type) { + case interface { + fragment() // used }: obj.fragment() } diff --git a/unused/testdata/src/nocopy-main/nocopy-main.go b/unused/testdata/src/nocopy-main/nocopy-main.go index 369a5d503..62d91b68e 100644 --- a/unused/testdata/src/nocopy-main/nocopy-main.go +++ b/unused/testdata/src/nocopy-main/nocopy-main.go @@ -1,24 +1,26 @@ package main -type myNoCopy1 struct{} -type myNoCopy2 struct{} -type locker struct{} // want `locker` -type someStruct struct{ x int } // want `someStruct` +type myNoCopy1 struct{} // used +type myNoCopy2 struct{} // used +type locker struct{} // unused +type someStruct struct { // unused + x int // unused +} -func (myNoCopy1) Lock() {} -func (recv myNoCopy2) Lock() {} -func (locker) Lock() {} -func (locker) Unlock() {} -func (someStruct) Lock() {} +func (myNoCopy1) Lock() {} // used +func (recv myNoCopy2) Lock() {} // used +func (locker) Lock() {} // unused +func (locker) Unlock() {} // unused +func (someStruct) Lock() {} // unused -type T struct { - noCopy1 myNoCopy1 - noCopy2 myNoCopy2 - field1 someStruct // want `field1` - field2 locker // want `field2` - field3 int // want `field3` +type T struct { // used + noCopy1 myNoCopy1 // used + noCopy2 myNoCopy2 // used + field1 someStruct // unused + field2 locker // unused + field3 int // unused } -func main() { +func main() { // used _ = T{} } diff --git a/unused/testdata/src/nocopy/nocopy.go b/unused/testdata/src/nocopy/nocopy.go index 98e46d4eb..8317aa217 100644 --- a/unused/testdata/src/nocopy/nocopy.go +++ b/unused/testdata/src/nocopy/nocopy.go @@ -1,20 +1,22 @@ package bar -type myNoCopy1 struct{} -type myNoCopy2 struct{} -type locker struct{} // want `locker` -type someStruct struct{ x int } // want `someStruct` +type myNoCopy1 struct{} // used +type myNoCopy2 struct{} // used +type locker struct{} // unused +type someStruct struct { // unused + x int // unused +} -func (myNoCopy1) Lock() {} -func (recv myNoCopy2) Lock() {} -func (locker) 
Lock() {} -func (locker) Unlock() {} -func (someStruct) Lock() {} +func (myNoCopy1) Lock() {} // used +func (recv myNoCopy2) Lock() {} // used +func (locker) Lock() {} // unused +func (locker) Unlock() {} // unused +func (someStruct) Lock() {} // unused -type T struct { - noCopy1 myNoCopy1 - noCopy2 myNoCopy2 - field1 someStruct // want `field1` - field2 locker // want `field2` - field3 int // want `field3` +type T struct { // used + noCopy1 myNoCopy1 // used + noCopy2 myNoCopy2 // used + field1 someStruct // unused + field2 locker // unused + field3 int // unused } diff --git a/unused/testdata/src/pointer-type-embedding/pointer-type-embedding.go b/unused/testdata/src/pointer-type-embedding/pointer-type-embedding.go index fb577f97c..cbf99f213 100644 --- a/unused/testdata/src/pointer-type-embedding/pointer-type-embedding.go +++ b/unused/testdata/src/pointer-type-embedding/pointer-type-embedding.go @@ -1,17 +1,17 @@ package pkg -func init() { +func init() { // used var p P _ = p.n } -type T0 struct { - m int // want `m` - n int +type T0 struct { // used + m int // unused + n int // used } -type T1 struct { - T0 +type T1 struct { // used + T0 // used } -type P *T1 +type P *T1 // used diff --git a/unused/testdata/src/quiet/quiet.go b/unused/testdata/src/quiet/quiet.go index 82f8479b8..2e6531c3c 100644 --- a/unused/testdata/src/quiet/quiet.go +++ b/unused/testdata/src/quiet/quiet.go @@ -1,34 +1,34 @@ package pkg -type iface interface { // want `iface` - foo() +type iface interface { // unused + foo() // unused } -type t1 struct{} // want `t1` -func (t1) foo() {} +type t1 struct{} // unused +func (t1) foo() {} // unused -type t2 struct{} +type t2 struct{} // used -func (t t2) bar(arg int) (ret int) { return 0 } // want `bar` +func (t t2) bar(arg int) (ret int) { return 0 } // unused -func init() { +func init() { // used _ = t2{} } -type t3 struct { // want `t3` - a int - b int +type t3 struct { // unused + a int // unused + b int // unused } -type T struct{} +type T struct{} // used -func fn1() { // want `fn1` +func fn1() { // unused meh := func(arg T) { } meh(T{}) } -type localityList []int // want `localityList` +type localityList []int // unused -func (l *localityList) Fn1() {} -func (l *localityList) Fn2() {} +func (l *localityList) Fn1() {} // unused +func (l *localityList) Fn2() {} // unused diff --git a/unused/testdata/src/selectors/selectors.go b/unused/testdata/src/selectors/selectors.go index 9ab337888..91743b320 100644 --- a/unused/testdata/src/selectors/selectors.go +++ b/unused/testdata/src/selectors/selectors.go @@ -1,14 +1,14 @@ package pkg -type t struct { - f int +type t struct { // used + f int // used } -func fn(v *t) { +func fn(v *t) { // used println(v.f) } -func init() { +func init() { // used var v t fn(&v) } diff --git a/unused/testdata/src/switch_interface/switch_interface.go b/unused/testdata/src/switch_interface/switch_interface.go index 99c2ce858..7fd63d544 100644 --- a/unused/testdata/src/switch_interface/switch_interface.go +++ b/unused/testdata/src/switch_interface/switch_interface.go @@ -1,19 +1,19 @@ package pkg -type t struct{} +type t struct{} // used -func (t) fragment() {} +func (t) fragment() {} // used -func fn() bool { +func fn() bool { // used var v interface{} = t{} switch obj := v.(type) { case interface { - fragment() + fragment() // used }: obj.fragment() } return false } -var x = fn() +var x = fn() // used var _ = x diff --git a/unused/testdata/src/tests-main/main_test.go b/unused/testdata/src/tests-main/main_test.go index fffcc5f62..7e644de94 100644 
--- a/unused/testdata/src/tests-main/main_test.go +++ b/unused/testdata/src/tests-main/main_test.go @@ -4,8 +4,8 @@ import ( "testing" ) -type t1 struct{} +type t1 struct{} // used_test -func TestFoo(t *testing.T) { +func TestFoo(t *testing.T) { // used_test _ = t1{} } diff --git a/unused/testdata/src/tests/tests.go b/unused/testdata/src/tests/tests.go index ca2d5b3cd..4a5e69f59 100644 --- a/unused/testdata/src/tests/tests.go +++ b/unused/testdata/src/tests/tests.go @@ -1,3 +1,3 @@ package pkg -func fn() {} +func fn() {} // unused used_test diff --git a/unused/testdata/src/tests/tests_test.go b/unused/testdata/src/tests/tests_test.go index 4025030d5..74c5091da 100644 --- a/unused/testdata/src/tests/tests_test.go +++ b/unused/testdata/src/tests/tests_test.go @@ -2,6 +2,6 @@ package pkg import "testing" -func TestFn(t *testing.T) { +func TestFn(t *testing.T) { // used_test fn() } diff --git a/unused/testdata/src/type-dedup/dedup.go b/unused/testdata/src/type-dedup/dedup.go index 53cf2f989..83f3792be 100644 --- a/unused/testdata/src/type-dedup/dedup.go +++ b/unused/testdata/src/type-dedup/dedup.go @@ -1,16 +1,16 @@ package pkg -type t1 struct { - a int - b int // want `b` +type t1 struct { // used + a int // used + b int // unused } -type t2 struct { - a int // want `a` - b int +type t2 struct { // used + a int // unused + b int // used } -func Fn() { +func Fn() { // used x := t1{} y := t2{} println(x.a) diff --git a/unused/testdata/src/type-dedup2/dedup.go b/unused/testdata/src/type-dedup2/dedup.go index 56c7dc951..38ebc7f71 100644 --- a/unused/testdata/src/type-dedup2/dedup.go +++ b/unused/testdata/src/type-dedup2/dedup.go @@ -1,23 +1,23 @@ package pkg -func fn1(t struct { - a int - b int +func fn1(t struct { // used + a int // used + b int // used }) { println(t.a) fn2(t) } -func fn2(t struct { - a int - b int +func fn2(t struct { // used + a int // used + b int // used }) { println(t.b) } -func Fn() { +func Fn() { // used fn1(struct { - a int - b int + a int // used + b int // used }{}) } diff --git a/unused/testdata/src/type-dedup3/dedup.go b/unused/testdata/src/type-dedup3/dedup.go index 095e95f86..e796b6a20 100644 --- a/unused/testdata/src/type-dedup3/dedup.go +++ b/unused/testdata/src/type-dedup3/dedup.go @@ -1,23 +1,23 @@ package pkg -func fn1(t struct { - a int - b int +func fn1(t struct { // used + a int // used + b int // used }) { fn2(t) } -func fn2(t struct { - a int - b int +func fn2(t struct { // used + a int // used + b int // used }) { println(t.a) println(t.b) } -func Fn() { +func Fn() { // used fn1(struct { - a int - b int + a int // used + b int // used }{1, 2}) } diff --git a/unused/testdata/src/types/types.go b/unused/testdata/src/types/types.go index 393df3fa5..e8c010441 100644 --- a/unused/testdata/src/types/types.go +++ b/unused/testdata/src/types/types.go @@ -2,16 +2,16 @@ package pkg import "reflect" -type wkt interface { - XXX_WellKnownType() string +type wkt interface { // used + XXX_WellKnownType() string // used } -var typeOfWkt = reflect.TypeOf((*wkt)(nil)).Elem() +var typeOfWkt = reflect.TypeOf((*wkt)(nil)).Elem() // used -func Fn() { +func Fn() { // used _ = typeOfWkt } -type t *int +type t *int // used var _ t diff --git a/unused/testdata/src/unused-argument/unused-argument.go b/unused/testdata/src/unused-argument/unused-argument.go index 423592692..2dc76bc4f 100644 --- a/unused/testdata/src/unused-argument/unused-argument.go +++ b/unused/testdata/src/unused-argument/unused-argument.go @@ -1,10 +1,10 @@ package main -type t1 struct{} -type t2 struct{} +type 
t1 struct{} // used +type t2 struct{} // used -func (t1) foo(arg *t2) {} +func (t1) foo(arg *t2) {} // used -func init() { +func init() { // used t1{}.foo(nil) } diff --git a/unused/testdata/src/unused_type/unused_type.go b/unused/testdata/src/unused_type/unused_type.go index 0881ffe61..94672c16b 100644 --- a/unused/testdata/src/unused_type/unused_type.go +++ b/unused/testdata/src/unused_type/unused_type.go @@ -1,17 +1,17 @@ package pkg -type t1 struct{} // want `t1` +type t1 struct{} // unused -func (t1) Fn() {} +func (t1) Fn() {} // unused -type t2 struct{} +type t2 struct{} // used -func (*t2) Fn() {} +func (*t2) Fn() {} // used -func init() { +func init() { // used (*t2).Fn(nil) } -type t3 struct{} // want `t3` +type t3 struct{} // unused -func (t3) fn() +func (t3) fn() // unused diff --git a/unused/testdata/src/variables/variables.go b/unused/testdata/src/variables/variables.go index d5129a833..658694470 100644 --- a/unused/testdata/src/variables/variables.go +++ b/unused/testdata/src/variables/variables.go @@ -1,22 +1,23 @@ package pkg -var a byte -var b [16]byte +var a byte // used +var b [16]byte // used -type t1 struct{} -type t2 struct{} -type t3 struct{} -type t4 struct{} -type t5 struct{} +type t1 struct{} // used +type t2 struct{} // used +type t3 struct{} // used +type t4 struct{} // used +type t5 struct{} // used -type iface interface{} +type iface interface{} // used -var x t1 -var y = t2{} -var j, k = t3{}, t4{} -var l iface = t5{} +var x t1 // used +var y = t2{} // used +var j = t3{} // used +var k = t4{} // used +var l iface = t5{} // used -func Fn() { +func Fn() { // used println(a) _ = b[:] diff --git a/unused/testdata/src/variables/vartype.go b/unused/testdata/src/variables/vartype.go index ede73ffa5..744d20f7f 100644 --- a/unused/testdata/src/variables/vartype.go +++ b/unused/testdata/src/variables/vartype.go @@ -1,10 +1,10 @@ package pkg -type t181025 struct{} +type t181025 struct{} // used -func (t181025) F() {} +func (t181025) F() {} // used // package-level variable after function declaration used to trigger a // bug in unused. -var V181025 t181025 +var V181025 t181025 // used diff --git a/unused/unused.go b/unused/unused.go index 0df5fc8ff..5a0793c64 100644 --- a/unused/unused.go +++ b/unused/unused.go @@ -6,18 +6,20 @@ import ( "go/token" "go/types" "io" + "reflect" "strings" - "sync" - "sync/atomic" "golang.org/x/tools/go/analysis" "honnef.co/go/tools/code" + "honnef.co/go/tools/facts" "honnef.co/go/tools/go/types/typeutil" "honnef.co/go/tools/internal/passes/buildir" "honnef.co/go/tools/ir" - "honnef.co/go/tools/lint" + "honnef.co/go/tools/report" ) +var Debug io.Writer + // The graph we construct omits nodes along a path that do not // contribute any new information to the solution. For example, the // full graph for a function with a receiver would be Func -> @@ -36,10 +38,10 @@ import ( /* - packages use: - - (1.1) exported named types (unless in package main) - - (1.2) exported functions (unless in package main) - - (1.3) exported variables (unless in package main) - - (1.4) exported constants (unless in package main) + - (1.1) exported named types + - (1.2) exported functions + - (1.3) exported variables + - (1.4) exported constants - (1.5) init functions - (1.6) functions exported to cgo - (1.7) the main function iff in the main package @@ -137,14 +139,6 @@ import ( positives. Thus, we only accurately track fields of named struct types, and assume that unnamed struct types use all their fields. 
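To make the named/unnamed distinction above concrete, here is a hypothetical sketch (package and identifiers invented): fields of a named struct type are tracked one by one, while fields of an unnamed struct type are assumed used wholesale.

package pkg

type named struct {
	read  int // tracked individually: used, because Fn reads it
	stale int // tracked individually: would be reported as unused
}

func Fn() int {
	// the anonymous struct type's fields are not tracked
	// individually; the checker assumes all of them are used
	v := struct {
		a int
		b int
	}{a: 1, b: 2}
	return named{read: v.a}.read
}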
- -- Differences in whole program mode: - - (e2) types aim to implement all exported interfaces from all packages - - (e3) exported identifiers aren't automatically used. for fields and - methods this poses extra issues due to reflection. We assume - that all exported fields are used. We also maintain a list of - known reflection-based method callers. - */ func assert(b bool) { @@ -153,24 +147,6 @@ func assert(b bool) { } } -func typString(obj types.Object) string { - switch obj := obj.(type) { - case *types.Func: - return "func" - case *types.Var: - if obj.IsField() { - return "field" - } - return "var" - case *types.Const: - return "const" - case *types.TypeName: - return "type" - default: - return "identifier" - } -} - // /usr/lib/go/src/runtime/proc.go:433:6: func badmorestackg0 is unused (U1000) // Functions defined in the Go runtime that may be called through @@ -421,79 +397,79 @@ type pkg struct { TypesSizes types.Sizes IR *ir.Package SrcFuncs []*ir.Function + Directives []facts.Directive } -type Checker struct { - WholeProgram bool - Debug io.Writer - - mu sync.Mutex - initialPackages map[*types.Package]struct{} - allPackages map[*types.Package]struct{} - graph *Graph +// TODO(dh): should we return a map instead of two slices? +type Result struct { + Used []types.Object + Unused []types.Object } -func NewChecker(wholeProgram bool) *Checker { - return &Checker{ - initialPackages: map[*types.Package]struct{}{}, - allPackages: map[*types.Package]struct{}{}, - WholeProgram: wholeProgram, - } +type SerializedResult struct { + Used []SerializedObject + Unused []SerializedObject } -func (c *Checker) Analyzer() *analysis.Analyzer { - name := "U1000" - if c.WholeProgram { - name = "U1001" - } - return &analysis.Analyzer{ - Name: name, - Doc: "Unused code", - Run: c.Run, - Requires: []*analysis.Analyzer{buildir.Analyzer}, - } +var Analyzer = &analysis.Analyzer{ + Name: "U1000", + Doc: "Unused code", + Run: run, + Requires: []*analysis.Analyzer{buildir.Analyzer, facts.Generated, facts.Directives}, + ResultType: reflect.TypeOf(Result{}), } -func (c *Checker) Run(pass *analysis.Pass) (interface{}, error) { - c.mu.Lock() - if c.graph == nil { - c.graph = NewGraph() - c.graph.wholeProgram = c.WholeProgram - c.graph.fset = pass.Fset - } +type SerializedObject struct { + PkgPath string + Name string + Position token.Position + DisplayPosition token.Position + Kind string + InGenerated bool +} - var visit func(pkg *types.Package) - visit = func(pkg *types.Package) { - if _, ok := c.allPackages[pkg]; ok { - return - } - c.allPackages[pkg] = struct{}{} - for _, imp := range pkg.Imports() { - visit(imp) +func typString(obj types.Object) string { + switch obj := obj.(type) { + case *types.Func: + return "func" + case *types.Var: + if obj.IsField() { + return "field" } + return "var" + case *types.Const: + return "const" + case *types.TypeName: + return "type" + default: + return "identifier" } - visit(pass.Pkg) +} - c.initialPackages[pass.Pkg] = struct{}{} - c.mu.Unlock() +func Serialize(pass *analysis.Pass, res Result, fset *token.FileSet) SerializedResult { + // OPT(dh): there's no point in serializing Used objects that are + // always used, such as exported names, blank identifiers, or + // anonymous struct fields. Used only exists to overrule Unused of + // a different package. If something can never be unused, then its + // presence in Used is useless. + // + // I'm not sure if this should happen when serializing, or when + // returning Result. 
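Because Analyzer now advertises a ResultType, a downstream analyzer can require it and consume the used/unused sets directly. A minimal sketch of such a consumer (the wrapping analyzer and its name are hypothetical; error handling elided):

package unusedreport

import (
	"golang.org/x/tools/go/analysis"

	"honnef.co/go/tools/unused"
)

// Analyzer depends on unused and turns its Result into plain diagnostics.
var Analyzer = &analysis.Analyzer{
	Name:     "unusedreport",
	Doc:      "report objects the unused pass considers dead",
	Requires: []*analysis.Analyzer{unused.Analyzer},
	Run: func(pass *analysis.Pass) (interface{}, error) {
		res := pass.ResultOf[unused.Analyzer].(unused.Result)
		for _, obj := range res.Unused {
			pass.Reportf(obj.Pos(), "%s is unused", obj.Name())
		}
		return nil, nil
	},
}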
- irpkg := pass.ResultOf[buildir.Analyzer].(*buildir.IR) - pkg := &pkg{ - Fset: pass.Fset, - Files: pass.Files, - Pkg: pass.Pkg, - TypesInfo: pass.TypesInfo, - TypesSizes: pass.TypesSizes, - IR: irpkg.Pkg, - SrcFuncs: irpkg.SrcFuncs, + out := SerializedResult{ + Used: make([]SerializedObject, len(res.Used)), + Unused: make([]SerializedObject, len(res.Unused)), } - - c.processPkg(c.graph, pkg) - - return nil, nil + for i, obj := range res.Used { + out.Used[i] = serializeObject(pass, fset, obj) + } + for i, obj := range res.Unused { + out.Unused[i] = serializeObject(pass, fset, obj) + } + return out } -func (c *Checker) ProblemObject(fset *token.FileSet, obj types.Object) lint.Problem { +func serializeObject(pass *analysis.Pass, fset *token.FileSet, obj types.Object) SerializedObject { name := obj.Name() if sig, ok := obj.Type().(*types.Signature); ok && sig.Recv() != nil { switch sig.Recv().Type().(type) { @@ -506,305 +482,139 @@ func (c *Checker) ProblemObject(fset *token.FileSet, obj types.Object) lint.Prob } } } - - checkName := "U1000" - if c.WholeProgram { - checkName = "U1001" - } - return lint.Problem{ - Pos: lint.DisplayPosition(fset, obj.Pos()), - Message: fmt.Sprintf("%s %s is unused", typString(obj), name), - Check: checkName, + return SerializedObject{ + PkgPath: obj.Pkg().Path(), + Name: name, + Position: fset.PositionFor(obj.Pos(), false), + DisplayPosition: report.DisplayPosition(fset, obj.Pos()), + Kind: typString(obj), + InGenerated: code.IsGenerated(pass, obj.Pos()), } } -func (c *Checker) Result() []types.Object { - out := c.results() - - out2 := make([]types.Object, 0, len(out)) - for _, v := range out { - if _, ok := c.initialPackages[v.Pkg()]; !ok { - continue - } - out2 = append(out2, v) - } - - return out2 +type checker struct { + graph *graph } -func (c *Checker) debugf(f string, v ...interface{}) { - if c.Debug != nil { - fmt.Fprintf(c.Debug, f, v...) +func debugf(f string, v ...interface{}) { + if Debug != nil { + fmt.Fprintf(Debug, f, v...) 
} } -func (graph *Graph) quieten(node *Node) { - if node.seen { - return - } - switch obj := node.obj.(type) { - case *types.Named: - for i := 0; i < obj.NumMethods(); i++ { - m := obj.Method(i) - if node, ok := graph.nodeMaybe(m); ok { - node.quiet = true - } - } - case *types.Struct: - for i := 0; i < obj.NumFields(); i++ { - if node, ok := graph.nodeMaybe(obj.Field(i)); ok { - node.quiet = true - } - } - case *types.Interface: - for i := 0; i < obj.NumExplicitMethods(); i++ { - m := obj.ExplicitMethod(i) - if node, ok := graph.nodeMaybe(m); ok { - node.quiet = true - } - } +func run(pass *analysis.Pass) (interface{}, error) { + irpkg := pass.ResultOf[buildir.Analyzer].(*buildir.IR) + dirs := pass.ResultOf[facts.Directives].([]facts.Directive) + pkg := &pkg{ + Fset: pass.Fset, + Files: pass.Files, + Pkg: pass.Pkg, + TypesInfo: pass.TypesInfo, + TypesSizes: pass.TypesSizes, + IR: irpkg.Pkg, + SrcFuncs: irpkg.SrcFuncs, + Directives: dirs, } -} -func (c *Checker) results() []types.Object { - if c.graph == nil { - // We never analyzed any packages - return nil + c := &checker{ + graph: newGraph(pkg), } - var out []types.Object - - if c.WholeProgram { - var ifaces []*types.Interface - var notIfaces []types.Type + c.graph.entry(pkg) + used, unused := c.results() - // implement as many interfaces as possible - c.graph.seenTypes.Iterate(func(t types.Type, _ interface{}) { - switch t := t.(type) { - case *types.Interface: - if t.NumMethods() > 0 { - ifaces = append(ifaces, t) - } - default: - if _, ok := t.Underlying().(*types.Interface); !ok { - notIfaces = append(notIfaces, t) - } - } - }) - - for pkg := range c.allPackages { - for _, iface := range interfacesFromExportData(pkg) { - if iface.NumMethods() > 0 { - ifaces = append(ifaces, iface) - } - } - } - - ctx := &context{ - g: c.graph, - seenTypes: &c.graph.seenTypes, - } - // (8.0) handle interfaces - // (e2) types aim to implement all exported interfaces from all packages - for _, t := range notIfaces { - // OPT(dh): it is unfortunate that we do not have access - // to a populated method set at this point. 
- ms := types.NewMethodSet(t) - for _, iface := range ifaces { - if sels, ok := c.graph.implements(t, iface, ms); ok { - for _, sel := range sels { - c.graph.useMethod(ctx, t, sel, t, edgeImplements) - } - } - } - } - } - - if c.Debug != nil { - debugNode := func(node *Node) { - if node.obj == nil { - c.debugf("n%d [label=\"Root\"];\n", node.id) + if Debug != nil { + debugNode := func(n *node) { + if n.obj == nil { + debugf("n%d [label=\"Root\"];\n", n.id) } else { - c.debugf("n%d [label=%q];\n", node.id, fmt.Sprintf("(%T) %s", node.obj, node.obj)) + color := "red" + if n.seen { + color = "green" + } + debugf("n%d [label=%q, color=%q];\n", n.id, fmt.Sprintf("(%T) %s", n.obj, n.obj), color) } - for _, e := range node.used { + for _, e := range n.used { for i := edgeKind(1); i < 64; i++ { if e.kind.is(1 << i) { - c.debugf("n%d -> n%d [label=%q];\n", node.id, e.node.id, edgeKind(1< n%d [label=%q];\n", n.id, e.node.id, edgeKind(1< 1 { - cg := &ConstGroup{} - ctx.see(cg) + cg := &constGroup{} + g.see(cg) for _, spec := range specs { for _, name := range spec.(*ast.ValueSpec).Names { obj := pkg.TypesInfo.ObjectOf(name) // (10.1) const groups - ctx.seeAndUse(obj, cg, edgeConstGroup) - ctx.use(cg, obj, edgeConstGroup) + g.seeAndUse(obj, cg, edgeConstGroup) + g.use(cg, obj, edgeConstGroup) } } } @@ -1238,16 +977,16 @@ func (g *Graph) entry(pkg *pkg) { for _, name := range v.Names { T := pkg.TypesInfo.TypeOf(name) if fn != nil { - ctx.seeAndUse(T, fn, edgeVarDecl) + g.seeAndUse(T, fn, edgeVarDecl) } else { // TODO(dh): we likely want to make // the type used by the variable, not // the package containing the // variable. But then we have to take // special care of blank identifiers. - ctx.seeAndUse(T, nil, edgeVarDecl) + g.seeAndUse(T, nil, edgeVarDecl) } - g.typ(ctx, T, nil) + g.typ(T, nil) } } case token.TYPE: @@ -1261,11 +1000,11 @@ func (g *Graph) entry(pkg *pkg) { v := spec.(*ast.TypeSpec) T := pkg.TypesInfo.TypeOf(v.Type) obj := pkg.TypesInfo.ObjectOf(v.Name) - ctx.see(obj) - ctx.see(T) - ctx.use(T, obj, edgeType) - g.typ(ctx, obj.Type(), nil) - g.typ(ctx, T, nil) + g.see(obj) + g.see(T) + g.use(T, obj, edgeType) + g.typ(obj.Type(), nil) + g.typ(T, nil) if v.Assign != 0 { aliasFor := obj.(*types.TypeName).Type() @@ -1276,10 +1015,10 @@ func (g *Graph) entry(pkg *pkg) { // just mark the alias used. // // FIXME(dh): what about aliases declared inside functions? - ctx.use(obj, nil, edgeAlias) + g.use(obj, nil, edgeAlias) } else { - ctx.see(aliasFor) - ctx.seeAndUse(obj, aliasFor, edgeAlias) + g.see(aliasFor) + g.seeAndUse(obj, aliasFor, edgeAlias) } } } @@ -1295,16 +1034,16 @@ func (g *Graph) entry(pkg *pkg) { // nothing to do, we collect all constants from Defs case *ir.Global: if m.Object() != nil { - ctx.see(m.Object()) - if g.trackExportedIdentifier(ctx, m.Object()) { - // (1.3) packages use exported variables (unless in package main) - ctx.use(m.Object(), nil, edgeExportedVariable) + g.see(m.Object()) + if m.Object().Exported() { + // (1.3) packages use exported variables + g.use(m.Object(), nil, edgeExportedVariable) } } case *ir.Function: mObj := owningObject(m) if mObj != nil { - ctx.see(mObj) + g.see(mObj) } //lint:ignore SA9003 handled implicitly if m.Name() == "init" { @@ -1315,17 +1054,17 @@ func (g *Graph) entry(pkg *pkg) { // be owned by the package. } // This branch catches top-level functions, not methods. 
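The debug dump above emits Graphviz dot syntax, coloring reachable nodes green and dead ones red. A driver can capture it by assigning the package-level Debug writer before the analyzer runs; a sketch (output path arbitrary):

package main

import (
	"log"
	"os"

	"honnef.co/go/tools/unused"
)

func main() {
	f, err := os.Create("unused-graph.dot") // render later with dot -Tsvg
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	unused.Debug = f
	// ... run unused.Analyzer via a driver of your choice ...
}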
- if m.Object() != nil && g.trackExportedIdentifier(ctx, m.Object()) { - // (1.2) packages use exported functions (unless in package main) - ctx.use(mObj, nil, edgeExportedFunction) + if m.Object() != nil && m.Object().Exported() { + // (1.2) packages use exported functions + g.use(mObj, nil, edgeExportedFunction) } if m.Name() == "main" && pkg.Pkg.Name() == "main" { // (1.7) packages use the main function iff in the main package - ctx.use(mObj, nil, edgeMainFunction) + g.use(mObj, nil, edgeMainFunction) } if pkg.Pkg.Path() == "runtime" && runtimeFuncs[m.Name()] { // (9.8) runtime functions that may be called from user code via the compiler - ctx.use(mObj, nil, edgeRuntimeFunction) + g.use(mObj, nil, edgeRuntimeFunction) } if m.Source() != nil { doc := m.Source().(*ast.FuncDecl).Doc @@ -1333,57 +1072,102 @@ func (g *Graph) entry(pkg *pkg) { for _, cmt := range doc.List { if strings.HasPrefix(cmt.Text, "//go:cgo_export_") { // (1.6) packages use functions exported to cgo - ctx.use(mObj, nil, edgeCgoExported) + g.use(mObj, nil, edgeCgoExported) } } } } - g.function(ctx, m) + g.function(m) case *ir.Type: if m.Object() != nil { - ctx.see(m.Object()) - if g.trackExportedIdentifier(ctx, m.Object()) { - // (1.1) packages use exported named types (unless in package main) - ctx.use(m.Object(), nil, edgeExportedType) + g.see(m.Object()) + if m.Object().Exported() { + // (1.1) packages use exported named types + g.use(m.Object(), nil, edgeExportedType) } } - g.typ(ctx, m.Type(), nil) + g.typ(m.Type(), nil) default: panic(fmt.Sprintf("unreachable: %T", m)) } } - if !g.wholeProgram { - // When not in whole program mode we reset seenTypes after each package, - // which means g.seenTypes only contains types of - // interest to us. In whole program mode, we're better off - // processing all interfaces at once, globally, both for - // performance reasons and because in whole program mode we - // actually care about all interfaces, not just the subset - // that has unexported methods. 
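Rule (8.0), now applied once per package below, is what keeps methods alive when a type is only ever handled through an interface. A hypothetical illustration of the effect:

package pkg

type describer interface {
	describe() string
}

type impl struct{}

// kept by the implements edge: impl is converted to describer below,
// so describe counts as used even though nothing calls it by name.
func (impl) describe() string { return "impl" }

func Fn() describer {
	return impl{}
}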
- - var ifaces []*types.Interface - var notIfaces []types.Type - - ctx.seenTypes.Iterate(func(t types.Type, _ interface{}) { - switch t := t.(type) { - case *types.Interface: - // OPT(dh): (8.1) we only need interfaces that have unexported methods - ifaces = append(ifaces, t) - default: - if _, ok := t.Underlying().(*types.Interface); !ok { - notIfaces = append(notIfaces, t) + var ifaces []*types.Interface + var notIfaces []types.Type + + g.seenTypes.Iterate(func(t types.Type, _ interface{}) { + switch t := t.(type) { + case *types.Interface: + // OPT(dh): (8.1) we only need interfaces that have unexported methods + ifaces = append(ifaces, t) + default: + if _, ok := t.Underlying().(*types.Interface); !ok { + notIfaces = append(notIfaces, t) + } + } + }) + + // (8.0) handle interfaces + for _, t := range notIfaces { + ms := pkg.IR.Prog.MethodSets.MethodSet(t) + for _, iface := range ifaces { + if sels, ok := g.implements(t, iface, ms); ok { + for _, sel := range sels { + g.useMethod(t, sel, t, edgeImplements) } } - }) + } + } + + type ignoredKey struct { + file string + line int + } + ignores := map[ignoredKey]struct{}{} + for _, dir := range g.pkg.Directives { + if dir.Command != "ignore" { + continue + } + if len(dir.Arguments) == 0 { + continue + } + for _, check := range strings.Split(dir.Arguments[0], ",") { + if check == "U1000" { + pos := g.pkg.Fset.PositionFor(dir.Node.Pos(), false) + key := ignoredKey{ + pos.Filename, + pos.Line, + } + ignores[key] = struct{}{} + break + } + } + } - // (8.0) handle interfaces - for _, t := range notIfaces { - ms := pkg.IR.Prog.MethodSets.MethodSet(t) - for _, iface := range ifaces { - if sels, ok := g.implements(t, iface, ms); ok { - for _, sel := range sels { - g.useMethod(ctx, t, sel, t, edgeImplements) + if len(ignores) > 0 { + // all objects annotated with a //lint:ignore U1000 are considered used + for obj := range g.Nodes { + if obj, ok := obj.(types.Object); ok { + pos := g.pkg.Fset.PositionFor(obj.Pos(), false) + key := ignoredKey{ + pos.Filename, + pos.Line, + } + if _, ok := ignores[key]; ok { + g.use(obj, nil, edgeIgnored) + + // use methods and fields of ignored types + if obj, ok := obj.(*types.TypeName); ok { + if typ, ok := obj.Type().(*types.Named); ok { + for i := 0; i < typ.NumMethods(); i++ { + g.use(typ.Method(i), nil, edgeIgnored) + } + } + if typ, ok := obj.Type().Underlying().(*types.Struct); ok { + for i := 0; i < typ.NumFields(); i++ { + g.use(typ.Field(i), nil, edgeIgnored) + } + } } } } @@ -1391,7 +1175,7 @@ func (g *Graph) entry(pkg *pkg) { } } -func (g *Graph) useMethod(ctx *context, t types.Type, sel *types.Selection, by interface{}, kind edgeKind) { +func (g *graph) useMethod(t types.Type, sel *types.Selection, by interface{}, kind edgeKind) { obj := sel.Obj() path := sel.Index() assert(obj != nil) @@ -1400,12 +1184,12 @@ func (g *Graph) useMethod(ctx *context, t types.Type, sel *types.Selection, by i for _, idx := range path[:len(path)-1] { next := base.Field(idx) // (6.3) structs use embedded fields that help implement interfaces - ctx.see(base) - ctx.seeAndUse(next, base, edgeProvidesMethod) + g.see(base) + g.seeAndUse(next, base, edgeProvidesMethod) base, _ = code.Dereference(next.Type()).Underlying().(*types.Struct) } } - ctx.seeAndUse(obj, by, kind) + g.seeAndUse(obj, by, kind) } func owningObject(fn *ir.Function) types.Object { @@ -1418,94 +1202,77 @@ func owningObject(fn *ir.Function) types.Object { return nil } -func (g *Graph) function(ctx *context, fn *ir.Function) { - if fn.Package() != nil && 
fn.Package() != ctx.pkg.IR { +func (g *graph) function(fn *ir.Function) { + if fn.Package() != nil && fn.Package() != g.pkg.IR { return } name := fn.RelString(nil) - if _, ok := ctx.seenFns[name]; ok { + if _, ok := g.seenFns[name]; ok { return } - ctx.seenFns[name] = struct{}{} + g.seenFns[name] = struct{}{} // (4.1) functions use all their arguments, return parameters and receivers - g.signature(ctx, fn.Signature, owningObject(fn)) - g.instructions(ctx, fn) + g.signature(fn.Signature, owningObject(fn)) + g.instructions(fn) for _, anon := range fn.AnonFuncs { // (4.2) functions use anonymous functions defined beneath them // // This fact is expressed implicitly. Anonymous functions have // no types.Object, so their owner is the surrounding // function. - g.function(ctx, anon) + g.function(anon) } } -func (g *Graph) typ(ctx *context, t types.Type, parent types.Type) { - if g.wholeProgram { - g.mu.Lock() - } - if ctx.seenTypes.At(t) != nil { - if g.wholeProgram { - g.mu.Unlock() - } +func (g *graph) typ(t types.Type, parent types.Type) { + if g.seenTypes.At(t) != nil { return } - if g.wholeProgram { - g.mu.Unlock() - } + if t, ok := t.(*types.Named); ok && t.Obj().Pkg() != nil { - if t.Obj().Pkg() != ctx.pkg.Pkg { + if t.Obj().Pkg() != g.pkg.Pkg { return } } - if g.wholeProgram { - g.mu.Lock() - } - ctx.seenTypes.Set(t, struct{}{}) - if g.wholeProgram { - g.mu.Unlock() - } + g.seenTypes.Set(t, struct{}{}) if isIrrelevant(t) { return } - ctx.see(t) + g.see(t) switch t := t.(type) { case *types.Struct: for i := 0; i < t.NumFields(); i++ { - ctx.see(t.Field(i)) + g.see(t.Field(i)) if t.Field(i).Exported() { // (6.2) structs use exported fields - ctx.use(t.Field(i), t, edgeExportedField) + g.use(t.Field(i), t, edgeExportedField) } else if t.Field(i).Name() == "_" { - ctx.use(t.Field(i), t, edgeBlankField) + g.use(t.Field(i), t, edgeBlankField) } else if isNoCopyType(t.Field(i).Type()) { // (6.1) structs use fields of type NoCopy sentinel - ctx.use(t.Field(i), t, edgeNoCopySentinel) + g.use(t.Field(i), t, edgeNoCopySentinel) } else if parent == nil { // (11.1) anonymous struct types use all their fields. - ctx.use(t.Field(i), t, edgeAnonymousStruct) + g.use(t.Field(i), t, edgeAnonymousStruct) } if t.Field(i).Anonymous() { - // (e3) exported identifiers aren't automatically used. - if !g.wholeProgram { - // does the embedded field contribute exported methods to the method set? - T := t.Field(i).Type() - if _, ok := T.Underlying().(*types.Pointer); !ok { - // An embedded field is addressable, so check - // the pointer type to get the full method set - T = types.NewPointer(T) - } - ms := ctx.pkg.IR.Prog.MethodSets.MethodSet(T) - for j := 0; j < ms.Len(); j++ { - if ms.At(j).Obj().Exported() { - // (6.4) structs use embedded fields that have exported methods (recursively) - ctx.use(t.Field(i), t, edgeExtendsExportedMethodSet) - break - } + // does the embedded field contribute exported methods to the method set? 
+ T := t.Field(i).Type() + if _, ok := T.Underlying().(*types.Pointer); !ok { + // An embedded field is addressable, so check + // the pointer type to get the full method set + T = types.NewPointer(T) + } + ms := g.pkg.IR.Prog.MethodSets.MethodSet(T) + for j := 0; j < ms.Len(); j++ { + if ms.At(j).Obj().Exported() { + // (6.4) structs use embedded fields that have exported methods (recursively) + g.use(t.Field(i), t, edgeExtendsExportedMethodSet) + break } } @@ -1534,115 +1301,115 @@ func (g *Graph) typ(ctx *context, t types.Type, parent types.Type) { // does the embedded field contribute exported fields? if hasExportedField(t.Field(i).Type()) { // (6.5) structs use embedded structs that have exported fields (recursively) - ctx.use(t.Field(i), t, edgeExtendsExportedFields) + g.use(t.Field(i), t, edgeExtendsExportedFields) } } - g.variable(ctx, t.Field(i)) + g.variable(t.Field(i)) } case *types.Basic: // Nothing to do case *types.Named: // (9.3) types use their underlying and element types - ctx.seeAndUse(t.Underlying(), t, edgeUnderlyingType) - ctx.seeAndUse(t.Obj(), t, edgeTypeName) - ctx.seeAndUse(t, t.Obj(), edgeNamedType) + g.seeAndUse(t.Underlying(), t, edgeUnderlyingType) + g.seeAndUse(t.Obj(), t, edgeTypeName) + g.seeAndUse(t, t.Obj(), edgeNamedType) // (2.4) named types use the pointer type if _, ok := t.Underlying().(*types.Interface); !ok && t.NumMethods() > 0 { - ctx.seeAndUse(types.NewPointer(t), t, edgePointerType) + g.seeAndUse(types.NewPointer(t), t, edgePointerType) } for i := 0; i < t.NumMethods(); i++ { - ctx.see(t.Method(i)) + g.see(t.Method(i)) // don't use trackExportedIdentifier here, we care about // all exported methods, even in package main or in tests. - if t.Method(i).Exported() && !g.wholeProgram { + if t.Method(i).Exported() { // (2.1) named types use exported methods - ctx.use(t.Method(i), t, edgeExportedMethod) + g.use(t.Method(i), t, edgeExportedMethod) } - g.function(ctx, ctx.pkg.IR.Prog.FuncValue(t.Method(i))) + g.function(g.pkg.IR.Prog.FuncValue(t.Method(i))) } - g.typ(ctx, t.Underlying(), t) + g.typ(t.Underlying(), t) case *types.Slice: // (9.3) types use their underlying and element types - ctx.seeAndUse(t.Elem(), t, edgeElementType) - g.typ(ctx, t.Elem(), nil) + g.seeAndUse(t.Elem(), t, edgeElementType) + g.typ(t.Elem(), nil) case *types.Map: // (9.3) types use their underlying and element types - ctx.seeAndUse(t.Elem(), t, edgeElementType) + g.seeAndUse(t.Elem(), t, edgeElementType) // (9.3) types use their underlying and element types - ctx.seeAndUse(t.Key(), t, edgeKeyType) - g.typ(ctx, t.Elem(), nil) - g.typ(ctx, t.Key(), nil) + g.seeAndUse(t.Key(), t, edgeKeyType) + g.typ(t.Elem(), nil) + g.typ(t.Key(), nil) case *types.Signature: - g.signature(ctx, t, nil) + g.signature(t, nil) case *types.Interface: for i := 0; i < t.NumMethods(); i++ { m := t.Method(i) // (8.3) All interface methods are marked as used - ctx.seeAndUse(m, t, edgeInterfaceMethod) - ctx.seeAndUse(m.Type().(*types.Signature), m, edgeSignature) - g.signature(ctx, m.Type().(*types.Signature), nil) + g.seeAndUse(m, t, edgeInterfaceMethod) + g.seeAndUse(m.Type().(*types.Signature), m, edgeSignature) + g.signature(m.Type().(*types.Signature), nil) } for i := 0; i < t.NumEmbeddeds(); i++ { tt := t.EmbeddedType(i) // (8.4) All embedded interfaces are marked as used - ctx.seeAndUse(tt, t, edgeEmbeddedInterface) + g.seeAndUse(tt, t, edgeEmbeddedInterface) } case *types.Array: // (9.3) types use their underlying and element types - ctx.seeAndUse(t.Elem(), t, edgeElementType) - g.typ(ctx, 
t.Elem(), nil) + g.seeAndUse(t.Elem(), t, edgeElementType) + g.typ(t.Elem(), nil) case *types.Pointer: // (9.3) types use their underlying and element types - ctx.seeAndUse(t.Elem(), t, edgeElementType) - g.typ(ctx, t.Elem(), nil) + g.seeAndUse(t.Elem(), t, edgeElementType) + g.typ(t.Elem(), nil) case *types.Chan: // (9.3) types use their underlying and element types - ctx.seeAndUse(t.Elem(), t, edgeElementType) - g.typ(ctx, t.Elem(), nil) + g.seeAndUse(t.Elem(), t, edgeElementType) + g.typ(t.Elem(), nil) case *types.Tuple: for i := 0; i < t.Len(); i++ { // (9.3) types use their underlying and element types - ctx.seeAndUse(t.At(i).Type(), t, edgeTupleElement|edgeType) - g.typ(ctx, t.At(i).Type(), nil) + g.seeAndUse(t.At(i).Type(), t, edgeTupleElement|edgeType) + g.typ(t.At(i).Type(), nil) } default: panic(fmt.Sprintf("unreachable: %T", t)) } } -func (g *Graph) variable(ctx *context, v *types.Var) { +func (g *graph) variable(v *types.Var) { // (9.2) variables use their types - ctx.seeAndUse(v.Type(), v, edgeType) - g.typ(ctx, v.Type(), nil) + g.seeAndUse(v.Type(), v, edgeType) + g.typ(v.Type(), nil) } -func (g *Graph) signature(ctx *context, sig *types.Signature, fn types.Object) { +func (g *graph) signature(sig *types.Signature, fn types.Object) { var user interface{} = fn if fn == nil { user = sig - ctx.see(sig) + g.see(sig) } if sig.Recv() != nil { - ctx.seeAndUse(sig.Recv().Type(), user, edgeReceiver|edgeType) - g.typ(ctx, sig.Recv().Type(), nil) + g.seeAndUse(sig.Recv().Type(), user, edgeReceiver|edgeType) + g.typ(sig.Recv().Type(), nil) } for i := 0; i < sig.Params().Len(); i++ { param := sig.Params().At(i) - ctx.seeAndUse(param.Type(), user, edgeFunctionArgument|edgeType) - g.typ(ctx, param.Type(), nil) + g.seeAndUse(param.Type(), user, edgeFunctionArgument|edgeType) + g.typ(param.Type(), nil) } for i := 0; i < sig.Results().Len(); i++ { param := sig.Results().At(i) - ctx.seeAndUse(param.Type(), user, edgeFunctionResult|edgeType) - g.typ(ctx, param.Type(), nil) + g.seeAndUse(param.Type(), user, edgeFunctionResult|edgeType) + g.typ(param.Type(), nil) } } -func (g *Graph) instructions(ctx *context, fn *ir.Function) { +func (g *graph) instructions(fn *ir.Function) { fnObj := owningObject(fn) for _, b := range fn.Blocks { for _, instr := range b.Instrs { @@ -1663,17 +1430,17 @@ func (g *Graph) instructions(ctx *context, fn *ir.Function) { // (9.5) instructions use their operands // (4.4) functions use functions they return. 
we assume that someone else will call the returned function if owningObject(v) != nil { - ctx.seeAndUse(owningObject(v), fnObj, edgeInstructionOperand) + g.seeAndUse(owningObject(v), fnObj, edgeInstructionOperand) } - g.function(ctx, v) + g.function(v) case *ir.Const: // (9.6) instructions use their operands' types - ctx.seeAndUse(v.Type(), fnObj, edgeType) - g.typ(ctx, v.Type(), nil) + g.seeAndUse(v.Type(), fnObj, edgeType) + g.typ(v.Type(), nil) case *ir.Global: if v.Object() != nil { // (9.5) instructions use their operands - ctx.seeAndUse(v.Object(), fnObj, edgeInstructionOperand) + g.seeAndUse(v.Object(), fnObj, edgeInstructionOperand) } } }) @@ -1684,8 +1451,8 @@ func (g *Graph) instructions(ctx *context, fn *ir.Function) { // (4.8) instructions use their types // (9.4) conversions use the type they convert to - ctx.seeAndUse(v.Type(), fnObj, edgeType) - g.typ(ctx, v.Type(), nil) + g.seeAndUse(v.Type(), fnObj, edgeType) + g.typ(v.Type(), nil) } } switch instr := instr.(type) { @@ -1693,51 +1460,21 @@ func (g *Graph) instructions(ctx *context, fn *ir.Function) { st := instr.X.Type().Underlying().(*types.Struct) field := st.Field(instr.Field) // (4.7) functions use fields they access - ctx.seeAndUse(field, fnObj, edgeFieldAccess) + g.seeAndUse(field, fnObj, edgeFieldAccess) case *ir.FieldAddr: st := code.Dereference(instr.X.Type()).Underlying().(*types.Struct) field := st.Field(instr.Field) // (4.7) functions use fields they access - ctx.seeAndUse(field, fnObj, edgeFieldAccess) + g.seeAndUse(field, fnObj, edgeFieldAccess) case *ir.Store: // nothing to do, handled generically by operands case *ir.Call: c := instr.Common() if !c.IsInvoke() { // handled generically as an instruction operand - - if g.wholeProgram { - // (e3) special case known reflection-based method callers - switch code.CallName(c) { - case "net/rpc.Register", "net/rpc.RegisterName", "(*net/rpc.Server).Register", "(*net/rpc.Server).RegisterName": - var arg ir.Value - switch code.CallName(c) { - case "net/rpc.Register": - arg = c.Args[0] - case "net/rpc.RegisterName": - arg = c.Args[1] - case "(*net/rpc.Server).Register": - arg = c.Args[1] - case "(*net/rpc.Server).RegisterName": - arg = c.Args[2] - } - walkPhi(arg, func(v ir.Value) { - if v, ok := v.(*ir.MakeInterface); ok { - walkPhi(v.X, func(vv ir.Value) { - ms := ctx.pkg.IR.Prog.MethodSets.MethodSet(vv.Type()) - for i := 0; i < ms.Len(); i++ { - if ms.At(i).Obj().Exported() { - g.useMethod(ctx, vv.Type(), ms.At(i), fnObj, edgeNetRPCRegister) - } - } - }) - } - }) - } - } } else { // (4.5) functions use functions/interface methods they call - ctx.seeAndUse(c.Method, fnObj, edgeInterfaceCall) + g.seeAndUse(c.Method, fnObj, edgeInterfaceCall) } case *ir.Return: // nothing to do, handled generically by operands @@ -1754,14 +1491,14 @@ func (g *Graph) instructions(ctx *context, fn *ir.Function) { assert(s1.NumFields() == s2.NumFields()) for i := 0; i < s1.NumFields(); i++ { - ctx.see(s1.Field(i)) - ctx.see(s2.Field(i)) + g.see(s1.Field(i)) + g.see(s2.Field(i)) // (5.1) when converting between two equivalent structs, the fields in // either struct use each other. the fields are relevant for the // conversion, but only if the fields are also accessed outside the // conversion. 
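Concretely, rule (5.1) means a conversion between structurally identical structs links the corresponding fields to each other, but neither side's field survives unless something also reads or writes it outside the conversion. A sketch with invented types:

package pkg

type wire struct {
	a int
	b int
}

type model struct {
	a int
	b int
}

func Fn(w wire) int {
	m := model(w) // ties wire's fields to model's, pairwise
	// m.a is read, so both a fields are used; the b fields are only
	// relevant to the conversion and remain unused
	return m.a
}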
- ctx.seeAndUse(s1.Field(i), s2.Field(i), edgeStructConversion) - ctx.seeAndUse(s2.Field(i), s1.Field(i), edgeStructConversion) + g.seeAndUse(s1.Field(i), s2.Field(i), edgeStructConversion) + g.seeAndUse(s2.Field(i), s1.Field(i), edgeStructConversion) } } case *ir.MakeInterface: @@ -1777,7 +1514,7 @@ func (g *Graph) instructions(ctx *context, fn *ir.Function) { if st, ok := ptr.Elem().Underlying().(*types.Struct); ok { for i := 0; i < st.NumFields(); i++ { // (5.2) when converting to or from unsafe.Pointer, mark all fields as used. - ctx.seeAndUse(st.Field(i), fnObj, edgeUnsafeConversion) + g.seeAndUse(st.Field(i), fnObj, edgeUnsafeConversion) } } } @@ -1788,7 +1525,7 @@ func (g *Graph) instructions(ctx *context, fn *ir.Function) { if st, ok := ptr.Elem().Underlying().(*types.Struct); ok { for i := 0; i < st.NumFields(); i++ { // (5.2) when converting to or from unsafe.Pointer, mark all fields as used. - ctx.seeAndUse(st.Field(i), fnObj, edgeUnsafeConversion) + g.seeAndUse(st.Field(i), fnObj, edgeUnsafeConversion) } } } @@ -1930,49 +1667,3 @@ func walkPhi(v ir.Value, fn func(v ir.Value)) { } impl(phi) } - -func interfacesFromExportData(pkg *types.Package) []*types.Interface { - var out []*types.Interface - scope := pkg.Scope() - for _, name := range scope.Names() { - obj := scope.Lookup(name) - out = append(out, interfacesFromObject(obj)...) - } - return out -} - -func interfacesFromObject(obj types.Object) []*types.Interface { - var out []*types.Interface - switch obj := obj.(type) { - case *types.Func: - sig := obj.Type().(*types.Signature) - for i := 0; i < sig.Results().Len(); i++ { - out = append(out, interfacesFromObject(sig.Results().At(i))...) - } - for i := 0; i < sig.Params().Len(); i++ { - out = append(out, interfacesFromObject(sig.Params().At(i))...) - } - case *types.TypeName: - if named, ok := obj.Type().(*types.Named); ok { - for i := 0; i < named.NumMethods(); i++ { - out = append(out, interfacesFromObject(named.Method(i))...) - } - - if iface, ok := named.Underlying().(*types.Interface); ok { - out = append(out, iface) - } - } - case *types.Var: - // No call to Underlying here. We want unnamed interfaces - // only. Named interfaces are gotten directly from the - // package's scope. - if iface, ok := obj.Type().(*types.Interface); ok { - out = append(out, iface) - } - case *types.Const: - case *types.Builtin: - default: - panic(fmt.Sprintf("unhandled type: %T", obj)) - } - return out -} diff --git a/unused/unused_test.go b/unused/unused_test.go index b4acc0d2f..6bd6172a6 100644 --- a/unused/unused_test.go +++ b/unused/unused_test.go @@ -1,105 +1,54 @@ package unused import ( - "fmt" - "go/parser" - "go/token" "go/types" - "os" - "sort" - "strconv" "strings" "testing" - "text/scanner" - "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/analysistest" - "golang.org/x/tools/go/packages" - "honnef.co/go/tools/lint" ) -// parseExpectations parses the content of a "// want ..." comment -// and returns the expectations, a mixture of diagnostics ("rx") and -// facts (name:"rx"). -func parseExpectations(text string) ([]string, error) { - var scanErr string - sc := new(scanner.Scanner).Init(strings.NewReader(text)) - sc.Error = func(s *scanner.Scanner, msg string) { - scanErr = msg // e.g. 
bad string escape - } - sc.Mode = scanner.ScanIdents | scanner.ScanStrings | scanner.ScanRawStrings - - scanRegexp := func(tok rune) (string, error) { - if tok != scanner.String && tok != scanner.RawString { - return "", fmt.Errorf("got %s, want regular expression", - scanner.TokenString(tok)) - } - pattern, _ := strconv.Unquote(sc.TokenText()) // can't fail - return pattern, nil - } +type expectation bool - var expects []string - for { - tok := sc.Scan() - switch tok { - case scanner.String, scanner.RawString: - rx, err := scanRegexp(tok) - if err != nil { - return nil, err - } - expects = append(expects, rx) - - case scanner.EOF: - if scanErr != "" { - return nil, fmt.Errorf("%s", scanErr) - } - return expects, nil +const ( + shouldBeUsed = true + shouldBeUnused = false +) - default: - return nil, fmt.Errorf("unexpected %s", scanner.TokenString(tok)) - } +func (exp expectation) String() string { + switch exp { + case shouldBeUsed: + return "used" + case shouldBeUnused: + return "unused" + default: + panic("unreachable") } } -func check(t *testing.T, fset *token.FileSet, diagnostics []types.Object) { +func check(t *testing.T, res *analysistest.Result) { type key struct { file string line int } - + want := map[key]expectation{} files := map[string]struct{}{} - for _, d := range diagnostics { - files[fset.Position(d.Pos()).Filename] = struct{}{} - } - want := make(map[key][]string) - - // processComment parses expectations out of comments. - processComment := func(filename string, linenum int, text string) { - text = strings.TrimSpace(text) - - // Any comment starting with "want" is treated - // as an expectation, even without following whitespace. - if rest := strings.TrimPrefix(text, "want"); rest != text { - expects, err := parseExpectations(rest) - if err != nil { - t.Errorf("%s:%d: in 'want' comment: %s", filename, linenum, err) - return - } - if expects != nil { - want[key{filename, linenum}] = expects - } + isTest := false + for _, f := range res.Pass.Files { + filename := res.Pass.Fset.Position(f.Pos()).Filename + if strings.HasSuffix(filename, "_test.go") { + isTest = true + break } } - - // Extract 'want' comments from Go files. - fset2 := token.NewFileSet() - for f := range files { - af, err := parser.ParseFile(fset2, f, nil, parser.ParseComments) - if err != nil { - t.Fatal(err) + for _, f := range res.Pass.Files { + filename := res.Pass.Fset.Position(f.Pos()).Filename + if !strings.HasSuffix(filename, ".go") { + continue } - for _, cgroup := range af.Comments { + files[filename] = struct{}{} + for _, cgroup := range f.Comments { for _, c := range cgroup.List { text := strings.TrimPrefix(c.Text, "//") @@ -107,91 +56,104 @@ func check(t *testing.T, fset *token.FileSet, diagnostics []types.Object) { continue // not a //-comment } - // Hack: treat a comment of the form "//...// want..." - // as if it starts at 'want'. - // This allows us to add comments on comments, - // as required when testing the buildtag analyzer. 
- if i := strings.Index(text, "// want"); i >= 0 { - text = text[i+len("// "):] + fields := strings.Fields(text) + posn := res.Pass.Fset.Position(c.Pos()) + for _, field := range fields { + switch field { + case "used": + if !isTest { + want[key{posn.Filename, posn.Line}] = shouldBeUsed + } + case "unused": + if !isTest { + want[key{posn.Filename, posn.Line}] = shouldBeUnused + } + case "used_test": + if isTest { + want[key{posn.Filename, posn.Line}] = shouldBeUsed + } + case "unused_test": + if isTest { + want[key{posn.Filename, posn.Line}] = shouldBeUnused + } + } } - - // It's tempting to compute the filename - // once outside the loop, but it's - // incorrect because it can change due - // to //line directives. - posn := fset2.Position(c.Pos()) - processComment(posn.Filename, posn.Line, text) } } } - checkMessage := func(posn token.Position, name, message string) { - k := key{posn.Filename, posn.Line} - expects := want[k] - var unmatched []string - for i, exp := range expects { - if exp == message { - // matched: remove the expectation. - expects[i] = expects[len(expects)-1] - expects = expects[:len(expects)-1] - want[k] = expects - return + checkObjs := func(objs []types.Object, state expectation) { + for _, obj := range objs { + posn := res.Pass.Fset.Position(obj.Pos()) + if _, ok := files[posn.Filename]; !ok { + continue } - unmatched = append(unmatched, fmt.Sprintf("%q", exp)) - } - if unmatched == nil { - t.Errorf("%v: unexpected: %v", posn, message) - } else { - t.Errorf("%v: %q does not match pattern %s", - posn, message, strings.Join(unmatched, " or ")) - } - } - // Check the diagnostics match expectations. - for _, f := range diagnostics { - posn := fset.Position(f.Pos()) - checkMessage(posn, "", f.Name()) - } - - // Reject surplus expectations. - // - // Sometimes an Analyzer reports two similar diagnostics on a - // line with only one expectation. The reader may be confused by - // the error message. - // TODO(adonovan): print a better error: - // "got 2 diagnostics here; each one needs its own expectation". 
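For reference, the inline scheme the field parser above recognizes puts one marker per expected state on each declaration, with the *_test variants applying when the package is built together with its test files. A hypothetical testdata package:

// testdata/src/example/example.go
package pkg

type T struct{} // used used_test

func (T) helper() {} // unused used_test

// testdata/src/example/example_test.go
package pkg

import "testing"

func TestHelper(t *testing.T) { // used_test
	T{}.helper()
}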
- var surplus []string - for key, expects := range want { - for _, exp := range expects { - err := fmt.Sprintf("%s:%d: no diagnostic was reported matching %q", key.file, key.line, exp) - surplus = append(surplus, err) + k := key{posn.Filename, posn.Line} + exp, ok := want[k] + if !ok { + t.Errorf("unexpected %s object at %s", state, posn) + continue + } + delete(want, k) + if state != exp { + t.Errorf("object at %s should be %s but is %s", posn, exp, state) + } } } - sort.Strings(surplus) - for _, err := range surplus { - t.Errorf("%s", err) - } + ures := res.Result.(Result) + checkObjs(ures.Used, shouldBeUsed) + checkObjs(ures.Unused, shouldBeUnused) } func TestAll(t *testing.T) { - c := NewChecker(false) - var stats lint.Stats - r, err := lint.NewRunner(&stats) - if err != nil { - t.Fatal(err) + dirs := []string{ + "tests", + "alias", + "anonymous", + "blank", + "cgo", + "consts", + "conversion", + "cyclic", + "defer", + "elem", + "embedded_call", + "embedding", + "embedding2", + "exported_fields", + "exported_fields_main", + "exported_method_test", + "fields", + "functions", + "ignored", + "interfaces", + "interfaces2", + "linkname", + "main", + "mapslice", + "methods", + "named", + "nested", + "nocopy", + "nocopy-main", + "pointer-type-embedding", + "quiet", + "selectors", + "switch_interface", + "tests", + "tests-main", + "type-dedup", + "type-dedup2", + "type-dedup3", + "types", + "unused-argument", + "unused_type", + "variables", } - dir := analysistest.TestData() - cfg := &packages.Config{ - Dir: dir, - Tests: true, - Env: append(os.Environ(), "GOPATH="+dir, "GO111MODULE=off", "GOPROXY=off"), - } - pkgs, err := r.Run(cfg, []string{"./..."}, []*analysis.Analyzer{c.Analyzer()}, true) - if err != nil { - t.Fatal(err) + results := analysistest.Run(t, analysistest.TestData(), Analyzer, dirs...) + for _, res := range results { + check(t, res) } - - res := c.Result() - check(t, pkgs[0].Fset, res) }
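Together with the new facts.Directives dependency, an in-source ignore directive now keeps a declaration out of the report, and for types it also covers their methods and fields. A sketch of the effect (names hypothetical):

package pkg

//lint:ignore U1000 kept for ad-hoc debugging sessions
type scratch struct {
	buf []byte // fields of an ignored type count as used
}

func (s scratch) dump() string { // methods of an ignored type do too
	return string(s.buf)
}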