mirror of
https://github.com/ceph/ceph-csi.git
synced 2025-06-13 18:43:34 +00:00
Update to kube v1.17
Signed-off-by: Humble Chirammal <hchiramm@redhat.com>
This commit is contained in:
committed by
mergify[bot]
parent
327fcd1b1b
commit
3af1e26d7c
28
vendor/honnef.co/go/tools/lint/LICENSE
vendored
Normal file
28
vendor/honnef.co/go/tools/lint/LICENSE
vendored
Normal file
@ -0,0 +1,28 @@
|
||||
Copyright (c) 2013 The Go Authors. All rights reserved.
|
||||
Copyright (c) 2016 Dominik Honnef. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
491
vendor/honnef.co/go/tools/lint/lint.go
vendored
Normal file
491
vendor/honnef.co/go/tools/lint/lint.go
vendored
Normal file
@ -0,0 +1,491 @@
|
||||
// Package lint provides the foundation for tools like staticcheck
|
||||
package lint // import "honnef.co/go/tools/lint"
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"go/scanner"
|
||||
"go/token"
|
||||
"go/types"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"unicode"
|
||||
|
||||
"golang.org/x/tools/go/analysis"
|
||||
"golang.org/x/tools/go/packages"
|
||||
"honnef.co/go/tools/config"
|
||||
)
|
||||
|
||||
type Documentation struct {
|
||||
Title string
|
||||
Text string
|
||||
Since string
|
||||
NonDefault bool
|
||||
Options []string
|
||||
}
|
||||
|
||||
func (doc *Documentation) String() string {
|
||||
b := &strings.Builder{}
|
||||
fmt.Fprintf(b, "%s\n\n", doc.Title)
|
||||
if doc.Text != "" {
|
||||
fmt.Fprintf(b, "%s\n\n", doc.Text)
|
||||
}
|
||||
fmt.Fprint(b, "Available since\n ")
|
||||
if doc.Since == "" {
|
||||
fmt.Fprint(b, "unreleased")
|
||||
} else {
|
||||
fmt.Fprintf(b, "%s", doc.Since)
|
||||
}
|
||||
if doc.NonDefault {
|
||||
fmt.Fprint(b, ", non-default")
|
||||
}
|
||||
fmt.Fprint(b, "\n")
|
||||
if len(doc.Options) > 0 {
|
||||
fmt.Fprintf(b, "\nOptions\n")
|
||||
for _, opt := range doc.Options {
|
||||
fmt.Fprintf(b, " %s", opt)
|
||||
}
|
||||
fmt.Fprint(b, "\n")
|
||||
}
|
||||
return b.String()
|
||||
}
|
||||
|
||||
type Ignore interface {
|
||||
Match(p Problem) bool
|
||||
}
|
||||
|
||||
type LineIgnore struct {
|
||||
File string
|
||||
Line int
|
||||
Checks []string
|
||||
Matched bool
|
||||
Pos token.Pos
|
||||
}
|
||||
|
||||
func (li *LineIgnore) Match(p Problem) bool {
|
||||
pos := p.Pos
|
||||
if pos.Filename != li.File || pos.Line != li.Line {
|
||||
return false
|
||||
}
|
||||
for _, c := range li.Checks {
|
||||
if m, _ := filepath.Match(c, p.Check); m {
|
||||
li.Matched = true
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (li *LineIgnore) String() string {
|
||||
matched := "not matched"
|
||||
if li.Matched {
|
||||
matched = "matched"
|
||||
}
|
||||
return fmt.Sprintf("%s:%d %s (%s)", li.File, li.Line, strings.Join(li.Checks, ", "), matched)
|
||||
}
|
||||
|
||||
type FileIgnore struct {
|
||||
File string
|
||||
Checks []string
|
||||
}
|
||||
|
||||
func (fi *FileIgnore) Match(p Problem) bool {
|
||||
if p.Pos.Filename != fi.File {
|
||||
return false
|
||||
}
|
||||
for _, c := range fi.Checks {
|
||||
if m, _ := filepath.Match(c, p.Check); m {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
type Severity uint8
|
||||
|
||||
const (
|
||||
Error Severity = iota
|
||||
Warning
|
||||
Ignored
|
||||
)
|
||||
|
||||
// Problem represents a problem in some source code.
|
||||
type Problem struct {
|
||||
Pos token.Position
|
||||
End token.Position
|
||||
Message string
|
||||
Check string
|
||||
Severity Severity
|
||||
}
|
||||
|
||||
func (p *Problem) String() string {
|
||||
return fmt.Sprintf("%s (%s)", p.Message, p.Check)
|
||||
}
|
||||
|
||||
// A Linter lints Go source code.
|
||||
type Linter struct {
|
||||
Checkers []*analysis.Analyzer
|
||||
CumulativeCheckers []CumulativeChecker
|
||||
GoVersion int
|
||||
Config config.Config
|
||||
Stats Stats
|
||||
}
|
||||
|
||||
type CumulativeChecker interface {
|
||||
Analyzer() *analysis.Analyzer
|
||||
Result() []types.Object
|
||||
ProblemObject(*token.FileSet, types.Object) Problem
|
||||
}
|
||||
|
||||
func (l *Linter) Lint(cfg *packages.Config, patterns []string) ([]Problem, error) {
|
||||
var allAnalyzers []*analysis.Analyzer
|
||||
allAnalyzers = append(allAnalyzers, l.Checkers...)
|
||||
for _, cum := range l.CumulativeCheckers {
|
||||
allAnalyzers = append(allAnalyzers, cum.Analyzer())
|
||||
}
|
||||
|
||||
// The -checks command line flag overrules all configuration
|
||||
// files, which means that for `-checks="foo"`, no check other
|
||||
// than foo can ever be reported to the user. Make use of this
|
||||
// fact to cull the list of analyses we need to run.
|
||||
|
||||
// replace "inherit" with "all", as we don't want to base the
|
||||
// list of all checks on the default configuration, which
|
||||
// disables certain checks.
|
||||
checks := make([]string, len(l.Config.Checks))
|
||||
copy(checks, l.Config.Checks)
|
||||
for i, c := range checks {
|
||||
if c == "inherit" {
|
||||
checks[i] = "all"
|
||||
}
|
||||
}
|
||||
|
||||
allowed := FilterChecks(allAnalyzers, checks)
|
||||
var allowedAnalyzers []*analysis.Analyzer
|
||||
for _, c := range l.Checkers {
|
||||
if allowed[c.Name] {
|
||||
allowedAnalyzers = append(allowedAnalyzers, c)
|
||||
}
|
||||
}
|
||||
hasCumulative := false
|
||||
for _, cum := range l.CumulativeCheckers {
|
||||
a := cum.Analyzer()
|
||||
if allowed[a.Name] {
|
||||
hasCumulative = true
|
||||
allowedAnalyzers = append(allowedAnalyzers, a)
|
||||
}
|
||||
}
|
||||
|
||||
r, err := NewRunner(&l.Stats)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
r.goVersion = l.GoVersion
|
||||
|
||||
pkgs, err := r.Run(cfg, patterns, allowedAnalyzers, hasCumulative)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
tpkgToPkg := map[*types.Package]*Package{}
|
||||
for _, pkg := range pkgs {
|
||||
tpkgToPkg[pkg.Types] = pkg
|
||||
|
||||
for _, e := range pkg.errs {
|
||||
switch e := e.(type) {
|
||||
case types.Error:
|
||||
p := Problem{
|
||||
Pos: e.Fset.PositionFor(e.Pos, false),
|
||||
Message: e.Msg,
|
||||
Severity: Error,
|
||||
Check: "compile",
|
||||
}
|
||||
pkg.problems = append(pkg.problems, p)
|
||||
case packages.Error:
|
||||
msg := e.Msg
|
||||
if len(msg) != 0 && msg[0] == '\n' {
|
||||
// TODO(dh): See https://github.com/golang/go/issues/32363
|
||||
msg = msg[1:]
|
||||
}
|
||||
|
||||
var pos token.Position
|
||||
if e.Pos == "" {
|
||||
// Under certain conditions (malformed package
|
||||
// declarations, multiple packages in the same
|
||||
// directory), go list emits an error on stderr
|
||||
// instead of JSON. Those errors do not have
|
||||
// associated position information in
|
||||
// go/packages.Error, even though the output on
|
||||
// stderr may contain it.
|
||||
if p, n, err := parsePos(msg); err == nil {
|
||||
if abs, err := filepath.Abs(p.Filename); err == nil {
|
||||
p.Filename = abs
|
||||
}
|
||||
pos = p
|
||||
msg = msg[n+2:]
|
||||
}
|
||||
} else {
|
||||
var err error
|
||||
pos, _, err = parsePos(e.Pos)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("internal error: %s", e))
|
||||
}
|
||||
}
|
||||
p := Problem{
|
||||
Pos: pos,
|
||||
Message: msg,
|
||||
Severity: Error,
|
||||
Check: "compile",
|
||||
}
|
||||
pkg.problems = append(pkg.problems, p)
|
||||
case scanner.ErrorList:
|
||||
for _, e := range e {
|
||||
p := Problem{
|
||||
Pos: e.Pos,
|
||||
Message: e.Msg,
|
||||
Severity: Error,
|
||||
Check: "compile",
|
||||
}
|
||||
pkg.problems = append(pkg.problems, p)
|
||||
}
|
||||
case error:
|
||||
p := Problem{
|
||||
Pos: token.Position{},
|
||||
Message: e.Error(),
|
||||
Severity: Error,
|
||||
Check: "compile",
|
||||
}
|
||||
pkg.problems = append(pkg.problems, p)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
atomic.StoreUint32(&r.stats.State, StateCumulative)
|
||||
var problems []Problem
|
||||
for _, cum := range l.CumulativeCheckers {
|
||||
for _, res := range cum.Result() {
|
||||
pkg := tpkgToPkg[res.Pkg()]
|
||||
allowedChecks := FilterChecks(allowedAnalyzers, pkg.cfg.Merge(l.Config).Checks)
|
||||
if allowedChecks[cum.Analyzer().Name] {
|
||||
pos := DisplayPosition(pkg.Fset, res.Pos())
|
||||
// FIXME(dh): why are we ignoring generated files
|
||||
// here? Surely this is specific to 'unused', not all
|
||||
// cumulative checkers
|
||||
if _, ok := pkg.gen[pos.Filename]; ok {
|
||||
continue
|
||||
}
|
||||
p := cum.ProblemObject(pkg.Fset, res)
|
||||
problems = append(problems, p)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, pkg := range pkgs {
|
||||
for _, ig := range pkg.ignores {
|
||||
for i := range pkg.problems {
|
||||
p := &pkg.problems[i]
|
||||
if ig.Match(*p) {
|
||||
p.Severity = Ignored
|
||||
}
|
||||
}
|
||||
for i := range problems {
|
||||
p := &problems[i]
|
||||
if ig.Match(*p) {
|
||||
p.Severity = Ignored
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if pkg.cfg == nil {
|
||||
// The package failed to load, otherwise we would have a
|
||||
// valid config. Pass through all errors.
|
||||
problems = append(problems, pkg.problems...)
|
||||
} else {
|
||||
for _, p := range pkg.problems {
|
||||
allowedChecks := FilterChecks(allowedAnalyzers, pkg.cfg.Merge(l.Config).Checks)
|
||||
allowedChecks["compile"] = true
|
||||
if allowedChecks[p.Check] {
|
||||
problems = append(problems, p)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, ig := range pkg.ignores {
|
||||
ig, ok := ig.(*LineIgnore)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
if ig.Matched {
|
||||
continue
|
||||
}
|
||||
|
||||
couldveMatched := false
|
||||
allowedChecks := FilterChecks(allowedAnalyzers, pkg.cfg.Merge(l.Config).Checks)
|
||||
for _, c := range ig.Checks {
|
||||
if !allowedChecks[c] {
|
||||
continue
|
||||
}
|
||||
couldveMatched = true
|
||||
break
|
||||
}
|
||||
|
||||
if !couldveMatched {
|
||||
// The ignored checks were disabled for the containing package.
|
||||
// Don't flag the ignore for not having matched.
|
||||
continue
|
||||
}
|
||||
p := Problem{
|
||||
Pos: DisplayPosition(pkg.Fset, ig.Pos),
|
||||
Message: "this linter directive didn't match anything; should it be removed?",
|
||||
Check: "",
|
||||
}
|
||||
problems = append(problems, p)
|
||||
}
|
||||
}
|
||||
|
||||
if len(problems) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
sort.Slice(problems, func(i, j int) bool {
|
||||
pi := problems[i].Pos
|
||||
pj := problems[j].Pos
|
||||
|
||||
if pi.Filename != pj.Filename {
|
||||
return pi.Filename < pj.Filename
|
||||
}
|
||||
if pi.Line != pj.Line {
|
||||
return pi.Line < pj.Line
|
||||
}
|
||||
if pi.Column != pj.Column {
|
||||
return pi.Column < pj.Column
|
||||
}
|
||||
|
||||
return problems[i].Message < problems[j].Message
|
||||
})
|
||||
|
||||
var out []Problem
|
||||
out = append(out, problems[0])
|
||||
for i, p := range problems[1:] {
|
||||
// We may encounter duplicate problems because one file
|
||||
// can be part of many packages.
|
||||
if problems[i] != p {
|
||||
out = append(out, p)
|
||||
}
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func FilterChecks(allChecks []*analysis.Analyzer, checks []string) map[string]bool {
|
||||
// OPT(dh): this entire computation could be cached per package
|
||||
allowedChecks := map[string]bool{}
|
||||
|
||||
for _, check := range checks {
|
||||
b := true
|
||||
if len(check) > 1 && check[0] == '-' {
|
||||
b = false
|
||||
check = check[1:]
|
||||
}
|
||||
if check == "*" || check == "all" {
|
||||
// Match all
|
||||
for _, c := range allChecks {
|
||||
allowedChecks[c.Name] = b
|
||||
}
|
||||
} else if strings.HasSuffix(check, "*") {
|
||||
// Glob
|
||||
prefix := check[:len(check)-1]
|
||||
isCat := strings.IndexFunc(prefix, func(r rune) bool { return unicode.IsNumber(r) }) == -1
|
||||
|
||||
for _, c := range allChecks {
|
||||
idx := strings.IndexFunc(c.Name, func(r rune) bool { return unicode.IsNumber(r) })
|
||||
if isCat {
|
||||
// Glob is S*, which should match S1000 but not SA1000
|
||||
cat := c.Name[:idx]
|
||||
if prefix == cat {
|
||||
allowedChecks[c.Name] = b
|
||||
}
|
||||
} else {
|
||||
// Glob is S1*
|
||||
if strings.HasPrefix(c.Name, prefix) {
|
||||
allowedChecks[c.Name] = b
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Literal check name
|
||||
allowedChecks[check] = b
|
||||
}
|
||||
}
|
||||
return allowedChecks
|
||||
}
|
||||
|
||||
type Positioner interface {
|
||||
Pos() token.Pos
|
||||
}
|
||||
|
||||
func DisplayPosition(fset *token.FileSet, p token.Pos) token.Position {
|
||||
if p == token.NoPos {
|
||||
return token.Position{}
|
||||
}
|
||||
|
||||
// Only use the adjusted position if it points to another Go file.
|
||||
// This means we'll point to the original file for cgo files, but
|
||||
// we won't point to a YACC grammar file.
|
||||
pos := fset.PositionFor(p, false)
|
||||
adjPos := fset.PositionFor(p, true)
|
||||
|
||||
if filepath.Ext(adjPos.Filename) == ".go" {
|
||||
return adjPos
|
||||
}
|
||||
return pos
|
||||
}
|
||||
|
||||
var bufferPool = &sync.Pool{
|
||||
New: func() interface{} {
|
||||
buf := bytes.NewBuffer(nil)
|
||||
buf.Grow(64)
|
||||
return buf
|
||||
},
|
||||
}
|
||||
|
||||
func FuncName(f *types.Func) string {
|
||||
buf := bufferPool.Get().(*bytes.Buffer)
|
||||
buf.Reset()
|
||||
if f.Type() != nil {
|
||||
sig := f.Type().(*types.Signature)
|
||||
if recv := sig.Recv(); recv != nil {
|
||||
buf.WriteByte('(')
|
||||
if _, ok := recv.Type().(*types.Interface); ok {
|
||||
// gcimporter creates abstract methods of
|
||||
// named interfaces using the interface type
|
||||
// (not the named type) as the receiver.
|
||||
// Don't print it in full.
|
||||
buf.WriteString("interface")
|
||||
} else {
|
||||
types.WriteType(buf, recv.Type(), nil)
|
||||
}
|
||||
buf.WriteByte(')')
|
||||
buf.WriteByte('.')
|
||||
} else if f.Pkg() != nil {
|
||||
writePackage(buf, f.Pkg())
|
||||
}
|
||||
}
|
||||
buf.WriteString(f.Name())
|
||||
s := buf.String()
|
||||
bufferPool.Put(buf)
|
||||
return s
|
||||
}
|
||||
|
||||
func writePackage(buf *bytes.Buffer, pkg *types.Package) {
|
||||
if pkg == nil {
|
||||
return
|
||||
}
|
||||
s := pkg.Path()
|
||||
if s != "" {
|
||||
buf.WriteString(s)
|
||||
buf.WriteByte('.')
|
||||
}
|
||||
}
|
400
vendor/honnef.co/go/tools/lint/lintdsl/lintdsl.go
vendored
Normal file
400
vendor/honnef.co/go/tools/lint/lintdsl/lintdsl.go
vendored
Normal file
@ -0,0 +1,400 @@
|
||||
// Package lintdsl provides helpers for implementing static analysis
|
||||
// checks. Dot-importing this package is encouraged.
|
||||
package lintdsl
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"flag"
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/constant"
|
||||
"go/printer"
|
||||
"go/token"
|
||||
"go/types"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/tools/go/analysis"
|
||||
"honnef.co/go/tools/facts"
|
||||
"honnef.co/go/tools/lint"
|
||||
"honnef.co/go/tools/ssa"
|
||||
)
|
||||
|
||||
type packager interface {
|
||||
Package() *ssa.Package
|
||||
}
|
||||
|
||||
func CallName(call *ssa.CallCommon) string {
|
||||
if call.IsInvoke() {
|
||||
return ""
|
||||
}
|
||||
switch v := call.Value.(type) {
|
||||
case *ssa.Function:
|
||||
fn, ok := v.Object().(*types.Func)
|
||||
if !ok {
|
||||
return ""
|
||||
}
|
||||
return lint.FuncName(fn)
|
||||
case *ssa.Builtin:
|
||||
return v.Name()
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func IsCallTo(call *ssa.CallCommon, name string) bool { return CallName(call) == name }
|
||||
func IsType(T types.Type, name string) bool { return types.TypeString(T, nil) == name }
|
||||
|
||||
func FilterDebug(instr []ssa.Instruction) []ssa.Instruction {
|
||||
var out []ssa.Instruction
|
||||
for _, ins := range instr {
|
||||
if _, ok := ins.(*ssa.DebugRef); !ok {
|
||||
out = append(out, ins)
|
||||
}
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func IsExample(fn *ssa.Function) bool {
|
||||
if !strings.HasPrefix(fn.Name(), "Example") {
|
||||
return false
|
||||
}
|
||||
f := fn.Prog.Fset.File(fn.Pos())
|
||||
if f == nil {
|
||||
return false
|
||||
}
|
||||
return strings.HasSuffix(f.Name(), "_test.go")
|
||||
}
|
||||
|
||||
func IsPointerLike(T types.Type) bool {
|
||||
switch T := T.Underlying().(type) {
|
||||
case *types.Interface, *types.Chan, *types.Map, *types.Signature, *types.Pointer:
|
||||
return true
|
||||
case *types.Basic:
|
||||
return T.Kind() == types.UnsafePointer
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func IsIdent(expr ast.Expr, ident string) bool {
|
||||
id, ok := expr.(*ast.Ident)
|
||||
return ok && id.Name == ident
|
||||
}
|
||||
|
||||
// isBlank returns whether id is the blank identifier "_".
|
||||
// If id == nil, the answer is false.
|
||||
func IsBlank(id ast.Expr) bool {
|
||||
ident, _ := id.(*ast.Ident)
|
||||
return ident != nil && ident.Name == "_"
|
||||
}
|
||||
|
||||
func IsIntLiteral(expr ast.Expr, literal string) bool {
|
||||
lit, ok := expr.(*ast.BasicLit)
|
||||
return ok && lit.Kind == token.INT && lit.Value == literal
|
||||
}
|
||||
|
||||
// Deprecated: use IsIntLiteral instead
|
||||
func IsZero(expr ast.Expr) bool {
|
||||
return IsIntLiteral(expr, "0")
|
||||
}
|
||||
|
||||
func IsOfType(pass *analysis.Pass, expr ast.Expr, name string) bool {
|
||||
return IsType(pass.TypesInfo.TypeOf(expr), name)
|
||||
}
|
||||
|
||||
func IsInTest(pass *analysis.Pass, node lint.Positioner) bool {
|
||||
// FIXME(dh): this doesn't work for global variables with
|
||||
// initializers
|
||||
f := pass.Fset.File(node.Pos())
|
||||
return f != nil && strings.HasSuffix(f.Name(), "_test.go")
|
||||
}
|
||||
|
||||
func IsInMain(pass *analysis.Pass, node lint.Positioner) bool {
|
||||
if node, ok := node.(packager); ok {
|
||||
return node.Package().Pkg.Name() == "main"
|
||||
}
|
||||
return pass.Pkg.Name() == "main"
|
||||
}
|
||||
|
||||
func SelectorName(pass *analysis.Pass, expr *ast.SelectorExpr) string {
|
||||
info := pass.TypesInfo
|
||||
sel := info.Selections[expr]
|
||||
if sel == nil {
|
||||
if x, ok := expr.X.(*ast.Ident); ok {
|
||||
pkg, ok := info.ObjectOf(x).(*types.PkgName)
|
||||
if !ok {
|
||||
// This shouldn't happen
|
||||
return fmt.Sprintf("%s.%s", x.Name, expr.Sel.Name)
|
||||
}
|
||||
return fmt.Sprintf("%s.%s", pkg.Imported().Path(), expr.Sel.Name)
|
||||
}
|
||||
panic(fmt.Sprintf("unsupported selector: %v", expr))
|
||||
}
|
||||
return fmt.Sprintf("(%s).%s", sel.Recv(), sel.Obj().Name())
|
||||
}
|
||||
|
||||
func IsNil(pass *analysis.Pass, expr ast.Expr) bool {
|
||||
return pass.TypesInfo.Types[expr].IsNil()
|
||||
}
|
||||
|
||||
func BoolConst(pass *analysis.Pass, expr ast.Expr) bool {
|
||||
val := pass.TypesInfo.ObjectOf(expr.(*ast.Ident)).(*types.Const).Val()
|
||||
return constant.BoolVal(val)
|
||||
}
|
||||
|
||||
func IsBoolConst(pass *analysis.Pass, expr ast.Expr) bool {
|
||||
// We explicitly don't support typed bools because more often than
|
||||
// not, custom bool types are used as binary enums and the
|
||||
// explicit comparison is desired.
|
||||
|
||||
ident, ok := expr.(*ast.Ident)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
obj := pass.TypesInfo.ObjectOf(ident)
|
||||
c, ok := obj.(*types.Const)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
basic, ok := c.Type().(*types.Basic)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
if basic.Kind() != types.UntypedBool && basic.Kind() != types.Bool {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func ExprToInt(pass *analysis.Pass, expr ast.Expr) (int64, bool) {
|
||||
tv := pass.TypesInfo.Types[expr]
|
||||
if tv.Value == nil {
|
||||
return 0, false
|
||||
}
|
||||
if tv.Value.Kind() != constant.Int {
|
||||
return 0, false
|
||||
}
|
||||
return constant.Int64Val(tv.Value)
|
||||
}
|
||||
|
||||
func ExprToString(pass *analysis.Pass, expr ast.Expr) (string, bool) {
|
||||
val := pass.TypesInfo.Types[expr].Value
|
||||
if val == nil {
|
||||
return "", false
|
||||
}
|
||||
if val.Kind() != constant.String {
|
||||
return "", false
|
||||
}
|
||||
return constant.StringVal(val), true
|
||||
}
|
||||
|
||||
// Dereference returns a pointer's element type; otherwise it returns
|
||||
// T.
|
||||
func Dereference(T types.Type) types.Type {
|
||||
if p, ok := T.Underlying().(*types.Pointer); ok {
|
||||
return p.Elem()
|
||||
}
|
||||
return T
|
||||
}
|
||||
|
||||
// DereferenceR returns a pointer's element type; otherwise it returns
|
||||
// T. If the element type is itself a pointer, DereferenceR will be
|
||||
// applied recursively.
|
||||
func DereferenceR(T types.Type) types.Type {
|
||||
if p, ok := T.Underlying().(*types.Pointer); ok {
|
||||
return DereferenceR(p.Elem())
|
||||
}
|
||||
return T
|
||||
}
|
||||
|
||||
func IsGoVersion(pass *analysis.Pass, minor int) bool {
|
||||
version := pass.Analyzer.Flags.Lookup("go").Value.(flag.Getter).Get().(int)
|
||||
return version >= minor
|
||||
}
|
||||
|
||||
func CallNameAST(pass *analysis.Pass, call *ast.CallExpr) string {
|
||||
switch fun := call.Fun.(type) {
|
||||
case *ast.SelectorExpr:
|
||||
fn, ok := pass.TypesInfo.ObjectOf(fun.Sel).(*types.Func)
|
||||
if !ok {
|
||||
return ""
|
||||
}
|
||||
return lint.FuncName(fn)
|
||||
case *ast.Ident:
|
||||
obj := pass.TypesInfo.ObjectOf(fun)
|
||||
switch obj := obj.(type) {
|
||||
case *types.Func:
|
||||
return lint.FuncName(obj)
|
||||
case *types.Builtin:
|
||||
return obj.Name()
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
}
|
||||
|
||||
func IsCallToAST(pass *analysis.Pass, node ast.Node, name string) bool {
|
||||
call, ok := node.(*ast.CallExpr)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
return CallNameAST(pass, call) == name
|
||||
}
|
||||
|
||||
func IsCallToAnyAST(pass *analysis.Pass, node ast.Node, names ...string) bool {
|
||||
for _, name := range names {
|
||||
if IsCallToAST(pass, node, name) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func Render(pass *analysis.Pass, x interface{}) string {
|
||||
var buf bytes.Buffer
|
||||
if err := printer.Fprint(&buf, pass.Fset, x); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
func RenderArgs(pass *analysis.Pass, args []ast.Expr) string {
|
||||
var ss []string
|
||||
for _, arg := range args {
|
||||
ss = append(ss, Render(pass, arg))
|
||||
}
|
||||
return strings.Join(ss, ", ")
|
||||
}
|
||||
|
||||
func Preamble(f *ast.File) string {
|
||||
cutoff := f.Package
|
||||
if f.Doc != nil {
|
||||
cutoff = f.Doc.Pos()
|
||||
}
|
||||
var out []string
|
||||
for _, cmt := range f.Comments {
|
||||
if cmt.Pos() >= cutoff {
|
||||
break
|
||||
}
|
||||
out = append(out, cmt.Text())
|
||||
}
|
||||
return strings.Join(out, "\n")
|
||||
}
|
||||
|
||||
func Inspect(node ast.Node, fn func(node ast.Node) bool) {
|
||||
if node == nil {
|
||||
return
|
||||
}
|
||||
ast.Inspect(node, fn)
|
||||
}
|
||||
|
||||
func GroupSpecs(fset *token.FileSet, specs []ast.Spec) [][]ast.Spec {
|
||||
if len(specs) == 0 {
|
||||
return nil
|
||||
}
|
||||
groups := make([][]ast.Spec, 1)
|
||||
groups[0] = append(groups[0], specs[0])
|
||||
|
||||
for _, spec := range specs[1:] {
|
||||
g := groups[len(groups)-1]
|
||||
if fset.PositionFor(spec.Pos(), false).Line-1 !=
|
||||
fset.PositionFor(g[len(g)-1].End(), false).Line {
|
||||
|
||||
groups = append(groups, nil)
|
||||
}
|
||||
|
||||
groups[len(groups)-1] = append(groups[len(groups)-1], spec)
|
||||
}
|
||||
|
||||
return groups
|
||||
}
|
||||
|
||||
func IsObject(obj types.Object, name string) bool {
|
||||
var path string
|
||||
if pkg := obj.Pkg(); pkg != nil {
|
||||
path = pkg.Path() + "."
|
||||
}
|
||||
return path+obj.Name() == name
|
||||
}
|
||||
|
||||
type Field struct {
|
||||
Var *types.Var
|
||||
Tag string
|
||||
Path []int
|
||||
}
|
||||
|
||||
// FlattenFields recursively flattens T and embedded structs,
|
||||
// returning a list of fields. If multiple fields with the same name
|
||||
// exist, all will be returned.
|
||||
func FlattenFields(T *types.Struct) []Field {
|
||||
return flattenFields(T, nil, nil)
|
||||
}
|
||||
|
||||
func flattenFields(T *types.Struct, path []int, seen map[types.Type]bool) []Field {
|
||||
if seen == nil {
|
||||
seen = map[types.Type]bool{}
|
||||
}
|
||||
if seen[T] {
|
||||
return nil
|
||||
}
|
||||
seen[T] = true
|
||||
var out []Field
|
||||
for i := 0; i < T.NumFields(); i++ {
|
||||
field := T.Field(i)
|
||||
tag := T.Tag(i)
|
||||
np := append(path[:len(path):len(path)], i)
|
||||
if field.Anonymous() {
|
||||
if s, ok := Dereference(field.Type()).Underlying().(*types.Struct); ok {
|
||||
out = append(out, flattenFields(s, np, seen)...)
|
||||
}
|
||||
} else {
|
||||
out = append(out, Field{field, tag, np})
|
||||
}
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func File(pass *analysis.Pass, node lint.Positioner) *ast.File {
|
||||
pass.Fset.PositionFor(node.Pos(), true)
|
||||
m := pass.ResultOf[facts.TokenFile].(map[*token.File]*ast.File)
|
||||
return m[pass.Fset.File(node.Pos())]
|
||||
}
|
||||
|
||||
// IsGenerated reports whether pos is in a generated file, It ignores
|
||||
// //line directives.
|
||||
func IsGenerated(pass *analysis.Pass, pos token.Pos) bool {
|
||||
_, ok := Generator(pass, pos)
|
||||
return ok
|
||||
}
|
||||
|
||||
// Generator returns the generator that generated the file containing
|
||||
// pos. It ignores //line directives.
|
||||
func Generator(pass *analysis.Pass, pos token.Pos) (facts.Generator, bool) {
|
||||
file := pass.Fset.PositionFor(pos, false).Filename
|
||||
m := pass.ResultOf[facts.Generated].(map[string]facts.Generator)
|
||||
g, ok := m[file]
|
||||
return g, ok
|
||||
}
|
||||
|
||||
func ReportfFG(pass *analysis.Pass, pos token.Pos, f string, args ...interface{}) {
|
||||
file := lint.DisplayPosition(pass.Fset, pos).Filename
|
||||
m := pass.ResultOf[facts.Generated].(map[string]facts.Generator)
|
||||
if _, ok := m[file]; ok {
|
||||
return
|
||||
}
|
||||
pass.Reportf(pos, f, args...)
|
||||
}
|
||||
|
||||
func ReportNodef(pass *analysis.Pass, node ast.Node, format string, args ...interface{}) {
|
||||
msg := fmt.Sprintf(format, args...)
|
||||
pass.Report(analysis.Diagnostic{Pos: node.Pos(), End: node.End(), Message: msg})
|
||||
}
|
||||
|
||||
func ReportNodefFG(pass *analysis.Pass, node ast.Node, format string, args ...interface{}) {
|
||||
file := lint.DisplayPosition(pass.Fset, node.Pos()).Filename
|
||||
m := pass.ResultOf[facts.Generated].(map[string]facts.Generator)
|
||||
if _, ok := m[file]; ok {
|
||||
return
|
||||
}
|
||||
ReportNodef(pass, node, format, args...)
|
||||
}
|
135
vendor/honnef.co/go/tools/lint/lintutil/format/format.go
vendored
Normal file
135
vendor/honnef.co/go/tools/lint/lintutil/format/format.go
vendored
Normal file
@ -0,0 +1,135 @@
|
||||
// Package format provides formatters for linter problems.
|
||||
package format
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"go/token"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"text/tabwriter"
|
||||
|
||||
"honnef.co/go/tools/lint"
|
||||
)
|
||||
|
||||
func shortPath(path string) string {
|
||||
cwd, err := os.Getwd()
|
||||
if err != nil {
|
||||
return path
|
||||
}
|
||||
if rel, err := filepath.Rel(cwd, path); err == nil && len(rel) < len(path) {
|
||||
return rel
|
||||
}
|
||||
return path
|
||||
}
|
||||
|
||||
func relativePositionString(pos token.Position) string {
|
||||
s := shortPath(pos.Filename)
|
||||
if pos.IsValid() {
|
||||
if s != "" {
|
||||
s += ":"
|
||||
}
|
||||
s += fmt.Sprintf("%d:%d", pos.Line, pos.Column)
|
||||
}
|
||||
if s == "" {
|
||||
s = "-"
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
type Statter interface {
|
||||
Stats(total, errors, warnings int)
|
||||
}
|
||||
|
||||
type Formatter interface {
|
||||
Format(p lint.Problem)
|
||||
}
|
||||
|
||||
type Text struct {
|
||||
W io.Writer
|
||||
}
|
||||
|
||||
func (o Text) Format(p lint.Problem) {
|
||||
fmt.Fprintf(o.W, "%v: %s\n", relativePositionString(p.Pos), p.String())
|
||||
}
|
||||
|
||||
type JSON struct {
|
||||
W io.Writer
|
||||
}
|
||||
|
||||
func severity(s lint.Severity) string {
|
||||
switch s {
|
||||
case lint.Error:
|
||||
return "error"
|
||||
case lint.Warning:
|
||||
return "warning"
|
||||
case lint.Ignored:
|
||||
return "ignored"
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (o JSON) Format(p lint.Problem) {
|
||||
type location struct {
|
||||
File string `json:"file"`
|
||||
Line int `json:"line"`
|
||||
Column int `json:"column"`
|
||||
}
|
||||
jp := struct {
|
||||
Code string `json:"code"`
|
||||
Severity string `json:"severity,omitempty"`
|
||||
Location location `json:"location"`
|
||||
End location `json:"end"`
|
||||
Message string `json:"message"`
|
||||
}{
|
||||
Code: p.Check,
|
||||
Severity: severity(p.Severity),
|
||||
Location: location{
|
||||
File: p.Pos.Filename,
|
||||
Line: p.Pos.Line,
|
||||
Column: p.Pos.Column,
|
||||
},
|
||||
End: location{
|
||||
File: p.End.Filename,
|
||||
Line: p.End.Line,
|
||||
Column: p.End.Column,
|
||||
},
|
||||
Message: p.Message,
|
||||
}
|
||||
_ = json.NewEncoder(o.W).Encode(jp)
|
||||
}
|
||||
|
||||
type Stylish struct {
|
||||
W io.Writer
|
||||
|
||||
prevFile string
|
||||
tw *tabwriter.Writer
|
||||
}
|
||||
|
||||
func (o *Stylish) Format(p lint.Problem) {
|
||||
pos := p.Pos
|
||||
if pos.Filename == "" {
|
||||
pos.Filename = "-"
|
||||
}
|
||||
|
||||
if pos.Filename != o.prevFile {
|
||||
if o.prevFile != "" {
|
||||
o.tw.Flush()
|
||||
fmt.Fprintln(o.W)
|
||||
}
|
||||
fmt.Fprintln(o.W, pos.Filename)
|
||||
o.prevFile = pos.Filename
|
||||
o.tw = tabwriter.NewWriter(o.W, 0, 4, 2, ' ', 0)
|
||||
}
|
||||
fmt.Fprintf(o.tw, " (%d, %d)\t%s\t%s\n", pos.Line, pos.Column, p.Check, p.Message)
|
||||
}
|
||||
|
||||
func (o *Stylish) Stats(total, errors, warnings int) {
|
||||
if o.tw != nil {
|
||||
o.tw.Flush()
|
||||
fmt.Fprintln(o.W)
|
||||
}
|
||||
fmt.Fprintf(o.W, " ✖ %d problems (%d errors, %d warnings)\n",
|
||||
total, errors, warnings)
|
||||
}
|
7
vendor/honnef.co/go/tools/lint/lintutil/stats.go
vendored
Normal file
7
vendor/honnef.co/go/tools/lint/lintutil/stats.go
vendored
Normal file
@ -0,0 +1,7 @@
|
||||
// +build !aix,!android,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris

package lintutil

import "os"

// infoSignals is empty on platforms (e.g. Windows, Plan 9) that have no
// conventional "report status" signal; no progress handler is installed.
var infoSignals = []os.Signal{}
|
10
vendor/honnef.co/go/tools/lint/lintutil/stats_bsd.go
vendored
Normal file
10
vendor/honnef.co/go/tools/lint/lintutil/stats_bsd.go
vendored
Normal file
@ -0,0 +1,10 @@
|
||||
// +build darwin dragonfly freebsd netbsd openbsd

package lintutil

import (
	"os"
	"syscall"
)

// infoSignals lists signals that trigger a progress report. BSD-derived
// systems provide SIGINFO (typically sent with Ctrl-T).
var infoSignals = []os.Signal{syscall.SIGINFO}
|
10
vendor/honnef.co/go/tools/lint/lintutil/stats_posix.go
vendored
Normal file
10
vendor/honnef.co/go/tools/lint/lintutil/stats_posix.go
vendored
Normal file
@ -0,0 +1,10 @@
|
||||
// +build aix android linux solaris

package lintutil

import (
	"os"
	"syscall"
)

// infoSignals lists signals that trigger a progress report. These
// platforms have no SIGINFO, so SIGUSR1 is used instead.
var infoSignals = []os.Signal{syscall.SIGUSR1}
|
392
vendor/honnef.co/go/tools/lint/lintutil/util.go
vendored
Normal file
392
vendor/honnef.co/go/tools/lint/lintutil/util.go
vendored
Normal file
@ -0,0 +1,392 @@
|
||||
// Copyright (c) 2013 The Go Authors. All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file or at
|
||||
// https://developers.google.com/open-source/licenses/bsd.
|
||||
|
||||
// Package lintutil provides helpers for writing linter command lines.
|
||||
package lintutil // import "honnef.co/go/tools/lint/lintutil"
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"go/build"
|
||||
"go/token"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"os/signal"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"runtime/pprof"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
|
||||
"honnef.co/go/tools/config"
|
||||
"honnef.co/go/tools/internal/cache"
|
||||
"honnef.co/go/tools/lint"
|
||||
"honnef.co/go/tools/lint/lintutil/format"
|
||||
"honnef.co/go/tools/version"
|
||||
|
||||
"golang.org/x/tools/go/analysis"
|
||||
"golang.org/x/tools/go/buildutil"
|
||||
"golang.org/x/tools/go/packages"
|
||||
)
|
||||
|
||||
func NewVersionFlag() flag.Getter {
|
||||
tags := build.Default.ReleaseTags
|
||||
v := tags[len(tags)-1][2:]
|
||||
version := new(VersionFlag)
|
||||
if err := version.Set(v); err != nil {
|
||||
panic(fmt.Sprintf("internal error: %s", err))
|
||||
}
|
||||
return version
|
||||
}
|
||||
|
||||
// VersionFlag is a flag.Getter that holds a Go minor version: the x in
// "1.x".
type VersionFlag int

// String renders the version in its canonical "1.x" form.
func (v *VersionFlag) String() string {
	return fmt.Sprintf("1.%d", *v)
}

// Set parses a version of the form "1.x". Anything not starting with
// "1." followed by an integer is rejected.
func (v *VersionFlag) Set(s string) error {
	if len(s) < 3 || s[0] != '1' || s[1] != '.' {
		return errors.New("invalid Go version")
	}
	i, err := strconv.Atoi(s[2:])
	*v = VersionFlag(i)
	return err
}

// Get returns the minor version as a plain int, per flag.Getter.
func (v *VersionFlag) Get() interface{} {
	return int(*v)
}
|
||||
|
||||
// usage returns a closure that prints a usage summary for the named
// tool to standard error, suitable as a flag.FlagSet's Usage hook.
func usage(name string, flags *flag.FlagSet) func() {
	return func() {
		out := os.Stderr
		fmt.Fprintf(out, "Usage of %s:\n", name)
		fmt.Fprintf(out, "\t%s [flags] # runs on package in current directory\n", name)
		fmt.Fprintf(out, "\t%s [flags] packages\n", name)
		fmt.Fprintf(out, "\t%s [flags] directory\n", name)
		fmt.Fprintf(out, "\t%s [flags] files... # must be a single package\n", name)
		fmt.Fprintf(out, "Flags:\n")
		flags.PrintDefaults()
	}
}
|
||||
|
||||
// list is a flag.Value holding a comma-separated set of strings.
type list []string

// String renders the list as a quoted, comma-joined string.
func (l *list) String() string {
	return `"` + strings.Join(*l, ",") + `"`
}

// Set replaces the list with the comma-separated elements of s; an
// empty s clears the list. It never fails.
func (l *list) Set(s string) error {
	if s == "" {
		*l = nil
		return nil
	}
	*l = strings.Split(s, ",")
	return nil
}
|
||||
|
||||
// FlagSet returns the standard command-line flag set shared by all
// linter frontends, pre-populated with defaults. The returned set uses
// flag.ExitOnError, so parse failures terminate the process.
func FlagSet(name string) *flag.FlagSet {
	flags := flag.NewFlagSet("", flag.ExitOnError)
	flags.Usage = usage(name, flags)
	flags.String("tags", "", "List of `build tags`")
	flags.Bool("tests", true, "Include tests")
	flags.Bool("version", false, "Print version and exit")
	flags.Bool("show-ignored", false, "Don't filter ignored problems")
	flags.String("f", "text", "Output `format` (valid choices are 'stylish', 'text' and 'json')")
	flags.String("explain", "", "Print description of `check`")

	// Debug-only flags; read back by ProcessFlagSet.
	flags.String("debug.cpuprofile", "", "Write CPU profile to `file`")
	flags.String("debug.memprofile", "", "Write memory profile to `file`")
	flags.Bool("debug.version", false, "Print detailed version information about this program")
	flags.Bool("debug.no-compile-errors", false, "Don't print compile errors")

	// Defaults: inherit enabled checks from configuration, and fail on
	// every enabled check.
	checks := list{"inherit"}
	fail := list{"all"}
	flags.Var(&checks, "checks", "Comma-separated list of `checks` to enable.")
	flags.Var(&fail, "fail", "Comma-separated list of `checks` that can cause a non-zero exit status.")

	// Default -go to the version of the running toolchain, taken from
	// the newest release tag (e.g. "go1.13" -> "1.13").
	tags := build.Default.ReleaseTags
	v := tags[len(tags)-1][2:]
	version := new(VersionFlag)
	if err := version.Set(v); err != nil {
		panic(fmt.Sprintf("internal error: %s", err))
	}

	flags.Var(version, "go", "Target Go `version` in the format '1.x'")
	return flags
}
|
||||
|
||||
func findCheck(cs []*analysis.Analyzer, check string) (*analysis.Analyzer, bool) {
|
||||
for _, c := range cs {
|
||||
if c.Name == check {
|
||||
return c, true
|
||||
}
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// ProcessFlagSet runs the checkers cs and cumulative checkers cums
// according to the already-parsed flags in fs, prints the resulting
// problems in the requested output format, and terminates the process
// with an exit status reflecting the findings. It never returns.
func ProcessFlagSet(cs []*analysis.Analyzer, cums []lint.CumulativeChecker, fs *flag.FlagSet) {
	// All flags registered by FlagSet implement flag.Getter, so the
	// type assertions below cannot fail for a set built by FlagSet.
	tags := fs.Lookup("tags").Value.(flag.Getter).Get().(string)
	tests := fs.Lookup("tests").Value.(flag.Getter).Get().(bool)
	goVersion := fs.Lookup("go").Value.(flag.Getter).Get().(int)
	formatter := fs.Lookup("f").Value.(flag.Getter).Get().(string)
	printVersion := fs.Lookup("version").Value.(flag.Getter).Get().(bool)
	showIgnored := fs.Lookup("show-ignored").Value.(flag.Getter).Get().(bool)
	explain := fs.Lookup("explain").Value.(flag.Getter).Get().(string)

	cpuProfile := fs.Lookup("debug.cpuprofile").Value.(flag.Getter).Get().(string)
	memProfile := fs.Lookup("debug.memprofile").Value.(flag.Getter).Get().(string)
	debugVersion := fs.Lookup("debug.version").Value.(flag.Getter).Get().(bool)
	debugNoCompile := fs.Lookup("debug.no-compile-errors").Value.(flag.Getter).Get().(bool)

	cfg := config.Config{}
	cfg.Checks = *fs.Lookup("checks").Value.(*list)

	// exit finalizes any active profiles before terminating; all exit
	// paths below must go through it so profile data is not lost.
	exit := func(code int) {
		if cpuProfile != "" {
			pprof.StopCPUProfile()
		}
		if memProfile != "" {
			f, err := os.Create(memProfile)
			if err != nil {
				panic(err)
			}
			// Force a GC so the heap profile reflects live data only.
			runtime.GC()
			pprof.WriteHeapProfile(f)
		}
		os.Exit(code)
	}
	if cpuProfile != "" {
		f, err := os.Create(cpuProfile)
		if err != nil {
			log.Fatal(err)
		}
		pprof.StartCPUProfile(f)
	}

	if debugVersion {
		version.Verbose()
		exit(0)
	}

	if printVersion {
		version.Print()
		exit(0)
	}

	// Validate that the tags argument is well-formed. go/packages
	// doesn't detect malformed build flags and returns unhelpful
	// errors.
	tf := buildutil.TagsFlag{}
	if err := tf.Set(tags); err != nil {
		fmt.Fprintln(os.Stderr, fmt.Errorf("invalid value %q for flag -tags: %s", tags, err))
		exit(1)
	}

	// -explain: print a single check's documentation and quit.
	if explain != "" {
		var haystack []*analysis.Analyzer
		haystack = append(haystack, cs...)
		for _, cum := range cums {
			haystack = append(haystack, cum.Analyzer())
		}
		check, ok := findCheck(haystack, explain)
		if !ok {
			fmt.Fprintln(os.Stderr, "Couldn't find check", explain)
			exit(1)
		}
		if check.Doc == "" {
			fmt.Fprintln(os.Stderr, explain, "has no documentation")
			exit(1)
		}
		fmt.Println(check.Doc)
		exit(0)
	}

	ps, err := Lint(cs, cums, fs.Args(), &Options{
		Tags:      tags,
		LintTests: tests,
		GoVersion: goVersion,
		Config:    cfg,
	})
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		exit(1)
	}

	var f format.Formatter
	switch formatter {
	case "text":
		f = format.Text{W: os.Stdout}
	case "stylish":
		f = &format.Stylish{W: os.Stdout}
	case "json":
		f = format.JSON{W: os.Stdout}
	default:
		fmt.Fprintf(os.Stderr, "unsupported output format %q\n", formatter)
		exit(2)
	}

	var (
		total    int
		errors   int
		warnings int
	)

	// Checks listed in -fail (plus the synthetic "compile" check) count
	// as errors and force a non-zero exit status; everything else is
	// downgraded to a warning.
	fail := *fs.Lookup("fail").Value.(*list)
	analyzers := make([]*analysis.Analyzer, len(cs), len(cs)+len(cums))
	copy(analyzers, cs)
	for _, cum := range cums {
		analyzers = append(analyzers, cum.Analyzer())
	}
	shouldExit := lint.FilterChecks(analyzers, fail)
	shouldExit["compile"] = true

	total = len(ps)
	for _, p := range ps {
		if p.Check == "compile" && debugNoCompile {
			continue
		}
		if p.Severity == lint.Ignored && !showIgnored {
			continue
		}
		if shouldExit[p.Check] {
			errors++
		} else {
			p.Severity = lint.Warning
			warnings++
		}
		f.Format(p)
	}
	// Formatters that can print a summary (e.g. Stylish) get the totals.
	if f, ok := f.(format.Statter); ok {
		f.Stats(total, errors, warnings)
	}
	if errors > 0 {
		exit(1)
	}
	exit(0)
}
|
||||
|
||||
// Options configures a call to Lint.
type Options struct {
	// Config is the base configuration applied to all packages.
	Config config.Config

	// Tags is passed to go/packages as "-tags" build flags when non-empty.
	Tags string
	// LintTests also loads and checks test files.
	LintTests bool
	// GoVersion is the targeted Go minor version (the x in 1.x).
	GoVersion int
}
|
||||
|
||||
func computeSalt() ([]byte, error) {
|
||||
if version.Version != "devel" {
|
||||
return []byte(version.Version), nil
|
||||
}
|
||||
p, err := os.Executable()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f, err := os.Open(p)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
h := sha256.New()
|
||||
if _, err := io.Copy(h, f); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return h.Sum(nil), nil
|
||||
}
|
||||
|
||||
// Lint runs the given checkers and cumulative checkers over the
// packages matched by paths and returns the unfiltered problems. A nil
// opt is equivalent to the zero Options.
func Lint(cs []*analysis.Analyzer, cums []lint.CumulativeChecker, paths []string, opt *Options) ([]lint.Problem, error) {
	// Salt the fact cache so entries from other tool versions are not
	// reused.
	salt, err := computeSalt()
	if err != nil {
		return nil, fmt.Errorf("could not compute salt for cache: %s", err)
	}
	cache.SetSalt(salt)

	if opt == nil {
		opt = &Options{}
	}

	l := &lint.Linter{
		Checkers:           cs,
		CumulativeCheckers: cums,
		GoVersion:          opt.GoVersion,
		Config:             opt.Config,
	}
	cfg := &packages.Config{}
	if opt.LintTests {
		cfg.Tests = true
	}
	if opt.Tags != "" {
		cfg.BuildFlags = append(cfg.BuildFlags, "-tags", opt.Tags)
	}

	printStats := func() {
		// Individual stats are read atomically, but overall there
		// is no synchronisation. For printing rough progress
		// information, this doesn't matter.
		switch atomic.LoadUint32(&l.Stats.State) {
		case lint.StateInitializing:
			fmt.Fprintln(os.Stderr, "Status: initializing")
		case lint.StateGraph:
			fmt.Fprintln(os.Stderr, "Status: loading package graph")
		case lint.StateProcessing:
			fmt.Fprintf(os.Stderr, "Packages: %d/%d initial, %d/%d total; Workers: %d/%d; Problems: %d\n",
				atomic.LoadUint32(&l.Stats.ProcessedInitialPackages),
				atomic.LoadUint32(&l.Stats.InitialPackages),
				atomic.LoadUint32(&l.Stats.ProcessedPackages),
				atomic.LoadUint32(&l.Stats.TotalPackages),
				atomic.LoadUint32(&l.Stats.ActiveWorkers),
				atomic.LoadUint32(&l.Stats.TotalWorkers),
				atomic.LoadUint32(&l.Stats.Problems),
			)
		case lint.StateCumulative:
			fmt.Fprintln(os.Stderr, "Status: processing cumulative checkers")
		}
	}
	// On platforms that define a status signal (SIGINFO/SIGUSR1), print
	// progress whenever one arrives for the duration of the run.
	if len(infoSignals) > 0 {
		ch := make(chan os.Signal, 1)
		signal.Notify(ch, infoSignals...)
		defer signal.Stop(ch)
		go func() {
			for range ch {
				printStats()
			}
		}()
	}

	return l.Lint(cfg, paths)
}
|
||||
|
||||
// posRe matches "file:line" or "file:line:col" positions; the column
// group is optional.
var posRe = regexp.MustCompile(`^(.+?):(\d+)(?::(\d+)?)?$`)

// parsePos converts a "file:line:col" string into a token.Position.
// "-" and the empty string yield the zero Position; any other malformed
// input is an internal error and panics.
func parsePos(pos string) token.Position {
	if pos == "-" || pos == "" {
		return token.Position{}
	}
	m := posRe.FindStringSubmatch(pos)
	if m == nil {
		panic(fmt.Sprintf("internal error: malformed position %q", pos))
	}
	// Atoi errors are impossible here: the groups only match digits. A
	// missing column group leaves Column at 0.
	ln, _ := strconv.Atoi(m[2])
	col, _ := strconv.Atoi(m[3])
	return token.Position{Filename: m[1], Line: ln, Column: col}
}
|
970
vendor/honnef.co/go/tools/lint/runner.go
vendored
Normal file
970
vendor/honnef.co/go/tools/lint/runner.go
vendored
Normal file
@ -0,0 +1,970 @@
|
||||
package lint
|
||||
|
||||
/*
|
||||
Parallelism
|
||||
|
||||
Runner implements parallel processing of packages by spawning one
|
||||
goroutine per package in the dependency graph, without any semaphores.
|
||||
Each goroutine initially waits on the completion of all of its
|
||||
dependencies, thus establishing correct order of processing. Once all
|
||||
dependencies finish processing, the goroutine will load the package
|
||||
from export data or source – this loading is guarded by a semaphore,
|
||||
sized according to the number of CPU cores. This way, we only have as
|
||||
many packages occupying memory and CPU resources as there are actual
|
||||
cores to process them.
|
||||
|
||||
This combination of unbounded goroutines but bounded package loading
|
||||
means that if we have many parallel, independent subgraphs, they will
|
||||
all execute in parallel, while not wasting resources for long linear
|
||||
chains or trying to process more subgraphs in parallel than the system
|
||||
can handle.
|
||||
|
||||
*/
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/gob"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/token"
|
||||
"go/types"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"golang.org/x/tools/go/analysis"
|
||||
"golang.org/x/tools/go/packages"
|
||||
"golang.org/x/tools/go/types/objectpath"
|
||||
"honnef.co/go/tools/config"
|
||||
"honnef.co/go/tools/facts"
|
||||
"honnef.co/go/tools/internal/cache"
|
||||
"honnef.co/go/tools/loader"
|
||||
)
|
||||
|
||||
// If enabled, abuse of the go/analysis API will lead to panics
// (e.g. importing or exporting facts from an analyzer that declares no
// FactTypes). Disable only to trade safety checks for speed.
const sanityCheck = true
|
||||
|
||||
// OPT(dh): for a dependency tree A->B->C->D, if we have cached data
|
||||
// for B, there should be no need to load C and D individually. Go's
|
||||
// export data for B contains all the data we need on types, and our
|
||||
// fact cache could store the union of B, C and D in B.
|
||||
//
|
||||
// This may change unused's behavior, however, as it may observe fewer
|
||||
// interfaces from transitive dependencies.
|
||||
|
||||
// Package wraps a packages.Package with the bookkeeping the runner
// needs: reference counting, per-analyzer results and facts, and
// collected problems.
type Package struct {
	// dependents counts packages (including this one) that still need
	// this package's data; manipulated atomically by decUse.
	dependents uint64

	*packages.Package
	// Imports mirrors packages.Package.Imports with runner wrappers.
	Imports []*Package
	// initial marks packages named on the command line; only these are
	// loaded from source and have their problems reported.
	initial bool
	// fromSource records whether type information came from source
	// rather than export data.
	fromSource bool
	// hash identifies this package's contents for the fact cache.
	hash string
	done chan struct{}

	resultsMu sync.Mutex
	// results maps analyzer IDs to analyzer results
	results []*result

	cfg      *config.Config
	gen      map[string]facts.Generator
	problems []Problem
	ignores  []Ignore
	errs     []error

	// these slices are indexed by analysis
	facts    []map[types.Object][]analysis.Fact
	pkgFacts [][]analysis.Fact

	// canClearTypes allows dropping type information once no dependents
	// remain; it is false when cumulative checkers need it later.
	canClearTypes bool
}
|
||||
|
||||
func (pkg *Package) decUse() {
|
||||
atomic.AddUint64(&pkg.dependents, ^uint64(0))
|
||||
if atomic.LoadUint64(&pkg.dependents) == 0 {
|
||||
// nobody depends on this package anymore
|
||||
if pkg.canClearTypes {
|
||||
pkg.Types = nil
|
||||
}
|
||||
pkg.facts = nil
|
||||
pkg.pkgFacts = nil
|
||||
|
||||
for _, imp := range pkg.Imports {
|
||||
imp.decUse()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// result memoizes one analyzer's outcome on one package. ready is
// closed once v and err are final, so other goroutines can wait on it.
type result struct {
	v     interface{}
	err   error
	ready chan struct{}
}
|
||||
|
||||
// Runner executes analyzers over a package graph, caching facts on disk
// and bounding how many packages are loaded concurrently.
type Runner struct {
	ld    loader.Loader
	cache *cache.Cache

	// analyzerIDs assigns a dense index to every analyzer, used to
	// index the per-package result/fact slices.
	analyzerIDs analyzerIDs

	// limits parallelism of loading packages
	loadSem chan struct{}

	goVersion int
	stats     *Stats
}
|
||||
|
||||
// analyzerIDs maps analyzers to dense integer IDs assigned in Run.
type analyzerIDs struct {
	m map[*analysis.Analyzer]int
}

// get returns a's ID. Asking for an unregistered analyzer is an
// internal error and panics.
func (ids analyzerIDs) get(a *analysis.Analyzer) int {
	id, ok := ids.m[a]
	if !ok {
		panic(fmt.Sprintf("no analyzer ID for %s", a.Name))
	}
	return id
}
|
||||
|
||||
// Fact is the gob-serialized form of a cached fact. Path is the
// objectpath of the object the fact is attached to; an empty Path marks
// a package-level fact.
type Fact struct {
	Path string
	Fact analysis.Fact
}
|
||||
|
||||
// analysisAction is one (analyzer, package) unit of work. It carries
// the fact views handed to the analysis.Pass and accumulates newly
// produced facts and problems.
type analysisAction struct {
	analyzer   *analysis.Analyzer
	analyzerID int
	pkg        *Package
	// newPackageFacts collects package facts exported during this run,
	// for later persistence.
	newPackageFacts []analysis.Fact
	problems        []Problem

	// pkgFacts is the merged view of package facts of this package and
	// all its (transitive) dependencies.
	pkgFacts map[*types.Package][]analysis.Fact
}

// String identifies the action as "analyzer @ package" for debugging.
func (ac *analysisAction) String() string {
	return fmt.Sprintf("%s @ %s", ac.analyzer, ac.pkg)
}
|
||||
|
||||
func (ac *analysisAction) allObjectFacts() []analysis.ObjectFact {
|
||||
out := make([]analysis.ObjectFact, 0, len(ac.pkg.facts[ac.analyzerID]))
|
||||
for obj, facts := range ac.pkg.facts[ac.analyzerID] {
|
||||
for _, fact := range facts {
|
||||
out = append(out, analysis.ObjectFact{
|
||||
Object: obj,
|
||||
Fact: fact,
|
||||
})
|
||||
}
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func (ac *analysisAction) allPackageFacts() []analysis.PackageFact {
|
||||
out := make([]analysis.PackageFact, 0, len(ac.pkgFacts))
|
||||
for pkg, facts := range ac.pkgFacts {
|
||||
for _, fact := range facts {
|
||||
out = append(out, analysis.PackageFact{
|
||||
Package: pkg,
|
||||
Fact: fact,
|
||||
})
|
||||
}
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func (ac *analysisAction) importObjectFact(obj types.Object, fact analysis.Fact) bool {
|
||||
if sanityCheck && len(ac.analyzer.FactTypes) == 0 {
|
||||
panic("analysis doesn't export any facts")
|
||||
}
|
||||
for _, f := range ac.pkg.facts[ac.analyzerID][obj] {
|
||||
if reflect.TypeOf(f) == reflect.TypeOf(fact) {
|
||||
reflect.ValueOf(fact).Elem().Set(reflect.ValueOf(f).Elem())
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (ac *analysisAction) importPackageFact(pkg *types.Package, fact analysis.Fact) bool {
|
||||
if sanityCheck && len(ac.analyzer.FactTypes) == 0 {
|
||||
panic("analysis doesn't export any facts")
|
||||
}
|
||||
for _, f := range ac.pkgFacts[pkg] {
|
||||
if reflect.TypeOf(f) == reflect.TypeOf(fact) {
|
||||
reflect.ValueOf(fact).Elem().Set(reflect.ValueOf(f).Elem())
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// exportObjectFact records a fact for obj under this action's analyzer.
// It implements analysis.Pass.ExportObjectFact.
func (ac *analysisAction) exportObjectFact(obj types.Object, fact analysis.Fact) {
	if sanityCheck && len(ac.analyzer.FactTypes) == 0 {
		panic("analysis doesn't export any facts")
	}
	ac.pkg.facts[ac.analyzerID][obj] = append(ac.pkg.facts[ac.analyzerID][obj], fact)
}

// exportPackageFact records a package-level fact, both in the merged
// view (pkgFacts) and in newPackageFacts so it can be persisted after
// the run. It implements analysis.Pass.ExportPackageFact.
func (ac *analysisAction) exportPackageFact(fact analysis.Fact) {
	if sanityCheck && len(ac.analyzer.FactTypes) == 0 {
		panic("analysis doesn't export any facts")
	}
	ac.pkgFacts[ac.pkg.Types] = append(ac.pkgFacts[ac.pkg.Types], fact)
	ac.newPackageFacts = append(ac.newPackageFacts, fact)
}
|
||||
|
||||
// report converts a go/analysis diagnostic into a Problem and records
// it on the action. Used as (part of) the Pass's Report hook.
func (ac *analysisAction) report(pass *analysis.Pass, d analysis.Diagnostic) {
	p := Problem{
		Pos:     DisplayPosition(pass.Fset, d.Pos),
		End:     DisplayPosition(pass.Fset, d.End),
		Message: d.Message,
		Check:   pass.Analyzer.Name,
	}
	ac.problems = append(ac.problems, p)
}
|
||||
|
||||
// runAnalysis executes one analyzer on one package, memoizing the
// outcome in pkg.results so concurrent and repeated requests for the
// same (analyzer, package) pair share a single execution. The named
// return values are captured by the deferred closure that publishes the
// result.
func (r *Runner) runAnalysis(ac *analysisAction) (ret interface{}, err error) {
	ac.pkg.resultsMu.Lock()
	res := ac.pkg.results[r.analyzerIDs.get(ac.analyzer)]
	if res != nil {
		// Another goroutine already started (or finished) this work;
		// wait for its result instead of recomputing.
		ac.pkg.resultsMu.Unlock()
		<-res.ready
		return res.v, res.err
	} else {
		// Claim the slot while holding the lock, then do the actual
		// work unlocked; waiters block on res.ready.
		res = &result{
			ready: make(chan struct{}),
		}
		ac.pkg.results[r.analyzerIDs.get(ac.analyzer)] = res
		ac.pkg.resultsMu.Unlock()

		defer func() {
			res.v = ret
			res.err = err
			close(res.ready)
		}()

		pass := new(analysis.Pass)
		*pass = analysis.Pass{
			Analyzer: ac.analyzer,
			Fset:     ac.pkg.Fset,
			Files:    ac.pkg.Syntax,
			// type information may be nil or may be populated. if it is
			// nil, it will get populated later.
			Pkg:               ac.pkg.Types,
			TypesInfo:         ac.pkg.TypesInfo,
			TypesSizes:        ac.pkg.TypesSizes,
			ResultOf:          map[*analysis.Analyzer]interface{}{},
			ImportObjectFact:  ac.importObjectFact,
			ImportPackageFact: ac.importPackageFact,
			ExportObjectFact:  ac.exportObjectFact,
			ExportPackageFact: ac.exportPackageFact,
			Report: func(d analysis.Diagnostic) {
				ac.report(pass, d)
			},
			AllObjectFacts:  ac.allObjectFacts,
			AllPackageFacts: ac.allPackageFacts,
		}

		if !ac.pkg.initial {
			// Don't report problems in dependencies
			pass.Report = func(analysis.Diagnostic) {}
		}
		return r.runAnalysisUser(pass, ac)
	}
}
|
||||
|
||||
// loadCachedFacts fetches a's facts for pkg from the on-disk cache. The
// boolean reports success; on any cache miss or decode failure the
// caller must analyse the package from source instead. Analyzers that
// export no facts trivially succeed with no facts.
func (r *Runner) loadCachedFacts(a *analysis.Analyzer, pkg *Package) ([]Fact, bool) {
	if len(a.FactTypes) == 0 {
		return nil, true
	}

	var facts []Fact
	// Look in the cache for facts
	aID, err := passActionID(pkg, a)
	if err != nil {
		return nil, false
	}
	aID = cache.Subkey(aID, "facts")
	b, _, err := r.cache.GetBytes(aID)
	if err != nil {
		// No cached facts, analyse this package like a user-provided one, but ignore diagnostics
		return nil, false
	}

	if err := gob.NewDecoder(bytes.NewReader(b)).Decode(&facts); err != nil {
		// Cached facts are broken, analyse this package like a user-provided one, but ignore diagnostics
		return nil, false
	}
	return facts, true
}
|
||||
|
||||
type dependencyError struct {
|
||||
dep string
|
||||
err error
|
||||
}
|
||||
|
||||
func (err dependencyError) nested() dependencyError {
|
||||
if o, ok := err.err.(dependencyError); ok {
|
||||
return o.nested()
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (err dependencyError) Error() string {
|
||||
if o, ok := err.err.(dependencyError); ok {
|
||||
return o.Error()
|
||||
}
|
||||
return fmt.Sprintf("error running dependency %s: %s", err.dep, err.err)
|
||||
}
|
||||
|
||||
// makeAnalysisAction builds the unit of work for running a on pkg. For
// fact-exporting analyzers it also assembles the merged view of package
// facts from pkg and all transitive dependencies.
func (r *Runner) makeAnalysisAction(a *analysis.Analyzer, pkg *Package) *analysisAction {
	aid := r.analyzerIDs.get(a)
	ac := &analysisAction{
		analyzer:   a,
		analyzerID: aid,
		pkg:        pkg,
	}

	if len(a.FactTypes) == 0 {
		return ac
	}

	// Merge all package facts of dependencies
	ac.pkgFacts = map[*types.Package][]analysis.Fact{}
	seen := map[*Package]struct{}{}
	var dfs func(*Package)
	dfs = func(pkg *Package) {
		if _, ok := seen[pkg]; ok {
			return
		}
		seen[pkg] = struct{}{}
		s := pkg.pkgFacts[aid]
		// Full-capacity slice: appends by the consumer allocate a new
		// backing array instead of clobbering the package's fact list.
		ac.pkgFacts[pkg.Types] = s[0:len(s):len(s)]
		for _, imp := range pkg.Imports {
			dfs(imp)
		}
	}
	dfs(pkg)

	return ac
}
|
||||
|
||||
// analyzes that we always want to run, even if they're not being run
// explicitly or as dependencies. these are necessary for the inner
// workings of the runner (detecting generated files and per-package
// configuration).
var injectedAnalyses = []*analysis.Analyzer{facts.Generated, config.Analyzer}
|
||||
|
||||
// runAnalysisUser runs an analyzer on a package that was loaded from
// source: first its required analyzers (whose results feed
// pass.ResultOf), then the analyzer itself, and finally persists any
// facts it produced into the on-disk cache.
func (r *Runner) runAnalysisUser(pass *analysis.Pass, ac *analysisAction) (interface{}, error) {
	if !ac.pkg.fromSource {
		panic(fmt.Sprintf("internal error: %s was not loaded from source", ac.pkg))
	}

	// User-provided package, analyse it
	// First analyze it with dependencies
	for _, req := range ac.analyzer.Requires {
		acReq := r.makeAnalysisAction(req, ac.pkg)
		ret, err := r.runAnalysis(acReq)
		if err != nil {
			// We couldn't run a dependency, no point in going on
			return nil, dependencyError{req.Name, err}
		}

		pass.ResultOf[req] = ret
	}

	// Then with this analyzer
	ret, err := ac.analyzer.Run(pass)
	if err != nil {
		return nil, err
	}

	if len(ac.analyzer.FactTypes) > 0 {
		// Merge new facts into the package and persist them.
		var facts []Fact
		// Package facts are serialized with an empty Path.
		for _, fact := range ac.newPackageFacts {
			id := r.analyzerIDs.get(ac.analyzer)
			ac.pkg.pkgFacts[id] = append(ac.pkg.pkgFacts[id], fact)
			facts = append(facts, Fact{"", fact})
		}
		// Object facts are keyed by objectpath; only objects belonging
		// to this package are persisted, and objects without a stable
		// path are skipped.
		for obj, afacts := range ac.pkg.facts[ac.analyzerID] {
			if obj.Pkg() != ac.pkg.Package.Types {
				continue
			}
			path, err := objectpath.For(obj)
			if err != nil {
				continue
			}
			for _, fact := range afacts {
				facts = append(facts, Fact{string(path), fact})
			}
		}

		buf := &bytes.Buffer{}
		if err := gob.NewEncoder(buf).Encode(facts); err != nil {
			return nil, err
		}
		aID, err := passActionID(ac.pkg, ac.analyzer)
		if err != nil {
			return nil, err
		}
		aID = cache.Subkey(aID, "facts")
		if err := r.cache.PutBytes(aID, buf.Bytes()); err != nil {
			return nil, err
		}
	}

	return ret, nil
}
|
||||
|
||||
func NewRunner(stats *Stats) (*Runner, error) {
|
||||
cache, err := cache.Default()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &Runner{
|
||||
cache: cache,
|
||||
stats: stats,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Run loads packages corresponding to patterns and analyses them with
// analyzers. It returns the loaded packages, which contain reported
// diagnostics as well as extracted ignore directives.
//
// Note that diagnostics have not been filtered at this point yet, to
// accommodate cumulative analyzes that require additional steps to
// produce diagnostics.
func (r *Runner) Run(cfg *packages.Config, patterns []string, analyzers []*analysis.Analyzer, hasCumulative bool) ([]*Package, error) {
	// Assign a dense ID to every analyzer in the transitive Requires
	// graph (plus the injected ones) and register fact types with gob
	// for cache (de)serialization.
	r.analyzerIDs = analyzerIDs{m: map[*analysis.Analyzer]int{}}
	id := 0
	seen := map[*analysis.Analyzer]struct{}{}
	var dfs func(a *analysis.Analyzer)
	dfs = func(a *analysis.Analyzer) {
		if _, ok := seen[a]; ok {
			return
		}
		seen[a] = struct{}{}
		r.analyzerIDs.m[a] = id
		id++
		for _, f := range a.FactTypes {
			gob.Register(f)
		}
		for _, req := range a.Requires {
			dfs(req)
		}
	}
	for _, a := range analyzers {
		// Propagate the targeted Go version to analyzers that expose a
		// -go flag.
		if v := a.Flags.Lookup("go"); v != nil {
			v.Value.Set(fmt.Sprintf("1.%d", r.goVersion))
		}
		dfs(a)
	}
	for _, a := range injectedAnalyses {
		dfs(a)
	}

	var dcfg packages.Config
	if cfg != nil {
		dcfg = *cfg
	}

	atomic.StoreUint32(&r.stats.State, StateGraph)
	initialPkgs, err := r.ld.Graph(dcfg, patterns...)
	if err != nil {
		return nil, err
	}

	defer r.cache.Trim()

	// Wrap every loaded package (postorder, so imports are wrapped
	// before their importers) and wire up reference counts and errors.
	var allPkgs []*Package
	m := map[*packages.Package]*Package{}
	packages.Visit(initialPkgs, nil, func(l *packages.Package) {
		m[l] = &Package{
			Package:  l,
			results:  make([]*result, len(r.analyzerIDs.m)),
			facts:    make([]map[types.Object][]analysis.Fact, len(r.analyzerIDs.m)),
			pkgFacts: make([][]analysis.Fact, len(r.analyzerIDs.m)),
			done:     make(chan struct{}),
			// every package needs itself
			dependents:    1,
			canClearTypes: !hasCumulative,
		}
		allPkgs = append(allPkgs, m[l])
		for i := range m[l].facts {
			m[l].facts[i] = map[types.Object][]analysis.Fact{}
		}
		for _, err := range l.Errors {
			m[l].errs = append(m[l].errs, err)
		}
		for _, v := range l.Imports {
			m[v].dependents++
			m[l].Imports = append(m[l].Imports, m[v])
		}

		m[l].hash, err = packageHash(m[l])
		if err != nil {
			m[l].errs = append(m[l].errs, err)
		}
	})

	pkgs := make([]*Package, len(initialPkgs))
	for i, l := range initialPkgs {
		pkgs[i] = m[l]
		pkgs[i].initial = true
	}

	atomic.StoreUint32(&r.stats.InitialPackages, uint32(len(initialPkgs)))
	atomic.StoreUint32(&r.stats.TotalPackages, uint32(len(allPkgs)))
	atomic.StoreUint32(&r.stats.State, StateProcessing)

	// One goroutine per package; ordering is enforced inside processPkg
	// (each package waits for its dependencies), while loadSem bounds
	// how many packages are loaded/processed at once.
	var wg sync.WaitGroup
	wg.Add(len(allPkgs))
	r.loadSem = make(chan struct{}, runtime.GOMAXPROCS(-1))
	atomic.StoreUint32(&r.stats.TotalWorkers, uint32(cap(r.loadSem)))
	for _, pkg := range allPkgs {
		pkg := pkg
		go func() {
			r.processPkg(pkg, analyzers)

			if pkg.initial {
				atomic.AddUint32(&r.stats.ProcessedInitialPackages, 1)
			}
			atomic.AddUint32(&r.stats.Problems, uint32(len(pkg.problems)))
			wg.Done()
		}()
	}
	wg.Wait()

	return pkgs, nil
}
|
||||
|
||||
// posRe matches a "file:line" or "file:line:col" prefix; unlike a fully
// anchored pattern, trailing text after the position is permitted.
var posRe = regexp.MustCompile(`^(.+?):(\d+)(?::(\d+)?)?`)

// parsePos parses a leading "file:line:col" position from pos,
// returning the position, the number of bytes consumed, and an error
// for malformed input. "-" and the empty string yield the zero
// Position.
func parsePos(pos string) (token.Position, int, error) {
	if pos == "-" || pos == "" {
		return token.Position{}, 0, nil
	}
	m := posRe.FindStringSubmatch(pos)
	if m == nil {
		return token.Position{}, 0, fmt.Errorf("malformed position %q", pos)
	}
	// Atoi cannot fail on the matched digit groups; an absent column
	// group leaves Column at 0.
	ln, _ := strconv.Atoi(m[2])
	col, _ := strconv.Atoi(m[3])
	p := token.Position{
		Filename: m[1],
		Line:     ln,
		Column:   col,
	}
	return p, len(m[0]), nil
}
|
||||
|
||||
// loadPkg loads a Go package. If the package is in the set of initial
// packages, it will be loaded from source, otherwise it will be
// loaded from export data. In the case that the package was loaded
// from export data, cached facts will also be loaded.
//
// Currently, only cached facts for this package will be loaded, not
// for any of its dependencies.
func (r *Runner) loadPkg(pkg *Package, analyzers []*analysis.Analyzer) error {
	// A non-nil Types means type information was already populated;
	// loading the same package twice is a programming error.
	if pkg.Types != nil {
		panic(fmt.Sprintf("internal error: %s has already been loaded", pkg.Package))
	}

	// Load type information
	if pkg.initial {
		// Load package from source
		pkg.fromSource = true
		return r.ld.LoadFromSource(pkg.Package)
	}

	// Load package from export data
	if err := r.ld.LoadFromExport(pkg.Package); err != nil {
		// We asked Go to give us up to date export data, yet
		// we can't load it. There must be something wrong.
		//
		// Attempt loading from source. This should fail (because
		// otherwise there would be export data); we just want to
		// get the compile errors. If loading from source succeeds
		// we discard the result, anyway. Otherwise we'll fail
		// when trying to reload from export data later.
		//
		// FIXME(dh): we no longer reload from export data, so
		// theoretically we should be able to continue
		pkg.fromSource = true
		if err := r.ld.LoadFromSource(pkg.Package); err != nil {
			return err
		}
		// Make sure this package can't be imported successfully
		pkg.Package.Errors = append(pkg.Package.Errors, packages.Error{
			Pos:  "-",
			Msg:  fmt.Sprintf("could not load export data: %s", err),
			Kind: packages.ParseError,
		})
		return fmt.Errorf("could not load export data: %s", err)
	}

	// Walk the transitive requirements of the requested analyzers and
	// try to restore their cached facts for this package. If any
	// analyzer's facts are missing we have to fall back to loading the
	// package from source so the facts can be recomputed.
	failed := false
	seen := make([]bool, len(r.analyzerIDs.m))
	var dfs func(*analysis.Analyzer)
	dfs = func(a *analysis.Analyzer) {
		if seen[r.analyzerIDs.get(a)] {
			return
		}
		seen[r.analyzerIDs.get(a)] = true

		if len(a.FactTypes) > 0 {
			facts, ok := r.loadCachedFacts(a, pkg)
			if !ok {
				failed = true
				return
			}

			for _, f := range facts {
				if f.Path == "" {
					// This is a package fact
					pkg.pkgFacts[r.analyzerIDs.get(a)] = append(pkg.pkgFacts[r.analyzerIDs.get(a)], f.Fact)
					continue
				}
				// Object facts are keyed by an objectpath; resolve
				// the path back to a types.Object in this package.
				obj, err := objectpath.Object(pkg.Types, objectpath.Path(f.Path))
				if err != nil {
					// Be lenient about these errors. For example, when
					// analysing io/ioutil from source, we may get a fact
					// for methods on the devNull type, and objectpath
					// will happily create a path for them. However, when
					// we later load io/ioutil from export data, the path
					// no longer resolves.
					//
					// If an exported type embeds the unexported type,
					// then (part of) the unexported type will become part
					// of the type information and our path will resolve
					// again.
					continue
				}
				pkg.facts[r.analyzerIDs.get(a)][obj] = append(pkg.facts[r.analyzerIDs.get(a)][obj], f.Fact)
			}
		}

		for _, req := range a.Requires {
			dfs(req)
		}
	}
	for _, a := range analyzers {
		dfs(a)
	}

	if failed {
		pkg.fromSource = true
		// XXX we added facts to the maps, we need to get rid of those
		return r.ld.LoadFromSource(pkg.Package)
	}

	return nil
}
|
||||
|
||||
// analysisError records the failure of a single analyzer on a single
// package, so the error can later be attributed to both.
type analysisError struct {
	analyzer *analysis.Analyzer
	pkg      *Package
	err      error
}

// Error implements the error interface.
func (err analysisError) Error() string {
	return fmt.Sprintf("error running analyzer %s on %s: %s", err.analyzer, err.pkg, err.err)
}
|
||||
|
||||
// processPkg processes a package. This involves loading the package,
// either from export data or from source. For packages loaded from
// source, the provided analyzers will be run on the package.
func (r *Runner) processPkg(pkg *Package, analyzers []*analysis.Analyzer) {
	defer func() {
		// Clear information we no longer need. Make sure to do this
		// when returning from processPkg so that we clear
		// dependencies, not just initial packages.
		pkg.TypesInfo = nil
		pkg.Syntax = nil
		pkg.results = nil

		atomic.AddUint32(&r.stats.ProcessedPackages, 1)
		pkg.decUse()
		// Closing done signals packages importing this one that it
		// has finished processing (see the <-imp.done below).
		close(pkg.done)
	}()

	// Ensure all packages have the generated map and config. This is
	// required by internals of the runner. Analyses that themselves
	// make use of either have an explicit dependency so that other
	// runners work correctly, too.
	// (The three-index slice forces append to copy rather than
	// mutate the caller's backing array.)
	analyzers = append(analyzers[0:len(analyzers):len(analyzers)], injectedAnalyses...)

	if len(pkg.errs) != 0 {
		return
	}

	// Wait for all dependencies; give up if any of them failed.
	for _, imp := range pkg.Imports {
		<-imp.done
		if len(imp.errs) > 0 {
			if imp.initial {
				// Don't print the error of the dependency since it's
				// an initial package and we're already printing the
				// error.
				pkg.errs = append(pkg.errs, fmt.Errorf("could not analyze dependency %s of %s", imp, pkg))
			} else {
				var s string
				for _, err := range imp.errs {
					s += "\n\t" + err.Error()
				}
				pkg.errs = append(pkg.errs, fmt.Errorf("could not analyze dependency %s of %s: %s", imp, pkg, s))
			}
			return
		}
	}
	if pkg.PkgPath == "unsafe" {
		// unsafe has no Go source; use the predeclared package.
		pkg.Types = types.Unsafe
		return
	}

	// Acquire a slot on the load semaphore to bound the number of
	// concurrently loading packages; released in the deferred func.
	r.loadSem <- struct{}{}
	atomic.AddUint32(&r.stats.ActiveWorkers, 1)
	defer func() {
		<-r.loadSem
		// Adding ^uint32(0) atomically decrements the counter.
		atomic.AddUint32(&r.stats.ActiveWorkers, ^uint32(0))
	}()
	if err := r.loadPkg(pkg, analyzers); err != nil {
		pkg.errs = append(pkg.errs, err)
		return
	}

	// A package's object facts are the union of all of its dependencies'.
	for _, imp := range pkg.Imports {
		for ai, m := range imp.facts {
			for obj, facts := range m {
				// Cap the slice so later appends to pkg.facts can't
				// clobber the dependency's backing array.
				pkg.facts[ai][obj] = facts[0:len(facts):len(facts)]
			}
		}
	}

	if !pkg.fromSource {
		// Nothing left to do for the package.
		return
	}

	// Run analyses on initial packages and those missing facts
	var wg sync.WaitGroup
	wg.Add(len(analyzers))
	errs := make([]error, len(analyzers))
	var acs []*analysisAction
	for i, a := range analyzers {
		// Shadow the loop variables so each goroutine captures its
		// own copy.
		i := i
		a := a
		ac := r.makeAnalysisAction(a, pkg)
		acs = append(acs, ac)
		go func() {
			defer wg.Done()
			// Only initial packages and packages with missing
			// facts will have been loaded from source.
			if pkg.initial || r.hasFacts(a) {
				if _, err := r.runAnalysis(ac); err != nil {
					errs[i] = analysisError{a, pkg, err}
					return
				}
			}
		}()
	}
	wg.Wait()

	// Collapse repeated dependency failures into one message per
	// failing dependency, counting how many analyzers it blocked.
	depErrors := map[dependencyError]int{}
	for _, err := range errs {
		if err == nil {
			continue
		}
		switch err := err.(type) {
		case analysisError:
			switch err := err.err.(type) {
			case dependencyError:
				depErrors[err.nested()]++
			default:
				pkg.errs = append(pkg.errs, err)
			}
		default:
			pkg.errs = append(pkg.errs, err)
		}
	}
	for err, count := range depErrors {
		pkg.errs = append(pkg.errs,
			fmt.Errorf("could not run %s@%s, preventing %d analyzers from running: %s", err.dep, pkg, count, err.err))
	}

	// We can't process ignores at this point because `unused` needs
	// to see more than one package to make its decision.
	ignores, problems := parseDirectives(pkg.Package)
	pkg.ignores = append(pkg.ignores, ignores...)
	pkg.problems = append(pkg.problems, problems...)
	for _, ac := range acs {
		pkg.problems = append(pkg.problems, ac.problems...)
	}

	if pkg.initial {
		// Only initial packages have these analyzers run, and only
		// initial packages need these.
		if pkg.results[r.analyzerIDs.get(config.Analyzer)].v != nil {
			pkg.cfg = pkg.results[r.analyzerIDs.get(config.Analyzer)].v.(*config.Config)
		}
		pkg.gen = pkg.results[r.analyzerIDs.get(facts.Generated)].v.(map[string]facts.Generator)
	}

	// In a previous version of the code, we would throw away all type
	// information and reload it from export data. That was
	// nonsensical. The *types.Package doesn't keep any information
	// live that export data wouldn't also. We only need to discard
	// the AST and the TypesInfo maps; that happens after we return
	// from processPkg.
}
|
||||
|
||||
// hasFacts reports whether an analysis exports any facts. An analysis
|
||||
// that has a transitive dependency that exports facts is considered
|
||||
// to be exporting facts.
|
||||
func (r *Runner) hasFacts(a *analysis.Analyzer) bool {
|
||||
ret := false
|
||||
seen := make([]bool, len(r.analyzerIDs.m))
|
||||
var dfs func(*analysis.Analyzer)
|
||||
dfs = func(a *analysis.Analyzer) {
|
||||
if seen[r.analyzerIDs.get(a)] {
|
||||
return
|
||||
}
|
||||
seen[r.analyzerIDs.get(a)] = true
|
||||
if len(a.FactTypes) > 0 {
|
||||
ret = true
|
||||
}
|
||||
for _, req := range a.Requires {
|
||||
if ret {
|
||||
break
|
||||
}
|
||||
dfs(req)
|
||||
}
|
||||
}
|
||||
dfs(a)
|
||||
return ret
|
||||
}
|
||||
|
||||
// parseDirective splits a "//lint:" comment into its command (e.g.
// "ignore") and the space-separated arguments that follow it. For
// comments without the "//lint:" prefix it returns "" and nil.
func parseDirective(s string) (cmd string, args []string) {
	const prefix = "//lint:"
	if !strings.HasPrefix(s, prefix) {
		return "", nil
	}
	parts := strings.Split(s[len(prefix):], " ")
	return parts[0], parts[1:]
}
|
||||
|
||||
// parseDirectives extracts all linter directives from the source
// files of the package. Malformed directives are returned as problems.
func parseDirectives(pkg *packages.Package) ([]Ignore, []Problem) {
	var ignores []Ignore
	var problems []Problem

	for _, f := range pkg.Syntax {
		// Cheap pre-scan: only build the (expensive) comment map for
		// files that mention "//lint:" at all.
		found := false
	commentLoop:
		for _, cg := range f.Comments {
			for _, c := range cg.List {
				if strings.Contains(c.Text, "//lint:") {
					found = true
					break commentLoop
				}
			}
		}
		if !found {
			continue
		}
		// The comment map associates each comment group with the AST
		// node it documents; the node's position is what an "ignore"
		// directive applies to.
		cm := ast.NewCommentMap(pkg.Fset, f, f.Comments)
		for node, cgs := range cm {
			for _, cg := range cgs {
				for _, c := range cg.List {
					if !strings.HasPrefix(c.Text, "//lint:") {
						continue
					}
					cmd, args := parseDirective(c.Text)
					switch cmd {
					case "ignore", "file-ignore":
						// Both directives require a check list and at
						// least one word of reason.
						if len(args) < 2 {
							p := Problem{
								Pos:      DisplayPosition(pkg.Fset, c.Pos()),
								Message:  "malformed linter directive; missing the required reason field?",
								Severity: Error,
								Check:    "compile",
							}
							problems = append(problems, p)
							continue
						}
					default:
						// unknown directive, ignore
						continue
					}
					checks := strings.Split(args[0], ",")
					pos := DisplayPosition(pkg.Fset, node.Pos())
					var ig Ignore
					switch cmd {
					case "ignore":
						ig = &LineIgnore{
							File:   pos.Filename,
							Line:   pos.Line,
							Checks: checks,
							Pos:    c.Pos(),
						}
					case "file-ignore":
						ig = &FileIgnore{
							File:   pos.Filename,
							Checks: checks,
						}
					}
					ignores = append(ignores, ig)
				}
			}
		}
	}

	return ignores, problems
}
|
||||
|
||||
// packageHash computes a package's hash. The hash is based on all Go
|
||||
// files that make up the package, as well as the hashes of imported
|
||||
// packages.
|
||||
func packageHash(pkg *Package) (string, error) {
|
||||
key := cache.NewHash("package hash")
|
||||
fmt.Fprintf(key, "pkgpath %s\n", pkg.PkgPath)
|
||||
for _, f := range pkg.CompiledGoFiles {
|
||||
h, err := cache.FileHash(f)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
fmt.Fprintf(key, "file %s %x\n", f, h)
|
||||
}
|
||||
|
||||
imps := make([]*Package, len(pkg.Imports))
|
||||
copy(imps, pkg.Imports)
|
||||
sort.Slice(imps, func(i, j int) bool {
|
||||
return imps[i].PkgPath < imps[j].PkgPath
|
||||
})
|
||||
for _, dep := range imps {
|
||||
if dep.PkgPath == "unsafe" {
|
||||
continue
|
||||
}
|
||||
|
||||
fmt.Fprintf(key, "import %s %s\n", dep.PkgPath, dep.hash)
|
||||
}
|
||||
h := key.Sum()
|
||||
return hex.EncodeToString(h[:]), nil
|
||||
}
|
||||
|
||||
// passActionID computes an ActionID for an analysis pass.
|
||||
func passActionID(pkg *Package, analyzer *analysis.Analyzer) (cache.ActionID, error) {
|
||||
key := cache.NewHash("action ID")
|
||||
fmt.Fprintf(key, "pkgpath %s\n", pkg.PkgPath)
|
||||
fmt.Fprintf(key, "pkghash %s\n", pkg.hash)
|
||||
fmt.Fprintf(key, "analyzer %s\n", analyzer.Name)
|
||||
|
||||
return key.Sum(), nil
|
||||
}
|
20
vendor/honnef.co/go/tools/lint/stats.go
vendored
Normal file
20
vendor/honnef.co/go/tools/lint/stats.go
vendored
Normal file
@ -0,0 +1,20 @@
|
||||
package lint
|
||||
|
||||
// States of a linter run, stored in Stats.State. The values are
// sequential and start at zero; iota preserves the original explicit
// numbering (0, 1, 2, 3).
const (
	StateInitializing = iota
	StateGraph
	StateProcessing
	StateCumulative
)

// Stats holds progress counters for a linter run. All fields are
// uint32 so they can be updated with the sync/atomic package while
// the run is in progress; readers should use atomic loads as well.
type Stats struct {
	// State is one of the State* constants above.
	State uint32

	InitialPackages          uint32
	TotalPackages            uint32
	ProcessedPackages        uint32
	ProcessedInitialPackages uint32
	Problems                 uint32
	ActiveWorkers            uint32
	TotalWorkers             uint32
}
|
Reference in New Issue
Block a user