mirror of
https://github.com/ceph/ceph-csi.git
synced 2025-06-13 10:33:35 +00:00
Update to kube v1.17
Signed-off-by: Humble Chirammal <hchiramm@redhat.com>
This commit is contained in:
committed by
mergify[bot]
parent
327fcd1b1b
commit
3af1e26d7c
20
vendor/honnef.co/go/tools/LICENSE
vendored
Normal file
20
vendor/honnef.co/go/tools/LICENSE
vendored
Normal file
@ -0,0 +1,20 @@
|
||||
Copyright (c) 2016 Dominik Honnef
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be
|
||||
included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
||||
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
226
vendor/honnef.co/go/tools/LICENSE-THIRD-PARTY
vendored
Normal file
226
vendor/honnef.co/go/tools/LICENSE-THIRD-PARTY
vendored
Normal file
@ -0,0 +1,226 @@
|
||||
Staticcheck and its related tools make use of third party projects,
|
||||
either by reusing their code, or by statically linking them into
|
||||
resulting binaries. These projects are:
|
||||
|
||||
* The Go Programming Language - https://golang.org/
|
||||
|
||||
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
|
||||
* github.com/BurntSushi/toml - https://github.com/BurntSushi/toml
|
||||
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2013 TOML authors
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
|
||||
|
||||
* github.com/google/renameio - https://github.com/google/renameio
|
||||
|
||||
Copyright 2018 Google Inc.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
||||
|
||||
* github.com/kisielk/gotool – https://github.com/kisielk/gotool
|
||||
|
||||
Copyright (c) 2013 Kamil Kisiel <kamil@kamilkisiel.net>
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be
|
||||
included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
||||
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
||||
All the files in this distribution are covered under either the MIT
|
||||
license (see the file LICENSE) except some files mentioned below.
|
||||
|
||||
match.go, match_test.go:
|
||||
|
||||
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
|
||||
* github.com/rogpeppe/go-internal - https://github.com/rogpeppe/go-internal
|
||||
|
||||
Copyright (c) 2018 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
|
||||
* golang.org/x/mod/module - https://github.com/golang/mod
|
||||
|
||||
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
|
||||
* golang.org/x/tools/go/analysis - https://github.com/golang/tools
|
||||
|
||||
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
48
vendor/honnef.co/go/tools/arg/arg.go
vendored
Normal file
48
vendor/honnef.co/go/tools/arg/arg.go
vendored
Normal file
@ -0,0 +1,48 @@
|
||||
package arg
|
||||
|
||||
// args maps a fully qualified "function.parameter" name to that
// parameter's positional index in the function's argument list.
// Indexed entries like "make.size[0]" refer to variadic-style slots.
var args = map[string]int{
	"(*encoding/json.Decoder).Decode.v":    0,
	"(*encoding/json.Encoder).Encode.v":    0,
	"(*encoding/xml.Decoder).Decode.v":     0,
	"(*encoding/xml.Encoder).Encode.v":     0,
	"(*sync.Pool).Put.x":                   0,
	"(*text/template.Template).Parse.text": 0,
	"(io.Seeker).Seek.offset":              0,
	"(time.Time).Sub.u":                    0,
	"append.elems":                         1,
	"append.slice":                         0,
	"bytes.Equal.a":                        0,
	"bytes.Equal.b":                        1,
	"encoding/binary.Write.data":           2,
	"errors.New.text":                      0,
	"fmt.Fprintf.format":                   1,
	"fmt.Printf.format":                    0,
	"fmt.Sprintf.a[0]":                     1,
	"fmt.Sprintf.format":                   0,
	"json.Marshal.v":                       0,
	"json.Unmarshal.v":                     1,
	"len.v":                                0,
	"make.size[0]":                         1,
	"make.size[1]":                         2,
	"make.t":                               0,
	"net/url.Parse.rawurl":                 0,
	"os.OpenFile.flag":                     1,
	"os/exec.Command.name":                 0,
	"os/signal.Notify.c":                   0,
	"regexp.Compile.expr":                  0,
	"runtime.SetFinalizer.finalizer":       1,
	"runtime.SetFinalizer.obj":             0,
	"sort.Sort.data":                       0,
	"time.Parse.layout":                    0,
	"time.Sleep.d":                         0,
	"xml.Marshal.v":                        0,
	"xml.Unmarshal.v":                      1,
}

// Arg returns the positional index of the named argument. It panics
// when the name is not present in the table, which indicates a
// programming error in the caller.
func Arg(name string) int {
	if idx, ok := args[name]; ok {
		return idx
	}
	panic("unknown argument " + name)
}
|
44
vendor/honnef.co/go/tools/cmd/staticcheck/staticcheck.go
vendored
Normal file
44
vendor/honnef.co/go/tools/cmd/staticcheck/staticcheck.go
vendored
Normal file
@ -0,0 +1,44 @@
|
||||
// staticcheck analyses Go code and makes it better.
|
||||
package main // import "honnef.co/go/tools/cmd/staticcheck"
|
||||
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
|
||||
"golang.org/x/tools/go/analysis"
|
||||
"honnef.co/go/tools/lint"
|
||||
"honnef.co/go/tools/lint/lintutil"
|
||||
"honnef.co/go/tools/simple"
|
||||
"honnef.co/go/tools/staticcheck"
|
||||
"honnef.co/go/tools/stylecheck"
|
||||
"honnef.co/go/tools/unused"
|
||||
)
|
||||
|
||||
func main() {
|
||||
fs := lintutil.FlagSet("staticcheck")
|
||||
wholeProgram := fs.Bool("unused.whole-program", false, "Run unused in whole program mode")
|
||||
debug := fs.String("debug.unused-graph", "", "Write unused's object graph to `file`")
|
||||
fs.Parse(os.Args[1:])
|
||||
|
||||
var cs []*analysis.Analyzer
|
||||
for _, v := range simple.Analyzers {
|
||||
cs = append(cs, v)
|
||||
}
|
||||
for _, v := range staticcheck.Analyzers {
|
||||
cs = append(cs, v)
|
||||
}
|
||||
for _, v := range stylecheck.Analyzers {
|
||||
cs = append(cs, v)
|
||||
}
|
||||
|
||||
u := unused.NewChecker(*wholeProgram)
|
||||
if *debug != "" {
|
||||
f, err := os.OpenFile(*debug, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
u.Debug = f
|
||||
}
|
||||
cums := []lint.CumulativeChecker{u}
|
||||
lintutil.ProcessFlagSet(cs, cums, fs)
|
||||
}
|
224
vendor/honnef.co/go/tools/config/config.go
vendored
Normal file
224
vendor/honnef.co/go/tools/config/config.go
vendored
Normal file
@ -0,0 +1,224 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"github.com/BurntSushi/toml"
|
||||
"golang.org/x/tools/go/analysis"
|
||||
)
|
||||
|
||||
var Analyzer = &analysis.Analyzer{
|
||||
Name: "config",
|
||||
Doc: "loads configuration for the current package tree",
|
||||
Run: func(pass *analysis.Pass) (interface{}, error) {
|
||||
if len(pass.Files) == 0 {
|
||||
cfg := DefaultConfig
|
||||
return &cfg, nil
|
||||
}
|
||||
cache, err := os.UserCacheDir()
|
||||
if err != nil {
|
||||
cache = ""
|
||||
}
|
||||
var path string
|
||||
for _, f := range pass.Files {
|
||||
p := pass.Fset.PositionFor(f.Pos(), true).Filename
|
||||
// FIXME(dh): using strings.HasPrefix isn't technically
|
||||
// correct, but it should be good enough for now.
|
||||
if cache != "" && strings.HasPrefix(p, cache) {
|
||||
// File in the build cache of the standard Go build system
|
||||
continue
|
||||
}
|
||||
path = p
|
||||
break
|
||||
}
|
||||
|
||||
if path == "" {
|
||||
// The package only consists of generated files.
|
||||
cfg := DefaultConfig
|
||||
return &cfg, nil
|
||||
}
|
||||
|
||||
dir := filepath.Dir(path)
|
||||
cfg, err := Load(dir)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error loading staticcheck.conf: %s", err)
|
||||
}
|
||||
return &cfg, nil
|
||||
},
|
||||
RunDespiteErrors: true,
|
||||
ResultType: reflect.TypeOf((*Config)(nil)),
|
||||
}
|
||||
|
||||
func For(pass *analysis.Pass) *Config {
|
||||
return pass.ResultOf[Analyzer].(*Config)
|
||||
}
|
||||
|
||||
func mergeLists(a, b []string) []string {
|
||||
out := make([]string, 0, len(a)+len(b))
|
||||
for _, el := range b {
|
||||
if el == "inherit" {
|
||||
out = append(out, a...)
|
||||
} else {
|
||||
out = append(out, el)
|
||||
}
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
func normalizeList(list []string) []string {
|
||||
if len(list) > 1 {
|
||||
nlist := make([]string, 0, len(list))
|
||||
nlist = append(nlist, list[0])
|
||||
for i, el := range list[1:] {
|
||||
if el != list[i] {
|
||||
nlist = append(nlist, el)
|
||||
}
|
||||
}
|
||||
list = nlist
|
||||
}
|
||||
|
||||
for _, el := range list {
|
||||
if el == "inherit" {
|
||||
// This should never happen, because the default config
|
||||
// should not use "inherit"
|
||||
panic(`unresolved "inherit"`)
|
||||
}
|
||||
}
|
||||
|
||||
return list
|
||||
}
|
||||
|
||||
func (cfg Config) Merge(ocfg Config) Config {
|
||||
if ocfg.Checks != nil {
|
||||
cfg.Checks = mergeLists(cfg.Checks, ocfg.Checks)
|
||||
}
|
||||
if ocfg.Initialisms != nil {
|
||||
cfg.Initialisms = mergeLists(cfg.Initialisms, ocfg.Initialisms)
|
||||
}
|
||||
if ocfg.DotImportWhitelist != nil {
|
||||
cfg.DotImportWhitelist = mergeLists(cfg.DotImportWhitelist, ocfg.DotImportWhitelist)
|
||||
}
|
||||
if ocfg.HTTPStatusCodeWhitelist != nil {
|
||||
cfg.HTTPStatusCodeWhitelist = mergeLists(cfg.HTTPStatusCodeWhitelist, ocfg.HTTPStatusCodeWhitelist)
|
||||
}
|
||||
return cfg
|
||||
}
|
||||
|
||||
// Config holds the staticcheck settings read from staticcheck.conf
// files, decoded via the struct's TOML tags.
type Config struct {
	// TODO(dh): this implementation makes it impossible for external
	// clients to add their own checkers with configuration. At the
	// moment, we don't really care about that; we don't encourage
	// that people use this package. In the future, we may. The
	// obvious solution would be using map[string]interface{}, but
	// that's obviously subpar.

	// Checks selects which checks run; supports "all" and "-XXX" negation.
	Checks []string `toml:"checks"`
	// Initialisms lists words treated as initialisms by naming checks.
	Initialisms []string `toml:"initialisms"`
	// DotImportWhitelist lists packages allowed to be dot-imported.
	DotImportWhitelist []string `toml:"dot_import_whitelist"`
	// HTTPStatusCodeWhitelist lists numeric status codes allowed as literals.
	HTTPStatusCodeWhitelist []string `toml:"http_status_code_whitelist"`
}

// String renders the configuration in Go literal syntax, one field per
// line, for debugging output.
func (c Config) String() string {
	var b bytes.Buffer

	fmt.Fprintf(&b, "Checks: %#v\n", c.Checks)
	fmt.Fprintf(&b, "Initialisms: %#v\n", c.Initialisms)
	fmt.Fprintf(&b, "DotImportWhitelist: %#v\n", c.DotImportWhitelist)
	fmt.Fprintf(&b, "HTTPStatusCodeWhitelist: %#v", c.HTTPStatusCodeWhitelist)

	return b.String()
}
|
||||
|
||||
var DefaultConfig = Config{
|
||||
Checks: []string{"all", "-ST1000", "-ST1003", "-ST1016"},
|
||||
Initialisms: []string{
|
||||
"ACL", "API", "ASCII", "CPU", "CSS", "DNS",
|
||||
"EOF", "GUID", "HTML", "HTTP", "HTTPS", "ID",
|
||||
"IP", "JSON", "QPS", "RAM", "RPC", "SLA",
|
||||
"SMTP", "SQL", "SSH", "TCP", "TLS", "TTL",
|
||||
"UDP", "UI", "GID", "UID", "UUID", "URI",
|
||||
"URL", "UTF8", "VM", "XML", "XMPP", "XSRF",
|
||||
"XSS", "SIP", "RTP",
|
||||
},
|
||||
DotImportWhitelist: []string{},
|
||||
HTTPStatusCodeWhitelist: []string{"200", "400", "404", "500"},
|
||||
}
|
||||
|
||||
const configName = "staticcheck.conf"
|
||||
|
||||
func parseConfigs(dir string) ([]Config, error) {
|
||||
var out []Config
|
||||
|
||||
// TODO(dh): consider stopping at the GOPATH/module boundary
|
||||
for dir != "" {
|
||||
f, err := os.Open(filepath.Join(dir, configName))
|
||||
if os.IsNotExist(err) {
|
||||
ndir := filepath.Dir(dir)
|
||||
if ndir == dir {
|
||||
break
|
||||
}
|
||||
dir = ndir
|
||||
continue
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var cfg Config
|
||||
_, err = toml.DecodeReader(f, &cfg)
|
||||
f.Close()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
out = append(out, cfg)
|
||||
ndir := filepath.Dir(dir)
|
||||
if ndir == dir {
|
||||
break
|
||||
}
|
||||
dir = ndir
|
||||
}
|
||||
out = append(out, DefaultConfig)
|
||||
if len(out) < 2 {
|
||||
return out, nil
|
||||
}
|
||||
for i := 0; i < len(out)/2; i++ {
|
||||
out[i], out[len(out)-1-i] = out[len(out)-1-i], out[i]
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func mergeConfigs(confs []Config) Config {
|
||||
if len(confs) == 0 {
|
||||
// This shouldn't happen because we always have at least a
|
||||
// default config.
|
||||
panic("trying to merge zero configs")
|
||||
}
|
||||
if len(confs) == 1 {
|
||||
return confs[0]
|
||||
}
|
||||
conf := confs[0]
|
||||
for _, oconf := range confs[1:] {
|
||||
conf = conf.Merge(oconf)
|
||||
}
|
||||
return conf
|
||||
}
|
||||
|
||||
func Load(dir string) (Config, error) {
|
||||
confs, err := parseConfigs(dir)
|
||||
if err != nil {
|
||||
return Config{}, err
|
||||
}
|
||||
conf := mergeConfigs(confs)
|
||||
|
||||
conf.Checks = normalizeList(conf.Checks)
|
||||
conf.Initialisms = normalizeList(conf.Initialisms)
|
||||
conf.DotImportWhitelist = normalizeList(conf.DotImportWhitelist)
|
||||
conf.HTTPStatusCodeWhitelist = normalizeList(conf.HTTPStatusCodeWhitelist)
|
||||
|
||||
return conf, nil
|
||||
}
|
112
vendor/honnef.co/go/tools/deprecated/stdlib.go
vendored
Normal file
112
vendor/honnef.co/go/tools/deprecated/stdlib.go
vendored
Normal file
@ -0,0 +1,112 @@
|
||||
package deprecated
|
||||
|
||||
// Deprecation describes when a standard-library identifier was
// deprecated and since when an alternative has existed. Versions are
// Go minor versions (e.g. 7 means Go 1.7); 0 means "since forever" for
// availability, or "no alternative" where the surrounding comment says so.
type Deprecation struct {
	DeprecatedSince           int
	AlternativeAvailableSince int
}

// Stdlib maps fully qualified standard-library identifiers — package
// path plus name, with method receivers in parentheses — to their
// deprecation information.
//
// Fix: the two Interface*Message entries near the end previously lacked
// the "syscall." qualifier that every other key carries, so lookups by
// fully qualified name could never match them.
var Stdlib = map[string]Deprecation{
	"image/jpeg.Reader": {4, 0},
	// FIXME(dh): AllowBinary isn't being detected as deprecated
	// because the comment has a newline right after "Deprecated:"
	"go/build.AllowBinary":                        {7, 7},
	"(archive/zip.FileHeader).CompressedSize":     {1, 1},
	"(archive/zip.FileHeader).UncompressedSize":   {1, 1},
	"(archive/zip.FileHeader).ModifiedTime":       {10, 10},
	"(archive/zip.FileHeader).ModifiedDate":       {10, 10},
	"(*archive/zip.FileHeader).ModTime":           {10, 10},
	"(*archive/zip.FileHeader).SetModTime":        {10, 10},
	"(go/doc.Package).Bugs":                       {1, 1},
	"os.SEEK_SET":                                 {7, 7},
	"os.SEEK_CUR":                                 {7, 7},
	"os.SEEK_END":                                 {7, 7},
	"(net.Dialer).Cancel":                         {7, 7},
	"runtime.CPUProfile":                          {9, 0},
	"compress/flate.ReadError":                    {6, 6},
	"compress/flate.WriteError":                   {6, 6},
	"path/filepath.HasPrefix":                     {0, 0},
	"(net/http.Transport).Dial":                   {7, 7},
	"(*net/http.Transport).CancelRequest":         {6, 5},
	"net/http.ErrWriteAfterFlush":                 {7, 0},
	"net/http.ErrHeaderTooLong":                   {8, 0},
	"net/http.ErrShortBody":                       {8, 0},
	"net/http.ErrMissingContentLength":            {8, 0},
	"net/http/httputil.ErrPersistEOF":             {0, 0},
	"net/http/httputil.ErrClosed":                 {0, 0},
	"net/http/httputil.ErrPipeline":               {0, 0},
	"net/http/httputil.ServerConn":                {0, 0},
	"net/http/httputil.NewServerConn":             {0, 0},
	"net/http/httputil.ClientConn":                {0, 0},
	"net/http/httputil.NewClientConn":             {0, 0},
	"net/http/httputil.NewProxyClientConn":        {0, 0},
	"(net/http.Request).Cancel":                   {7, 7},
	"(text/template/parse.PipeNode).Line":         {1, 1},
	"(text/template/parse.ActionNode).Line":       {1, 1},
	"(text/template/parse.BranchNode).Line":       {1, 1},
	"(text/template/parse.TemplateNode).Line":     {1, 1},
	"database/sql/driver.ColumnConverter":         {9, 9},
	"database/sql/driver.Execer":                  {8, 8},
	"database/sql/driver.Queryer":                 {8, 8},
	"(database/sql/driver.Conn).Begin":            {8, 8},
	"(database/sql/driver.Stmt).Exec":             {8, 8},
	"(database/sql/driver.Stmt).Query":            {8, 8},
	"syscall.StringByteSlice":                     {1, 1},
	"syscall.StringBytePtr":                       {1, 1},
	"syscall.StringSlicePtr":                      {1, 1},
	"syscall.StringToUTF16":                       {1, 1},
	"syscall.StringToUTF16Ptr":                    {1, 1},
	"(*regexp.Regexp).Copy":                       {12, 12},
	"(archive/tar.Header).Xattrs":                 {10, 10},
	"archive/tar.TypeRegA":                        {11, 1},
	"go/types.NewInterface":                       {11, 11},
	"(*go/types.Interface).Embedded":              {11, 11},
	"go/importer.For":                             {12, 12},
	"encoding/json.InvalidUTF8Error":              {2, 2},
	"encoding/json.UnmarshalFieldError":           {2, 2},
	"encoding/csv.ErrTrailingComma":               {2, 2},
	"(encoding/csv.Reader).TrailingComma":         {2, 2},
	"(net.Dialer).DualStack":                      {12, 12},
	"net/http.ErrUnexpectedTrailer":               {12, 12},
	"net/http.CloseNotifier":                      {11, 7},
	"net/http.ProtocolError":                      {8, 8},
	"(crypto/x509.CertificateRequest).Attributes": {5, 3},
	// This function has no alternative, but also no purpose.
	"(*crypto/rc4.Cipher).Reset":                     {12, 0},
	"(net/http/httptest.ResponseRecorder).HeaderMap": {11, 7},

	// All of these have been deprecated in favour of external libraries
	"syscall.AttachLsf":                     {7, 0},
	"syscall.DetachLsf":                     {7, 0},
	"syscall.LsfSocket":                     {7, 0},
	"syscall.SetLsfPromisc":                 {7, 0},
	"syscall.LsfJump":                       {7, 0},
	"syscall.LsfStmt":                       {7, 0},
	"syscall.BpfStmt":                       {7, 0},
	"syscall.BpfJump":                       {7, 0},
	"syscall.BpfBuflen":                     {7, 0},
	"syscall.SetBpfBuflen":                  {7, 0},
	"syscall.BpfDatalink":                   {7, 0},
	"syscall.SetBpfDatalink":                {7, 0},
	"syscall.SetBpfPromisc":                 {7, 0},
	"syscall.FlushBpf":                      {7, 0},
	"syscall.BpfInterface":                  {7, 0},
	"syscall.SetBpfInterface":               {7, 0},
	"syscall.BpfTimeout":                    {7, 0},
	"syscall.SetBpfTimeout":                 {7, 0},
	"syscall.BpfStats":                      {7, 0},
	"syscall.SetBpfImmediate":               {7, 0},
	"syscall.SetBpf":                        {7, 0},
	"syscall.CheckBpfVersion":               {7, 0},
	"syscall.BpfHeadercmpl":                 {7, 0},
	"syscall.SetBpfHeadercmpl":              {7, 0},
	"syscall.RouteRIB":                      {8, 0},
	"syscall.RoutingMessage":                {8, 0},
	"syscall.RouteMessage":                  {8, 0},
	"syscall.InterfaceMessage":              {8, 0},
	"syscall.InterfaceAddrMessage":          {8, 0},
	"syscall.ParseRoutingMessage":           {8, 0},
	"syscall.ParseRoutingSockaddr":          {8, 0},
	"syscall.InterfaceAnnounceMessage":      {7, 0},
	"syscall.InterfaceMulticastAddrMessage": {7, 0},
	"syscall.FormatMessage":                 {5, 0},
}
|
144
vendor/honnef.co/go/tools/facts/deprecated.go
vendored
Normal file
144
vendor/honnef.co/go/tools/facts/deprecated.go
vendored
Normal file
@ -0,0 +1,144 @@
|
||||
package facts
|
||||
|
||||
import (
|
||||
"go/ast"
|
||||
"go/token"
|
||||
"go/types"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/tools/go/analysis"
|
||||
)
|
||||
|
||||
type IsDeprecated struct{ Msg string }
|
||||
|
||||
func (*IsDeprecated) AFact() {}
|
||||
func (d *IsDeprecated) String() string { return "Deprecated: " + d.Msg }
|
||||
|
||||
type DeprecatedResult struct {
|
||||
Objects map[types.Object]*IsDeprecated
|
||||
Packages map[*types.Package]*IsDeprecated
|
||||
}
|
||||
|
||||
var Deprecated = &analysis.Analyzer{
|
||||
Name: "fact_deprecated",
|
||||
Doc: "Mark deprecated objects",
|
||||
Run: deprecated,
|
||||
FactTypes: []analysis.Fact{(*IsDeprecated)(nil)},
|
||||
ResultType: reflect.TypeOf(DeprecatedResult{}),
|
||||
}
|
||||
|
||||
// deprecated is the Run function for the Deprecated analyzer. It scans
// package and declaration doc comments for a trailing "Deprecated: "
// paragraph, exports an IsDeprecated fact for each match, and returns
// every fact (including imported ones) as a DeprecatedResult.
func deprecated(pass *analysis.Pass) (interface{}, error) {
	// names/docs accumulate across the AST walk below: a GenDecl's doc
	// comment applies to the specs it contains, so docs may hold the
	// decl's comment while names comes from an inner spec.
	var names []*ast.Ident

	// extractDeprecatedMessage returns the message following
	// "Deprecated: " when the LAST paragraph of any of the given
	// comment groups starts with that marker; "" otherwise.
	extractDeprecatedMessage := func(docs []*ast.CommentGroup) string {
		for _, doc := range docs {
			if doc == nil {
				continue
			}
			parts := strings.Split(doc.Text(), "\n\n")
			last := parts[len(parts)-1]
			if !strings.HasPrefix(last, "Deprecated: ") {
				continue
			}
			alt := last[len("Deprecated: "):]
			// Flatten the paragraph to a single line.
			alt = strings.Replace(alt, "\n", " ", -1)
			return alt
		}
		return ""
	}
	// doDocs exports an IsDeprecated fact for each named object when
	// the associated docs carry a deprecation message.
	doDocs := func(names []*ast.Ident, docs []*ast.CommentGroup) {
		alt := extractDeprecatedMessage(docs)
		if alt == "" {
			return
		}

		for _, name := range names {
			obj := pass.TypesInfo.ObjectOf(name)
			pass.ExportObjectFact(obj, &IsDeprecated{alt})
		}
	}

	// First, check the package-level doc comments.
	var docs []*ast.CommentGroup
	for _, f := range pass.Files {
		docs = append(docs, f.Doc)
	}
	if alt := extractDeprecatedMessage(docs); alt != "" {
		// Don't mark package syscall as deprecated, even though
		// it is. A lot of people still use it for simple
		// constants like SIGKILL, and I am not comfortable
		// telling them to use x/sys for that.
		if pass.Pkg.Path() != "syscall" {
			pass.ExportPackageFact(&IsDeprecated{alt})
		}
	}

	// Then walk every file for deprecated declarations. docs is reused
	// (reset, not reallocated) across nodes.
	docs = docs[:0]
	for _, f := range pass.Files {
		fn := func(node ast.Node) bool {
			if node == nil {
				return true
			}
			// ret is whether Inspect should descend into this node's
			// children after the fact has been recorded.
			var ret bool
			switch node := node.(type) {
			case *ast.GenDecl:
				switch node.Tok {
				case token.TYPE, token.CONST, token.VAR:
					// Remember the decl's doc comment; the names come
					// from the specs visited next.
					docs = append(docs, node.Doc)
					return true
				default:
					// e.g. import declarations are not interesting.
					return false
				}
			case *ast.FuncDecl:
				docs = append(docs, node.Doc)
				names = []*ast.Ident{node.Name}
				ret = false
			case *ast.TypeSpec:
				docs = append(docs, node.Doc)
				names = []*ast.Ident{node.Name}
				// Descend so struct/interface members get checked too.
				ret = true
			case *ast.ValueSpec:
				docs = append(docs, node.Doc)
				names = node.Names
				ret = false
			case *ast.File:
				return true
			case *ast.StructType:
				// Fields carry their own doc comments, independent of
				// the accumulated decl docs.
				for _, field := range node.Fields.List {
					doDocs(field.Names, []*ast.CommentGroup{field.Doc})
				}
				return false
			case *ast.InterfaceType:
				for _, field := range node.Methods.List {
					doDocs(field.Names, []*ast.CommentGroup{field.Doc})
				}
				return false
			default:
				return false
			}
			if len(names) == 0 || len(docs) == 0 {
				return ret
			}
			doDocs(names, docs)

			// Reset the accumulators for the next declaration.
			docs = docs[:0]
			names = nil
			return ret
		}
		ast.Inspect(f, fn)
	}

	out := DeprecatedResult{
		Objects:  map[types.Object]*IsDeprecated{},
		Packages: map[*types.Package]*IsDeprecated{},
	}

	// Collect all facts, including those exported by dependencies.
	for _, fact := range pass.AllObjectFacts() {
		out.Objects[fact.Object] = fact.Fact.(*IsDeprecated)
	}
	for _, fact := range pass.AllPackageFacts() {
		out.Packages[fact.Package] = fact.Fact.(*IsDeprecated)
	}

	return out, nil
}
|
86
vendor/honnef.co/go/tools/facts/generated.go
vendored
Normal file
86
vendor/honnef.co/go/tools/facts/generated.go
vendored
Normal file
@ -0,0 +1,86 @@
|
||||
package facts
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"io"
|
||||
"os"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/tools/go/analysis"
|
||||
)
|
||||
|
||||
type Generator int
|
||||
|
||||
// A list of known generators we can detect
|
||||
const (
|
||||
Unknown Generator = iota
|
||||
Goyacc
|
||||
Cgo
|
||||
Stringer
|
||||
)
|
||||
|
||||
var (
|
||||
// used by cgo before Go 1.11
|
||||
oldCgo = []byte("// Created by cgo - DO NOT EDIT")
|
||||
prefix = []byte("// Code generated ")
|
||||
suffix = []byte(" DO NOT EDIT.")
|
||||
nl = []byte("\n")
|
||||
crnl = []byte("\r\n")
|
||||
)
|
||||
|
||||
func isGenerated(path string) (Generator, bool) {
|
||||
f, err := os.Open(path)
|
||||
if err != nil {
|
||||
return 0, false
|
||||
}
|
||||
defer f.Close()
|
||||
br := bufio.NewReader(f)
|
||||
for {
|
||||
s, err := br.ReadBytes('\n')
|
||||
if err != nil && err != io.EOF {
|
||||
return 0, false
|
||||
}
|
||||
s = bytes.TrimSuffix(s, crnl)
|
||||
s = bytes.TrimSuffix(s, nl)
|
||||
if bytes.HasPrefix(s, prefix) && bytes.HasSuffix(s, suffix) {
|
||||
text := string(s[len(prefix) : len(s)-len(suffix)])
|
||||
switch text {
|
||||
case "by goyacc.":
|
||||
return Goyacc, true
|
||||
case "by cmd/cgo;":
|
||||
return Cgo, true
|
||||
}
|
||||
if strings.HasPrefix(text, `by "stringer `) {
|
||||
return Stringer, true
|
||||
}
|
||||
return Unknown, true
|
||||
}
|
||||
if bytes.Equal(s, oldCgo) {
|
||||
return Cgo, true
|
||||
}
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
}
|
||||
return 0, false
|
||||
}
|
||||
|
||||
// Generated is an analyzer that maps the on-disk file name of each
// code-generated file in the pass to the Generator that produced it.
var Generated = &analysis.Analyzer{
	Name: "isgenerated",
	Doc:  "annotate file names that have been code generated",
	Run: func(pass *analysis.Pass) (interface{}, error) {
		m := map[string]Generator{}
		for _, f := range pass.Files {
			// Use the unadjusted position (ignoring //line
			// directives) so path names the actual file on disk.
			path := pass.Fset.PositionFor(f.Pos(), false).Filename
			g, ok := isGenerated(path)
			if ok {
				m[path] = g
			}
		}
		return m, nil
	},
	RunDespiteErrors: true,
	ResultType:       reflect.TypeOf(map[string]Generator{}),
}
|
175
vendor/honnef.co/go/tools/facts/purity.go
vendored
Normal file
175
vendor/honnef.co/go/tools/facts/purity.go
vendored
Normal file
@ -0,0 +1,175 @@
|
||||
package facts
|
||||
|
||||
import (
|
||||
"go/token"
|
||||
"go/types"
|
||||
"reflect"
|
||||
|
||||
"golang.org/x/tools/go/analysis"
|
||||
"honnef.co/go/tools/functions"
|
||||
"honnef.co/go/tools/internal/passes/buildssa"
|
||||
"honnef.co/go/tools/ssa"
|
||||
)
|
||||
|
||||
// IsPure is a fact marking a function as free of observable side
// effects, as determined by the Purity analyzer.
type IsPure struct{}

func (*IsPure) AFact() {}

// String implements fmt.Stringer for diagnostic output.
func (d *IsPure) String() string { return "is pure" }

// PurityResult is the result type of the Purity analyzer: every
// function known to be pure, mapped to its IsPure fact.
type PurityResult map[*types.Func]*IsPure

// Purity is an analyzer that exports an IsPure fact for each function
// it can prove side-effect free. It requires SSA form from buildssa.
var Purity = &analysis.Analyzer{
	Name:       "fact_purity",
	Doc:        "Mark pure functions",
	Run:        purity,
	Requires:   []*analysis.Analyzer{buildssa.Analyzer},
	FactTypes:  []analysis.Fact{(*IsPure)(nil)},
	ResultType: reflect.TypeOf(PurityResult{}),
}

// pureStdlib lists standard-library functions (by FullName) that are
// assumed pure without analysis.
var pureStdlib = map[string]struct{}{
	"errors.New":                      {},
	"fmt.Errorf":                      {},
	"fmt.Sprintf":                     {},
	"fmt.Sprint":                      {},
	"sort.Reverse":                    {},
	"strings.Map":                     {},
	"strings.Repeat":                  {},
	"strings.Replace":                 {},
	"strings.Title":                   {},
	"strings.ToLower":                 {},
	"strings.ToLowerSpecial":          {},
	"strings.ToTitle":                 {},
	"strings.ToTitleSpecial":          {},
	"strings.ToUpper":                 {},
	"strings.ToUpperSpecial":          {},
	"strings.Trim":                    {},
	"strings.TrimFunc":                {},
	"strings.TrimLeft":                {},
	"strings.TrimLeftFunc":            {},
	"strings.TrimPrefix":              {},
	"strings.TrimRight":               {},
	"strings.TrimRightFunc":           {},
	"strings.TrimSpace":               {},
	"strings.TrimSuffix":              {},
	"(*net/http.Request).WithContext": {},
}
|
||||
|
||||
// purity implements the Purity analyzer. It conservatively marks a
// function pure when: it has a declared object (no closures), only
// basic-typed parameters, at least one result, and its SSA body
// contains no stores, sends, selects, goroutines, panics, field
// address-taking or pointer loads, and calls only pure functions or
// the builtins len/cap/make/new. Purity is propagated recursively
// through static callees and exported as IsPure facts.
func purity(pass *analysis.Pass) (interface{}, error) {
	// seen breaks recursion cycles in the callee graph.
	seen := map[*ssa.Function]struct{}{}
	ssapkg := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).Pkg
	var check func(ssafn *ssa.Function) (ret bool)
	check = func(ssafn *ssa.Function) (ret bool) {
		if ssafn.Object() == nil {
			// TODO(dh): support closures
			return false
		}
		// A fact exported by another package's pass settles it.
		if pass.ImportObjectFact(ssafn.Object(), new(IsPure)) {
			return true
		}
		if ssafn.Pkg != ssapkg {
			// Function is in another package but wasn't marked as
			// pure, ergo it isn't pure
			return false
		}
		// Break recursion
		if _, ok := seen[ssafn]; ok {
			return false
		}

		seen[ssafn] = struct{}{}
		// Export the fact on success, whichever return path fires.
		defer func() {
			if ret {
				pass.ExportObjectFact(ssafn.Object(), &IsPure{})
			}
		}()

		// Stubs (empty bodies, bare returns/panics) are not
		// considered pure: their real behavior is unknown.
		if functions.IsStub(ssafn) {
			return false
		}

		if _, ok := pureStdlib[ssafn.Object().(*types.Func).FullName()]; ok {
			return true
		}

		if ssafn.Signature.Results().Len() == 0 {
			// A function with no return values is empty or is doing some
			// work we cannot see (for example because of build tags);
			// don't consider it pure.
			return false
		}

		// Non-basic parameters (pointers, slices, maps, ...) could
		// let the function observe or mutate shared state.
		for _, param := range ssafn.Params {
			if _, ok := param.Type().Underlying().(*types.Basic); !ok {
				return false
			}
		}

		if ssafn.Blocks == nil {
			return false
		}
		// checkCall reports whether a call instruction is compatible
		// with purity: no interface dispatch, static callees must
		// themselves be pure (self-recursion is allowed), and only
		// the allocation/size builtins are permitted.
		checkCall := func(common *ssa.CallCommon) bool {
			if common.IsInvoke() {
				return false
			}
			builtin, ok := common.Value.(*ssa.Builtin)
			if !ok {
				if common.StaticCallee() != ssafn {
					if common.StaticCallee() == nil {
						return false
					}
					if !check(common.StaticCallee()) {
						return false
					}
				}
			} else {
				switch builtin.Name() {
				case "len", "cap", "make", "new":
				default:
					return false
				}
			}
			return true
		}
		for _, b := range ssafn.Blocks {
			for _, ins := range b.Instrs {
				switch ins := ins.(type) {
				case *ssa.Call:
					if !checkCall(ins.Common()) {
						return false
					}
				case *ssa.Defer:
					if !checkCall(&ins.Call) {
						return false
					}
				case *ssa.Select:
					return false
				case *ssa.Send:
					return false
				case *ssa.Go:
					return false
				case *ssa.Panic:
					return false
				case *ssa.Store:
					return false
				case *ssa.FieldAddr:
					return false
				case *ssa.UnOp:
					// MUL here is a pointer load, AND an address-of;
					// both can observe external state.
					if ins.Op == token.MUL || ins.Op == token.AND {
						return false
					}
				}
			}
		}
		return true
	}
	for _, ssafn := range pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).SrcFuncs {
		check(ssafn)
	}

	out := PurityResult{}
	for _, fact := range pass.AllObjectFacts() {
		out[fact.Object.(*types.Func)] = fact.Fact.(*IsPure)
	}
	return out, nil
}
|
24
vendor/honnef.co/go/tools/facts/token.go
vendored
Normal file
24
vendor/honnef.co/go/tools/facts/token.go
vendored
Normal file
@ -0,0 +1,24 @@
|
||||
package facts
|
||||
|
||||
import (
|
||||
"go/ast"
|
||||
"go/token"
|
||||
"reflect"
|
||||
|
||||
"golang.org/x/tools/go/analysis"
|
||||
)
|
||||
|
||||
var TokenFile = &analysis.Analyzer{
|
||||
Name: "tokenfileanalyzer",
|
||||
Doc: "creates a mapping of *token.File to *ast.File",
|
||||
Run: func(pass *analysis.Pass) (interface{}, error) {
|
||||
m := map[*token.File]*ast.File{}
|
||||
for _, af := range pass.Files {
|
||||
tf := pass.Fset.File(af.Pos())
|
||||
m[tf] = af
|
||||
}
|
||||
return m, nil
|
||||
},
|
||||
RunDespiteErrors: true,
|
||||
ResultType: reflect.TypeOf(map[*token.File]*ast.File{}),
|
||||
}
|
54
vendor/honnef.co/go/tools/functions/loops.go
vendored
Normal file
54
vendor/honnef.co/go/tools/functions/loops.go
vendored
Normal file
@ -0,0 +1,54 @@
|
||||
package functions
|
||||
|
||||
import "honnef.co/go/tools/ssa"
|
||||
|
||||
// Loop is the set of basic blocks forming one natural loop of a
// function's control-flow graph.
type Loop struct{ ssa.BlockSet }
|
||||
|
||||
// FindLoops returns the natural loops of fn, one Loop per back edge
// in the dominator tree. It returns nil for functions without a body.
func FindLoops(fn *ssa.Function) []Loop {
	if fn.Blocks == nil {
		return nil
	}
	tree := fn.DomPreorder()
	var sets []Loop
	for _, h := range tree {
		for _, n := range h.Preds {
			if !h.Dominates(n) {
				continue
			}
			// n is a back-edge to h
			// h is the loop header
			if n == h {
				// Self-loop: the loop body is the single block.
				set := Loop{}
				set.Add(n)
				sets = append(sets, set)
				continue
			}
			// The loop body is h, n, and everything that reaches n
			// without going through h.
			set := Loop{}
			set.Add(h)
			set.Add(n)
			for _, b := range allPredsBut(n, h, nil) {
				set.Add(b)
			}
			sets = append(sets, set)
		}
	}
	return sets
}
|
||||
|
||||
func allPredsBut(b, but *ssa.BasicBlock, list []*ssa.BasicBlock) []*ssa.BasicBlock {
|
||||
outer:
|
||||
for _, pred := range b.Preds {
|
||||
if pred == but {
|
||||
continue
|
||||
}
|
||||
for _, p := range list {
|
||||
// TODO improve big-o complexity of this function
|
||||
if pred == p {
|
||||
continue outer
|
||||
}
|
||||
}
|
||||
list = append(list, pred)
|
||||
list = allPredsBut(pred, but, list)
|
||||
}
|
||||
return list
|
||||
}
|
46
vendor/honnef.co/go/tools/functions/pure.go
vendored
Normal file
46
vendor/honnef.co/go/tools/functions/pure.go
vendored
Normal file
@ -0,0 +1,46 @@
|
||||
package functions
|
||||
|
||||
import (
|
||||
"honnef.co/go/tools/ssa"
|
||||
)
|
||||
|
||||
func filterDebug(instr []ssa.Instruction) []ssa.Instruction {
|
||||
var out []ssa.Instruction
|
||||
for _, ins := range instr {
|
||||
if _, ok := ins.(*ssa.DebugRef); !ok {
|
||||
out = append(out, ins)
|
||||
}
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// IsStub reports whether a function is a stub. A function is
|
||||
// considered a stub if it has no instructions or exactly one
|
||||
// instruction, which must be either returning only constant values or
|
||||
// a panic.
|
||||
func IsStub(fn *ssa.Function) bool {
|
||||
if len(fn.Blocks) == 0 {
|
||||
return true
|
||||
}
|
||||
if len(fn.Blocks) > 1 {
|
||||
return false
|
||||
}
|
||||
instrs := filterDebug(fn.Blocks[0].Instrs)
|
||||
if len(instrs) != 1 {
|
||||
return false
|
||||
}
|
||||
|
||||
switch instrs[0].(type) {
|
||||
case *ssa.Return:
|
||||
// Since this is the only instruction, the return value must
|
||||
// be a constant. We consider all constants as stubs, not just
|
||||
// the zero value. This does not, unfortunately, cover zero
|
||||
// initialised structs, as these cause additional
|
||||
// instructions.
|
||||
return true
|
||||
case *ssa.Panic:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
24
vendor/honnef.co/go/tools/functions/terminates.go
vendored
Normal file
24
vendor/honnef.co/go/tools/functions/terminates.go
vendored
Normal file
@ -0,0 +1,24 @@
|
||||
package functions
|
||||
|
||||
import "honnef.co/go/tools/ssa"
|
||||
|
||||
// Terminates reports whether fn is supposed to return, that is if it
|
||||
// has at least one theoretic path that returns from the function.
|
||||
// Explicit panics do not count as terminating.
|
||||
func Terminates(fn *ssa.Function) bool {
|
||||
if fn.Blocks == nil {
|
||||
// assuming that a function terminates is the conservative
|
||||
// choice
|
||||
return true
|
||||
}
|
||||
|
||||
for _, block := range fn.Blocks {
|
||||
if len(block.Instrs) == 0 {
|
||||
continue
|
||||
}
|
||||
if _, ok := block.Instrs[len(block.Instrs)-1].(*ssa.Return); ok {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
27
vendor/honnef.co/go/tools/gcsizes/LICENSE
vendored
Normal file
27
vendor/honnef.co/go/tools/gcsizes/LICENSE
vendored
Normal file
@ -0,0 +1,27 @@
|
||||
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
46
vendor/honnef.co/go/tools/go/types/typeutil/callee.go
vendored
Normal file
46
vendor/honnef.co/go/tools/go/types/typeutil/callee.go
vendored
Normal file
@ -0,0 +1,46 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package typeutil
|
||||
|
||||
import (
|
||||
"go/ast"
|
||||
"go/types"
|
||||
|
||||
"golang.org/x/tools/go/ast/astutil"
|
||||
)
|
||||
|
||||
// Callee returns the named target of a function call, if any:
|
||||
// a function, method, builtin, or variable.
|
||||
func Callee(info *types.Info, call *ast.CallExpr) types.Object {
|
||||
var obj types.Object
|
||||
switch fun := astutil.Unparen(call.Fun).(type) {
|
||||
case *ast.Ident:
|
||||
obj = info.Uses[fun] // type, var, builtin, or declared func
|
||||
case *ast.SelectorExpr:
|
||||
if sel, ok := info.Selections[fun]; ok {
|
||||
obj = sel.Obj() // method or field
|
||||
} else {
|
||||
obj = info.Uses[fun.Sel] // qualified identifier?
|
||||
}
|
||||
}
|
||||
if _, ok := obj.(*types.TypeName); ok {
|
||||
return nil // T(x) is a conversion, not a call
|
||||
}
|
||||
return obj
|
||||
}
|
||||
|
||||
// StaticCallee returns the target (function or method) of a static
|
||||
// function call, if any. It returns nil for calls to builtins.
|
||||
func StaticCallee(info *types.Info, call *ast.CallExpr) *types.Func {
|
||||
if f, ok := Callee(info, call).(*types.Func); ok && !interfaceMethod(f) {
|
||||
return f
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func interfaceMethod(f *types.Func) bool {
|
||||
recv := f.Type().(*types.Signature).Recv()
|
||||
return recv != nil && types.IsInterface(recv.Type())
|
||||
}
|
75
vendor/honnef.co/go/tools/go/types/typeutil/identical.go
vendored
Normal file
75
vendor/honnef.co/go/tools/go/types/typeutil/identical.go
vendored
Normal file
@ -0,0 +1,75 @@
|
||||
package typeutil
|
||||
|
||||
import (
|
||||
"go/types"
|
||||
)
|
||||
|
||||
// Identical reports whether x and y are identical types.
|
||||
// Unlike types.Identical, receivers of Signature types are not ignored.
|
||||
// Unlike types.Identical, interfaces are compared via pointer equality (except for the empty interface, which gets deduplicated).
|
||||
// Unlike types.Identical, structs are compared via pointer equality.
|
||||
func Identical(x, y types.Type) (ret bool) {
|
||||
if !types.Identical(x, y) {
|
||||
return false
|
||||
}
|
||||
|
||||
switch x := x.(type) {
|
||||
case *types.Struct:
|
||||
y, ok := y.(*types.Struct)
|
||||
if !ok {
|
||||
// should be impossible
|
||||
return true
|
||||
}
|
||||
return x == y
|
||||
case *types.Interface:
|
||||
// The issue with interfaces, typeutil.Map and types.Identical
|
||||
//
|
||||
// types.Identical, when comparing two interfaces, only looks at the set
|
||||
// of all methods, not differentiating between implicit (embedded) and
|
||||
// explicit methods.
|
||||
//
|
||||
// When we see the following two types, in source order
|
||||
//
|
||||
// type I1 interface { foo() }
|
||||
// type I2 interface { I1 }
|
||||
//
|
||||
// then we will first correctly process I1 and its underlying type. When
|
||||
// we get to I2, we will see that its underlying type is identical to
|
||||
// that of I1 and not process it again. This, however, means that we will
|
||||
// not record the fact that I2 embeds I1. If only I2 is reachable via the
|
||||
// graph root, then I1 will not be considered used.
|
||||
//
|
||||
// We choose to be lazy and compare interfaces by their
|
||||
// pointers. This will obviously miss identical interfaces,
|
||||
// but this only has a runtime cost, it doesn't affect
|
||||
// correctness.
|
||||
y, ok := y.(*types.Interface)
|
||||
if !ok {
|
||||
// should be impossible
|
||||
return true
|
||||
}
|
||||
if x.NumEmbeddeds() == 0 &&
|
||||
y.NumEmbeddeds() == 0 &&
|
||||
x.NumMethods() == 0 &&
|
||||
y.NumMethods() == 0 {
|
||||
// all truly empty interfaces are the same
|
||||
return true
|
||||
}
|
||||
return x == y
|
||||
case *types.Signature:
|
||||
y, ok := y.(*types.Signature)
|
||||
if !ok {
|
||||
// should be impossible
|
||||
return true
|
||||
}
|
||||
if x.Recv() == y.Recv() {
|
||||
return true
|
||||
}
|
||||
if x.Recv() == nil || y.Recv() == nil {
|
||||
return false
|
||||
}
|
||||
return Identical(x.Recv().Type(), y.Recv().Type())
|
||||
default:
|
||||
return true
|
||||
}
|
||||
}
|
31
vendor/honnef.co/go/tools/go/types/typeutil/imports.go
vendored
Normal file
31
vendor/honnef.co/go/tools/go/types/typeutil/imports.go
vendored
Normal file
@ -0,0 +1,31 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package typeutil
|
||||
|
||||
import "go/types"
|
||||
|
||||
// Dependencies returns all dependencies of the specified packages.
|
||||
//
|
||||
// Dependent packages appear in topological order: if package P imports
|
||||
// package Q, Q appears earlier than P in the result.
|
||||
// The algorithm follows import statements in the order they
|
||||
// appear in the source code, so the result is a total order.
|
||||
//
|
||||
func Dependencies(pkgs ...*types.Package) []*types.Package {
|
||||
var result []*types.Package
|
||||
seen := make(map[*types.Package]bool)
|
||||
var visit func(pkgs []*types.Package)
|
||||
visit = func(pkgs []*types.Package) {
|
||||
for _, p := range pkgs {
|
||||
if !seen[p] {
|
||||
seen[p] = true
|
||||
visit(p.Imports())
|
||||
result = append(result, p)
|
||||
}
|
||||
}
|
||||
}
|
||||
visit(pkgs)
|
||||
return result
|
||||
}
|
319
vendor/honnef.co/go/tools/go/types/typeutil/map.go
vendored
Normal file
319
vendor/honnef.co/go/tools/go/types/typeutil/map.go
vendored
Normal file
@ -0,0 +1,319 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package typeutil defines various utilities for types, such as Map,
|
||||
// a mapping from types.Type to interface{} values.
|
||||
package typeutil
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"go/types"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// Map is a hash-table-based mapping from types (types.Type) to
// arbitrary interface{} values. The concrete types that implement
// the Type interface are pointers. Since they are not canonicalized,
// == cannot be used to check for equivalence, and thus we cannot
// simply use a Go map.
//
// Just as with map[K]V, a nil *Map is a valid empty map.
//
// Not thread-safe.
//
// This fork handles Signatures correctly, respecting method
// receivers. Furthermore, it doesn't deduplicate interfaces or
// structs. Interfaces aren't deduplicated as not to conflate implicit
// and explicit methods. Structs aren't deduplicated because we track
// fields of each type separately.
//
type Map struct {
	hasher Hasher             // shared by many Maps
	table  map[uint32][]entry // maps hash to bucket; entry.key==nil means unused
	length int                // number of map entries
}
|
||||
|
||||
// entry is an entry (key/value association) in a hash bucket.
// A zero entry (key == nil) marks a deleted slot.
type entry struct {
	key   types.Type
	value interface{}
}
|
||||
|
||||
// SetHasher sets the hasher used by Map.
//
// All Hashers are functionally equivalent but contain internal state
// used to cache the results of hashing previously seen types.
//
// A single Hasher created by MakeHasher() may be shared among many
// Maps. This is recommended if the instances have many keys in
// common, as it will amortize the cost of hash computation.
//
// A Hasher may grow without bound as new types are seen. Even when a
// type is deleted from the map, the Hasher never shrinks, since other
// types in the map may reference the deleted type indirectly.
//
// Hashers are not thread-safe, and read-only operations such as
// Map.Lookup require updates to the hasher, so a full Mutex lock (not a
// read-lock) is require around all Map operations if a shared
// hasher is accessed from multiple threads.
//
// If SetHasher is not called, the Map will create a private hasher at
// the first call to Insert.
//
func (m *Map) SetHasher(hasher Hasher) {
	m.hasher = hasher
}
|
||||
|
||||
// Delete removes the entry with the given key, if any.
|
||||
// It returns true if the entry was found.
|
||||
//
|
||||
func (m *Map) Delete(key types.Type) bool {
|
||||
if m != nil && m.table != nil {
|
||||
hash := m.hasher.Hash(key)
|
||||
bucket := m.table[hash]
|
||||
for i, e := range bucket {
|
||||
if e.key != nil && Identical(key, e.key) {
|
||||
// We can't compact the bucket as it
|
||||
// would disturb iterators.
|
||||
bucket[i] = entry{}
|
||||
m.length--
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// At returns the map entry for the given key.
|
||||
// The result is nil if the entry is not present.
|
||||
//
|
||||
func (m *Map) At(key types.Type) interface{} {
|
||||
if m != nil && m.table != nil {
|
||||
for _, e := range m.table[m.hasher.Hash(key)] {
|
||||
if e.key != nil && Identical(key, e.key) {
|
||||
return e.value
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Set sets the map entry for key to val,
|
||||
// and returns the previous entry, if any.
|
||||
func (m *Map) Set(key types.Type, value interface{}) (prev interface{}) {
|
||||
if m.table != nil {
|
||||
hash := m.hasher.Hash(key)
|
||||
bucket := m.table[hash]
|
||||
var hole *entry
|
||||
for i, e := range bucket {
|
||||
if e.key == nil {
|
||||
hole = &bucket[i]
|
||||
} else if Identical(key, e.key) {
|
||||
prev = e.value
|
||||
bucket[i].value = value
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if hole != nil {
|
||||
*hole = entry{key, value} // overwrite deleted entry
|
||||
} else {
|
||||
m.table[hash] = append(bucket, entry{key, value})
|
||||
}
|
||||
} else {
|
||||
if m.hasher.memo == nil {
|
||||
m.hasher = MakeHasher()
|
||||
}
|
||||
hash := m.hasher.Hash(key)
|
||||
m.table = map[uint32][]entry{hash: {entry{key, value}}}
|
||||
}
|
||||
|
||||
m.length++
|
||||
return
|
||||
}
|
||||
|
||||
// Len returns the number of map entries.
// A nil *Map has length zero.
func (m *Map) Len() int {
	if m != nil {
		return m.length
	}
	return 0
}
|
||||
|
||||
// Iterate calls function f on each entry in the map in unspecified order.
|
||||
//
|
||||
// If f should mutate the map, Iterate provides the same guarantees as
|
||||
// Go maps: if f deletes a map entry that Iterate has not yet reached,
|
||||
// f will not be invoked for it, but if f inserts a map entry that
|
||||
// Iterate has not yet reached, whether or not f will be invoked for
|
||||
// it is unspecified.
|
||||
//
|
||||
func (m *Map) Iterate(f func(key types.Type, value interface{})) {
|
||||
if m != nil {
|
||||
for _, bucket := range m.table {
|
||||
for _, e := range bucket {
|
||||
if e.key != nil {
|
||||
f(e.key, e.value)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Keys returns a new slice containing the set of map keys.
|
||||
// The order is unspecified.
|
||||
func (m *Map) Keys() []types.Type {
|
||||
keys := make([]types.Type, 0, m.Len())
|
||||
m.Iterate(func(key types.Type, _ interface{}) {
|
||||
keys = append(keys, key)
|
||||
})
|
||||
return keys
|
||||
}
|
||||
|
||||
func (m *Map) toString(values bool) string {
|
||||
if m == nil {
|
||||
return "{}"
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
fmt.Fprint(&buf, "{")
|
||||
sep := ""
|
||||
m.Iterate(func(key types.Type, value interface{}) {
|
||||
fmt.Fprint(&buf, sep)
|
||||
sep = ", "
|
||||
fmt.Fprint(&buf, key)
|
||||
if values {
|
||||
fmt.Fprintf(&buf, ": %q", value)
|
||||
}
|
||||
})
|
||||
fmt.Fprint(&buf, "}")
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// String returns a string representation of the map's entries.
// Values are printed using fmt.Sprintf("%v", v).
// Order is unspecified.
func (m *Map) String() string {
	return m.toString(true)
}
|
||||
|
||||
// KeysString returns a string representation of the map's key set.
// Order is unspecified.
func (m *Map) KeysString() string {
	return m.toString(false)
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////
|
||||
// Hasher
|
||||
|
||||
// A Hasher maps each type to its hash value.
// For efficiency, a hasher uses memoization; thus its memory
// footprint grows monotonically over time.
// Hashers are not thread-safe.
// Hashers have reference semantics.
// Call MakeHasher to create a Hasher.
type Hasher struct {
	// memo caches the hash of every type seen so far.
	memo map[types.Type]uint32
}
|
||||
|
||||
// MakeHasher returns a new Hasher instance with an empty memo table.
func MakeHasher() Hasher {
	return Hasher{make(map[types.Type]uint32)}
}
|
||||
|
||||
// Hash computes a hash value for the given type t such that
|
||||
// Identical(t, t') => Hash(t) == Hash(t').
|
||||
func (h Hasher) Hash(t types.Type) uint32 {
|
||||
hash, ok := h.memo[t]
|
||||
if !ok {
|
||||
hash = h.hashFor(t)
|
||||
h.memo[t] = hash
|
||||
}
|
||||
return hash
|
||||
}
|
||||
|
||||
// hashString computes the Fowler–Noll–Vo hash of s (with a zero
// offset basis).
func hashString(s string) uint32 {
	var h uint32
	for _, b := range []byte(s) {
		h = (h ^ uint32(b)) * 16777619
	}
	return h
}
|
||||
|
||||
// hashFor computes the hash of t. The numeric offsets and multipliers
// are arbitrary primes used to mix the components of composite types.
// It panics on a Type concrete type it does not know about.
func (h Hasher) hashFor(t types.Type) uint32 {
	// See Identical for rationale.
	switch t := t.(type) {
	case *types.Basic:
		return uint32(t.Kind())

	case *types.Array:
		return 9043 + 2*uint32(t.Len()) + 3*h.Hash(t.Elem())

	case *types.Slice:
		return 9049 + 2*h.Hash(t.Elem())

	case *types.Struct:
		var hash uint32 = 9059
		for i, n := 0, t.NumFields(); i < n; i++ {
			f := t.Field(i)
			if f.Anonymous() {
				hash += 8861
			}
			hash += hashString(t.Tag(i))
			hash += hashString(f.Name()) // (ignore f.Pkg)
			hash += h.Hash(f.Type())
		}
		return hash

	case *types.Pointer:
		return 9067 + 2*h.Hash(t.Elem())

	case *types.Signature:
		var hash uint32 = 9091
		if t.Variadic() {
			hash *= 8863
		}
		return hash + 3*h.hashTuple(t.Params()) + 5*h.hashTuple(t.Results())

	case *types.Interface:
		var hash uint32 = 9103
		for i, n := 0, t.NumMethods(); i < n; i++ {
			// See go/types.identicalMethods for rationale.
			// Method order is not significant.
			// Ignore m.Pkg().
			m := t.Method(i)
			hash += 3*hashString(m.Name()) + 5*h.Hash(m.Type())
		}
		return hash

	case *types.Map:
		return 9109 + 2*h.Hash(t.Key()) + 3*h.Hash(t.Elem())

	case *types.Chan:
		return 9127 + 2*uint32(t.Dir()) + 3*h.Hash(t.Elem())

	case *types.Named:
		// Not safe with a copying GC; objects may move.
		return uint32(reflect.ValueOf(t.Obj()).Pointer())

	case *types.Tuple:
		return h.hashTuple(t)
	}
	panic(t)
}
|
||||
|
||||
func (h Hasher) hashTuple(tuple *types.Tuple) uint32 {
|
||||
// See go/types.identicalTypes for rationale.
|
||||
n := tuple.Len()
|
||||
var hash uint32 = 9137 + 2*uint32(n)
|
||||
for i := 0; i < n; i++ {
|
||||
hash += 3 * h.Hash(tuple.At(i).Type())
|
||||
}
|
||||
return hash
|
||||
}
|
72
vendor/honnef.co/go/tools/go/types/typeutil/methodsetcache.go
vendored
Normal file
72
vendor/honnef.co/go/tools/go/types/typeutil/methodsetcache.go
vendored
Normal file
@ -0,0 +1,72 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// This file implements a cache of method sets.
|
||||
|
||||
package typeutil
|
||||
|
||||
import (
|
||||
"go/types"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// A MethodSetCache records the method set of each type T for which
// MethodSet(T) is called so that repeat queries are fast.
// The zero value is a ready-to-use cache instance.
type MethodSetCache struct {
	mu     sync.Mutex
	named  map[*types.Named]struct{ value, pointer *types.MethodSet } // method sets for named N and *N
	others map[types.Type]*types.MethodSet                            // all other types
}
|
||||
|
||||
// MethodSet returns the method set of type T. It is thread-safe.
|
||||
//
|
||||
// If cache is nil, this function is equivalent to types.NewMethodSet(T).
|
||||
// Utility functions can thus expose an optional *MethodSetCache
|
||||
// parameter to clients that care about performance.
|
||||
//
|
||||
func (cache *MethodSetCache) MethodSet(T types.Type) *types.MethodSet {
|
||||
if cache == nil {
|
||||
return types.NewMethodSet(T)
|
||||
}
|
||||
cache.mu.Lock()
|
||||
defer cache.mu.Unlock()
|
||||
|
||||
switch T := T.(type) {
|
||||
case *types.Named:
|
||||
return cache.lookupNamed(T).value
|
||||
|
||||
case *types.Pointer:
|
||||
if N, ok := T.Elem().(*types.Named); ok {
|
||||
return cache.lookupNamed(N).pointer
|
||||
}
|
||||
}
|
||||
|
||||
// all other types
|
||||
// (The map uses pointer equivalence, not type identity.)
|
||||
mset := cache.others[T]
|
||||
if mset == nil {
|
||||
mset = types.NewMethodSet(T)
|
||||
if cache.others == nil {
|
||||
cache.others = make(map[types.Type]*types.MethodSet)
|
||||
}
|
||||
cache.others[T] = mset
|
||||
}
|
||||
return mset
|
||||
}
|
||||
|
||||
func (cache *MethodSetCache) lookupNamed(named *types.Named) struct{ value, pointer *types.MethodSet } {
|
||||
if cache.named == nil {
|
||||
cache.named = make(map[*types.Named]struct{ value, pointer *types.MethodSet })
|
||||
}
|
||||
// Avoid recomputing mset(*T) for each distinct Pointer
|
||||
// instance whose underlying type is a named type.
|
||||
msets, ok := cache.named[named]
|
||||
if !ok {
|
||||
msets.value = types.NewMethodSet(named)
|
||||
msets.pointer = types.NewMethodSet(types.NewPointer(named))
|
||||
cache.named[named] = msets
|
||||
}
|
||||
return msets
|
||||
}
|
52
vendor/honnef.co/go/tools/go/types/typeutil/ui.go
vendored
Normal file
52
vendor/honnef.co/go/tools/go/types/typeutil/ui.go
vendored
Normal file
@ -0,0 +1,52 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package typeutil
|
||||
|
||||
// This file defines utilities for user interfaces that display types.
|
||||
|
||||
import "go/types"
|
||||
|
||||
// IntuitiveMethodSet returns the intuitive method set of a type T,
|
||||
// which is the set of methods you can call on an addressable value of
|
||||
// that type.
|
||||
//
|
||||
// The result always contains MethodSet(T), and is exactly MethodSet(T)
|
||||
// for interface types and for pointer-to-concrete types.
|
||||
// For all other concrete types T, the result additionally
|
||||
// contains each method belonging to *T if there is no identically
|
||||
// named method on T itself.
|
||||
//
|
||||
// This corresponds to user intuition about method sets;
|
||||
// this function is intended only for user interfaces.
|
||||
//
|
||||
// The order of the result is as for types.MethodSet(T).
|
||||
//
|
||||
func IntuitiveMethodSet(T types.Type, msets *MethodSetCache) []*types.Selection {
|
||||
isPointerToConcrete := func(T types.Type) bool {
|
||||
ptr, ok := T.(*types.Pointer)
|
||||
return ok && !types.IsInterface(ptr.Elem())
|
||||
}
|
||||
|
||||
var result []*types.Selection
|
||||
mset := msets.MethodSet(T)
|
||||
if types.IsInterface(T) || isPointerToConcrete(T) {
|
||||
for i, n := 0, mset.Len(); i < n; i++ {
|
||||
result = append(result, mset.At(i))
|
||||
}
|
||||
} else {
|
||||
// T is some other concrete type.
|
||||
// Report methods of T and *T, preferring those of T.
|
||||
pmset := msets.MethodSet(types.NewPointer(T))
|
||||
for i, n := 0, pmset.Len(); i < n; i++ {
|
||||
meth := pmset.At(i)
|
||||
if m := mset.Lookup(meth.Obj().Pkg(), meth.Obj().Name()); m != nil {
|
||||
meth = m
|
||||
}
|
||||
result = append(result, meth)
|
||||
}
|
||||
|
||||
}
|
||||
return result
|
||||
}
|
474
vendor/honnef.co/go/tools/internal/cache/cache.go
vendored
Normal file
474
vendor/honnef.co/go/tools/internal/cache/cache.go
vendored
Normal file
@ -0,0 +1,474 @@
|
||||
// Copyright 2017 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package cache implements a build artifact cache.
|
||||
//
|
||||
// This package is a slightly modified fork of Go's
|
||||
// cmd/go/internal/cache package.
|
||||
package cache
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"honnef.co/go/tools/internal/renameio"
|
||||
)
|
||||
|
||||
// An ActionID is a cache action key, the hash of a complete description of a
// repeatable computation (command line, environment variables,
// input file contents, executable contents).
type ActionID [HashSize]byte

// An OutputID is a cache output key, the hash of an output of a computation.
type OutputID [HashSize]byte

// A Cache is a package cache, backed by a file system directory tree.
type Cache struct {
	dir string           // root of the cache directory tree
	now func() time.Time // clock source; indirected so tests can substitute a fake
}
|
||||
|
||||
// Open opens and returns the cache in the given directory.
|
||||
//
|
||||
// It is safe for multiple processes on a single machine to use the
|
||||
// same cache directory in a local file system simultaneously.
|
||||
// They will coordinate using operating system file locks and may
|
||||
// duplicate effort but will not corrupt the cache.
|
||||
//
|
||||
// However, it is NOT safe for multiple processes on different machines
|
||||
// to share a cache directory (for example, if the directory were stored
|
||||
// in a network file system). File locking is notoriously unreliable in
|
||||
// network file systems and may not suffice to protect the cache.
|
||||
//
|
||||
func Open(dir string) (*Cache, error) {
|
||||
info, err := os.Stat(dir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !info.IsDir() {
|
||||
return nil, &os.PathError{Op: "open", Path: dir, Err: fmt.Errorf("not a directory")}
|
||||
}
|
||||
for i := 0; i < 256; i++ {
|
||||
name := filepath.Join(dir, fmt.Sprintf("%02x", i))
|
||||
if err := os.MkdirAll(name, 0777); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
c := &Cache{
|
||||
dir: dir,
|
||||
now: time.Now,
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// fileName returns the name of the file corresponding to the given id.
|
||||
func (c *Cache) fileName(id [HashSize]byte, key string) string {
|
||||
return filepath.Join(c.dir, fmt.Sprintf("%02x", id[0]), fmt.Sprintf("%x", id)+"-"+key)
|
||||
}
|
||||
|
||||
// errMissing is returned by Get/get when no usable entry exists for an id.
var errMissing = errors.New("cache entry not found")

const (
	// action entry file is "v1 <hex id> <hex out> <decimal size space-padded to 20 bytes> <unixnano space-padded to 20 bytes>\n"
	hexSize   = HashSize * 2
	entrySize = 2 + 1 + hexSize + 1 + hexSize + 1 + 20 + 1 + 20 + 1
)
|
||||
|
||||
// verify controls whether to run the cache in verify mode.
// In verify mode, the cache always returns errMissing from Get
// but then double-checks in Put that the data being written
// exactly matches any existing entry. This provides an easy
// way to detect program behavior that would have been different
// had the cache entry been returned from Get.
//
// verify is enabled by setting the environment variable
// GODEBUG=gocacheverify=1.
var verify = false

// DebugTest is set when GODEBUG=gocachetest=1 is in the environment.
var DebugTest = false

// Parse GODEBUG flags once at startup.
func init() { initEnv() }
|
||||
|
||||
func initEnv() {
|
||||
verify = false
|
||||
debugHash = false
|
||||
debug := strings.Split(os.Getenv("GODEBUG"), ",")
|
||||
for _, f := range debug {
|
||||
if f == "gocacheverify=1" {
|
||||
verify = true
|
||||
}
|
||||
if f == "gocachehash=1" {
|
||||
debugHash = true
|
||||
}
|
||||
if f == "gocachetest=1" {
|
||||
DebugTest = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Get looks up the action ID in the cache,
|
||||
// returning the corresponding output ID and file size, if any.
|
||||
// Note that finding an output ID does not guarantee that the
|
||||
// saved file for that output ID is still available.
|
||||
func (c *Cache) Get(id ActionID) (Entry, error) {
|
||||
if verify {
|
||||
return Entry{}, errMissing
|
||||
}
|
||||
return c.get(id)
|
||||
}
|
||||
|
||||
// An Entry describes one cached action result: the output it maps to,
// that output's size, and when the index entry was written.
type Entry struct {
	OutputID OutputID  // hash naming the cached output file
	Size     int64     // expected size of the output file in bytes
	Time     time.Time // time the index entry was written
}
|
||||
|
||||
// get is Get but does not respect verify mode, so that Put can use it.
|
||||
func (c *Cache) get(id ActionID) (Entry, error) {
|
||||
missing := func() (Entry, error) {
|
||||
return Entry{}, errMissing
|
||||
}
|
||||
f, err := os.Open(c.fileName(id, "a"))
|
||||
if err != nil {
|
||||
return missing()
|
||||
}
|
||||
defer f.Close()
|
||||
entry := make([]byte, entrySize+1) // +1 to detect whether f is too long
|
||||
if n, err := io.ReadFull(f, entry); n != entrySize || err != io.ErrUnexpectedEOF {
|
||||
return missing()
|
||||
}
|
||||
if entry[0] != 'v' || entry[1] != '1' || entry[2] != ' ' || entry[3+hexSize] != ' ' || entry[3+hexSize+1+hexSize] != ' ' || entry[3+hexSize+1+hexSize+1+20] != ' ' || entry[entrySize-1] != '\n' {
|
||||
return missing()
|
||||
}
|
||||
eid, entry := entry[3:3+hexSize], entry[3+hexSize:]
|
||||
eout, entry := entry[1:1+hexSize], entry[1+hexSize:]
|
||||
esize, entry := entry[1:1+20], entry[1+20:]
|
||||
//lint:ignore SA4006 See https://github.com/dominikh/go-tools/issues/465
|
||||
etime, entry := entry[1:1+20], entry[1+20:]
|
||||
var buf [HashSize]byte
|
||||
if _, err := hex.Decode(buf[:], eid); err != nil || buf != id {
|
||||
return missing()
|
||||
}
|
||||
if _, err := hex.Decode(buf[:], eout); err != nil {
|
||||
return missing()
|
||||
}
|
||||
i := 0
|
||||
for i < len(esize) && esize[i] == ' ' {
|
||||
i++
|
||||
}
|
||||
size, err := strconv.ParseInt(string(esize[i:]), 10, 64)
|
||||
if err != nil || size < 0 {
|
||||
return missing()
|
||||
}
|
||||
i = 0
|
||||
for i < len(etime) && etime[i] == ' ' {
|
||||
i++
|
||||
}
|
||||
tm, err := strconv.ParseInt(string(etime[i:]), 10, 64)
|
||||
if err != nil || size < 0 {
|
||||
return missing()
|
||||
}
|
||||
|
||||
c.used(c.fileName(id, "a"))
|
||||
|
||||
return Entry{buf, size, time.Unix(0, tm)}, nil
|
||||
}
|
||||
|
||||
// GetFile looks up the action ID in the cache and returns
|
||||
// the name of the corresponding data file.
|
||||
func (c *Cache) GetFile(id ActionID) (file string, entry Entry, err error) {
|
||||
entry, err = c.Get(id)
|
||||
if err != nil {
|
||||
return "", Entry{}, err
|
||||
}
|
||||
file = c.OutputFile(entry.OutputID)
|
||||
info, err := os.Stat(file)
|
||||
if err != nil || info.Size() != entry.Size {
|
||||
return "", Entry{}, errMissing
|
||||
}
|
||||
return file, entry, nil
|
||||
}
|
||||
|
||||
// GetBytes looks up the action ID in the cache and returns
|
||||
// the corresponding output bytes.
|
||||
// GetBytes should only be used for data that can be expected to fit in memory.
|
||||
func (c *Cache) GetBytes(id ActionID) ([]byte, Entry, error) {
|
||||
entry, err := c.Get(id)
|
||||
if err != nil {
|
||||
return nil, entry, err
|
||||
}
|
||||
data, _ := ioutil.ReadFile(c.OutputFile(entry.OutputID))
|
||||
if sha256.Sum256(data) != entry.OutputID {
|
||||
return nil, entry, errMissing
|
||||
}
|
||||
return data, entry, nil
|
||||
}
|
||||
|
||||
// OutputFile returns the name of the cache file storing output with the given OutputID.
|
||||
func (c *Cache) OutputFile(out OutputID) string {
|
||||
file := c.fileName(out, "d")
|
||||
c.used(file)
|
||||
return file
|
||||
}
|
||||
|
||||
// Time constants for cache expiration.
//
// We set the mtime on a cache file on each use, but at most one per mtimeInterval (1 hour),
// to avoid causing many unnecessary inode updates. The mtimes therefore
// roughly reflect "time of last use" but may in fact be older by at most an hour.
//
// We scan the cache for entries to delete at most once per trimInterval (1 day).
//
// When we do scan the cache, we delete entries that have not been used for
// at least trimLimit (5 days). Statistics gathered from a month of usage by
// Go developers found that essentially all reuse of cached entries happened
// within 5 days of the previous reuse. See golang.org/issue/22990.
const (
	mtimeInterval = 1 * time.Hour      // minimum spacing between mtime bumps by used
	trimInterval  = 24 * time.Hour     // minimum spacing between full Trim scans
	trimLimit     = 5 * 24 * time.Hour // entries unused this long are deleted
)
|
||||
|
||||
// used makes a best-effort attempt to update mtime on file,
|
||||
// so that mtime reflects cache access time.
|
||||
//
|
||||
// Because the reflection only needs to be approximate,
|
||||
// and to reduce the amount of disk activity caused by using
|
||||
// cache entries, used only updates the mtime if the current
|
||||
// mtime is more than an hour old. This heuristic eliminates
|
||||
// nearly all of the mtime updates that would otherwise happen,
|
||||
// while still keeping the mtimes useful for cache trimming.
|
||||
func (c *Cache) used(file string) {
|
||||
info, err := os.Stat(file)
|
||||
if err == nil && c.now().Sub(info.ModTime()) < mtimeInterval {
|
||||
return
|
||||
}
|
||||
os.Chtimes(file, c.now(), c.now())
|
||||
}
|
||||
|
||||
// Trim removes old cache entries that are likely not to be reused.
|
||||
func (c *Cache) Trim() {
|
||||
now := c.now()
|
||||
|
||||
// We maintain in dir/trim.txt the time of the last completed cache trim.
|
||||
// If the cache has been trimmed recently enough, do nothing.
|
||||
// This is the common case.
|
||||
data, _ := ioutil.ReadFile(filepath.Join(c.dir, "trim.txt"))
|
||||
t, err := strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64)
|
||||
if err == nil && now.Sub(time.Unix(t, 0)) < trimInterval {
|
||||
return
|
||||
}
|
||||
|
||||
// Trim each of the 256 subdirectories.
|
||||
// We subtract an additional mtimeInterval
|
||||
// to account for the imprecision of our "last used" mtimes.
|
||||
cutoff := now.Add(-trimLimit - mtimeInterval)
|
||||
for i := 0; i < 256; i++ {
|
||||
subdir := filepath.Join(c.dir, fmt.Sprintf("%02x", i))
|
||||
c.trimSubdir(subdir, cutoff)
|
||||
}
|
||||
|
||||
// Ignore errors from here: if we don't write the complete timestamp, the
|
||||
// cache will appear older than it is, and we'll trim it again next time.
|
||||
renameio.WriteFile(filepath.Join(c.dir, "trim.txt"), []byte(fmt.Sprintf("%d", now.Unix())))
|
||||
}
|
||||
|
||||
// trimSubdir trims a single cache subdirectory.
|
||||
func (c *Cache) trimSubdir(subdir string, cutoff time.Time) {
|
||||
// Read all directory entries from subdir before removing
|
||||
// any files, in case removing files invalidates the file offset
|
||||
// in the directory scan. Also, ignore error from f.Readdirnames,
|
||||
// because we don't care about reporting the error and we still
|
||||
// want to process any entries found before the error.
|
||||
f, err := os.Open(subdir)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
names, _ := f.Readdirnames(-1)
|
||||
f.Close()
|
||||
|
||||
for _, name := range names {
|
||||
// Remove only cache entries (xxxx-a and xxxx-d).
|
||||
if !strings.HasSuffix(name, "-a") && !strings.HasSuffix(name, "-d") {
|
||||
continue
|
||||
}
|
||||
entry := filepath.Join(subdir, name)
|
||||
info, err := os.Stat(entry)
|
||||
if err == nil && info.ModTime().Before(cutoff) {
|
||||
os.Remove(entry)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// putIndexEntry adds an entry to the cache recording that executing the action
|
||||
// with the given id produces an output with the given output id (hash) and size.
|
||||
func (c *Cache) putIndexEntry(id ActionID, out OutputID, size int64, allowVerify bool) error {
|
||||
// Note: We expect that for one reason or another it may happen
|
||||
// that repeating an action produces a different output hash
|
||||
// (for example, if the output contains a time stamp or temp dir name).
|
||||
// While not ideal, this is also not a correctness problem, so we
|
||||
// don't make a big deal about it. In particular, we leave the action
|
||||
// cache entries writable specifically so that they can be overwritten.
|
||||
//
|
||||
// Setting GODEBUG=gocacheverify=1 does make a big deal:
|
||||
// in verify mode we are double-checking that the cache entries
|
||||
// are entirely reproducible. As just noted, this may be unrealistic
|
||||
// in some cases but the check is also useful for shaking out real bugs.
|
||||
entry := []byte(fmt.Sprintf("v1 %x %x %20d %20d\n", id, out, size, time.Now().UnixNano()))
|
||||
if verify && allowVerify {
|
||||
old, err := c.get(id)
|
||||
if err == nil && (old.OutputID != out || old.Size != size) {
|
||||
// panic to show stack trace, so we can see what code is generating this cache entry.
|
||||
msg := fmt.Sprintf("go: internal cache error: cache verify failed: id=%x changed:<<<\n%s\n>>>\nold: %x %d\nnew: %x %d", id, reverseHash(id), out, size, old.OutputID, old.Size)
|
||||
panic(msg)
|
||||
}
|
||||
}
|
||||
file := c.fileName(id, "a")
|
||||
if err := ioutil.WriteFile(file, entry, 0666); err != nil {
|
||||
// TODO(bcmills): This Remove potentially races with another go command writing to file.
|
||||
// Can we eliminate it?
|
||||
os.Remove(file)
|
||||
return err
|
||||
}
|
||||
os.Chtimes(file, c.now(), c.now()) // mainly for tests
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Put stores the given output in the cache as the output for the action ID.
|
||||
// It may read file twice. The content of file must not change between the two passes.
|
||||
func (c *Cache) Put(id ActionID, file io.ReadSeeker) (OutputID, int64, error) {
|
||||
return c.put(id, file, true)
|
||||
}
|
||||
|
||||
// PutNoVerify is like Put but disables the verify check
|
||||
// when GODEBUG=goverifycache=1 is set.
|
||||
// It is meant for data that is OK to cache but that we expect to vary slightly from run to run,
|
||||
// like test output containing times and the like.
|
||||
func (c *Cache) PutNoVerify(id ActionID, file io.ReadSeeker) (OutputID, int64, error) {
|
||||
return c.put(id, file, false)
|
||||
}
|
||||
|
||||
func (c *Cache) put(id ActionID, file io.ReadSeeker, allowVerify bool) (OutputID, int64, error) {
|
||||
// Compute output ID.
|
||||
h := sha256.New()
|
||||
if _, err := file.Seek(0, 0); err != nil {
|
||||
return OutputID{}, 0, err
|
||||
}
|
||||
size, err := io.Copy(h, file)
|
||||
if err != nil {
|
||||
return OutputID{}, 0, err
|
||||
}
|
||||
var out OutputID
|
||||
h.Sum(out[:0])
|
||||
|
||||
// Copy to cached output file (if not already present).
|
||||
if err := c.copyFile(file, out, size); err != nil {
|
||||
return out, size, err
|
||||
}
|
||||
|
||||
// Add to cache index.
|
||||
return out, size, c.putIndexEntry(id, out, size, allowVerify)
|
||||
}
|
||||
|
||||
// PutBytes stores the given bytes in the cache as the output for the action ID.
|
||||
func (c *Cache) PutBytes(id ActionID, data []byte) error {
|
||||
_, _, err := c.Put(id, bytes.NewReader(data))
|
||||
return err
|
||||
}
|
||||
|
||||
// copyFile copies file into the cache, expecting it to have the given
// output ID and size, if that file is not present already.
//
// The write protocol is deliberately ordered: the final byte of the file
// is written last, only after the hash has been re-verified, because a
// file of the expected size may be picked up by concurrent readers.
// On any mid-write failure the file is truncated (best effort) so no
// partial data is left looking valid.
func (c *Cache) copyFile(file io.ReadSeeker, out OutputID, size int64) error {
	name := c.fileName(out, "d")
	info, err := os.Stat(name)
	if err == nil && info.Size() == size {
		// Check hash.
		if f, err := os.Open(name); err == nil {
			h := sha256.New()
			io.Copy(h, f)
			f.Close()
			var out2 OutputID
			h.Sum(out2[:0])
			if out == out2 {
				return nil
			}
		}
		// Hash did not match. Fall through and rewrite file.
	}

	// Copy file to cache directory.
	mode := os.O_RDWR | os.O_CREATE
	if err == nil && info.Size() > size { // shouldn't happen but fix in case
		mode |= os.O_TRUNC
	}
	f, err := os.OpenFile(name, mode, 0666)
	if err != nil {
		return err
	}
	defer f.Close()
	if size == 0 {
		// File now exists with correct size.
		// Only one possible zero-length file, so contents are OK too.
		// Early return here makes sure there's a "last byte" for code below.
		return nil
	}

	// From here on, if any of the I/O writing the file fails,
	// we make a best-effort attempt to truncate the file f
	// before returning, to avoid leaving bad bytes in the file.

	// Copy file to f, but also into h to double-check hash.
	if _, err := file.Seek(0, 0); err != nil {
		f.Truncate(0)
		return err
	}
	h := sha256.New()
	w := io.MultiWriter(f, h)
	if _, err := io.CopyN(w, file, size-1); err != nil {
		f.Truncate(0)
		return err
	}
	// Check last byte before writing it; writing it will make the size match
	// what other processes expect to find and might cause them to start
	// using the file.
	buf := make([]byte, 1)
	if _, err := file.Read(buf); err != nil {
		f.Truncate(0)
		return err
	}
	h.Write(buf)
	sum := h.Sum(nil)
	if !bytes.Equal(sum, out[:]) {
		f.Truncate(0)
		return fmt.Errorf("file content changed underfoot")
	}

	// Commit cache file entry.
	if _, err := f.Write(buf); err != nil {
		f.Truncate(0)
		return err
	}
	if err := f.Close(); err != nil {
		// Data might not have been written,
		// but file may look like it is the right size.
		// To be extra careful, remove cached file.
		os.Remove(name)
		return err
	}
	os.Chtimes(name, c.now(), c.now()) // mainly for tests

	return nil
}
|
85
vendor/honnef.co/go/tools/internal/cache/default.go
vendored
Normal file
85
vendor/honnef.co/go/tools/internal/cache/default.go
vendored
Normal file
@ -0,0 +1,85 @@
|
||||
// Copyright 2017 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cache
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Default returns the default cache to use.
// The cache (and any error locating its directory) is computed once,
// on first call, by initDefaultCache.
func Default() (*Cache, error) {
	defaultOnce.Do(initDefaultCache)
	return defaultCache, defaultDirErr
}
|
||||
|
||||
var (
	defaultOnce  sync.Once // guards initDefaultCache
	defaultCache *Cache    // process-wide cache returned by Default
)
|
||||
|
||||
// cacheREADME is a message stored in a README in the cache directory.
// Because the cache lives outside the normal Go trees, we leave the
// README as a courtesy to explain where it came from.
const cacheREADME = `This directory holds cached build artifacts from staticcheck.
`
|
||||
|
||||
// initDefaultCache does the work of finding the default cache
|
||||
// the first time Default is called.
|
||||
func initDefaultCache() {
|
||||
dir := DefaultDir()
|
||||
if err := os.MkdirAll(dir, 0777); err != nil {
|
||||
log.Fatalf("failed to initialize build cache at %s: %s\n", dir, err)
|
||||
}
|
||||
if _, err := os.Stat(filepath.Join(dir, "README")); err != nil {
|
||||
// Best effort.
|
||||
ioutil.WriteFile(filepath.Join(dir, "README"), []byte(cacheREADME), 0666)
|
||||
}
|
||||
|
||||
c, err := Open(dir)
|
||||
if err != nil {
|
||||
log.Fatalf("failed to initialize build cache at %s: %s\n", dir, err)
|
||||
}
|
||||
defaultCache = c
|
||||
}
|
||||
|
||||
var (
	defaultDirOnce sync.Once // guards the computation in DefaultDir
	defaultDir     string    // memoized result of DefaultDir
	defaultDirErr  error     // non-nil if no usable cache location exists
)
|
||||
|
||||
// DefaultDir returns the effective STATICCHECK_CACHE setting.
|
||||
func DefaultDir() string {
|
||||
// Save the result of the first call to DefaultDir for later use in
|
||||
// initDefaultCache. cmd/go/main.go explicitly sets GOCACHE so that
|
||||
// subprocesses will inherit it, but that means initDefaultCache can't
|
||||
// otherwise distinguish between an explicit "off" and a UserCacheDir error.
|
||||
|
||||
defaultDirOnce.Do(func() {
|
||||
defaultDir = os.Getenv("STATICCHECK_CACHE")
|
||||
if filepath.IsAbs(defaultDir) {
|
||||
return
|
||||
}
|
||||
if defaultDir != "" {
|
||||
defaultDirErr = fmt.Errorf("STATICCHECK_CACHE is not an absolute path")
|
||||
return
|
||||
}
|
||||
|
||||
// Compute default location.
|
||||
dir, err := os.UserCacheDir()
|
||||
if err != nil {
|
||||
defaultDirErr = fmt.Errorf("STATICCHECK_CACHE is not defined and %v", err)
|
||||
return
|
||||
}
|
||||
defaultDir = filepath.Join(dir, "staticcheck")
|
||||
})
|
||||
|
||||
return defaultDir
|
||||
}
|
176
vendor/honnef.co/go/tools/internal/cache/hash.go
vendored
Normal file
176
vendor/honnef.co/go/tools/internal/cache/hash.go
vendored
Normal file
@ -0,0 +1,176 @@
|
||||
// Copyright 2017 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cache
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/sha256"
|
||||
"fmt"
|
||||
"hash"
|
||||
"io"
|
||||
"os"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// debugHash mirrors GODEBUG=gocachehash=1; when set, every hash input
// and result is logged to stderr.
var debugHash = false // set when GODEBUG=gocachehash=1

// HashSize is the number of bytes in a hash.
const HashSize = 32
|
||||
|
||||
// A Hash provides access to the canonical hash function used to index the cache.
// The current implementation uses salted SHA256, but clients must not assume this.
type Hash struct {
	h    hash.Hash
	name string        // for debugging
	buf  *bytes.Buffer // for verify
}
|
||||
|
||||
// hashSalt is a salt string added to the beginning of every hash
// created by NewHash. Using the Staticcheck version makes sure that different
// versions of the command do not address the same cache
// entries, so that a bug in one version does not affect the execution
// of other versions. This salt will result in additional ActionID files
// in the cache, but not additional copies of the large output files,
// which are still addressed by unsalted SHA256.
var hashSalt []byte

// SetSalt sets the salt mixed into every hash created by NewHash.
func SetSalt(b []byte) {
	hashSalt = b
}
|
||||
|
||||
// Subkey returns an action ID corresponding to mixing a parent
|
||||
// action ID with a string description of the subkey.
|
||||
func Subkey(parent ActionID, desc string) ActionID {
|
||||
h := sha256.New()
|
||||
h.Write([]byte("subkey:"))
|
||||
h.Write(parent[:])
|
||||
h.Write([]byte(desc))
|
||||
var out ActionID
|
||||
h.Sum(out[:0])
|
||||
if debugHash {
|
||||
fmt.Fprintf(os.Stderr, "HASH subkey %x %q = %x\n", parent, desc, out)
|
||||
}
|
||||
if verify {
|
||||
hashDebug.Lock()
|
||||
hashDebug.m[out] = fmt.Sprintf("subkey %x %q", parent, desc)
|
||||
hashDebug.Unlock()
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// NewHash returns a new Hash.
|
||||
// The caller is expected to Write data to it and then call Sum.
|
||||
func NewHash(name string) *Hash {
|
||||
h := &Hash{h: sha256.New(), name: name}
|
||||
if debugHash {
|
||||
fmt.Fprintf(os.Stderr, "HASH[%s]\n", h.name)
|
||||
}
|
||||
h.Write(hashSalt)
|
||||
if verify {
|
||||
h.buf = new(bytes.Buffer)
|
||||
}
|
||||
return h
|
||||
}
|
||||
|
||||
// Write writes data to the running hash.
func (h *Hash) Write(b []byte) (int, error) {
	if debugHash {
		fmt.Fprintf(os.Stderr, "HASH[%s]: %q\n", h.name, b)
	}
	if h.buf != nil {
		// Verify mode: mirror the input so reverseHash can reproduce it.
		h.buf.Write(b)
	}
	return h.h.Write(b)
}
|
||||
|
||||
// Sum returns the hash of the data written previously.
func (h *Hash) Sum() [HashSize]byte {
	var out [HashSize]byte
	h.h.Sum(out[:0])
	if debugHash {
		fmt.Fprintf(os.Stderr, "HASH[%s]: %x\n", h.name, out)
	}
	if h.buf != nil {
		// Verify mode: record the full input under the resulting ID so a
		// cache mismatch can be mapped back via reverseHash.
		hashDebug.Lock()
		if hashDebug.m == nil {
			hashDebug.m = make(map[[HashSize]byte]string)
		}
		hashDebug.m[out] = h.buf.String()
		hashDebug.Unlock()
	}
	return out
}
|
||||
|
||||
// In GODEBUG=gocacheverify=1 mode,
// hashDebug holds the input to every computed hash ID,
// so that we can work backward from the ID involved in a
// cache entry mismatch to a description of what should be there.
var hashDebug struct {
	sync.Mutex
	m map[[HashSize]byte]string
}
|
||||
|
||||
// reverseHash returns the input used to compute the hash id.
|
||||
func reverseHash(id [HashSize]byte) string {
|
||||
hashDebug.Lock()
|
||||
s := hashDebug.m[id]
|
||||
hashDebug.Unlock()
|
||||
return s
|
||||
}
|
||||
|
||||
// hashFileCache memoizes FileHash results, keyed by file path.
var hashFileCache struct {
	sync.Mutex
	m map[string][HashSize]byte
}
|
||||
|
||||
// FileHash returns the hash of the named file.
|
||||
// It caches repeated lookups for a given file,
|
||||
// and the cache entry for a file can be initialized
|
||||
// using SetFileHash.
|
||||
// The hash used by FileHash is not the same as
|
||||
// the hash used by NewHash.
|
||||
func FileHash(file string) ([HashSize]byte, error) {
|
||||
hashFileCache.Lock()
|
||||
out, ok := hashFileCache.m[file]
|
||||
hashFileCache.Unlock()
|
||||
|
||||
if ok {
|
||||
return out, nil
|
||||
}
|
||||
|
||||
h := sha256.New()
|
||||
f, err := os.Open(file)
|
||||
if err != nil {
|
||||
if debugHash {
|
||||
fmt.Fprintf(os.Stderr, "HASH %s: %v\n", file, err)
|
||||
}
|
||||
return [HashSize]byte{}, err
|
||||
}
|
||||
_, err = io.Copy(h, f)
|
||||
f.Close()
|
||||
if err != nil {
|
||||
if debugHash {
|
||||
fmt.Fprintf(os.Stderr, "HASH %s: %v\n", file, err)
|
||||
}
|
||||
return [HashSize]byte{}, err
|
||||
}
|
||||
h.Sum(out[:0])
|
||||
if debugHash {
|
||||
fmt.Fprintf(os.Stderr, "HASH %s: %x\n", file, out)
|
||||
}
|
||||
|
||||
SetFileHash(file, out)
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// SetFileHash sets the hash returned by FileHash for file.
|
||||
func SetFileHash(file string, sum [HashSize]byte) {
|
||||
hashFileCache.Lock()
|
||||
if hashFileCache.m == nil {
|
||||
hashFileCache.m = make(map[string][HashSize]byte)
|
||||
}
|
||||
hashFileCache.m[file] = sum
|
||||
hashFileCache.Unlock()
|
||||
}
|
116
vendor/honnef.co/go/tools/internal/passes/buildssa/buildssa.go
vendored
Normal file
116
vendor/honnef.co/go/tools/internal/passes/buildssa/buildssa.go
vendored
Normal file
@ -0,0 +1,116 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package buildssa defines an Analyzer that constructs the SSA
|
||||
// representation of an error-free package and returns the set of all
|
||||
// functions within it. It does not report any diagnostics itself but
|
||||
// may be used as an input to other analyzers.
|
||||
//
|
||||
// THIS INTERFACE IS EXPERIMENTAL AND MAY BE SUBJECT TO INCOMPATIBLE CHANGE.
|
||||
package buildssa
|
||||
|
||||
import (
|
||||
"go/ast"
|
||||
"go/types"
|
||||
"reflect"
|
||||
|
||||
"golang.org/x/tools/go/analysis"
|
||||
"honnef.co/go/tools/ssa"
|
||||
)
|
||||
|
||||
// Analyzer builds the SSA-form IR for the package under analysis and
// exposes it to dependent analyzers as a *SSA result.
var Analyzer = &analysis.Analyzer{
	Name:       "buildssa",
	Doc:        "build SSA-form IR for later passes",
	Run:        run,
	ResultType: reflect.TypeOf(new(SSA)),
}
|
||||
|
||||
// SSA provides SSA-form intermediate representation for all the
// non-blank source functions in the current package.
type SSA struct {
	Pkg      *ssa.Package    // SSA package built for the package under analysis
	SrcFuncs []*ssa.Function // source functions (including anonymous ones), in source order
}
|
||||
|
||||
// run builds SSA form (with debug information) for pass's package and
// all of its imports, and returns a *SSA listing the package and its
// source functions.
func run(pass *analysis.Pass) (interface{}, error) {
	// Plundered from ssautil.BuildPackage.

	// We must create a new Program for each Package because the
	// analysis API provides no place to hang a Program shared by
	// all Packages. Consequently, SSA Packages and Functions do not
	// have a canonical representation across an analysis session of
	// multiple packages. This is unlikely to be a problem in
	// practice because the analysis API essentially forces all
	// packages to be analysed independently, so any given call to
	// Analysis.Run on a package will see only SSA objects belonging
	// to a single Program.

	mode := ssa.GlobalDebug

	prog := ssa.NewProgram(pass.Fset, mode)

	// Create SSA packages for all imports.
	// Order is not significant.
	created := make(map[*types.Package]bool)
	var createAll func(pkgs []*types.Package)
	createAll = func(pkgs []*types.Package) {
		for _, p := range pkgs {
			if !created[p] {
				created[p] = true
				prog.CreatePackage(p, nil, nil, true)
				createAll(p.Imports())
			}
		}
	}
	createAll(pass.Pkg.Imports())

	// Create and build the primary package.
	ssapkg := prog.CreatePackage(pass.Pkg, pass.Files, pass.TypesInfo, false)
	ssapkg.Build()

	// Compute list of source functions, including literals,
	// in source order.
	var funcs []*ssa.Function
	var addAnons func(f *ssa.Function)
	addAnons = func(f *ssa.Function) {
		funcs = append(funcs, f)
		for _, anon := range f.AnonFuncs {
			addAnons(anon)
		}
	}
	// The synthesized package initializer (and its anonymous
	// functions) is always present.
	addAnons(ssapkg.Members["init"].(*ssa.Function))
	for _, f := range pass.Files {
		for _, decl := range f.Decls {
			if fdecl, ok := decl.(*ast.FuncDecl); ok {

				// SSA will not build a Function
				// for a FuncDecl named blank.
				// That's arguably too strict but
				// relaxing it would break uniqueness of
				// names of package members.
				if fdecl.Name.Name == "_" {
					continue
				}

				// (init functions have distinct Func
				// objects named "init" and distinct
				// ssa.Functions named "init#1", ...)

				fn := pass.TypesInfo.Defs[fdecl.Name].(*types.Func)
				if fn == nil {
					panic(fn)
				}

				f := ssapkg.Prog.FuncValue(fn)
				if f == nil {
					panic(fn)
				}

				addAnons(f)
			}
		}
	}

	return &SSA{Pkg: ssapkg, SrcFuncs: funcs}, nil
}
|
83
vendor/honnef.co/go/tools/internal/renameio/renameio.go
vendored
Normal file
83
vendor/honnef.co/go/tools/internal/renameio/renameio.go
vendored
Normal file
@ -0,0 +1,83 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package renameio writes files atomically by renaming temporary files.
|
||||
package renameio
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
const patternSuffix = "*.tmp"
|
||||
|
||||
// Pattern returns a glob pattern that matches the unrenamed temporary files
|
||||
// created when writing to filename.
|
||||
func Pattern(filename string) string {
|
||||
return filepath.Join(filepath.Dir(filename), filepath.Base(filename)+patternSuffix)
|
||||
}
|
||||
|
||||
// WriteFile is like ioutil.WriteFile, but first writes data to an arbitrary
|
||||
// file in the same directory as filename, then renames it atomically to the
|
||||
// final name.
|
||||
//
|
||||
// That ensures that the final location, if it exists, is always a complete file.
|
||||
func WriteFile(filename string, data []byte) (err error) {
|
||||
return WriteToFile(filename, bytes.NewReader(data))
|
||||
}
|
||||
|
||||
// WriteToFile is a variant of WriteFile that accepts the data as an io.Reader
|
||||
// instead of a slice.
|
||||
func WriteToFile(filename string, data io.Reader) (err error) {
|
||||
f, err := ioutil.TempFile(filepath.Dir(filename), filepath.Base(filename)+patternSuffix)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
// Only call os.Remove on f.Name() if we failed to rename it: otherwise,
|
||||
// some other process may have created a new file with the same name after
|
||||
// that.
|
||||
if err != nil {
|
||||
f.Close()
|
||||
os.Remove(f.Name())
|
||||
}
|
||||
}()
|
||||
|
||||
if _, err := io.Copy(f, data); err != nil {
|
||||
return err
|
||||
}
|
||||
// Sync the file before renaming it: otherwise, after a crash the reader may
|
||||
// observe a 0-length file instead of the actual contents.
|
||||
// See https://golang.org/issue/22397#issuecomment-380831736.
|
||||
if err := f.Sync(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := f.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var start time.Time
|
||||
for {
|
||||
err := os.Rename(f.Name(), filename)
|
||||
if err == nil || runtime.GOOS != "windows" || !strings.HasSuffix(err.Error(), "Access is denied.") {
|
||||
return err
|
||||
}
|
||||
|
||||
// Windows seems to occasionally trigger spurious "Access is denied" errors
|
||||
// here (see golang.org/issue/31247). We're not sure why. It's probably
|
||||
// worth a little extra latency to avoid propagating the spurious errors.
|
||||
if start.IsZero() {
|
||||
start = time.Now()
|
||||
} else if time.Since(start) >= 500*time.Millisecond {
|
||||
return err
|
||||
}
|
||||
time.Sleep(5 * time.Millisecond)
|
||||
}
|
||||
}
|
70
vendor/honnef.co/go/tools/internal/sharedcheck/lint.go
vendored
Normal file
70
vendor/honnef.co/go/tools/internal/sharedcheck/lint.go
vendored
Normal file
@ -0,0 +1,70 @@
|
||||
package sharedcheck
|
||||
|
||||
import (
|
||||
"go/ast"
|
||||
"go/types"
|
||||
|
||||
"golang.org/x/tools/go/analysis"
|
||||
"honnef.co/go/tools/internal/passes/buildssa"
|
||||
. "honnef.co/go/tools/lint/lintdsl"
|
||||
"honnef.co/go/tools/ssa"
|
||||
)
|
||||
|
||||
// CheckRangeStringRunes reports range statements that iterate over a
// []rune(string) conversion while discarding the key; ranging over
// the string directly is equivalent (see the Reportf message below).
func CheckRangeStringRunes(pass *analysis.Pass) (interface{}, error) {
	for _, ssafn := range pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).SrcFuncs {
		fn := func(node ast.Node) bool {
			rng, ok := node.(*ast.RangeStmt)
			if !ok || !IsBlank(rng.Key) {
				return true
			}

			v, _ := ssafn.ValueForExpr(rng.X)

			// Check that we're converting from string to []rune
			val, _ := v.(*ssa.Convert)
			if val == nil {
				return true
			}
			Tsrc, ok := val.X.Type().(*types.Basic)
			if !ok || Tsrc.Kind() != types.String {
				return true
			}
			Tdst, ok := val.Type().(*types.Slice)
			if !ok {
				return true
			}
			TdstElem, ok := Tdst.Elem().(*types.Basic)
			if !ok || TdstElem.Kind() != types.Int32 {
				return true
			}

			// Check that the result of the conversion is only used to
			// range over
			refs := val.Referrers()
			if refs == nil {
				return true
			}

			// Expect two refs: one for obtaining the length of the slice,
			// one for accessing the elements
			if len(FilterDebug(*refs)) != 2 {
				// TODO(dh): right now, we check that only one place
				// refers to our slice. This will miss cases such as
				// ranging over the slice twice. Ideally, we'd ensure that
				// the slice is only used for ranging over (without
				// accessing the key), but that is harder to do because in
				// SSA form, ranging over a slice looks like an ordinary
				// loop with index increments and slice accesses. We'd
				// have to look at the associated AST node to check that
				// it's a range statement.
				return true
			}

			pass.Reportf(rng.Pos(), "should range over string, not []rune(string)")

			return true
		}
		Inspect(ssafn.Syntax(), fn)
	}
	return nil, nil
}
|
28
vendor/honnef.co/go/tools/lint/LICENSE
vendored
Normal file
28
vendor/honnef.co/go/tools/lint/LICENSE
vendored
Normal file
@ -0,0 +1,28 @@
|
||||
Copyright (c) 2013 The Go Authors. All rights reserved.
|
||||
Copyright (c) 2016 Dominik Honnef. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
491
vendor/honnef.co/go/tools/lint/lint.go
vendored
Normal file
491
vendor/honnef.co/go/tools/lint/lint.go
vendored
Normal file
@ -0,0 +1,491 @@
|
||||
// Package lint provides the foundation for tools like staticcheck
|
||||
package lint // import "honnef.co/go/tools/lint"
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"go/scanner"
|
||||
"go/token"
|
||||
"go/types"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"unicode"
|
||||
|
||||
"golang.org/x/tools/go/analysis"
|
||||
"golang.org/x/tools/go/packages"
|
||||
"honnef.co/go/tools/config"
|
||||
)
|
||||
|
||||
type Documentation struct {
|
||||
Title string
|
||||
Text string
|
||||
Since string
|
||||
NonDefault bool
|
||||
Options []string
|
||||
}
|
||||
|
||||
func (doc *Documentation) String() string {
|
||||
b := &strings.Builder{}
|
||||
fmt.Fprintf(b, "%s\n\n", doc.Title)
|
||||
if doc.Text != "" {
|
||||
fmt.Fprintf(b, "%s\n\n", doc.Text)
|
||||
}
|
||||
fmt.Fprint(b, "Available since\n ")
|
||||
if doc.Since == "" {
|
||||
fmt.Fprint(b, "unreleased")
|
||||
} else {
|
||||
fmt.Fprintf(b, "%s", doc.Since)
|
||||
}
|
||||
if doc.NonDefault {
|
||||
fmt.Fprint(b, ", non-default")
|
||||
}
|
||||
fmt.Fprint(b, "\n")
|
||||
if len(doc.Options) > 0 {
|
||||
fmt.Fprintf(b, "\nOptions\n")
|
||||
for _, opt := range doc.Options {
|
||||
fmt.Fprintf(b, " %s", opt)
|
||||
}
|
||||
fmt.Fprint(b, "\n")
|
||||
}
|
||||
return b.String()
|
||||
}
|
||||
|
||||
// Ignore is a directive that suppresses problems matching it.
type Ignore interface {
	// Match reports whether the ignore applies to p.
	Match(p Problem) bool
}
|
||||
|
||||
type LineIgnore struct {
|
||||
File string
|
||||
Line int
|
||||
Checks []string
|
||||
Matched bool
|
||||
Pos token.Pos
|
||||
}
|
||||
|
||||
func (li *LineIgnore) Match(p Problem) bool {
|
||||
pos := p.Pos
|
||||
if pos.Filename != li.File || pos.Line != li.Line {
|
||||
return false
|
||||
}
|
||||
for _, c := range li.Checks {
|
||||
if m, _ := filepath.Match(c, p.Check); m {
|
||||
li.Matched = true
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (li *LineIgnore) String() string {
|
||||
matched := "not matched"
|
||||
if li.Matched {
|
||||
matched = "matched"
|
||||
}
|
||||
return fmt.Sprintf("%s:%d %s (%s)", li.File, li.Line, strings.Join(li.Checks, ", "), matched)
|
||||
}
|
||||
|
||||
type FileIgnore struct {
|
||||
File string
|
||||
Checks []string
|
||||
}
|
||||
|
||||
func (fi *FileIgnore) Match(p Problem) bool {
|
||||
if p.Pos.Filename != fi.File {
|
||||
return false
|
||||
}
|
||||
for _, c := range fi.Checks {
|
||||
if m, _ := filepath.Match(c, p.Check); m {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Severity classifies how a problem is reported.
type Severity uint8

// Severity levels. Compile errors use Error; problems matched by an
// ignore directive are marked Ignored.
const (
	Error Severity = iota
	Warning
	Ignored
)

// Problem represents a problem in some source code.
type Problem struct {
	Pos      token.Position
	End      token.Position
	Message  string
	Check    string
	Severity Severity
}

// String renders the problem as "message (check)".
func (p *Problem) String() string {
	return p.Message + " (" + p.Check + ")"
}
|
||||
|
||||
// A Linter lints Go source code.
type Linter struct {
	// Checkers are the per-package analyzers to run.
	Checkers []*analysis.Analyzer
	// CumulativeCheckers accumulate results across all packages and
	// report after the run (see Lint).
	CumulativeCheckers []CumulativeChecker
	// GoVersion is the targeted Go minor version, forwarded to the
	// runner.
	GoVersion int
	// Config is the baseline configuration, merged with per-package
	// configs during Lint.
	Config config.Config
	Stats  Stats
}

// CumulativeChecker is a checker whose results are collected after
// all packages have been analyzed and converted into Problems
// per found object.
type CumulativeChecker interface {
	// Analyzer returns the analyzer that feeds this checker.
	Analyzer() *analysis.Analyzer
	// Result returns the objects found across the whole run.
	Result() []types.Object
	// ProblemObject converts a found object into a Problem.
	ProblemObject(*token.FileSet, types.Object) Problem
}
|
||||
|
||||
// Lint loads the packages matched by patterns, runs the allowed
// checkers over them, applies ignore directives and per-package
// configuration, and returns the resulting problems sorted by
// position and deduplicated.
func (l *Linter) Lint(cfg *packages.Config, patterns []string) ([]Problem, error) {
	var allAnalyzers []*analysis.Analyzer
	allAnalyzers = append(allAnalyzers, l.Checkers...)
	for _, cum := range l.CumulativeCheckers {
		allAnalyzers = append(allAnalyzers, cum.Analyzer())
	}

	// The -checks command line flag overrules all configuration
	// files, which means that for `-checks="foo"`, no check other
	// than foo can ever be reported to the user. Make use of this
	// fact to cull the list of analyses we need to run.

	// replace "inherit" with "all", as we don't want to base the
	// list of all checks on the default configuration, which
	// disables certain checks.
	checks := make([]string, len(l.Config.Checks))
	copy(checks, l.Config.Checks)
	for i, c := range checks {
		if c == "inherit" {
			checks[i] = "all"
		}
	}

	allowed := FilterChecks(allAnalyzers, checks)
	var allowedAnalyzers []*analysis.Analyzer
	for _, c := range l.Checkers {
		if allowed[c.Name] {
			allowedAnalyzers = append(allowedAnalyzers, c)
		}
	}
	hasCumulative := false
	for _, cum := range l.CumulativeCheckers {
		a := cum.Analyzer()
		if allowed[a.Name] {
			hasCumulative = true
			allowedAnalyzers = append(allowedAnalyzers, a)
		}
	}

	r, err := NewRunner(&l.Stats)
	if err != nil {
		return nil, err
	}
	r.goVersion = l.GoVersion

	pkgs, err := r.Run(cfg, patterns, allowedAnalyzers, hasCumulative)
	if err != nil {
		return nil, err
	}

	// Convert load/type-check errors into "compile" problems on
	// their packages, and index packages by their types.Package for
	// the cumulative phase below.
	tpkgToPkg := map[*types.Package]*Package{}
	for _, pkg := range pkgs {
		tpkgToPkg[pkg.Types] = pkg

		for _, e := range pkg.errs {
			switch e := e.(type) {
			case types.Error:
				p := Problem{
					Pos:      e.Fset.PositionFor(e.Pos, false),
					Message:  e.Msg,
					Severity: Error,
					Check:    "compile",
				}
				pkg.problems = append(pkg.problems, p)
			case packages.Error:
				msg := e.Msg
				if len(msg) != 0 && msg[0] == '\n' {
					// TODO(dh): See https://github.com/golang/go/issues/32363
					msg = msg[1:]
				}

				var pos token.Position
				if e.Pos == "" {
					// Under certain conditions (malformed package
					// declarations, multiple packages in the same
					// directory), go list emits an error on stderr
					// instead of JSON. Those errors do not have
					// associated position information in
					// go/packages.Error, even though the output on
					// stderr may contain it.
					if p, n, err := parsePos(msg); err == nil {
						if abs, err := filepath.Abs(p.Filename); err == nil {
							p.Filename = abs
						}
						pos = p
						msg = msg[n+2:]
					}
				} else {
					var err error
					pos, _, err = parsePos(e.Pos)
					if err != nil {
						panic(fmt.Sprintf("internal error: %s", e))
					}
				}
				p := Problem{
					Pos:      pos,
					Message:  msg,
					Severity: Error,
					Check:    "compile",
				}
				pkg.problems = append(pkg.problems, p)
			case scanner.ErrorList:
				for _, e := range e {
					p := Problem{
						Pos:      e.Pos,
						Message:  e.Msg,
						Severity: Error,
						Check:    "compile",
					}
					pkg.problems = append(pkg.problems, p)
				}
			case error:
				p := Problem{
					Pos:      token.Position{},
					Message:  e.Error(),
					Severity: Error,
					Check:    "compile",
				}
				pkg.problems = append(pkg.problems, p)
			}
		}
	}

	// Collect results of cumulative checkers, filtered by each
	// package's merged configuration.
	atomic.StoreUint32(&r.stats.State, StateCumulative)
	var problems []Problem
	for _, cum := range l.CumulativeCheckers {
		for _, res := range cum.Result() {
			pkg := tpkgToPkg[res.Pkg()]
			allowedChecks := FilterChecks(allowedAnalyzers, pkg.cfg.Merge(l.Config).Checks)
			if allowedChecks[cum.Analyzer().Name] {
				pos := DisplayPosition(pkg.Fset, res.Pos())
				// FIXME(dh): why are we ignoring generated files
				// here? Surely this is specific to 'unused', not all
				// cumulative checkers
				if _, ok := pkg.gen[pos.Filename]; ok {
					continue
				}
				p := cum.ProblemObject(pkg.Fset, res)
				problems = append(problems, p)
			}
		}
	}

	for _, pkg := range pkgs {
		// Mark problems matched by ignore directives as Ignored.
		for _, ig := range pkg.ignores {
			for i := range pkg.problems {
				p := &pkg.problems[i]
				if ig.Match(*p) {
					p.Severity = Ignored
				}
			}
			for i := range problems {
				p := &problems[i]
				if ig.Match(*p) {
					p.Severity = Ignored
				}
			}
		}

		if pkg.cfg == nil {
			// The package failed to load, otherwise we would have a
			// valid config. Pass through all errors.
			problems = append(problems, pkg.problems...)
		} else {
			for _, p := range pkg.problems {
				allowedChecks := FilterChecks(allowedAnalyzers, pkg.cfg.Merge(l.Config).Checks)
				allowedChecks["compile"] = true
				if allowedChecks[p.Check] {
					problems = append(problems, p)
				}
			}
		}

		// Flag line ignores that never matched anything, unless all
		// of their checks were disabled anyway.
		for _, ig := range pkg.ignores {
			ig, ok := ig.(*LineIgnore)
			if !ok {
				continue
			}
			if ig.Matched {
				continue
			}

			couldveMatched := false
			allowedChecks := FilterChecks(allowedAnalyzers, pkg.cfg.Merge(l.Config).Checks)
			for _, c := range ig.Checks {
				if !allowedChecks[c] {
					continue
				}
				couldveMatched = true
				break
			}

			if !couldveMatched {
				// The ignored checks were disabled for the containing package.
				// Don't flag the ignore for not having matched.
				continue
			}
			p := Problem{
				Pos:     DisplayPosition(pkg.Fset, ig.Pos),
				Message: "this linter directive didn't match anything; should it be removed?",
				Check:   "",
			}
			problems = append(problems, p)
		}
	}

	if len(problems) == 0 {
		return nil, nil
	}

	// Sort by position, then message, so duplicates are adjacent.
	sort.Slice(problems, func(i, j int) bool {
		pi := problems[i].Pos
		pj := problems[j].Pos

		if pi.Filename != pj.Filename {
			return pi.Filename < pj.Filename
		}
		if pi.Line != pj.Line {
			return pi.Line < pj.Line
		}
		if pi.Column != pj.Column {
			return pi.Column < pj.Column
		}

		return problems[i].Message < problems[j].Message
	})

	var out []Problem
	out = append(out, problems[0])
	for i, p := range problems[1:] {
		// We may encounter duplicate problems because one file
		// can be part of many packages.
		if problems[i] != p {
			out = append(out, p)
		}
	}
	return out, nil
}
|
||||
|
||||
func FilterChecks(allChecks []*analysis.Analyzer, checks []string) map[string]bool {
|
||||
// OPT(dh): this entire computation could be cached per package
|
||||
allowedChecks := map[string]bool{}
|
||||
|
||||
for _, check := range checks {
|
||||
b := true
|
||||
if len(check) > 1 && check[0] == '-' {
|
||||
b = false
|
||||
check = check[1:]
|
||||
}
|
||||
if check == "*" || check == "all" {
|
||||
// Match all
|
||||
for _, c := range allChecks {
|
||||
allowedChecks[c.Name] = b
|
||||
}
|
||||
} else if strings.HasSuffix(check, "*") {
|
||||
// Glob
|
||||
prefix := check[:len(check)-1]
|
||||
isCat := strings.IndexFunc(prefix, func(r rune) bool { return unicode.IsNumber(r) }) == -1
|
||||
|
||||
for _, c := range allChecks {
|
||||
idx := strings.IndexFunc(c.Name, func(r rune) bool { return unicode.IsNumber(r) })
|
||||
if isCat {
|
||||
// Glob is S*, which should match S1000 but not SA1000
|
||||
cat := c.Name[:idx]
|
||||
if prefix == cat {
|
||||
allowedChecks[c.Name] = b
|
||||
}
|
||||
} else {
|
||||
// Glob is S1*
|
||||
if strings.HasPrefix(c.Name, prefix) {
|
||||
allowedChecks[c.Name] = b
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Literal check name
|
||||
allowedChecks[check] = b
|
||||
}
|
||||
}
|
||||
return allowedChecks
|
||||
}
|
||||
|
||||
// Positioner describes a value that can report its position within a
// token.FileSet, such as AST nodes and type-checker objects.
type Positioner interface {
	Pos() token.Pos
}
|
||||
|
||||
func DisplayPosition(fset *token.FileSet, p token.Pos) token.Position {
|
||||
if p == token.NoPos {
|
||||
return token.Position{}
|
||||
}
|
||||
|
||||
// Only use the adjusted position if it points to another Go file.
|
||||
// This means we'll point to the original file for cgo files, but
|
||||
// we won't point to a YACC grammar file.
|
||||
pos := fset.PositionFor(p, false)
|
||||
adjPos := fset.PositionFor(p, true)
|
||||
|
||||
if filepath.Ext(adjPos.Filename) == ".go" {
|
||||
return adjPos
|
||||
}
|
||||
return pos
|
||||
}
|
||||
|
||||
var bufferPool = &sync.Pool{
|
||||
New: func() interface{} {
|
||||
buf := bytes.NewBuffer(nil)
|
||||
buf.Grow(64)
|
||||
return buf
|
||||
},
|
||||
}
|
||||
|
||||
func FuncName(f *types.Func) string {
|
||||
buf := bufferPool.Get().(*bytes.Buffer)
|
||||
buf.Reset()
|
||||
if f.Type() != nil {
|
||||
sig := f.Type().(*types.Signature)
|
||||
if recv := sig.Recv(); recv != nil {
|
||||
buf.WriteByte('(')
|
||||
if _, ok := recv.Type().(*types.Interface); ok {
|
||||
// gcimporter creates abstract methods of
|
||||
// named interfaces using the interface type
|
||||
// (not the named type) as the receiver.
|
||||
// Don't print it in full.
|
||||
buf.WriteString("interface")
|
||||
} else {
|
||||
types.WriteType(buf, recv.Type(), nil)
|
||||
}
|
||||
buf.WriteByte(')')
|
||||
buf.WriteByte('.')
|
||||
} else if f.Pkg() != nil {
|
||||
writePackage(buf, f.Pkg())
|
||||
}
|
||||
}
|
||||
buf.WriteString(f.Name())
|
||||
s := buf.String()
|
||||
bufferPool.Put(buf)
|
||||
return s
|
||||
}
|
||||
|
||||
func writePackage(buf *bytes.Buffer, pkg *types.Package) {
|
||||
if pkg == nil {
|
||||
return
|
||||
}
|
||||
s := pkg.Path()
|
||||
if s != "" {
|
||||
buf.WriteString(s)
|
||||
buf.WriteByte('.')
|
||||
}
|
||||
}
|
400
vendor/honnef.co/go/tools/lint/lintdsl/lintdsl.go
vendored
Normal file
400
vendor/honnef.co/go/tools/lint/lintdsl/lintdsl.go
vendored
Normal file
@ -0,0 +1,400 @@
|
||||
// Package lintdsl provides helpers for implementing static analysis
|
||||
// checks. Dot-importing this package is encouraged.
|
||||
package lintdsl
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"flag"
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/constant"
|
||||
"go/printer"
|
||||
"go/token"
|
||||
"go/types"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/tools/go/analysis"
|
||||
"honnef.co/go/tools/facts"
|
||||
"honnef.co/go/tools/lint"
|
||||
"honnef.co/go/tools/ssa"
|
||||
)
|
||||
|
||||
// packager is implemented by values that can report the SSA package
// they belong to.
type packager interface {
	Package() *ssa.Package
}

// CallName returns the qualified name of the function or builtin
// called by call, or "" for method invocations and callees that are
// not static functions.
func CallName(call *ssa.CallCommon) string {
	if call.IsInvoke() {
		return ""
	}
	switch v := call.Value.(type) {
	case *ssa.Function:
		fn, ok := v.Object().(*types.Func)
		if !ok {
			return ""
		}
		return lint.FuncName(fn)
	case *ssa.Builtin:
		return v.Name()
	}
	return ""
}

// IsCallTo reports whether call calls the function named name.
func IsCallTo(call *ssa.CallCommon, name string) bool { return CallName(call) == name }

// IsType reports whether T's string representation equals name.
func IsType(T types.Type, name string) bool { return types.TypeString(T, nil) == name }

// FilterDebug returns instr with all *ssa.DebugRef instructions
// removed.
func FilterDebug(instr []ssa.Instruction) []ssa.Instruction {
	var out []ssa.Instruction
	for _, ins := range instr {
		if _, ok := ins.(*ssa.DebugRef); !ok {
			out = append(out, ins)
		}
	}
	return out
}

// IsExample reports whether fn looks like a testable example: a
// function named Example* that is defined in a _test.go file.
func IsExample(fn *ssa.Function) bool {
	if !strings.HasPrefix(fn.Name(), "Example") {
		return false
	}
	f := fn.Prog.Fset.File(fn.Pos())
	if f == nil {
		return false
	}
	return strings.HasSuffix(f.Name(), "_test.go")
}
|
||||
|
||||
// IsPointerLike reports whether T's underlying type is one whose
// values reference shared storage: interfaces, channels, maps,
// function signatures, pointers, and unsafe pointers.
func IsPointerLike(T types.Type) bool {
	switch T := T.Underlying().(type) {
	case *types.Interface, *types.Chan, *types.Map, *types.Signature, *types.Pointer:
		return true
	case *types.Basic:
		return T.Kind() == types.UnsafePointer
	}
	return false
}

// IsIdent reports whether expr is an identifier with the given name.
func IsIdent(expr ast.Expr, ident string) bool {
	id, ok := expr.(*ast.Ident)
	return ok && id.Name == ident
}

// isBlank returns whether id is the blank identifier "_".
// If id == nil, the answer is false.
func IsBlank(id ast.Expr) bool {
	ident, _ := id.(*ast.Ident)
	return ident != nil && ident.Name == "_"
}

// IsIntLiteral reports whether expr is an integer literal whose
// source text is exactly literal.
func IsIntLiteral(expr ast.Expr, literal string) bool {
	lit, ok := expr.(*ast.BasicLit)
	return ok && lit.Kind == token.INT && lit.Value == literal
}

// Deprecated: use IsIntLiteral instead
func IsZero(expr ast.Expr) bool {
	return IsIntLiteral(expr, "0")
}

// IsOfType reports whether expr's type has the string representation
// name.
func IsOfType(pass *analysis.Pass, expr ast.Expr, name string) bool {
	return IsType(pass.TypesInfo.TypeOf(expr), name)
}

// IsInTest reports whether node is positioned inside a _test.go file.
func IsInTest(pass *analysis.Pass, node lint.Positioner) bool {
	// FIXME(dh): this doesn't work for global variables with
	// initializers
	f := pass.Fset.File(node.Pos())
	return f != nil && strings.HasSuffix(f.Name(), "_test.go")
}

// IsInMain reports whether node (or, failing that, the package under
// analysis) belongs to a package named "main".
func IsInMain(pass *analysis.Pass, node lint.Positioner) bool {
	if node, ok := node.(packager); ok {
		return node.Package().Pkg.Name() == "main"
	}
	return pass.Pkg.Name() == "main"
}

// SelectorName returns a printable representation of expr:
// "(recvType).name" for selections, "pkgpath.Name" for qualified
// identifiers. It panics on selector expressions it cannot resolve.
func SelectorName(pass *analysis.Pass, expr *ast.SelectorExpr) string {
	info := pass.TypesInfo
	sel := info.Selections[expr]
	if sel == nil {
		if x, ok := expr.X.(*ast.Ident); ok {
			pkg, ok := info.ObjectOf(x).(*types.PkgName)
			if !ok {
				// This shouldn't happen
				return fmt.Sprintf("%s.%s", x.Name, expr.Sel.Name)
			}
			return fmt.Sprintf("%s.%s", pkg.Imported().Path(), expr.Sel.Name)
		}
		panic(fmt.Sprintf("unsupported selector: %v", expr))
	}
	return fmt.Sprintf("(%s).%s", sel.Recv(), sel.Obj().Name())
}

// IsNil reports whether expr is the predeclared nil.
func IsNil(pass *analysis.Pass, expr ast.Expr) bool {
	return pass.TypesInfo.Types[expr].IsNil()
}

// BoolConst returns the value of the boolean constant that expr
// refers to. It panics if expr is not an identifier bound to a
// constant; use IsBoolConst to check first.
func BoolConst(pass *analysis.Pass, expr ast.Expr) bool {
	val := pass.TypesInfo.ObjectOf(expr.(*ast.Ident)).(*types.Const).Val()
	return constant.BoolVal(val)
}
|
||||
|
||||
// IsBoolConst reports whether expr is an identifier bound to a
// constant of type bool or untyped bool.
func IsBoolConst(pass *analysis.Pass, expr ast.Expr) bool {
	// We explicitly don't support typed bools because more often than
	// not, custom bool types are used as binary enums and the
	// explicit comparison is desired.

	ident, ok := expr.(*ast.Ident)
	if !ok {
		return false
	}
	obj := pass.TypesInfo.ObjectOf(ident)
	c, ok := obj.(*types.Const)
	if !ok {
		return false
	}
	basic, ok := c.Type().(*types.Basic)
	if !ok {
		return false
	}
	if basic.Kind() != types.UntypedBool && basic.Kind() != types.Bool {
		return false
	}
	return true
}

// ExprToInt returns expr's constant integer value and true, or
// (0, false) if expr is not an integer constant.
func ExprToInt(pass *analysis.Pass, expr ast.Expr) (int64, bool) {
	tv := pass.TypesInfo.Types[expr]
	if tv.Value == nil {
		return 0, false
	}
	if tv.Value.Kind() != constant.Int {
		return 0, false
	}
	return constant.Int64Val(tv.Value)
}

// ExprToString returns expr's constant string value and true, or
// ("", false) if expr is not a string constant.
func ExprToString(pass *analysis.Pass, expr ast.Expr) (string, bool) {
	val := pass.TypesInfo.Types[expr].Value
	if val == nil {
		return "", false
	}
	if val.Kind() != constant.String {
		return "", false
	}
	return constant.StringVal(val), true
}
|
||||
|
||||
// Dereference returns a pointer's element type; otherwise it returns
|
||||
// T.
|
||||
func Dereference(T types.Type) types.Type {
|
||||
if p, ok := T.Underlying().(*types.Pointer); ok {
|
||||
return p.Elem()
|
||||
}
|
||||
return T
|
||||
}
|
||||
|
||||
// DereferenceR returns a pointer's element type; otherwise it returns
|
||||
// T. If the element type is itself a pointer, DereferenceR will be
|
||||
// applied recursively.
|
||||
func DereferenceR(T types.Type) types.Type {
|
||||
if p, ok := T.Underlying().(*types.Pointer); ok {
|
||||
return DereferenceR(p.Elem())
|
||||
}
|
||||
return T
|
||||
}
|
||||
|
||||
// IsGoVersion reports whether the targeted Go version (read from the
// analyzer's "go" flag) is at least 1.minor.
func IsGoVersion(pass *analysis.Pass, minor int) bool {
	version := pass.Analyzer.Flags.Lookup("go").Value.(flag.Getter).Get().(int)
	return version >= minor
}

// CallNameAST returns the qualified name of the function or builtin
// called by call, or "" if the callee cannot be resolved to one.
func CallNameAST(pass *analysis.Pass, call *ast.CallExpr) string {
	switch fun := call.Fun.(type) {
	case *ast.SelectorExpr:
		fn, ok := pass.TypesInfo.ObjectOf(fun.Sel).(*types.Func)
		if !ok {
			return ""
		}
		return lint.FuncName(fn)
	case *ast.Ident:
		obj := pass.TypesInfo.ObjectOf(fun)
		switch obj := obj.(type) {
		case *types.Func:
			return lint.FuncName(obj)
		case *types.Builtin:
			return obj.Name()
		default:
			return ""
		}
	default:
		return ""
	}
}

// IsCallToAST reports whether node is a call to the function named
// name.
func IsCallToAST(pass *analysis.Pass, node ast.Node, name string) bool {
	call, ok := node.(*ast.CallExpr)
	if !ok {
		return false
	}
	return CallNameAST(pass, call) == name
}

// IsCallToAnyAST reports whether node is a call to any of the named
// functions.
func IsCallToAnyAST(pass *analysis.Pass, node ast.Node, names ...string) bool {
	for _, name := range names {
		if IsCallToAST(pass, node, name) {
			return true
		}
	}
	return false
}
|
||||
|
||||
// Render pretty-prints x (an AST node or related value) as Go source.
// It panics if printing fails.
func Render(pass *analysis.Pass, x interface{}) string {
	var buf bytes.Buffer
	if err := printer.Fprint(&buf, pass.Fset, x); err != nil {
		panic(err)
	}
	return buf.String()
}

// RenderArgs renders a list of argument expressions as a
// comma-separated string.
func RenderArgs(pass *analysis.Pass, args []ast.Expr) string {
	var ss []string
	for _, arg := range args {
		ss = append(ss, Render(pass, arg))
	}
	return strings.Join(ss, ", ")
}

// Preamble returns the text of all comments that precede f's package
// clause (and its doc comment, if any), joined by newlines.
func Preamble(f *ast.File) string {
	cutoff := f.Package
	if f.Doc != nil {
		cutoff = f.Doc.Pos()
	}
	var out []string
	for _, cmt := range f.Comments {
		if cmt.Pos() >= cutoff {
			break
		}
		out = append(out, cmt.Text())
	}
	return strings.Join(out, "\n")
}

// Inspect is like ast.Inspect but treats a nil node as a no-op.
func Inspect(node ast.Node, fn func(node ast.Node) bool) {
	if node == nil {
		return
	}
	ast.Inspect(node, fn)
}
|
||||
|
||||
func GroupSpecs(fset *token.FileSet, specs []ast.Spec) [][]ast.Spec {
|
||||
if len(specs) == 0 {
|
||||
return nil
|
||||
}
|
||||
groups := make([][]ast.Spec, 1)
|
||||
groups[0] = append(groups[0], specs[0])
|
||||
|
||||
for _, spec := range specs[1:] {
|
||||
g := groups[len(groups)-1]
|
||||
if fset.PositionFor(spec.Pos(), false).Line-1 !=
|
||||
fset.PositionFor(g[len(g)-1].End(), false).Line {
|
||||
|
||||
groups = append(groups, nil)
|
||||
}
|
||||
|
||||
groups[len(groups)-1] = append(groups[len(groups)-1], spec)
|
||||
}
|
||||
|
||||
return groups
|
||||
}
|
||||
|
||||
func IsObject(obj types.Object, name string) bool {
|
||||
var path string
|
||||
if pkg := obj.Pkg(); pkg != nil {
|
||||
path = pkg.Path() + "."
|
||||
}
|
||||
return path+obj.Name() == name
|
||||
}
|
||||
|
||||
// Field describes a single (possibly promoted) struct field produced
// by FlattenFields.
type Field struct {
	Var  *types.Var // the field object itself
	Tag  string     // the field's struct tag, verbatim
	Path []int      // field indices from the root struct down to this field
}
|
||||
|
||||
// FlattenFields recursively flattens T and embedded structs,
// returning a list of fields. If multiple fields with the same name
// exist, all will be returned. The actual work happens in
// flattenFields; this wrapper starts with an empty index path and a
// lazily-allocated cycle guard.
func FlattenFields(T *types.Struct) []Field {
	return flattenFields(T, nil, nil)
}
|
||||
|
||||
// flattenFields implements FlattenFields. path is the field-index
// prefix leading to T from the root struct; seen guards against
// cycles through recursively embedded types and is allocated lazily
// on the outermost call.
func flattenFields(T *types.Struct, path []int, seen map[types.Type]bool) []Field {
	if seen == nil {
		seen = map[types.Type]bool{}
	}
	if seen[T] {
		// Already visited: recursion through an embedded type.
		return nil
	}
	seen[T] = true
	var out []Field
	for i := 0; i < T.NumFields(); i++ {
		field := T.Field(i)
		tag := T.Tag(i)
		// The three-index slice caps the shared prefix so appends for
		// sibling fields cannot clobber each other's backing array.
		np := append(path[:len(path):len(path)], i)
		if field.Anonymous() {
			// Embedded struct (possibly behind a pointer): promote its
			// fields with the extended index path.
			if s, ok := Dereference(field.Type()).Underlying().(*types.Struct); ok {
				out = append(out, flattenFields(s, np, seen)...)
			}
		} else {
			out = append(out, Field{field, tag, np})
		}
	}
	return out
}
|
||||
|
||||
// File returns the *ast.File containing node, using the TokenFile
// analyzer's token.File -> ast.File mapping.
func File(pass *analysis.Pass, node lint.Positioner) *ast.File {
	// NOTE(review): the result of this PositionFor call is discarded
	// and PositionFor has no visible side effect here — this looks
	// like dead code (or a leftover from debugging); confirm before
	// removing.
	pass.Fset.PositionFor(node.Pos(), true)
	m := pass.ResultOf[facts.TokenFile].(map[*token.File]*ast.File)
	return m[pass.Fset.File(node.Pos())]
}
|
||||
|
||||
// IsGenerated reports whether pos is in a generated file, It ignores
|
||||
// //line directives.
|
||||
func IsGenerated(pass *analysis.Pass, pos token.Pos) bool {
|
||||
_, ok := Generator(pass, pos)
|
||||
return ok
|
||||
}
|
||||
|
||||
// Generator returns the generator that generated the file containing
|
||||
// pos. It ignores //line directives.
|
||||
func Generator(pass *analysis.Pass, pos token.Pos) (facts.Generator, bool) {
|
||||
file := pass.Fset.PositionFor(pos, false).Filename
|
||||
m := pass.ResultOf[facts.Generated].(map[string]facts.Generator)
|
||||
g, ok := m[file]
|
||||
return g, ok
|
||||
}
|
||||
|
||||
func ReportfFG(pass *analysis.Pass, pos token.Pos, f string, args ...interface{}) {
|
||||
file := lint.DisplayPosition(pass.Fset, pos).Filename
|
||||
m := pass.ResultOf[facts.Generated].(map[string]facts.Generator)
|
||||
if _, ok := m[file]; ok {
|
||||
return
|
||||
}
|
||||
pass.Reportf(pos, f, args...)
|
||||
}
|
||||
|
||||
func ReportNodef(pass *analysis.Pass, node ast.Node, format string, args ...interface{}) {
|
||||
msg := fmt.Sprintf(format, args...)
|
||||
pass.Report(analysis.Diagnostic{Pos: node.Pos(), End: node.End(), Message: msg})
|
||||
}
|
||||
|
||||
func ReportNodefFG(pass *analysis.Pass, node ast.Node, format string, args ...interface{}) {
|
||||
file := lint.DisplayPosition(pass.Fset, node.Pos()).Filename
|
||||
m := pass.ResultOf[facts.Generated].(map[string]facts.Generator)
|
||||
if _, ok := m[file]; ok {
|
||||
return
|
||||
}
|
||||
ReportNodef(pass, node, format, args...)
|
||||
}
|
135
vendor/honnef.co/go/tools/lint/lintutil/format/format.go
vendored
Normal file
135
vendor/honnef.co/go/tools/lint/lintutil/format/format.go
vendored
Normal file
@ -0,0 +1,135 @@
|
||||
// Package format provides formatters for linter problems.
|
||||
package format
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"go/token"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"text/tabwriter"
|
||||
|
||||
"honnef.co/go/tools/lint"
|
||||
)
|
||||
|
||||
func shortPath(path string) string {
|
||||
cwd, err := os.Getwd()
|
||||
if err != nil {
|
||||
return path
|
||||
}
|
||||
if rel, err := filepath.Rel(cwd, path); err == nil && len(rel) < len(path) {
|
||||
return rel
|
||||
}
|
||||
return path
|
||||
}
|
||||
|
||||
func relativePositionString(pos token.Position) string {
|
||||
s := shortPath(pos.Filename)
|
||||
if pos.IsValid() {
|
||||
if s != "" {
|
||||
s += ":"
|
||||
}
|
||||
s += fmt.Sprintf("%d:%d", pos.Line, pos.Column)
|
||||
}
|
||||
if s == "" {
|
||||
s = "-"
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// Statter is implemented by formatters that can print a final summary
// of how many problems were found.
type Statter interface {
	Stats(total, errors, warnings int)
}
|
||||
|
||||
// Formatter writes a single linter problem to some output.
type Formatter interface {
	Format(p lint.Problem)
}
|
||||
|
||||
// Text emits problems as plain "file:line:col: message" lines.
type Text struct {
	W io.Writer // destination for formatted problems
}
|
||||
|
||||
// Format writes p as one human-readable line to o.W, with the
// position shortened relative to the working directory.
func (o Text) Format(p lint.Problem) {
	fmt.Fprintf(o.W, "%v: %s\n", relativePositionString(p.Pos), p.String())
}
|
||||
|
||||
// JSON emits problems as a stream of JSON objects, one per problem.
type JSON struct {
	W io.Writer // destination for encoded problems
}
|
||||
|
||||
func severity(s lint.Severity) string {
|
||||
switch s {
|
||||
case lint.Error:
|
||||
return "error"
|
||||
case lint.Warning:
|
||||
return "warning"
|
||||
case lint.Ignored:
|
||||
return "ignored"
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (o JSON) Format(p lint.Problem) {
|
||||
type location struct {
|
||||
File string `json:"file"`
|
||||
Line int `json:"line"`
|
||||
Column int `json:"column"`
|
||||
}
|
||||
jp := struct {
|
||||
Code string `json:"code"`
|
||||
Severity string `json:"severity,omitempty"`
|
||||
Location location `json:"location"`
|
||||
End location `json:"end"`
|
||||
Message string `json:"message"`
|
||||
}{
|
||||
Code: p.Check,
|
||||
Severity: severity(p.Severity),
|
||||
Location: location{
|
||||
File: p.Pos.Filename,
|
||||
Line: p.Pos.Line,
|
||||
Column: p.Pos.Column,
|
||||
},
|
||||
End: location{
|
||||
File: p.End.Filename,
|
||||
Line: p.End.Line,
|
||||
Column: p.End.Column,
|
||||
},
|
||||
Message: p.Message,
|
||||
}
|
||||
_ = json.NewEncoder(o.W).Encode(jp)
|
||||
}
|
||||
|
||||
// Stylish emits problems grouped by file in a columnar layout,
// similar to ESLint's "stylish" reporter.
type Stylish struct {
	W io.Writer

	// prevFile is the file of the previously printed problem, so
	// consecutive problems in the same file share one header.
	prevFile string
	// tw aligns the (line, col) / check / message columns for the
	// current file; recreated whenever the file changes.
	tw *tabwriter.Writer
}
|
||||
|
||||
// Format writes p under a per-file header. When the file changes, the
// previous file's columns are flushed and a new header plus tabwriter
// is started. Problems without a filename are grouped under "-".
func (o *Stylish) Format(p lint.Problem) {
	pos := p.Pos
	if pos.Filename == "" {
		pos.Filename = "-"
	}

	if pos.Filename != o.prevFile {
		if o.prevFile != "" {
			// Finish the previous file's column block before starting
			// a new header.
			o.tw.Flush()
			fmt.Fprintln(o.W)
		}
		fmt.Fprintln(o.W, pos.Filename)
		o.prevFile = pos.Filename
		o.tw = tabwriter.NewWriter(o.W, 0, 4, 2, ' ', 0)
	}
	fmt.Fprintf(o.tw, " (%d, %d)\t%s\t%s\n", pos.Line, pos.Column, p.Check, p.Message)
}
|
||||
|
||||
// Stats flushes any pending columnar output and prints a summary line
// with the total problem count broken down into errors and warnings.
// o.tw is nil when no problem was ever formatted.
func (o *Stylish) Stats(total, errors, warnings int) {
	if o.tw != nil {
		o.tw.Flush()
		fmt.Fprintln(o.W)
	}
	fmt.Fprintf(o.W, " ✖ %d problems (%d errors, %d warnings)\n",
		total, errors, warnings)
}
|
7
vendor/honnef.co/go/tools/lint/lintutil/stats.go
vendored
Normal file
7
vendor/honnef.co/go/tools/lint/lintutil/stats.go
vendored
Normal file
@ -0,0 +1,7 @@
|
||||
// +build !aix,!android,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris
|
||||
|
||||
package lintutil
|
||||
|
||||
import "os"
|
||||
|
||||
// infoSignals is the set of signals that trigger printing runtime
// statistics; it is empty on platforms without a suitable signal.
var infoSignals = []os.Signal{}
|
10
vendor/honnef.co/go/tools/lint/lintutil/stats_bsd.go
vendored
Normal file
10
vendor/honnef.co/go/tools/lint/lintutil/stats_bsd.go
vendored
Normal file
@ -0,0 +1,10 @@
|
||||
// +build darwin dragonfly freebsd netbsd openbsd
|
||||
|
||||
package lintutil
|
||||
|
||||
import (
|
||||
"os"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// infoSignals lists the signals that trigger a progress/stats dump;
// BSD-derived systems provide SIGINFO (typically sent with Ctrl-T).
var infoSignals = []os.Signal{syscall.SIGINFO}
|
10
vendor/honnef.co/go/tools/lint/lintutil/stats_posix.go
vendored
Normal file
10
vendor/honnef.co/go/tools/lint/lintutil/stats_posix.go
vendored
Normal file
@ -0,0 +1,10 @@
|
||||
// +build aix android linux solaris
|
||||
|
||||
package lintutil
|
||||
|
||||
import (
|
||||
"os"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// infoSignals lists the signals that trigger a progress/stats dump;
// on these platforms SIGUSR1 stands in for BSD's SIGINFO.
var infoSignals = []os.Signal{syscall.SIGUSR1}
|
392
vendor/honnef.co/go/tools/lint/lintutil/util.go
vendored
Normal file
392
vendor/honnef.co/go/tools/lint/lintutil/util.go
vendored
Normal file
@ -0,0 +1,392 @@
|
||||
// Copyright (c) 2013 The Go Authors. All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file or at
|
||||
// https://developers.google.com/open-source/licenses/bsd.
|
||||
|
||||
// Package lintutil provides helpers for writing linter command lines.
|
||||
package lintutil // import "honnef.co/go/tools/lint/lintutil"
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"go/build"
|
||||
"go/token"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"os/signal"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"runtime/pprof"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
|
||||
"honnef.co/go/tools/config"
|
||||
"honnef.co/go/tools/internal/cache"
|
||||
"honnef.co/go/tools/lint"
|
||||
"honnef.co/go/tools/lint/lintutil/format"
|
||||
"honnef.co/go/tools/version"
|
||||
|
||||
"golang.org/x/tools/go/analysis"
|
||||
"golang.org/x/tools/go/buildutil"
|
||||
"golang.org/x/tools/go/packages"
|
||||
)
|
||||
|
||||
func NewVersionFlag() flag.Getter {
|
||||
tags := build.Default.ReleaseTags
|
||||
v := tags[len(tags)-1][2:]
|
||||
version := new(VersionFlag)
|
||||
if err := version.Set(v); err != nil {
|
||||
panic(fmt.Sprintf("internal error: %s", err))
|
||||
}
|
||||
return version
|
||||
}
|
||||
|
||||
// VersionFlag is a flag.Getter holding a Go minor version; it parses
// and prints versions in the "1.x" form.
type VersionFlag int

// String renders the flag in the canonical "1.x" form.
func (v *VersionFlag) String() string {
	return fmt.Sprintf("1.%d", *v)
}

// Set parses s, which must look like "1.x", and stores the minor
// version x. Any other shape yields an error.
func (v *VersionFlag) Set(s string) error {
	if len(s) < 3 || s[0] != '1' || s[1] != '.' {
		return errors.New("invalid Go version")
	}
	minor, err := strconv.Atoi(s[2:])
	*v = VersionFlag(minor)
	return err
}

// Get implements flag.Getter, returning the minor version as an int.
func (v *VersionFlag) Get() interface{} {
	return int(*v)
}
|
||||
|
||||
func usage(name string, flags *flag.FlagSet) func() {
|
||||
return func() {
|
||||
fmt.Fprintf(os.Stderr, "Usage of %s:\n", name)
|
||||
fmt.Fprintf(os.Stderr, "\t%s [flags] # runs on package in current directory\n", name)
|
||||
fmt.Fprintf(os.Stderr, "\t%s [flags] packages\n", name)
|
||||
fmt.Fprintf(os.Stderr, "\t%s [flags] directory\n", name)
|
||||
fmt.Fprintf(os.Stderr, "\t%s [flags] files... # must be a single package\n", name)
|
||||
fmt.Fprintf(os.Stderr, "Flags:\n")
|
||||
flags.PrintDefaults()
|
||||
}
|
||||
}
|
||||
|
||||
// list is a flag.Value holding a comma-separated list of strings.
type list []string

// String renders the list as a quoted, comma-joined string.
func (list *list) String() string {
	joined := strings.Join(*list, ",")
	return `"` + joined + `"`
}

// Set replaces the list with the comma-separated elements of s; an
// empty s resets the list to nil. It never fails.
func (list *list) Set(s string) error {
	if len(s) == 0 {
		*list = nil
	} else {
		*list = strings.Split(s, ",")
	}
	return nil
}
|
||||
|
||||
// FlagSet builds the standard command-line interface shared by the
// staticcheck family of tools: output/behavior flags, debug flags,
// the check selection lists, and a -go version flag defaulting to the
// running toolchain's version.
func FlagSet(name string) *flag.FlagSet {
	flags := flag.NewFlagSet("", flag.ExitOnError)
	flags.Usage = usage(name, flags)
	flags.String("tags", "", "List of `build tags`")
	flags.Bool("tests", true, "Include tests")
	flags.Bool("version", false, "Print version and exit")
	flags.Bool("show-ignored", false, "Don't filter ignored problems")
	flags.String("f", "text", "Output `format` (valid choices are 'stylish', 'text' and 'json')")
	flags.String("explain", "", "Print description of `check`")

	// Debug-only flags, namespaced under "debug.".
	flags.String("debug.cpuprofile", "", "Write CPU profile to `file`")
	flags.String("debug.memprofile", "", "Write memory profile to `file`")
	flags.Bool("debug.version", false, "Print detailed version information about this program")
	flags.Bool("debug.no-compile-errors", false, "Don't print compile errors")

	// Check selection: "inherit" defers to the per-project config;
	// "all" makes every enabled check fatal by default.
	checks := list{"inherit"}
	fail := list{"all"}
	flags.Var(&checks, "checks", "Comma-separated list of `checks` to enable.")
	flags.Var(&fail, "fail", "Comma-separated list of `checks` that can cause a non-zero exit status.")

	// Default the -go flag to the toolchain's own version, taken from
	// the last release tag (e.g. "go1.13" -> "13").
	tags := build.Default.ReleaseTags
	v := tags[len(tags)-1][2:]
	version := new(VersionFlag)
	if err := version.Set(v); err != nil {
		panic(fmt.Sprintf("internal error: %s", err))
	}

	flags.Var(version, "go", "Target Go `version` in the format '1.x'")
	return flags
}
|
||||
|
||||
func findCheck(cs []*analysis.Analyzer, check string) (*analysis.Analyzer, bool) {
|
||||
for _, c := range cs {
|
||||
if c.Name == check {
|
||||
return c, true
|
||||
}
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// ProcessFlagSet is the shared main loop of the staticcheck tools: it
// reads the flags registered by FlagSet from fs, handles the
// short-circuit modes (-version, -debug.version, -explain), runs the
// given checkers over fs.Args(), prints the problems in the selected
// format, and exits the process with 0 (clean), 1 (problems or
// failure) or 2 (bad output format). It never returns.
func ProcessFlagSet(cs []*analysis.Analyzer, cums []lint.CumulativeChecker, fs *flag.FlagSet) {
	tags := fs.Lookup("tags").Value.(flag.Getter).Get().(string)
	tests := fs.Lookup("tests").Value.(flag.Getter).Get().(bool)
	goVersion := fs.Lookup("go").Value.(flag.Getter).Get().(int)
	formatter := fs.Lookup("f").Value.(flag.Getter).Get().(string)
	printVersion := fs.Lookup("version").Value.(flag.Getter).Get().(bool)
	showIgnored := fs.Lookup("show-ignored").Value.(flag.Getter).Get().(bool)
	explain := fs.Lookup("explain").Value.(flag.Getter).Get().(string)

	cpuProfile := fs.Lookup("debug.cpuprofile").Value.(flag.Getter).Get().(string)
	memProfile := fs.Lookup("debug.memprofile").Value.(flag.Getter).Get().(string)
	debugVersion := fs.Lookup("debug.version").Value.(flag.Getter).Get().(bool)
	debugNoCompile := fs.Lookup("debug.no-compile-errors").Value.(flag.Getter).Get().(bool)

	cfg := config.Config{}
	cfg.Checks = *fs.Lookup("checks").Value.(*list)

	// exit finalizes any active profiles before terminating; every
	// exit path below must go through it so profiles are written.
	exit := func(code int) {
		if cpuProfile != "" {
			pprof.StopCPUProfile()
		}
		if memProfile != "" {
			f, err := os.Create(memProfile)
			if err != nil {
				panic(err)
			}
			// GC first so the heap profile reflects live data only.
			runtime.GC()
			pprof.WriteHeapProfile(f)
		}
		os.Exit(code)
	}
	if cpuProfile != "" {
		f, err := os.Create(cpuProfile)
		if err != nil {
			log.Fatal(err)
		}
		pprof.StartCPUProfile(f)
	}

	if debugVersion {
		version.Verbose()
		exit(0)
	}

	if printVersion {
		version.Print()
		exit(0)
	}

	// Validate that the tags argument is well-formed. go/packages
	// doesn't detect malformed build flags and returns unhelpful
	// errors.
	tf := buildutil.TagsFlag{}
	if err := tf.Set(tags); err != nil {
		fmt.Fprintln(os.Stderr, fmt.Errorf("invalid value %q for flag -tags: %s", tags, err))
		exit(1)
	}

	// -explain: print the named check's documentation and stop.
	if explain != "" {
		var haystack []*analysis.Analyzer
		haystack = append(haystack, cs...)
		for _, cum := range cums {
			haystack = append(haystack, cum.Analyzer())
		}
		check, ok := findCheck(haystack, explain)
		if !ok {
			fmt.Fprintln(os.Stderr, "Couldn't find check", explain)
			exit(1)
		}
		if check.Doc == "" {
			fmt.Fprintln(os.Stderr, explain, "has no documentation")
			exit(1)
		}
		fmt.Println(check.Doc)
		exit(0)
	}

	ps, err := Lint(cs, cums, fs.Args(), &Options{
		Tags:      tags,
		LintTests: tests,
		GoVersion: goVersion,
		Config:    cfg,
	})
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		exit(1)
	}

	var f format.Formatter
	switch formatter {
	case "text":
		f = format.Text{W: os.Stdout}
	case "stylish":
		f = &format.Stylish{W: os.Stdout}
	case "json":
		f = format.JSON{W: os.Stdout}
	default:
		fmt.Fprintf(os.Stderr, "unsupported output format %q\n", formatter)
		exit(2)
	}

	var (
		total    int
		errors   int
		warnings int
	)

	// Checks listed in -fail (plus "compile") make the run exit
	// non-zero; everything else is downgraded to a warning.
	fail := *fs.Lookup("fail").Value.(*list)
	analyzers := make([]*analysis.Analyzer, len(cs), len(cs)+len(cums))
	copy(analyzers, cs)
	for _, cum := range cums {
		analyzers = append(analyzers, cum.Analyzer())
	}
	shouldExit := lint.FilterChecks(analyzers, fail)
	shouldExit["compile"] = true

	total = len(ps)
	for _, p := range ps {
		if p.Check == "compile" && debugNoCompile {
			continue
		}
		if p.Severity == lint.Ignored && !showIgnored {
			continue
		}
		if shouldExit[p.Check] {
			errors++
		} else {
			p.Severity = lint.Warning
			warnings++
		}
		f.Format(p)
	}
	if f, ok := f.(format.Statter); ok {
		f.Stats(total, errors, warnings)
	}
	if errors > 0 {
		exit(1)
	}
	exit(0)
}
|
||||
|
||||
// Options configures a Lint run.
type Options struct {
	Config config.Config // per-project configuration (checks to enable, etc.)

	Tags      string // build tags passed through to the package loader
	LintTests bool   // also load and lint test files
	GoVersion int    // targeted Go minor version (the x in "1.x")
}
|
||||
|
||||
func computeSalt() ([]byte, error) {
|
||||
if version.Version != "devel" {
|
||||
return []byte(version.Version), nil
|
||||
}
|
||||
p, err := os.Executable()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f, err := os.Open(p)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
h := sha256.New()
|
||||
if _, err := io.Copy(h, f); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return h.Sum(nil), nil
|
||||
}
|
||||
|
||||
// Lint runs the given checkers and cumulative checkers over the
// packages matched by paths and returns the problems found. It salts
// the fact cache with the tool's identity, configures the package
// loader from opt (nil means defaults), and — on platforms with a
// suitable signal — prints progress statistics when one of
// infoSignals is received.
func Lint(cs []*analysis.Analyzer, cums []lint.CumulativeChecker, paths []string, opt *Options) ([]lint.Problem, error) {
	salt, err := computeSalt()
	if err != nil {
		return nil, fmt.Errorf("could not compute salt for cache: %s", err)
	}
	cache.SetSalt(salt)

	if opt == nil {
		opt = &Options{}
	}

	l := &lint.Linter{
		Checkers:           cs,
		CumulativeCheckers: cums,
		GoVersion:          opt.GoVersion,
		Config:             opt.Config,
	}
	cfg := &packages.Config{}
	if opt.LintTests {
		cfg.Tests = true
	}
	if opt.Tags != "" {
		cfg.BuildFlags = append(cfg.BuildFlags, "-tags", opt.Tags)
	}

	printStats := func() {
		// Individual stats are read atomically, but overall there
		// is no synchronisation. For printing rough progress
		// information, this doesn't matter.
		switch atomic.LoadUint32(&l.Stats.State) {
		case lint.StateInitializing:
			fmt.Fprintln(os.Stderr, "Status: initializing")
		case lint.StateGraph:
			fmt.Fprintln(os.Stderr, "Status: loading package graph")
		case lint.StateProcessing:
			fmt.Fprintf(os.Stderr, "Packages: %d/%d initial, %d/%d total; Workers: %d/%d; Problems: %d\n",
				atomic.LoadUint32(&l.Stats.ProcessedInitialPackages),
				atomic.LoadUint32(&l.Stats.InitialPackages),
				atomic.LoadUint32(&l.Stats.ProcessedPackages),
				atomic.LoadUint32(&l.Stats.TotalPackages),
				atomic.LoadUint32(&l.Stats.ActiveWorkers),
				atomic.LoadUint32(&l.Stats.TotalWorkers),
				atomic.LoadUint32(&l.Stats.Problems),
			)
		case lint.StateCumulative:
			fmt.Fprintln(os.Stderr, "Status: processing cumulative checkers")
		}
	}
	if len(infoSignals) > 0 {
		ch := make(chan os.Signal, 1)
		signal.Notify(ch, infoSignals...)
		// Stop delivering signals when Lint returns; the goroutine
		// below then blocks forever on the (unclosed) channel but
		// holds no resources beyond itself.
		defer signal.Stop(ch)
		go func() {
			for range ch {
				printStats()
			}
		}()
	}

	return l.Lint(cfg, paths)
}
|
||||
|
||||
var posRe = regexp.MustCompile(`^(.+?):(\d+)(?::(\d+)?)?$`)
|
||||
|
||||
func parsePos(pos string) token.Position {
|
||||
if pos == "-" || pos == "" {
|
||||
return token.Position{}
|
||||
}
|
||||
parts := posRe.FindStringSubmatch(pos)
|
||||
if parts == nil {
|
||||
panic(fmt.Sprintf("internal error: malformed position %q", pos))
|
||||
}
|
||||
file := parts[1]
|
||||
line, _ := strconv.Atoi(parts[2])
|
||||
col, _ := strconv.Atoi(parts[3])
|
||||
return token.Position{
|
||||
Filename: file,
|
||||
Line: line,
|
||||
Column: col,
|
||||
}
|
||||
}
|
970
vendor/honnef.co/go/tools/lint/runner.go
vendored
Normal file
970
vendor/honnef.co/go/tools/lint/runner.go
vendored
Normal file
@ -0,0 +1,970 @@
|
||||
package lint
|
||||
|
||||
/*
|
||||
Parallelism
|
||||
|
||||
Runner implements parallel processing of packages by spawning one
|
||||
goroutine per package in the dependency graph, without any semaphores.
|
||||
Each goroutine initially waits on the completion of all of its
|
||||
dependencies, thus establishing correct order of processing. Once all
|
||||
dependencies finish processing, the goroutine will load the package
|
||||
from export data or source – this loading is guarded by a semaphore,
|
||||
sized according to the number of CPU cores. This way, we only have as
|
||||
many packages occupying memory and CPU resources as there are actual
|
||||
cores to process them.
|
||||
|
||||
This combination of unbounded goroutines but bounded package loading
|
||||
means that if we have many parallel, independent subgraphs, they will
|
||||
all execute in parallel, while not wasting resources for long linear
|
||||
chains or trying to process more subgraphs in parallel than the system
|
||||
can handle.
|
||||
|
||||
*/
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/gob"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/token"
|
||||
"go/types"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"golang.org/x/tools/go/analysis"
|
||||
"golang.org/x/tools/go/packages"
|
||||
"golang.org/x/tools/go/types/objectpath"
|
||||
"honnef.co/go/tools/config"
|
||||
"honnef.co/go/tools/facts"
|
||||
"honnef.co/go/tools/internal/cache"
|
||||
"honnef.co/go/tools/loader"
|
||||
)
|
||||
|
||||
// If enabled, abuse of the go/analysis API will lead to panics
// (e.g. importing or exporting facts from an analyzer that declared
// no fact types). Kept on to surface programming errors early.
const sanityCheck = true
|
||||
|
||||
// OPT(dh): for a dependency tree A->B->C->D, if we have cached data
|
||||
// for B, there should be no need to load C and D individually. Go's
|
||||
// export data for B contains all the data we need on types, and our
|
||||
// fact cache could store the union of B, C and D in B.
|
||||
//
|
||||
// This may change unused's behavior, however, as it may observe fewer
|
||||
// interfaces from transitive dependencies.
|
||||
|
||||
// Package wraps a loaded packages.Package with the runner's
// per-package bookkeeping: reference counting, analyzer results,
// facts and collected problems.
type Package struct {
	// dependents counts how many pending consumers (dependent
	// packages plus the package's own processing) still need this
	// package's data; managed atomically via decUse.
	dependents uint64

	*packages.Package
	Imports    []*Package
	initial    bool // part of the initial (user-requested) package set
	fromSource bool // loaded from source, not export data
	hash       string
	done       chan struct{}

	resultsMu sync.Mutex
	// results maps analyzer IDs to analyzer results
	results []*result

	cfg      *config.Config
	gen      map[string]facts.Generator // generated-file map, keyed by filename
	problems []Problem
	ignores  []Ignore
	errs     []error

	// these slices are indexed by analysis
	facts    []map[types.Object][]analysis.Fact
	pkgFacts [][]analysis.Fact

	// canClearTypes permits dropping type information once nobody
	// depends on this package anymore.
	canClearTypes bool
}
|
||||
|
||||
func (pkg *Package) decUse() {
|
||||
atomic.AddUint64(&pkg.dependents, ^uint64(0))
|
||||
if atomic.LoadUint64(&pkg.dependents) == 0 {
|
||||
// nobody depends on this package anymore
|
||||
if pkg.canClearTypes {
|
||||
pkg.Types = nil
|
||||
}
|
||||
pkg.facts = nil
|
||||
pkg.pkgFacts = nil
|
||||
|
||||
for _, imp := range pkg.Imports {
|
||||
imp.decUse()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// result holds the memoized outcome of running one analyzer on one
// package. ready is closed once v and err have been populated, so
// waiters can block on it.
type result struct {
	v     interface{}
	err   error
	ready chan struct{}
}
|
||||
|
||||
// Runner schedules analyzers across the package graph, one goroutine
// per package, caching facts and bounding concurrent package loads.
type Runner struct {
	ld    loader.Loader
	cache *cache.Cache // on-disk fact/result cache

	// analyzerIDs assigns each analyzer a dense index used to address
	// the per-package results/facts slices.
	analyzerIDs analyzerIDs

	// limits parallelism of loading packages
	loadSem chan struct{}

	goVersion int
	stats     *Stats
}
|
||||
|
||||
// analyzerIDs maps each analyzer to a dense integer ID so per-package
// data can live in slices instead of maps.
type analyzerIDs struct {
	m map[*analysis.Analyzer]int
}
|
||||
|
||||
func (ids analyzerIDs) get(a *analysis.Analyzer) int {
|
||||
id, ok := ids.m[a]
|
||||
if !ok {
|
||||
panic(fmt.Sprintf("no analyzer ID for %s", a.Name))
|
||||
}
|
||||
return id
|
||||
}
|
||||
|
||||
// Fact is the serialized form of an analysis fact as stored in the
// cache: the object path of the fact's subject ("" for package-level
// facts) plus the fact itself.
type Fact struct {
	Path string
	Fact analysis.Fact
}
|
||||
|
||||
// analysisAction is one unit of work: a single analyzer applied to a
// single package, carrying the fact plumbing the go/analysis API
// callbacks need.
type analysisAction struct {
	analyzer   *analysis.Analyzer
	analyzerID int // dense ID of analyzer, see analyzerIDs
	pkg        *Package
	// newPackageFacts collects package facts exported during this run
	// so they can be persisted afterwards.
	newPackageFacts []analysis.Fact
	problems        []Problem

	// pkgFacts merges the package facts of pkg and all of its
	// transitive dependencies, keyed by package.
	pkgFacts map[*types.Package][]analysis.Fact
}
|
||||
|
||||
// String identifies the action as "analyzer @ package" for debugging.
func (ac *analysisAction) String() string {
	return fmt.Sprintf("%s @ %s", ac.analyzer, ac.pkg)
}
|
||||
|
||||
func (ac *analysisAction) allObjectFacts() []analysis.ObjectFact {
|
||||
out := make([]analysis.ObjectFact, 0, len(ac.pkg.facts[ac.analyzerID]))
|
||||
for obj, facts := range ac.pkg.facts[ac.analyzerID] {
|
||||
for _, fact := range facts {
|
||||
out = append(out, analysis.ObjectFact{
|
||||
Object: obj,
|
||||
Fact: fact,
|
||||
})
|
||||
}
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func (ac *analysisAction) allPackageFacts() []analysis.PackageFact {
|
||||
out := make([]analysis.PackageFact, 0, len(ac.pkgFacts))
|
||||
for pkg, facts := range ac.pkgFacts {
|
||||
for _, fact := range facts {
|
||||
out = append(out, analysis.PackageFact{
|
||||
Package: pkg,
|
||||
Fact: fact,
|
||||
})
|
||||
}
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func (ac *analysisAction) importObjectFact(obj types.Object, fact analysis.Fact) bool {
|
||||
if sanityCheck && len(ac.analyzer.FactTypes) == 0 {
|
||||
panic("analysis doesn't export any facts")
|
||||
}
|
||||
for _, f := range ac.pkg.facts[ac.analyzerID][obj] {
|
||||
if reflect.TypeOf(f) == reflect.TypeOf(fact) {
|
||||
reflect.ValueOf(fact).Elem().Set(reflect.ValueOf(f).Elem())
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (ac *analysisAction) importPackageFact(pkg *types.Package, fact analysis.Fact) bool {
|
||||
if sanityCheck && len(ac.analyzer.FactTypes) == 0 {
|
||||
panic("analysis doesn't export any facts")
|
||||
}
|
||||
for _, f := range ac.pkgFacts[pkg] {
|
||||
if reflect.TypeOf(f) == reflect.TypeOf(fact) {
|
||||
reflect.ValueOf(fact).Elem().Set(reflect.ValueOf(f).Elem())
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// exportObjectFact records a new fact about obj for this analyzer.
// Analyzers that declare no fact types must not call it.
func (ac *analysisAction) exportObjectFact(obj types.Object, fact analysis.Fact) {
	if sanityCheck && len(ac.analyzer.FactTypes) == 0 {
		panic("analysis doesn't export any facts")
	}
	ac.pkg.facts[ac.analyzerID][obj] = append(ac.pkg.facts[ac.analyzerID][obj], fact)
}
|
||||
|
||||
// exportPackageFact records a new fact about the current package,
// both in the merged fact map (so it is immediately importable) and
// in newPackageFacts (so it is persisted later).
func (ac *analysisAction) exportPackageFact(fact analysis.Fact) {
	if sanityCheck && len(ac.analyzer.FactTypes) == 0 {
		panic("analysis doesn't export any facts")
	}
	ac.pkgFacts[ac.pkg.Types] = append(ac.pkgFacts[ac.pkg.Types], fact)
	ac.newPackageFacts = append(ac.newPackageFacts, fact)
}
|
||||
|
||||
// report converts a go/analysis diagnostic into a lint Problem
// (resolving positions through //line-aware display positions) and
// queues it on the action.
func (ac *analysisAction) report(pass *analysis.Pass, d analysis.Diagnostic) {
	p := Problem{
		Pos:     DisplayPosition(pass.Fset, d.Pos),
		End:     DisplayPosition(pass.Fset, d.End),
		Message: d.Message,
		Check:   pass.Analyzer.Name,
	}
	ac.problems = append(ac.problems, p)
}
|
||||
|
||||
// runAnalysis runs ac's analyzer on ac's package exactly once per
// (package, analyzer) pair: concurrent callers find the memoized
// result slot under resultsMu and block on its ready channel, while
// the first caller installs the slot, builds the analysis.Pass and
// performs the actual run.
func (r *Runner) runAnalysis(ac *analysisAction) (ret interface{}, err error) {
	ac.pkg.resultsMu.Lock()
	res := ac.pkg.results[r.analyzerIDs.get(ac.analyzer)]
	if res != nil {
		// Another goroutine owns (or finished) this run; wait for it.
		ac.pkg.resultsMu.Unlock()
		<-res.ready
		return res.v, res.err
	} else {
		res = &result{
			ready: make(chan struct{}),
		}
		ac.pkg.results[r.analyzerIDs.get(ac.analyzer)] = res
		ac.pkg.resultsMu.Unlock()

		// Publish the outcome (including panics unwound through this
		// defer's enclosing function returning) and wake waiters.
		defer func() {
			res.v = ret
			res.err = err
			close(res.ready)
		}()

		pass := new(analysis.Pass)
		*pass = analysis.Pass{
			Analyzer: ac.analyzer,
			Fset:     ac.pkg.Fset,
			Files:    ac.pkg.Syntax,
			// type information may be nil or may be populated. if it is
			// nil, it will get populated later.
			Pkg:               ac.pkg.Types,
			TypesInfo:         ac.pkg.TypesInfo,
			TypesSizes:        ac.pkg.TypesSizes,
			ResultOf:          map[*analysis.Analyzer]interface{}{},
			ImportObjectFact:  ac.importObjectFact,
			ImportPackageFact: ac.importPackageFact,
			ExportObjectFact:  ac.exportObjectFact,
			ExportPackageFact: ac.exportPackageFact,
			Report: func(d analysis.Diagnostic) {
				ac.report(pass, d)
			},
			AllObjectFacts:  ac.allObjectFacts,
			AllPackageFacts: ac.allPackageFacts,
		}

		if !ac.pkg.initial {
			// Don't report problems in dependencies
			pass.Report = func(analysis.Diagnostic) {}
		}
		return r.runAnalysisUser(pass, ac)
	}
}
|
||||
|
||||
// loadCachedFacts fetches the facts a previously produced for pkg
// from the on-disk cache. The bool result reports a usable outcome:
// true with nil facts for analyzers that export no facts, false on
// any cache miss or decode failure (in which case the package must be
// analyzed from source).
func (r *Runner) loadCachedFacts(a *analysis.Analyzer, pkg *Package) ([]Fact, bool) {
	if len(a.FactTypes) == 0 {
		return nil, true
	}

	var facts []Fact
	// Look in the cache for facts
	aID, err := passActionID(pkg, a)
	if err != nil {
		return nil, false
	}
	aID = cache.Subkey(aID, "facts")
	b, _, err := r.cache.GetBytes(aID)
	if err != nil {
		// No cached facts, analyse this package like a user-provided one, but ignore diagnostics
		return nil, false
	}

	if err := gob.NewDecoder(bytes.NewReader(b)).Decode(&facts); err != nil {
		// Cached facts are broken, analyse this package like a user-provided one, but ignore diagnostics
		return nil, false
	}
	return facts, true
}
|
||||
|
||||
type dependencyError struct {
|
||||
dep string
|
||||
err error
|
||||
}
|
||||
|
||||
func (err dependencyError) nested() dependencyError {
|
||||
if o, ok := err.err.(dependencyError); ok {
|
||||
return o.nested()
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (err dependencyError) Error() string {
|
||||
if o, ok := err.err.(dependencyError); ok {
|
||||
return o.Error()
|
||||
}
|
||||
return fmt.Sprintf("error running dependency %s: %s", err.dep, err.err)
|
||||
}
|
||||
|
||||
// makeAnalysisAction prepares an analysisAction for running a on pkg.
// For fact-producing analyzers it additionally merges the package
// facts of pkg and all of its transitive imports into ac.pkgFacts so
// the go/analysis fact callbacks can serve them.
func (r *Runner) makeAnalysisAction(a *analysis.Analyzer, pkg *Package) *analysisAction {
	aid := r.analyzerIDs.get(a)
	ac := &analysisAction{
		analyzer:   a,
		analyzerID: aid,
		pkg:        pkg,
	}

	if len(a.FactTypes) == 0 {
		// No facts involved, nothing further to prepare.
		return ac
	}

	// Merge all package facts of dependencies
	ac.pkgFacts = map[*types.Package][]analysis.Fact{}
	seen := map[*Package]struct{}{}
	var dfs func(*Package)
	dfs = func(pkg *Package) {
		if _, ok := seen[pkg]; ok {
			return
		}
		seen[pkg] = struct{}{}
		s := pkg.pkgFacts[aid]
		// Full-capacity slice: later appends to the merged view must
		// not write into the package's own fact slice.
		ac.pkgFacts[pkg.Types] = s[0:len(s):len(s)]
		for _, imp := range pkg.Imports {
			dfs(imp)
		}
	}
	dfs(pkg)

	return ac
}
|
||||
|
||||
// injectedAnalyses are analyses that we always want to run, even if
// they're not being run explicitly or as dependencies. These are
// necessary for the inner workings of the runner (generated-file
// detection and per-package configuration).
var injectedAnalyses = []*analysis.Analyzer{facts.Generated, config.Analyzer}
|
||||
|
||||
// runAnalysisUser runs a single analyzer on a package that was loaded
// from source: first its required dependency analyzers (recursively,
// via runAnalysis), then the analyzer itself. If the analyzer exports
// facts, the package's facts are serialized and stored in the
// on-disk cache for future runs.
func (r *Runner) runAnalysisUser(pass *analysis.Pass, ac *analysisAction) (interface{}, error) {
	if !ac.pkg.fromSource {
		panic(fmt.Sprintf("internal error: %s was not loaded from source", ac.pkg))
	}

	// User-provided package, analyse it
	// First analyze it with dependencies
	for _, req := range ac.analyzer.Requires {
		acReq := r.makeAnalysisAction(req, ac.pkg)
		ret, err := r.runAnalysis(acReq)
		if err != nil {
			// We couldn't run a dependency, no point in going on
			return nil, dependencyError{req.Name, err}
		}

		pass.ResultOf[req] = ret
	}

	// Then with this analyzer
	ret, err := ac.analyzer.Run(pass)
	if err != nil {
		return nil, err
	}

	if len(ac.analyzer.FactTypes) > 0 {
		// Merge new facts into the package and persist them.
		var facts []Fact
		// Package-level facts are stored with an empty object path.
		for _, fact := range ac.newPackageFacts {
			id := r.analyzerIDs.get(ac.analyzer)
			ac.pkg.pkgFacts[id] = append(ac.pkg.pkgFacts[id], fact)
			facts = append(facts, Fact{"", fact})
		}
		// Object facts: only persist facts about objects declared in
		// this package; facts on imported objects are reconstructed
		// from the dependencies' own caches.
		for obj, afacts := range ac.pkg.facts[ac.analyzerID] {
			if obj.Pkg() != ac.pkg.Package.Types {
				continue
			}
			path, err := objectpath.For(obj)
			if err != nil {
				// Objects without a stable path (e.g. unexported
				// locals) simply aren't cached.
				continue
			}
			for _, fact := range afacts {
				facts = append(facts, Fact{string(path), fact})
			}
		}

		// Encode the whole fact list with gob and store it under the
		// pass's action ID, sub-keyed "facts" (mirrors loadCachedFacts).
		buf := &bytes.Buffer{}
		if err := gob.NewEncoder(buf).Encode(facts); err != nil {
			return nil, err
		}
		aID, err := passActionID(ac.pkg, ac.analyzer)
		if err != nil {
			return nil, err
		}
		aID = cache.Subkey(aID, "facts")
		if err := r.cache.PutBytes(aID, buf.Bytes()); err != nil {
			return nil, err
		}
	}

	return ret, nil
}
|
||||
|
||||
func NewRunner(stats *Stats) (*Runner, error) {
|
||||
cache, err := cache.Default()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &Runner{
|
||||
cache: cache,
|
||||
stats: stats,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Run loads packages corresponding to patterns and analyses them with
// analyzers. It returns the loaded packages, which contain reported
// diagnostics as well as extracted ignore directives.
//
// Note that diagnostics have not been filtered at this point yet, to
// accommodate cumulative analyzes that require additional steps to
// produce diagnostics.
func (r *Runner) Run(cfg *packages.Config, patterns []string, analyzers []*analysis.Analyzer, hasCumulative bool) ([]*Package, error) {
	// Assign a dense integer ID to every analyzer (including
	// transitive requirements and injected analyses) so per-package
	// results/facts can live in slices instead of maps.
	r.analyzerIDs = analyzerIDs{m: map[*analysis.Analyzer]int{}}
	id := 0
	seen := map[*analysis.Analyzer]struct{}{}
	var dfs func(a *analysis.Analyzer)
	dfs = func(a *analysis.Analyzer) {
		if _, ok := seen[a]; ok {
			return
		}
		seen[a] = struct{}{}
		r.analyzerIDs.m[a] = id
		id++
		// Fact types must be registered with gob before any cache
		// encode/decode can succeed.
		for _, f := range a.FactTypes {
			gob.Register(f)
		}
		for _, req := range a.Requires {
			dfs(req)
		}
	}
	for _, a := range analyzers {
		// Propagate the target Go version to analyzers that expose a
		// "go" flag.
		if v := a.Flags.Lookup("go"); v != nil {
			v.Value.Set(fmt.Sprintf("1.%d", r.goVersion))
		}
		dfs(a)
	}
	for _, a := range injectedAnalyses {
		dfs(a)
	}

	var dcfg packages.Config
	if cfg != nil {
		dcfg = *cfg
	}

	atomic.StoreUint32(&r.stats.State, StateGraph)
	initialPkgs, err := r.ld.Graph(dcfg, patterns...)
	if err != nil {
		return nil, err
	}

	defer r.cache.Trim()

	// Wrap every packages.Package in our own Package, preallocating
	// the per-analyzer slices sized by the ID space computed above.
	var allPkgs []*Package
	m := map[*packages.Package]*Package{}
	packages.Visit(initialPkgs, nil, func(l *packages.Package) {
		m[l] = &Package{
			Package:  l,
			results:  make([]*result, len(r.analyzerIDs.m)),
			facts:    make([]map[types.Object][]analysis.Fact, len(r.analyzerIDs.m)),
			pkgFacts: make([][]analysis.Fact, len(r.analyzerIDs.m)),
			done:     make(chan struct{}),
			// every package needs itself
			dependents:    1,
			canClearTypes: !hasCumulative,
		}
		allPkgs = append(allPkgs, m[l])
		for i := range m[l].facts {
			m[l].facts[i] = map[types.Object][]analysis.Fact{}
		}
		for _, err := range l.Errors {
			m[l].errs = append(m[l].errs, err)
		}
		// packages.Visit is bottom-up (post-order), so m[v] for every
		// import already exists here.
		for _, v := range l.Imports {
			m[v].dependents++
			m[l].Imports = append(m[l].Imports, m[v])
		}

		m[l].hash, err = packageHash(m[l])
		if err != nil {
			m[l].errs = append(m[l].errs, err)
		}
	})

	pkgs := make([]*Package, len(initialPkgs))
	for i, l := range initialPkgs {
		pkgs[i] = m[l]
		pkgs[i].initial = true
	}

	atomic.StoreUint32(&r.stats.InitialPackages, uint32(len(initialPkgs)))
	atomic.StoreUint32(&r.stats.TotalPackages, uint32(len(allPkgs)))
	atomic.StoreUint32(&r.stats.State, StateProcessing)

	// One goroutine per package; processPkg itself waits on its
	// imports' done channels, and loadSem bounds concurrent loading
	// to GOMAXPROCS workers.
	var wg sync.WaitGroup
	wg.Add(len(allPkgs))
	r.loadSem = make(chan struct{}, runtime.GOMAXPROCS(-1))
	atomic.StoreUint32(&r.stats.TotalWorkers, uint32(cap(r.loadSem)))
	for _, pkg := range allPkgs {
		pkg := pkg
		go func() {
			r.processPkg(pkg, analyzers)

			if pkg.initial {
				atomic.AddUint32(&r.stats.ProcessedInitialPackages, 1)
			}
			atomic.AddUint32(&r.stats.Problems, uint32(len(pkg.problems)))
			wg.Done()
		}()
	}
	wg.Wait()

	return pkgs, nil
}
|
||||
|
||||
var posRe = regexp.MustCompile(`^(.+?):(\d+)(?::(\d+)?)?`)
|
||||
|
||||
func parsePos(pos string) (token.Position, int, error) {
|
||||
if pos == "-" || pos == "" {
|
||||
return token.Position{}, 0, nil
|
||||
}
|
||||
parts := posRe.FindStringSubmatch(pos)
|
||||
if parts == nil {
|
||||
return token.Position{}, 0, fmt.Errorf("malformed position %q", pos)
|
||||
}
|
||||
file := parts[1]
|
||||
line, _ := strconv.Atoi(parts[2])
|
||||
col, _ := strconv.Atoi(parts[3])
|
||||
return token.Position{
|
||||
Filename: file,
|
||||
Line: line,
|
||||
Column: col,
|
||||
}, len(parts[0]), nil
|
||||
}
|
||||
|
||||
// loadPkg loads a Go package. If the package is in the set of initial
// packages, it will be loaded from source, otherwise it will be
// loaded from export data. In the case that the package was loaded
// from export data, cached facts will also be loaded.
//
// Currently, only cached facts for this package will be loaded, not
// for any of its dependencies.
func (r *Runner) loadPkg(pkg *Package, analyzers []*analysis.Analyzer) error {
	if pkg.Types != nil {
		panic(fmt.Sprintf("internal error: %s has already been loaded", pkg.Package))
	}

	// Load type information
	if pkg.initial {
		// Load package from source
		pkg.fromSource = true
		return r.ld.LoadFromSource(pkg.Package)
	}

	// Load package from export data
	if err := r.ld.LoadFromExport(pkg.Package); err != nil {
		// We asked Go to give us up to date export data, yet
		// we can't load it. There must be something wrong.
		//
		// Attempt loading from source. This should fail (because
		// otherwise there would be export data); we just want to
		// get the compile errors. If loading from source succeeds
		// we discard the result, anyway. Otherwise we'll fail
		// when trying to reload from export data later.
		//
		// FIXME(dh): we no longer reload from export data, so
		// theoretically we should be able to continue
		pkg.fromSource = true
		if err := r.ld.LoadFromSource(pkg.Package); err != nil {
			return err
		}
		// Make sure this package can't be imported successfully
		pkg.Package.Errors = append(pkg.Package.Errors, packages.Error{
			Pos:  "-",
			Msg:  fmt.Sprintf("could not load export data: %s", err),
			Kind: packages.ParseError,
		})
		return fmt.Errorf("could not load export data: %s", err)
	}

	// Try to satisfy every fact-exporting analyzer (and transitive
	// requirements) from the cache. If any lookup misses, fall back
	// to analysing this package from source.
	failed := false
	seen := make([]bool, len(r.analyzerIDs.m))
	var dfs func(*analysis.Analyzer)
	dfs = func(a *analysis.Analyzer) {
		if seen[r.analyzerIDs.get(a)] {
			return
		}
		seen[r.analyzerIDs.get(a)] = true

		if len(a.FactTypes) > 0 {
			facts, ok := r.loadCachedFacts(a, pkg)
			if !ok {
				failed = true
				return
			}

			for _, f := range facts {
				if f.Path == "" {
					// This is a package fact
					pkg.pkgFacts[r.analyzerIDs.get(a)] = append(pkg.pkgFacts[r.analyzerIDs.get(a)], f.Fact)
					continue
				}
				obj, err := objectpath.Object(pkg.Types, objectpath.Path(f.Path))
				if err != nil {
					// Be lenient about these errors. For example, when
					// analysing io/ioutil from source, we may get a fact
					// for methods on the devNull type, and objectpath
					// will happily create a path for them. However, when
					// we later load io/ioutil from export data, the path
					// no longer resolves.
					//
					// If an exported type embeds the unexported type,
					// then (part of) the unexported type will become part
					// of the type information and our path will resolve
					// again.
					continue
				}
				pkg.facts[r.analyzerIDs.get(a)][obj] = append(pkg.facts[r.analyzerIDs.get(a)][obj], f.Fact)
			}
		}

		for _, req := range a.Requires {
			dfs(req)
		}
	}
	for _, a := range analyzers {
		dfs(a)
	}

	if failed {
		pkg.fromSource = true
		// XXX we added facts to the maps, we need to get rid of those
		return r.ld.LoadFromSource(pkg.Package)
	}

	return nil
}
|
||||
|
||||
// analysisError records the failure of a specific analyzer on a
// specific package, keeping both for the error message.
type analysisError struct {
	analyzer *analysis.Analyzer // the analyzer that failed
	pkg      *Package           // the package it was running on
	err      error              // the underlying failure
}
|
||||
|
||||
func (err analysisError) Error() string {
|
||||
return fmt.Sprintf("error running analyzer %s on %s: %s", err.analyzer, err.pkg, err.err)
|
||||
}
|
||||
|
||||
// processPkg processes a package. This involves loading the package,
// either from export data or from source. For packages loaded from
// source, the provided analyzers will be run on the package.
func (r *Runner) processPkg(pkg *Package, analyzers []*analysis.Analyzer) {
	defer func() {
		// Clear information we no longer need. Make sure to do this
		// when returning from processPkg so that we clear
		// dependencies, not just initial packages.
		pkg.TypesInfo = nil
		pkg.Syntax = nil
		pkg.results = nil

		atomic.AddUint32(&r.stats.ProcessedPackages, 1)
		pkg.decUse()
		// Closing done signals all dependents waiting in their own
		// processPkg calls; it must happen even on early returns.
		close(pkg.done)
	}()

	// Ensure all packages have the generated map and config. This is
	// required by internals of the runner. Analyses that themselves
	// make use of either have an explicit dependency so that other
	// runners work correctly, too.
	// The three-index slice forces append to copy rather than mutate
	// the caller's slice.
	analyzers = append(analyzers[0:len(analyzers):len(analyzers)], injectedAnalyses...)

	if len(pkg.errs) != 0 {
		return
	}

	// Wait for all imports to finish; propagate their failures.
	for _, imp := range pkg.Imports {
		<-imp.done
		if len(imp.errs) > 0 {
			if imp.initial {
				// Don't print the error of the dependency since it's
				// an initial package and we're already printing the
				// error.
				pkg.errs = append(pkg.errs, fmt.Errorf("could not analyze dependency %s of %s", imp, pkg))
			} else {
				var s string
				for _, err := range imp.errs {
					s += "\n\t" + err.Error()
				}
				pkg.errs = append(pkg.errs, fmt.Errorf("could not analyze dependency %s of %s: %s", imp, pkg, s))
			}
			return
		}
	}
	// "unsafe" has no source or export data; use the canonical
	// types.Unsafe package.
	if pkg.PkgPath == "unsafe" {
		pkg.Types = types.Unsafe
		return
	}

	// Bound concurrent loading/analysis with the semaphore set up in Run.
	r.loadSem <- struct{}{}
	atomic.AddUint32(&r.stats.ActiveWorkers, 1)
	defer func() {
		<-r.loadSem
		// Adding ^uint32(0) is an atomic decrement by one.
		atomic.AddUint32(&r.stats.ActiveWorkers, ^uint32(0))
	}()
	if err := r.loadPkg(pkg, analyzers); err != nil {
		pkg.errs = append(pkg.errs, err)
		return
	}

	// A package's object facts is the union of all of its dependencies.
	for _, imp := range pkg.Imports {
		for ai, m := range imp.facts {
			for obj, facts := range m {
				// Capped slice: appends by this package must not grow
				// into the dependency's backing array.
				pkg.facts[ai][obj] = facts[0:len(facts):len(facts)]
			}
		}
	}

	if !pkg.fromSource {
		// Nothing left to do for the package.
		return
	}

	// Run analyses on initial packages and those missing facts
	var wg sync.WaitGroup
	wg.Add(len(analyzers))
	errs := make([]error, len(analyzers))
	var acs []*analysisAction
	for i, a := range analyzers {
		i := i
		a := a
		ac := r.makeAnalysisAction(a, pkg)
		acs = append(acs, ac)
		go func() {
			defer wg.Done()
			// Only initial packages and packages with missing
			// facts will have been loaded from source.
			if pkg.initial || r.hasFacts(a) {
				if _, err := r.runAnalysis(ac); err != nil {
					errs[i] = analysisError{a, pkg, err}
					return
				}
			}
		}()
	}
	wg.Wait()

	// Collapse repeated failures of the same root dependency into a
	// single aggregate error instead of one per dependent analyzer.
	depErrors := map[dependencyError]int{}
	for _, err := range errs {
		if err == nil {
			continue
		}
		switch err := err.(type) {
		case analysisError:
			switch err := err.err.(type) {
			case dependencyError:
				depErrors[err.nested()]++
			default:
				pkg.errs = append(pkg.errs, err)
			}
		default:
			pkg.errs = append(pkg.errs, err)
		}
	}
	for err, count := range depErrors {
		pkg.errs = append(pkg.errs,
			fmt.Errorf("could not run %s@%s, preventing %d analyzers from running: %s", err.dep, pkg, count, err.err))
	}

	// We can't process ignores at this point because `unused` needs
	// to see more than one package to make its decision.
	ignores, problems := parseDirectives(pkg.Package)
	pkg.ignores = append(pkg.ignores, ignores...)
	pkg.problems = append(pkg.problems, problems...)
	for _, ac := range acs {
		pkg.problems = append(pkg.problems, ac.problems...)
	}

	if pkg.initial {
		// Only initial packages have these analyzers run, and only
		// initial packages need these.
		if pkg.results[r.analyzerIDs.get(config.Analyzer)].v != nil {
			pkg.cfg = pkg.results[r.analyzerIDs.get(config.Analyzer)].v.(*config.Config)
		}
		pkg.gen = pkg.results[r.analyzerIDs.get(facts.Generated)].v.(map[string]facts.Generator)
	}

	// In a previous version of the code, we would throw away all type
	// information and reload it from export data. That was
	// nonsensical. The *types.Package doesn't keep any information
	// live that export data wouldn't also. We only need to discard
	// the AST and the TypesInfo maps; that happens after we return
	// from processPkg.
}
|
||||
|
||||
// hasFacts reports whether an analysis exports any facts. An analysis
|
||||
// that has a transitive dependency that exports facts is considered
|
||||
// to be exporting facts.
|
||||
func (r *Runner) hasFacts(a *analysis.Analyzer) bool {
|
||||
ret := false
|
||||
seen := make([]bool, len(r.analyzerIDs.m))
|
||||
var dfs func(*analysis.Analyzer)
|
||||
dfs = func(a *analysis.Analyzer) {
|
||||
if seen[r.analyzerIDs.get(a)] {
|
||||
return
|
||||
}
|
||||
seen[r.analyzerIDs.get(a)] = true
|
||||
if len(a.FactTypes) > 0 {
|
||||
ret = true
|
||||
}
|
||||
for _, req := range a.Requires {
|
||||
if ret {
|
||||
break
|
||||
}
|
||||
dfs(req)
|
||||
}
|
||||
}
|
||||
dfs(a)
|
||||
return ret
|
||||
}
|
||||
|
||||
// parseDirective splits a "//lint:" comment into its command (the
// first space-separated field after the prefix) and the remaining
// arguments. Non-directive comments yield ("", nil).
func parseDirective(s string) (cmd string, args []string) {
	const prefix = "//lint:"
	if !strings.HasPrefix(s, prefix) {
		return "", nil
	}
	fields := strings.Split(s[len(prefix):], " ")
	return fields[0], fields[1:]
}
|
||||
|
||||
// parseDirectives extracts all linter directives from the source
// files of the package. Malformed directives are returned as problems.
func parseDirectives(pkg *packages.Package) ([]Ignore, []Problem) {
	var ignores []Ignore
	var problems []Problem

	for _, f := range pkg.Syntax {
		// Fast path: building an ast.CommentMap is relatively costly,
		// so skip files that contain no "//lint:" comment at all.
		found := false
	commentLoop:
		for _, cg := range f.Comments {
			for _, c := range cg.List {
				if strings.Contains(c.Text, "//lint:") {
					found = true
					break commentLoop
				}
			}
		}
		if !found {
			continue
		}
		// The comment map associates each directive with the AST node
		// it is attached to, whose position determines the ignored line.
		cm := ast.NewCommentMap(pkg.Fset, f, f.Comments)
		for node, cgs := range cm {
			for _, cg := range cgs {
				for _, c := range cg.List {
					if !strings.HasPrefix(c.Text, "//lint:") {
						continue
					}
					cmd, args := parseDirective(c.Text)
					switch cmd {
					case "ignore", "file-ignore":
						// Directives require at least a check list and
						// a free-form reason.
						if len(args) < 2 {
							p := Problem{
								Pos:      DisplayPosition(pkg.Fset, c.Pos()),
								Message:  "malformed linter directive; missing the required reason field?",
								Severity: Error,
								Check:    "compile",
							}
							problems = append(problems, p)
							continue
						}
					default:
						// unknown directive, ignore
						continue
					}
					checks := strings.Split(args[0], ",")
					pos := DisplayPosition(pkg.Fset, node.Pos())
					var ig Ignore
					switch cmd {
					case "ignore":
						ig = &LineIgnore{
							File:   pos.Filename,
							Line:   pos.Line,
							Checks: checks,
							Pos:    c.Pos(),
						}
					case "file-ignore":
						ig = &FileIgnore{
							File:   pos.Filename,
							Checks: checks,
						}
					}
					ignores = append(ignores, ig)
				}
			}
		}
	}

	return ignores, problems
}
|
||||
|
||||
// packageHash computes a package's hash. The hash is based on all Go
|
||||
// files that make up the package, as well as the hashes of imported
|
||||
// packages.
|
||||
func packageHash(pkg *Package) (string, error) {
|
||||
key := cache.NewHash("package hash")
|
||||
fmt.Fprintf(key, "pkgpath %s\n", pkg.PkgPath)
|
||||
for _, f := range pkg.CompiledGoFiles {
|
||||
h, err := cache.FileHash(f)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
fmt.Fprintf(key, "file %s %x\n", f, h)
|
||||
}
|
||||
|
||||
imps := make([]*Package, len(pkg.Imports))
|
||||
copy(imps, pkg.Imports)
|
||||
sort.Slice(imps, func(i, j int) bool {
|
||||
return imps[i].PkgPath < imps[j].PkgPath
|
||||
})
|
||||
for _, dep := range imps {
|
||||
if dep.PkgPath == "unsafe" {
|
||||
continue
|
||||
}
|
||||
|
||||
fmt.Fprintf(key, "import %s %s\n", dep.PkgPath, dep.hash)
|
||||
}
|
||||
h := key.Sum()
|
||||
return hex.EncodeToString(h[:]), nil
|
||||
}
|
||||
|
||||
// passActionID computes an ActionID for an analysis pass.
|
||||
func passActionID(pkg *Package, analyzer *analysis.Analyzer) (cache.ActionID, error) {
|
||||
key := cache.NewHash("action ID")
|
||||
fmt.Fprintf(key, "pkgpath %s\n", pkg.PkgPath)
|
||||
fmt.Fprintf(key, "pkghash %s\n", pkg.hash)
|
||||
fmt.Fprintf(key, "analyzer %s\n", analyzer.Name)
|
||||
|
||||
return key.Sum(), nil
|
||||
}
|
20
vendor/honnef.co/go/tools/lint/stats.go
vendored
Normal file
20
vendor/honnef.co/go/tools/lint/stats.go
vendored
Normal file
@ -0,0 +1,20 @@
|
||||
package lint
|
||||
|
||||
// Runner phases, stored in Stats.State via atomic operations.
const (
	StateInitializing = 0
	StateGraph        = 1
	StateProcessing   = 2
	StateCumulative   = 3
)

// Stats holds the runner's progress counters. All fields are read and
// written with sync/atomic, so consumers can poll them from another
// goroutine while a run is in progress.
type Stats struct {
	// Current phase, one of the State* constants above.
	State uint32

	InitialPackages          uint32
	TotalPackages            uint32
	ProcessedPackages        uint32
	ProcessedInitialPackages uint32
	Problems                 uint32
	ActiveWorkers            uint32
	TotalWorkers             uint32
}
|
197
vendor/honnef.co/go/tools/loader/loader.go
vendored
Normal file
197
vendor/honnef.co/go/tools/loader/loader.go
vendored
Normal file
@ -0,0 +1,197 @@
|
||||
package loader
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/parser"
|
||||
"go/scanner"
|
||||
"go/token"
|
||||
"go/types"
|
||||
"log"
|
||||
"os"
|
||||
"sync"
|
||||
|
||||
"golang.org/x/tools/go/gcexportdata"
|
||||
"golang.org/x/tools/go/packages"
|
||||
)
|
||||
|
||||
// Loader loads Go packages from source or from export data.
type Loader struct {
	// exportMu serializes export-data loading (Lock in LoadFromExport)
	// against concurrent source loads (RLock in LoadFromSource).
	exportMu sync.RWMutex
}
|
||||
|
||||
// Graph resolves patterns and returns packages with all the
|
||||
// information required to later load type information, and optionally
|
||||
// syntax trees.
|
||||
//
|
||||
// The provided config can set any setting with the exception of Mode.
|
||||
func (ld *Loader) Graph(cfg packages.Config, patterns ...string) ([]*packages.Package, error) {
|
||||
cfg.Mode = packages.NeedName | packages.NeedImports | packages.NeedDeps | packages.NeedExportsFile | packages.NeedFiles | packages.NeedCompiledGoFiles | packages.NeedTypesSizes
|
||||
pkgs, err := packages.Load(&cfg, patterns...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fset := token.NewFileSet()
|
||||
packages.Visit(pkgs, nil, func(pkg *packages.Package) {
|
||||
pkg.Fset = fset
|
||||
})
|
||||
return pkgs, nil
|
||||
}
|
||||
|
||||
// LoadFromExport loads a package from export data. All of its
// dependencies must have been loaded already.
func (ld *Loader) LoadFromExport(pkg *packages.Package) error {
	ld.exportMu.Lock()
	defer ld.exportMu.Unlock()

	// Mark ill-typed up front; only flip to false on full success.
	pkg.IllTyped = true
	// Note: the loop variable deliberately shadows pkg here.
	for path, pkg := range pkg.Imports {
		if pkg.Types == nil {
			return fmt.Errorf("dependency %q hasn't been loaded yet", path)
		}
	}
	if pkg.ExportFile == "" {
		return fmt.Errorf("no export data for %q", pkg.ID)
	}
	f, err := os.Open(pkg.ExportFile)
	if err != nil {
		return err
	}
	defer f.Close()

	r, err := gcexportdata.NewReader(f)
	if err != nil {
		return err
	}

	// Build the import view gcexportdata needs: a map from package
	// path to the already-loaded *types.Package, covering the whole
	// transitive import graph.
	view := make(map[string]*types.Package)  // view seen by gcexportdata
	seen := make(map[*packages.Package]bool) // all visited packages
	var visit func(pkgs map[string]*packages.Package)
	visit = func(pkgs map[string]*packages.Package) {
		for _, pkg := range pkgs {
			if !seen[pkg] {
				seen[pkg] = true
				view[pkg.PkgPath] = pkg.Types
				visit(pkg.Imports)
			}
		}
	}
	visit(pkg.Imports)
	tpkg, err := gcexportdata.Read(r, pkg.Fset, view, pkg.PkgPath)
	if err != nil {
		return err
	}
	pkg.Types = tpkg
	pkg.IllTyped = false
	return nil
}
|
||||
|
||||
// LoadFromSource loads a package from source. All of its dependencies
// must have been loaded already.
func (ld *Loader) LoadFromSource(pkg *packages.Package) error {
	ld.exportMu.RLock()
	defer ld.exportMu.RUnlock()

	// Mark ill-typed up front; only flip to false on full success.
	pkg.IllTyped = true
	pkg.Types = types.NewPackage(pkg.PkgPath, pkg.Name)

	// OPT(dh): many packages have few files, much fewer than there
	// are CPU cores. Additionally, parsing each individual file is
	// very fast. A naive parallel implementation of this loop won't
	// be faster, and tends to be slower due to extra scheduling,
	// bookkeeping and potentially false sharing of cache lines.
	pkg.Syntax = make([]*ast.File, len(pkg.CompiledGoFiles))
	for i, file := range pkg.CompiledGoFiles {
		f, err := parser.ParseFile(pkg.Fset, file, nil, parser.ParseComments)
		if err != nil {
			pkg.Errors = append(pkg.Errors, convertError(err)...)
			return err
		}
		pkg.Syntax[i] = f
	}
	pkg.TypesInfo = &types.Info{
		Types:      make(map[ast.Expr]types.TypeAndValue),
		Defs:       make(map[*ast.Ident]types.Object),
		Uses:       make(map[*ast.Ident]types.Object),
		Implicits:  make(map[ast.Node]types.Object),
		Scopes:     make(map[ast.Node]*types.Scope),
		Selections: make(map[*ast.SelectorExpr]*types.Selection),
	}

	// Resolve imports against the already-loaded dependency set
	// rather than compiling them: every import must have been loaded
	// before this call.
	importer := func(path string) (*types.Package, error) {
		if path == "unsafe" {
			return types.Unsafe, nil
		}
		imp := pkg.Imports[path]
		if imp == nil {
			return nil, nil
		}
		if len(imp.Errors) > 0 {
			return nil, imp.Errors[0]
		}
		return imp.Types, nil
	}
	tc := &types.Config{
		Importer: importerFunc(importer),
		// Collect every type error instead of stopping at the first.
		Error: func(err error) {
			pkg.Errors = append(pkg.Errors, convertError(err)...)
		},
	}
	err := types.NewChecker(tc, pkg.Fset, pkg.Types, pkg.TypesInfo).Files(pkg.Syntax)
	if err != nil {
		return err
	}
	pkg.IllTyped = false
	return nil
}
|
||||
|
||||
// convertError normalizes the various error types produced by the
// driver, parser and type checker into a flat []packages.Error with
// positions attached where available.
func convertError(err error) []packages.Error {
	var errs []packages.Error
	// taken from go/packages
	switch err := err.(type) {
	case packages.Error:
		// from driver
		errs = append(errs, err)

	case *os.PathError:
		// from parser
		errs = append(errs, packages.Error{
			Pos:  err.Path + ":1",
			Msg:  err.Err.Error(),
			Kind: packages.ParseError,
		})

	case scanner.ErrorList:
		// from parser
		for _, err := range err {
			errs = append(errs, packages.Error{
				Pos:  err.Pos.String(),
				Msg:  err.Msg,
				Kind: packages.ParseError,
			})
		}

	case types.Error:
		// from type checker
		errs = append(errs, packages.Error{
			Pos:  err.Fset.Position(err.Pos).String(),
			Msg:  err.Msg,
			Kind: packages.TypeError,
		})

	default:
		// unexpected impoverished error from parser?
		errs = append(errs, packages.Error{
			Pos:  "-",
			Msg:  err.Error(),
			Kind: packages.UnknownError,
		})

		// If you see this error message, please file a bug.
		log.Printf("internal error: error %q (%T) without position", err, err)
	}
	return errs
}
|
||||
|
||||
// importerFunc adapts a plain function to the types.Importer interface.
type importerFunc func(path string) (*types.Package, error)

// Import implements types.Importer by calling the wrapped function.
func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) }
|
11
vendor/honnef.co/go/tools/printf/fuzz.go
vendored
Normal file
11
vendor/honnef.co/go/tools/printf/fuzz.go
vendored
Normal file
@ -0,0 +1,11 @@
|
||||
// +build gofuzz
|
||||
|
||||
package printf
|
||||
|
||||
func Fuzz(data []byte) int {
|
||||
_, err := Parse(string(data))
|
||||
if err == nil {
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
}
|
197
vendor/honnef.co/go/tools/printf/printf.go
vendored
Normal file
197
vendor/honnef.co/go/tools/printf/printf.go
vendored
Normal file
@ -0,0 +1,197 @@
|
||||
// Package printf implements a parser for fmt.Printf-style format
|
||||
// strings.
|
||||
//
|
||||
// It parses verbs according to the following syntax:
|
||||
// Numeric -> '0'-'9'
|
||||
// Letter -> 'a'-'z' | 'A'-'Z'
|
||||
// Index -> '[' Numeric+ ']'
|
||||
// Star -> '*'
|
||||
// Star -> Index '*'
|
||||
//
|
||||
// Precision -> Numeric+ | Star
|
||||
// Width -> Numeric+ | Star
|
||||
//
|
||||
// WidthAndPrecision -> Width '.' Precision
|
||||
// WidthAndPrecision -> Width '.'
|
||||
// WidthAndPrecision -> Width
|
||||
// WidthAndPrecision -> '.' Precision
|
||||
// WidthAndPrecision -> '.'
|
||||
//
|
||||
// Flag -> '+' | '-' | '#' | ' ' | '0'
|
||||
// Verb -> Letter | '%'
|
||||
//
|
||||
// Input -> '%' [ Flag+ ] [ WidthAndPrecision ] [ Index ] Verb
|
||||
package printf
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ErrInvalid is returned for invalid format strings or verbs.
// Compare with errors.Is / ==; it is the only sentinel this package returns.
var ErrInvalid = errors.New("invalid format string")
|
||||
|
||||
// Verb describes a single parsed %-verb of a format string.
type Verb struct {
	// Letter is the verb character, e.g. 'd' for %d, or '%' for %%.
	Letter rune
	// Flags holds the raw flag characters ('+', '-', '#', ' ', '0').
	Flags string

	// Width and Precision are Default, Zero, Star or Literal values.
	Width     Argument
	Precision Argument
	// Which value in the argument list the verb uses.
	// -1 denotes the next argument,
	// values > 0 denote explicit arguments.
	// The value 0 denotes that no argument is consumed. This is the case for %%.
	Value int

	// Raw is the exact substring of the format string this verb was
	// parsed from.
	Raw string
}
|
||||
|
||||
// Argument is an implicit or explicit width or precision.
// Its concrete type is one of Default, Zero, Star or Literal.
type Argument interface {
	isArgument()
}

// The Default value, when no width or precision is provided.
type Default struct{}

// Zero is the implicit zero value.
// This value may only appear for precisions in format strings like %6.f
type Zero struct{}

// Star is a * value, which may either refer to the next argument (Index == -1) or an explicit argument.
type Star struct{ Index int }

// A Literal value, such as 6 in %6d.
type Literal int

// Marker methods sealing the Argument interface to this package's types.
func (Default) isArgument() {}
func (Zero) isArgument()    {}
func (Star) isArgument()    {}
func (Literal) isArgument() {}
|
||||
|
||||
// Parse parses f and returns a list of actions.
|
||||
// An action may either be a literal string, or a Verb.
|
||||
func Parse(f string) ([]interface{}, error) {
|
||||
var out []interface{}
|
||||
for len(f) > 0 {
|
||||
if f[0] == '%' {
|
||||
v, n, err := ParseVerb(f)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f = f[n:]
|
||||
out = append(out, v)
|
||||
} else {
|
||||
n := strings.IndexByte(f, '%')
|
||||
if n > -1 {
|
||||
out = append(out, f[:n])
|
||||
f = f[n:]
|
||||
} else {
|
||||
out = append(out, f)
|
||||
f = ""
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// atoi converts s to an int, treating any parse failure as 0. The
// callers only pass strings already matched by a numeric regexp
// group, so errors are not interesting here.
func atoi(s string) int {
	v, _ := strconv.Atoi(s)
	return v
}
|
||||
|
||||
// ParseVerb parses the verb at the beginning of f.
// It returns the verb, how much of the input was consumed, and an error, if any.
func ParseVerb(f string) (Verb, int, error) {
	// The shortest possible verb is two bytes, e.g. "%d" or "%%".
	if len(f) < 2 {
		return Verb{}, 0, ErrInvalid
	}
	// Capture-group indices into the package-level regexp `re`. These
	// are tied to the exact nesting of groups in the flags/width/
	// precision/index/verb constants; change them together.
	const (
		flags = 1

		width      = 2
		widthStar  = 3
		widthIndex = 5

		dot       = 6
		prec      = 7
		precStar  = 8
		precIndex = 10

		verbIndex = 11
		verb      = 12
	)

	m := re.FindStringSubmatch(f)
	if m == nil {
		return Verb{}, 0, ErrInvalid
	}

	v := Verb{
		Letter: []rune(m[verb])[0],
		Flags:  m[flags],
		Raw:    m[0],
	}

	if m[width] != "" {
		// Literal width
		v.Width = Literal(atoi(m[width]))
	} else if m[widthStar] != "" {
		// Star width
		if m[widthIndex] != "" {
			v.Width = Star{atoi(m[widthIndex])}
		} else {
			v.Width = Star{-1}
		}
	} else {
		// Default width
		v.Width = Default{}
	}

	if m[dot] == "" {
		// default precision
		v.Precision = Default{}
	} else {
		if m[prec] != "" {
			// Literal precision
			v.Precision = Literal(atoi(m[prec]))
		} else if m[precStar] != "" {
			// Star precision
			if m[precIndex] != "" {
				v.Precision = Star{atoi(m[precIndex])}
			} else {
				v.Precision = Star{-1}
			}
		} else {
			// Zero precision: a bare '.' as in "%6.f"
			v.Precision = Zero{}
		}
	}

	// Value: 0 for %%, an explicit 1-based index for %[n]v, else -1
	// ("next argument").
	if m[verb] == "%" {
		v.Value = 0
	} else if m[verbIndex] != "" {
		v.Value = atoi(m[verbIndex])
	} else {
		v.Value = -1
	}

	return v, len(m[0]), nil
}
|
||||
|
||||
// Building blocks of the verb-matching regular expression. The capture
// group numbering assumed by ParseVerb depends on the exact nesting of
// parenthesized groups below; change them together.
const (
	flags = `([+#0 -]*)`
	verb  = `([a-zA-Z%])`
	index = `(?:\[([0-9]+)\])`
	star  = `((` + index + `)?\*)`

	width1 = `([0-9]+)`
	width2 = star

	width             = `(?:` + width1 + `|` + width2 + `)`
	precision         = width
	widthAndPrecision = `(?:(?:` + width + `)?(?:(\.)(?:` + precision + `)?)?)`
)

// re matches a single printf verb, anchored at the start of the input.
var re = regexp.MustCompile(`^%` + flags + widthAndPrecision + `?` + index + `?` + verb)
|
223
vendor/honnef.co/go/tools/simple/analysis.go
vendored
Normal file
223
vendor/honnef.co/go/tools/simple/analysis.go
vendored
Normal file
@ -0,0 +1,223 @@
|
||||
package simple
|
||||
|
||||
import (
|
||||
"flag"
|
||||
|
||||
"golang.org/x/tools/go/analysis"
|
||||
"golang.org/x/tools/go/analysis/passes/inspect"
|
||||
"honnef.co/go/tools/facts"
|
||||
"honnef.co/go/tools/internal/passes/buildssa"
|
||||
"honnef.co/go/tools/lint/lintutil"
|
||||
)
|
||||
|
||||
// newFlagSet returns a fresh flag set for one analyzer, exposing a
// "-go" flag that selects the target Go version. It is returned by
// value (not pointer) because analysis.Analyzer.Flags is a
// flag.FlagSet, and each analyzer needs its own independent set.
func newFlagSet() flag.FlagSet {
	fs := flag.NewFlagSet("", flag.PanicOnError)
	fs.Var(lintutil.NewVersionFlag(), "go", "Target Go version")
	return *fs
}
|
||||
|
||||
// Analyzers maps each "simple" (gosimple) check identifier to its
// analysis.Analyzer. Most checks need only the AST inspector and the
// generated-code fact; S1025 and S1029 additionally require SSA form.
// Documentation strings come from the Docs table in doc.go.
var Analyzers = map[string]*analysis.Analyzer{
	"S1000": {
		Name:     "S1000",
		Run:      LintSingleCaseSelect,
		Doc:      Docs["S1000"].String(),
		Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
		Flags:    newFlagSet(),
	},
	"S1001": {
		Name:     "S1001",
		Run:      LintLoopCopy,
		Doc:      Docs["S1001"].String(),
		Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
		Flags:    newFlagSet(),
	},
	"S1002": {
		Name:     "S1002",
		Run:      LintIfBoolCmp,
		Doc:      Docs["S1002"].String(),
		Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
		Flags:    newFlagSet(),
	},
	"S1003": {
		Name:     "S1003",
		Run:      LintStringsContains,
		Doc:      Docs["S1003"].String(),
		Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
		Flags:    newFlagSet(),
	},
	"S1004": {
		Name:     "S1004",
		Run:      LintBytesCompare,
		Doc:      Docs["S1004"].String(),
		Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
		Flags:    newFlagSet(),
	},
	"S1005": {
		Name:     "S1005",
		Run:      LintUnnecessaryBlank,
		Doc:      Docs["S1005"].String(),
		Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
		Flags:    newFlagSet(),
	},
	"S1006": {
		Name:     "S1006",
		Run:      LintForTrue,
		Doc:      Docs["S1006"].String(),
		Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
		Flags:    newFlagSet(),
	},
	"S1007": {
		Name:     "S1007",
		Run:      LintRegexpRaw,
		Doc:      Docs["S1007"].String(),
		Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
		Flags:    newFlagSet(),
	},
	"S1008": {
		Name:     "S1008",
		Run:      LintIfReturn,
		Doc:      Docs["S1008"].String(),
		Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
		Flags:    newFlagSet(),
	},
	"S1009": {
		Name:     "S1009",
		Run:      LintRedundantNilCheckWithLen,
		Doc:      Docs["S1009"].String(),
		Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
		Flags:    newFlagSet(),
	},
	"S1010": {
		Name:     "S1010",
		Run:      LintSlicing,
		Doc:      Docs["S1010"].String(),
		Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
		Flags:    newFlagSet(),
	},
	"S1011": {
		Name:     "S1011",
		Run:      LintLoopAppend,
		Doc:      Docs["S1011"].String(),
		Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
		Flags:    newFlagSet(),
	},
	"S1012": {
		Name:     "S1012",
		Run:      LintTimeSince,
		Doc:      Docs["S1012"].String(),
		Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
		Flags:    newFlagSet(),
	},
	"S1016": {
		Name:     "S1016",
		Run:      LintSimplerStructConversion,
		Doc:      Docs["S1016"].String(),
		Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
		Flags:    newFlagSet(),
	},
	"S1017": {
		Name:     "S1017",
		Run:      LintTrim,
		Doc:      Docs["S1017"].String(),
		Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
		Flags:    newFlagSet(),
	},
	"S1018": {
		Name:     "S1018",
		Run:      LintLoopSlide,
		Doc:      Docs["S1018"].String(),
		Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
		Flags:    newFlagSet(),
	},
	"S1019": {
		Name:     "S1019",
		Run:      LintMakeLenCap,
		Doc:      Docs["S1019"].String(),
		Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
		Flags:    newFlagSet(),
	},
	"S1020": {
		Name:     "S1020",
		Run:      LintAssertNotNil,
		Doc:      Docs["S1020"].String(),
		Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
		Flags:    newFlagSet(),
	},
	"S1021": {
		Name:     "S1021",
		Run:      LintDeclareAssign,
		Doc:      Docs["S1021"].String(),
		Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
		Flags:    newFlagSet(),
	},
	"S1023": {
		Name:     "S1023",
		Run:      LintRedundantBreak,
		Doc:      Docs["S1023"].String(),
		Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
		Flags:    newFlagSet(),
	},
	"S1024": {
		Name:     "S1024",
		Run:      LintTimeUntil,
		Doc:      Docs["S1024"].String(),
		Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
		Flags:    newFlagSet(),
	},
	"S1025": {
		Name:     "S1025",
		Run:      LintRedundantSprintf,
		Doc:      Docs["S1025"].String(),
		Requires: []*analysis.Analyzer{buildssa.Analyzer, inspect.Analyzer, facts.Generated},
		Flags:    newFlagSet(),
	},
	"S1028": {
		Name:     "S1028",
		Run:      LintErrorsNewSprintf,
		Doc:      Docs["S1028"].String(),
		Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
		Flags:    newFlagSet(),
	},
	"S1029": {
		Name:     "S1029",
		Run:      LintRangeStringRunes,
		Doc:      Docs["S1029"].String(),
		Requires: []*analysis.Analyzer{buildssa.Analyzer},
		Flags:    newFlagSet(),
	},
	"S1030": {
		Name:     "S1030",
		Run:      LintBytesBufferConversions,
		Doc:      Docs["S1030"].String(),
		Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
		Flags:    newFlagSet(),
	},
	"S1031": {
		Name:     "S1031",
		Run:      LintNilCheckAroundRange,
		Doc:      Docs["S1031"].String(),
		Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
		Flags:    newFlagSet(),
	},
	"S1032": {
		Name:     "S1032",
		Run:      LintSortHelpers,
		Doc:      Docs["S1032"].String(),
		Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
		Flags:    newFlagSet(),
	},
	"S1033": {
		Name:     "S1033",
		Run:      LintGuardedDelete,
		Doc:      Docs["S1033"].String(),
		Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
		Flags:    newFlagSet(),
	},
	"S1034": {
		Name:     "S1034",
		Run:      LintSimplifyTypeSwitch,
		Doc:      Docs["S1034"].String(),
		Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
		Flags:    newFlagSet(),
	},
}
|
425
vendor/honnef.co/go/tools/simple/doc.go
vendored
Normal file
425
vendor/honnef.co/go/tools/simple/doc.go
vendored
Normal file
@ -0,0 +1,425 @@
|
||||
package simple
|
||||
|
||||
import "honnef.co/go/tools/lint"
|
||||
|
||||
// Docs holds the user-facing documentation for every check in this
// package, keyed by check identifier. The Analyzers table in
// analysis.go renders these via Documentation.String().
// NOTE(review): the indentation of the code examples inside the raw
// string literals was reconstructed from the upstream convention
// (4 spaces) — confirm against the original vendored file.
var Docs = map[string]*lint.Documentation{
	"S1000": &lint.Documentation{
		Title: `Use plain channel send or receive instead of single-case select`,
		Text: `Select statements with a single case can be replaced with a simple
send or receive.

Before:

    select {
    case x := <-ch:
        fmt.Println(x)
    }

After:

    x := <-ch
    fmt.Println(x)`,
		Since: "2017.1",
	},

	"S1001": &lint.Documentation{
		Title: `Replace for loop with call to copy`,
		Text: `Use copy() for copying elements from one slice to another.

Before:

    for i, x := range src {
        dst[i] = x
    }

After:

    copy(dst, src)`,
		Since: "2017.1",
	},

	"S1002": &lint.Documentation{
		Title: `Omit comparison with boolean constant`,
		Text: `Before:

    if x == true {}

After:

    if x {}`,
		Since: "2017.1",
	},

	"S1003": &lint.Documentation{
		Title: `Replace call to strings.Index with strings.Contains`,
		Text: `Before:

    if strings.Index(x, y) != -1 {}

After:

    if strings.Contains(x, y) {}`,
		Since: "2017.1",
	},

	"S1004": &lint.Documentation{
		Title: `Replace call to bytes.Compare with bytes.Equal`,
		Text: `Before:

    if bytes.Compare(x, y) == 0 {}

After:

    if bytes.Equal(x, y) {}`,
		Since: "2017.1",
	},

	"S1005": &lint.Documentation{
		Title: `Drop unnecessary use of the blank identifier`,
		Text: `In many cases, assigning to the blank identifier is unnecessary.

Before:

    for _ = range s {}
    x, _ = someMap[key]
    _ = <-ch

After:

    for range s{}
    x = someMap[key]
    <-ch`,
		Since: "2017.1",
	},

	"S1006": &lint.Documentation{
		Title: `Use for { ... } for infinite loops`,
		Text:  `For infinite loops, using for { ... } is the most idiomatic choice.`,
		Since: "2017.1",
	},

	"S1007": &lint.Documentation{
		Title: `Simplify regular expression by using raw string literal`,
		Text: `Raw string literals use ` + "`" + ` instead of " and do not support
any escape sequences. This means that the backslash (\) can be used
freely, without the need of escaping.

Since regular expressions have their own escape sequences, raw strings
can improve their readability.

Before:

    regexp.Compile("\\A(\\w+) profile: total \\d+\\n\\z")

After:

    regexp.Compile(` + "`" + `\A(\w+) profile: total \d+\n\z` + "`" + `)`,
		Since: "2017.1",
	},

	"S1008": &lint.Documentation{
		Title: `Simplify returning boolean expression`,
		Text: `Before:

    if <expr> {
        return true
    }
    return false

After:

    return <expr>`,
		Since: "2017.1",
	},

	"S1009": &lint.Documentation{
		Title: `Omit redundant nil check on slices`,
		Text: `The len function is defined for all slices, even nil ones, which have
a length of zero. It is not necessary to check if a slice is not nil
before checking that its length is not zero.

Before:

    if x != nil && len(x) != 0 {}

After:

    if len(x) != 0 {}`,
		Since: "2017.1",
	},

	"S1010": &lint.Documentation{
		Title: `Omit default slice index`,
		Text: `When slicing, the second index defaults to the length of the value,
making s[n:len(s)] and s[n:] equivalent.`,
		Since: "2017.1",
	},

	"S1011": &lint.Documentation{
		Title: `Use a single append to concatenate two slices`,
		Text: `Before:

    for _, e := range y {
        x = append(x, e)
    }

After:

    x = append(x, y...)`,
		Since: "2017.1",
	},

	"S1012": &lint.Documentation{
		Title: `Replace time.Now().Sub(x) with time.Since(x)`,
		Text: `The time.Since helper has the same effect as using time.Now().Sub(x)
but is easier to read.

Before:

    time.Now().Sub(x)

After:

    time.Since(x)`,
		Since: "2017.1",
	},

	"S1016": &lint.Documentation{
		Title: `Use a type conversion instead of manually copying struct fields`,
		Text: `Two struct types with identical fields can be converted between each
other. In older versions of Go, the fields had to have identical
struct tags. Since Go 1.8, however, struct tags are ignored during
conversions. It is thus not necessary to manually copy every field
individually.

Before:

    var x T1
    y := T2{
        Field1: x.Field1,
        Field2: x.Field2,
    }

After:

    var x T1
    y := T2(x)`,
		Since: "2017.1",
	},

	"S1017": &lint.Documentation{
		Title: `Replace manual trimming with strings.TrimPrefix`,
		Text: `Instead of using strings.HasPrefix and manual slicing, use the
strings.TrimPrefix function. If the string doesn't start with the
prefix, the original string will be returned. Using strings.TrimPrefix
reduces complexity, and avoids common bugs, such as off-by-one
mistakes.

Before:

    if strings.HasPrefix(str, prefix) {
        str = str[len(prefix):]
    }

After:

    str = strings.TrimPrefix(str, prefix)`,
		Since: "2017.1",
	},

	"S1018": &lint.Documentation{
		Title: `Use copy for sliding elements`,
		Text: `copy() permits using the same source and destination slice, even with
overlapping ranges. This makes it ideal for sliding elements in a
slice.

Before:

    for i := 0; i < n; i++ {
        bs[i] = bs[offset+i]
    }

After:

    copy(bs[:n], bs[offset:])`,
		Since: "2017.1",
	},

	"S1019": &lint.Documentation{
		Title: `Simplify make call by omitting redundant arguments`,
		Text: `The make function has default values for the length and capacity
arguments. For channels and maps, the length defaults to zero.
Additionally, for slices the capacity defaults to the length.`,
		Since: "2017.1",
	},

	"S1020": &lint.Documentation{
		Title: `Omit redundant nil check in type assertion`,
		Text: `Before:

    if _, ok := i.(T); ok && i != nil {}

After:

    if _, ok := i.(T); ok {}`,
		Since: "2017.1",
	},

	"S1021": &lint.Documentation{
		Title: `Merge variable declaration and assignment`,
		Text: `Before:

    var x uint
    x = 1

After:

    var x uint = 1`,
		Since: "2017.1",
	},

	"S1023": &lint.Documentation{
		Title: `Omit redundant control flow`,
		Text: `Functions that have no return value do not need a return statement as
the final statement of the function.

Switches in Go do not have automatic fallthrough, unlike languages
like C. It is not necessary to have a break statement as the final
statement in a case block.`,
		Since: "2017.1",
	},

	"S1024": &lint.Documentation{
		Title: `Replace x.Sub(time.Now()) with time.Until(x)`,
		Text: `The time.Until helper has the same effect as using x.Sub(time.Now())
but is easier to read.

Before:

    x.Sub(time.Now())

After:

    time.Until(x)`,
		Since: "2017.1",
	},

	"S1025": &lint.Documentation{
		Title: `Don't use fmt.Sprintf("%s", x) unnecessarily`,
		Text: `In many instances, there are easier and more efficient ways of getting
a value's string representation. Whenever a value's underlying type is
a string already, or the type has a String method, they should be used
directly.

Given the following shared definitions

    type T1 string
    type T2 int

    func (T2) String() string { return "Hello, world" }

    var x string
    var y T1
    var z T2

we can simplify the following

    fmt.Sprintf("%s", x)
    fmt.Sprintf("%s", y)
    fmt.Sprintf("%s", z)

to

    x
    string(y)
    z.String()`,
		Since: "2017.1",
	},

	"S1028": &lint.Documentation{
		Title: `Simplify error construction with fmt.Errorf`,
		Text: `Before:

    errors.New(fmt.Sprintf(...))

After:

    fmt.Errorf(...)`,
		Since: "2017.1",
	},

	"S1029": &lint.Documentation{
		Title: `Range over the string directly`,
		Text: `Ranging over a string will yield byte offsets and runes. If the offset
isn't used, this is functionally equivalent to converting the string
to a slice of runes and ranging over that. Ranging directly over the
string will be more performant, however, as it avoids allocating a new
slice, the size of which depends on the length of the string.

Before:

    for _, r := range []rune(s) {}

After:

    for _, r := range s {}`,
		Since: "2017.1",
	},

	"S1030": &lint.Documentation{
		Title: `Use bytes.Buffer.String or bytes.Buffer.Bytes`,
		Text: `bytes.Buffer has both a String and a Bytes method. It is never
necessary to use string(buf.Bytes()) or []byte(buf.String()) – simply
use the other method.`,
		Since: "2017.1",
	},

	"S1031": &lint.Documentation{
		Title: `Omit redundant nil check around loop`,
		Text: `You can use range on nil slices and maps, the loop will simply never
execute. This makes an additional nil check around the loop
unnecessary.

Before:

    if s != nil {
        for _, x := range s {
            ...
        }
    }

After:

    for _, x := range s {
        ...
    }`,
		Since: "2017.1",
	},

	"S1032": &lint.Documentation{
		Title: `Use sort.Ints(x), sort.Float64s(x), and sort.Strings(x)`,
		Text: `The sort.Ints, sort.Float64s and sort.Strings functions are easier to
read than sort.Sort(sort.IntSlice(x)), sort.Sort(sort.Float64Slice(x))
and sort.Sort(sort.StringSlice(x)).

Before:

    sort.Sort(sort.StringSlice(x))

After:

    sort.Strings(x)`,
		Since: "2019.1",
	},

	"S1033": &lint.Documentation{
		Title: `Unnecessary guard around call to delete`,
		Text:  `Calling delete on a nil map is a no-op.`,
		Since: "2019.2",
	},

	"S1034": &lint.Documentation{
		Title: `Use result of type assertion to simplify cases`,
		Since: "2019.2",
	},
}
|
1816
vendor/honnef.co/go/tools/simple/lint.go
vendored
Normal file
1816
vendor/honnef.co/go/tools/simple/lint.go
vendored
Normal file
File diff suppressed because it is too large
Load Diff
28
vendor/honnef.co/go/tools/ssa/LICENSE
vendored
Normal file
28
vendor/honnef.co/go/tools/ssa/LICENSE
vendored
Normal file
@ -0,0 +1,28 @@
|
||||
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||
Copyright (c) 2016 Dominik Honnef. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
195
vendor/honnef.co/go/tools/ssa/blockopt.go
vendored
Normal file
195
vendor/honnef.co/go/tools/ssa/blockopt.go
vendored
Normal file
@ -0,0 +1,195 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ssa
|
||||
|
||||
// Simple block optimizations to simplify the control flow graph.
|
||||
|
||||
// TODO(adonovan): opt: instead of creating several "unreachable" blocks
|
||||
// per function in the Builder, reuse a single one (e.g. at Blocks[1])
|
||||
// to reduce garbage.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
)
|
||||
|
||||
// If true, perform sanity checking and show progress at each
|
||||
// successive iteration of optimizeBlocks. Very verbose.
|
||||
const debugBlockOpt = false
|
||||
|
||||
// markReachable sets Index=-1 for all blocks reachable from b.
|
||||
func markReachable(b *BasicBlock) {
|
||||
b.Index = -1
|
||||
for _, succ := range b.Succs {
|
||||
if succ.Index == 0 {
|
||||
markReachable(succ)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// DeleteUnreachableBlocks is the exported entry point for
// deleteUnreachableBlocks; it removes all blocks of f that are not
// reachable from the entry block (or the recover block, if any).
func DeleteUnreachableBlocks(f *Function) {
	deleteUnreachableBlocks(f)
}
|
||||
|
||||
// deleteUnreachableBlocks marks all reachable blocks of f and
// eliminates (nils) all others, including possibly cyclic subgraphs.
//
func deleteUnreachableBlocks(f *Function) {
	const white, black = 0, -1
	// We borrow b.Index temporarily as the mark bit.
	for _, b := range f.Blocks {
		b.Index = white
	}
	// Roots: the entry block, plus the recover block if the function
	// has one.
	markReachable(f.Blocks[0])
	if f.Recover != nil {
		markReachable(f.Recover)
	}
	for i, b := range f.Blocks {
		if b.Index == white {
			// b is unreachable: detach it from any reachable
			// successors before deleting it.
			for _, c := range b.Succs {
				if c.Index == black {
					c.removePred(b) // delete white->black edge
				}
			}
			if debugBlockOpt {
				fmt.Fprintln(os.Stderr, "unreachable", b)
			}
			f.Blocks[i] = nil // delete b
		}
	}
	// Compact f.Blocks, which also restores correct Index values.
	f.removeNilBlocks()
}
|
||||
|
||||
// jumpThreading attempts to apply simple jump-threading to block b,
// in which a->b->c become a->c if b is just a Jump.
// The result is true if the optimization was applied.
//
func jumpThreading(f *Function, b *BasicBlock) bool {
	if b.Index == 0 {
		return false // don't apply to entry block
	}
	if b.Instrs == nil {
		return false
	}
	if _, ok := b.Instrs[0].(*Jump); !ok {
		return false // not just a jump
	}
	c := b.Succs[0]
	if c == b {
		return false // don't apply to degenerate jump-to-self.
	}
	if c.hasPhi() {
		return false // not sound without more effort
	}
	// Redirect every predecessor of b straight to c.
	for j, a := range b.Preds {
		a.replaceSucc(b, c)

		// If a now has two edges to c, replace its degenerate If by Jump.
		if len(a.Succs) == 2 && a.Succs[0] == c && a.Succs[1] == c {
			jump := new(Jump)
			jump.setBlock(a)
			a.Instrs[len(a.Instrs)-1] = jump
			a.Succs = a.Succs[:1]
			c.removePred(b)
		} else {
			// The first predecessor reuses b's slot in c.Preds;
			// subsequent ones are appended.
			if j == 0 {
				c.replacePred(b, a)
			} else {
				c.Preds = append(c.Preds, a)
			}
		}

		if debugBlockOpt {
			fmt.Fprintln(os.Stderr, "jumpThreading", a, b, c)
		}
	}
	f.Blocks[b.Index] = nil // delete b
	return true
}
|
||||
|
||||
// fuseBlocks attempts to apply the block fusion optimization to block
// a, in which a->b becomes ab if len(a.Succs)==len(b.Preds)==1.
// The result is true if the optimization was applied.
//
func fuseBlocks(f *Function, a *BasicBlock) bool {
	if len(a.Succs) != 1 {
		return false
	}
	b := a.Succs[0]
	if len(b.Preds) != 1 {
		return false
	}

	// Degenerate &&/|| ops may result in a straight-line CFG
	// containing φ-nodes. (Ideally we'd replace such them with
	// their sole operand but that requires Referrers, built later.)
	if b.hasPhi() {
		return false // not sound without further effort
	}

	// Eliminate jump at end of A, then copy all of B across.
	a.Instrs = append(a.Instrs[:len(a.Instrs)-1], b.Instrs...)
	for _, instr := range b.Instrs {
		instr.setBlock(a)
	}

	// A inherits B's successors
	a.Succs = append(a.succs2[:0], b.Succs...)

	// Fix up Preds links of all successors of B.
	for _, c := range b.Succs {
		c.replacePred(b, a)
	}

	if debugBlockOpt {
		fmt.Fprintln(os.Stderr, "fuseBlocks", a, b)
	}

	f.Blocks[b.Index] = nil // delete b
	return true
}
|
||||
|
||||
// OptimizeBlocks is the exported entry point for optimizeBlocks,
// which performs dead-block elimination, block fusion, and jump
// threading on f.
func OptimizeBlocks(f *Function) {
	optimizeBlocks(f)
}
|
||||
|
||||
// optimizeBlocks() performs some simple block optimizations on a
// completed function: dead block elimination, block fusion, jump
// threading.
//
func optimizeBlocks(f *Function) {
	deleteUnreachableBlocks(f)

	// Loop until no further progress.
	changed := true
	for changed {
		changed = false

		if debugBlockOpt {
			f.WriteTo(os.Stderr)
			mustSanityCheck(f, nil)
		}

		for _, b := range f.Blocks {
			// f.Blocks will temporarily contain nils to indicate
			// deleted blocks; we remove them at the end.
			if b == nil {
				continue
			}

			// Fuse blocks. b->c becomes bc.
			if fuseBlocks(f, b) {
				changed = true
			}

			// a->b->c becomes a->c if b contains only a Jump.
			if jumpThreading(f, b) {
				changed = true
				continue // (b was disconnected)
			}
		}
	}
	f.removeNilBlocks()
}
|
2379
vendor/honnef.co/go/tools/ssa/builder.go
vendored
Normal file
2379
vendor/honnef.co/go/tools/ssa/builder.go
vendored
Normal file
File diff suppressed because it is too large
Load Diff
169
vendor/honnef.co/go/tools/ssa/const.go
vendored
Normal file
169
vendor/honnef.co/go/tools/ssa/const.go
vendored
Normal file
@ -0,0 +1,169 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ssa
|
||||
|
||||
// This file defines the Const SSA value type.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/constant"
|
||||
"go/token"
|
||||
"go/types"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// NewConst returns a new constant of the specified value and type.
// val must be valid according to the specification of Const.Value.
// No validation is performed here; val and typ are stored as given.
//
func NewConst(val constant.Value, typ types.Type) *Const {
	return &Const{typ, val}
}
|
||||
|
||||
// intConst returns an 'int' constant that evaluates to i.
// (i is an int64 in case the host is narrower than the target.)
func intConst(i int64) *Const {
	return NewConst(constant.MakeInt64(i), tInt)
}
|
||||
|
||||
// nilConst returns a nil constant of the specified type, which may
// be any reference type, including interfaces.
// A nil constant is represented by a nil Const.Value.
//
func nilConst(typ types.Type) *Const {
	return NewConst(nil, typ)
}
|
||||
|
||||
// stringConst returns a 'string' constant that evaluates to s.
func stringConst(s string) *Const {
	return NewConst(constant.MakeString(s), tString)
}
|
||||
|
||||
// zeroConst returns a new "zero" constant of the specified type,
// which must not be an array or struct type: the zero values of
// aggregates are well-defined but cannot be represented by Const.
//
func zeroConst(t types.Type) *Const {
	switch t := t.(type) {
	case *types.Basic:
		switch {
		case t.Info()&types.IsBoolean != 0:
			return NewConst(constant.MakeBool(false), t)
		case t.Info()&types.IsNumeric != 0:
			return NewConst(constant.MakeInt64(0), t)
		case t.Info()&types.IsString != 0:
			return NewConst(constant.MakeString(""), t)
		case t.Kind() == types.UnsafePointer:
			fallthrough
		case t.Kind() == types.UntypedNil:
			return nilConst(t)
		default:
			panic(fmt.Sprint("zeroConst for unexpected type:", t))
		}
	case *types.Pointer, *types.Slice, *types.Interface, *types.Chan, *types.Map, *types.Signature:
		// Reference types: the zero value is nil.
		return nilConst(t)
	case *types.Named:
		// Zero of the underlying type, retyped as the named type.
		return NewConst(zeroConst(t.Underlying()).Value, t)
	case *types.Array, *types.Struct, *types.Tuple:
		panic(fmt.Sprint("zeroConst applied to aggregate:", t))
	}
	panic(fmt.Sprint("zeroConst: unexpected ", t))
}
|
||||
|
||||
// RelString returns the string representation of c, with its type
// rendered relative to the package from. String constants are quoted
// and abbreviated to at most 20 bytes; other values use
// constant.Value.String, and nil is rendered as "nil".
func (c *Const) RelString(from *types.Package) string {
	var s string
	if c.Value == nil {
		s = "nil"
	} else if c.Value.Kind() == constant.String {
		s = constant.StringVal(c.Value)
		const max = 20
		// TODO(adonovan): don't cut a rune in half.
		if len(s) > max {
			s = s[:max-3] + "..." // abbreviate
		}
		s = strconv.Quote(s)
	} else {
		s = c.Value.String()
	}
	return s + ":" + relType(c.Type(), from)
}
|
||||
|
||||
// Name returns the package-unqualified string form of c.
func (c *Const) Name() string {
	return c.RelString(nil)
}

// String implements fmt.Stringer; it is equivalent to Name.
func (c *Const) String() string {
	return c.Name()
}

// Type returns the type of this constant.
func (c *Const) Type() types.Type {
	return c.typ
}

// Referrers returns nil: constants are not instructions and have no
// referrer list.
func (c *Const) Referrers() *[]Instruction {
	return nil
}

// Parent returns nil: a constant does not belong to any function.
func (c *Const) Parent() *Function { return nil }

// Pos returns token.NoPos: constants carry no source position.
func (c *Const) Pos() token.Pos {
	return token.NoPos
}

// IsNil returns true if this constant represents a typed or untyped nil value.
func (c *Const) IsNil() bool {
	return c.Value == nil
}
|
||||
|
||||
// TODO(adonovan): move everything below into honnef.co/go/tools/ssa/interp.

// Int64 returns the numeric value of this constant truncated to fit
// a signed 64-bit integer.
//
func (c *Const) Int64() int64 {
	switch x := constant.ToInt(c.Value); x.Kind() {
	case constant.Int:
		// Values that do not fit in an int64 collapse to 0.
		if i, ok := constant.Int64Val(x); ok {
			return i
		}
		return 0
	case constant.Float:
		f, _ := constant.Float64Val(x)
		return int64(f)
	}
	panic(fmt.Sprintf("unexpected constant value: %T", c.Value))
}
|
||||
|
||||
// Uint64 returns the numeric value of this constant truncated to fit
// an unsigned 64-bit integer.
//
func (c *Const) Uint64() uint64 {
	switch x := constant.ToInt(c.Value); x.Kind() {
	case constant.Int:
		// Values that do not fit in a uint64 collapse to 0.
		if u, ok := constant.Uint64Val(x); ok {
			return u
		}
		return 0
	case constant.Float:
		f, _ := constant.Float64Val(x)
		return uint64(f)
	}
	panic(fmt.Sprintf("unexpected constant value: %T", c.Value))
}
|
||||
|
||||
// Float64 returns the numeric value of this constant truncated to fit
// a float64.
//
func (c *Const) Float64() float64 {
	// The exactness flag is deliberately discarded; truncation is the
	// documented contract.
	f, _ := constant.Float64Val(c.Value)
	return f
}

// Complex128 returns the complex value of this constant truncated to
// fit a complex128.
//
func (c *Const) Complex128() complex128 {
	// Real and imaginary parts are truncated independently.
	re, _ := constant.Float64Val(constant.Real(c.Value))
	im, _ := constant.Float64Val(constant.Imag(c.Value))
	return complex(re, im)
}
|
270
vendor/honnef.co/go/tools/ssa/create.go
vendored
Normal file
270
vendor/honnef.co/go/tools/ssa/create.go
vendored
Normal file
@ -0,0 +1,270 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ssa
|
||||
|
||||
// This file implements the CREATE phase of SSA construction.
|
||||
// See builder.go for explanation.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/token"
|
||||
"go/types"
|
||||
"os"
|
||||
"sync"
|
||||
|
||||
"golang.org/x/tools/go/types/typeutil"
|
||||
)
|
||||
|
||||
// NewProgram returns a new SSA Program.
|
||||
//
|
||||
// mode controls diagnostics and checking during SSA construction.
|
||||
//
|
||||
func NewProgram(fset *token.FileSet, mode BuilderMode) *Program {
|
||||
prog := &Program{
|
||||
Fset: fset,
|
||||
imported: make(map[string]*Package),
|
||||
packages: make(map[*types.Package]*Package),
|
||||
thunks: make(map[selectionKey]*Function),
|
||||
bounds: make(map[*types.Func]*Function),
|
||||
mode: mode,
|
||||
}
|
||||
|
||||
h := typeutil.MakeHasher() // protected by methodsMu, in effect
|
||||
prog.methodSets.SetHasher(h)
|
||||
prog.canon.SetHasher(h)
|
||||
|
||||
return prog
|
||||
}
|
||||
|
||||
// memberFromObject populates package pkg with a member for the
// typechecker object obj.
//
// For objects from Go source code, syntax is the associated syntax
// tree (for funcs and vars only); it will be used during the build
// phase.
//
func memberFromObject(pkg *Package, obj types.Object, syntax ast.Node) {
	name := obj.Name()
	switch obj := obj.(type) {
	case *types.Builtin:
		// Builtins are only legal members of package unsafe.
		if pkg.Pkg != types.Unsafe {
			panic("unexpected builtin object: " + obj.String())
		}

	case *types.TypeName:
		pkg.Members[name] = &Type{
			object: obj,
			pkg:    pkg,
		}

	case *types.Const:
		c := &NamedConst{
			object: obj,
			Value:  NewConst(obj.Val(), obj.Type()),
			pkg:    pkg,
		}
		pkg.values[obj] = c.Value
		pkg.Members[name] = c

	case *types.Var:
		// A package-level var is represented by the address of its
		// storage, hence the pointer type.
		g := &Global{
			Pkg:    pkg,
			name:   name,
			object: obj,
			typ:    types.NewPointer(obj.Type()), // address
			pos:    obj.Pos(),
		}
		pkg.values[obj] = g
		pkg.Members[name] = g

	case *types.Func:
		sig := obj.Type().(*types.Signature)
		// Go permits multiple package-level init funcs; give each a
		// distinct member name (init#1, init#2, ...).
		if sig.Recv() == nil && name == "init" {
			pkg.ninit++
			name = fmt.Sprintf("init#%d", pkg.ninit)
		}
		fn := &Function{
			name:      name,
			object:    obj,
			Signature: sig,
			syntax:    syntax,
			pos:       obj.Pos(),
			Pkg:       pkg,
			Prog:      pkg.Prog,
		}
		if syntax == nil {
			fn.Synthetic = "loaded from gc object file"
		}

		pkg.values[obj] = fn
		if sig.Recv() == nil {
			pkg.Members[name] = fn // package-level function
		}

	default: // (incl. *types.Package)
		panic("unexpected Object type: " + obj.String())
	}
}
|
||||
|
||||
// membersFromDecl populates package pkg with members for each
// typechecker object (var, func, const or type) associated with the
// specified decl.
//
func membersFromDecl(pkg *Package, decl ast.Decl) {
	switch decl := decl.(type) {
	case *ast.GenDecl: // import, const, type or var
		switch decl.Tok {
		case token.CONST:
			for _, spec := range decl.Specs {
				for _, id := range spec.(*ast.ValueSpec).Names {
					if !isBlankIdent(id) {
						// Constants need no syntax during the build phase.
						memberFromObject(pkg, pkg.info.Defs[id], nil)
					}
				}
			}

		case token.VAR:
			for _, spec := range decl.Specs {
				for _, id := range spec.(*ast.ValueSpec).Names {
					if !isBlankIdent(id) {
						// Vars keep their spec for the build phase.
						memberFromObject(pkg, pkg.info.Defs[id], spec)
					}
				}
			}

		case token.TYPE:
			for _, spec := range decl.Specs {
				id := spec.(*ast.TypeSpec).Name
				if !isBlankIdent(id) {
					memberFromObject(pkg, pkg.info.Defs[id], nil)
				}
			}
		}

	case *ast.FuncDecl:
		id := decl.Name
		if !isBlankIdent(id) {
			// Functions keep their FuncDecl for the build phase.
			memberFromObject(pkg, pkg.info.Defs[id], decl)
		}
	}
}
|
||||
|
||||
// CreatePackage constructs and returns an SSA Package from the
// specified type-checked, error-free file ASTs, and populates its
// Members mapping.
//
// importable determines whether this package should be returned by a
// subsequent call to ImportedPackage(pkg.Path()).
//
// The real work of building SSA form for each function is not done
// until a subsequent call to Package.Build().
//
func (prog *Program) CreatePackage(pkg *types.Package, files []*ast.File, info *types.Info, importable bool) *Package {
	p := &Package{
		Prog:    prog,
		Members: make(map[string]Member),
		values:  make(map[types.Object]Value),
		Pkg:     pkg,
		info:    info,  // transient (CREATE and BUILD phases)
		files:   files, // transient (CREATE and BUILD phases)
	}

	// Add init() function.
	p.init = &Function{
		name:      "init",
		Signature: new(types.Signature),
		Synthetic: "package initializer",
		Pkg:       p,
		Prog:      prog,
	}
	p.Members[p.init.name] = p.init

	// CREATE phase.
	// Allocate all package members: vars, funcs, consts and types.
	if len(files) > 0 {
		// Go source package.
		for _, file := range files {
			for _, decl := range file.Decls {
				membersFromDecl(p, decl)
			}
		}
	} else {
		// GC-compiled binary package (or "unsafe")
		// No code.
		// No position information.
		scope := p.Pkg.Scope()
		for _, name := range scope.Names() {
			obj := scope.Lookup(name)
			memberFromObject(p, obj, nil)
			// Also create members for the methods of every named type,
			// which are not in the package scope themselves.
			if obj, ok := obj.(*types.TypeName); ok {
				if named, ok := obj.Type().(*types.Named); ok {
					for i, n := 0, named.NumMethods(); i < n; i++ {
						memberFromObject(p, named.Method(i), nil)
					}
				}
			}
		}
	}

	if prog.mode&BareInits == 0 {
		// Add initializer guard variable.
		initguard := &Global{
			Pkg:  p,
			name: "init$guard",
			typ:  types.NewPointer(tBool),
		}
		p.Members[initguard.Name()] = initguard
	}

	if prog.mode&GlobalDebug != 0 {
		p.SetDebugMode(true)
	}

	if prog.mode&PrintPackages != 0 {
		printMu.Lock()
		p.WriteTo(os.Stdout)
		printMu.Unlock()
	}

	if importable {
		prog.imported[p.Pkg.Path()] = p
	}
	prog.packages[p.Pkg] = p

	return p
}
|
||||
|
||||
// printMu serializes printing of Packages/Functions to stdout
// (used by CreatePackage when the PrintPackages mode bit is set).
var printMu sync.Mutex
|
||||
|
||||
// AllPackages returns a new slice containing all packages in the
|
||||
// program prog in unspecified order.
|
||||
//
|
||||
func (prog *Program) AllPackages() []*Package {
|
||||
pkgs := make([]*Package, 0, len(prog.packages))
|
||||
for _, pkg := range prog.packages {
|
||||
pkgs = append(pkgs, pkg)
|
||||
}
|
||||
return pkgs
|
||||
}
|
||||
|
||||
// ImportedPackage returns the importable Package whose PkgPath
// is path, or nil if no such Package has been created.
//
// A parameter to CreatePackage determines whether a package should be
// considered importable. For example, no import declaration can resolve
// to the ad-hoc main package created by 'go build foo.go'.
//
// TODO(adonovan): rethink this function and the "importable" concept;
// most packages are importable. This function assumes that all
// types.Package.Path values are unique within the ssa.Program, which is
// false---yet this function remains very convenient.
// Clients should use (*Program).Package instead where possible.
// SSA doesn't really need a string-keyed map of packages.
//
func (prog *Program) ImportedPackage(path string) *Package {
	return prog.imported[path]
}
|
125
vendor/honnef.co/go/tools/ssa/doc.go
vendored
Normal file
125
vendor/honnef.co/go/tools/ssa/doc.go
vendored
Normal file
@ -0,0 +1,125 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package ssa defines a representation of the elements of Go programs
|
||||
// (packages, types, functions, variables and constants) using a
|
||||
// static single-assignment (SSA) form intermediate representation
|
||||
// (IR) for the bodies of functions.
|
||||
//
|
||||
// THIS INTERFACE IS EXPERIMENTAL AND IS LIKELY TO CHANGE.
|
||||
//
|
||||
// For an introduction to SSA form, see
|
||||
// http://en.wikipedia.org/wiki/Static_single_assignment_form.
|
||||
// This page provides a broader reading list:
|
||||
// http://www.dcs.gla.ac.uk/~jsinger/ssa.html.
|
||||
//
|
||||
// The level of abstraction of the SSA form is intentionally close to
|
||||
// the source language to facilitate construction of source analysis
|
||||
// tools. It is not intended for machine code generation.
|
||||
//
|
||||
// All looping, branching and switching constructs are replaced with
|
||||
// unstructured control flow. Higher-level control flow constructs
|
||||
// such as multi-way branch can be reconstructed as needed; see
|
||||
// ssautil.Switches() for an example.
|
||||
//
|
||||
// The simplest way to create the SSA representation of a package is
|
||||
// to load typed syntax trees using golang.org/x/tools/go/packages, then
|
||||
// invoke the ssautil.Packages helper function. See ExampleLoadPackages
|
||||
// and ExampleWholeProgram for examples.
|
||||
// The resulting ssa.Program contains all the packages and their
|
||||
// members, but SSA code is not created for function bodies until a
|
||||
// subsequent call to (*Package).Build or (*Program).Build.
|
||||
//
|
||||
// The builder initially builds a naive SSA form in which all local
|
||||
// variables are addresses of stack locations with explicit loads and
|
||||
// stores. Registerisation of eligible locals and φ-node insertion
|
||||
// using dominance and dataflow are then performed as a second pass
|
||||
// called "lifting" to improve the accuracy and performance of
|
||||
// subsequent analyses; this pass can be skipped by setting the
|
||||
// NaiveForm builder flag.
|
||||
//
|
||||
// The primary interfaces of this package are:
|
||||
//
|
||||
// - Member: a named member of a Go package.
|
||||
// - Value: an expression that yields a value.
|
||||
// - Instruction: a statement that consumes values and performs computation.
|
||||
// - Node: a Value or Instruction (emphasizing its membership in the SSA value graph)
|
||||
//
|
||||
// A computation that yields a result implements both the Value and
|
||||
// Instruction interfaces. The following table shows for each
|
||||
// concrete type which of these interfaces it implements.
|
||||
//
|
||||
// Value? Instruction? Member?
|
||||
// *Alloc ✔ ✔
|
||||
// *BinOp ✔ ✔
|
||||
// *Builtin ✔
|
||||
// *Call ✔ ✔
|
||||
// *ChangeInterface ✔ ✔
|
||||
// *ChangeType ✔ ✔
|
||||
// *Const ✔
|
||||
// *Convert ✔ ✔
|
||||
// *DebugRef ✔
|
||||
// *Defer ✔
|
||||
// *Extract ✔ ✔
|
||||
// *Field ✔ ✔
|
||||
// *FieldAddr ✔ ✔
|
||||
// *FreeVar ✔
|
||||
// *Function ✔ ✔ (func)
|
||||
// *Global ✔ ✔ (var)
|
||||
// *Go ✔
|
||||
// *If ✔
|
||||
// *Index ✔ ✔
|
||||
// *IndexAddr ✔ ✔
|
||||
// *Jump ✔
|
||||
// *Lookup ✔ ✔
|
||||
// *MakeChan ✔ ✔
|
||||
// *MakeClosure ✔ ✔
|
||||
// *MakeInterface ✔ ✔
|
||||
// *MakeMap ✔ ✔
|
||||
// *MakeSlice ✔ ✔
|
||||
// *MapUpdate ✔
|
||||
// *NamedConst ✔ (const)
|
||||
// *Next ✔ ✔
|
||||
// *Panic ✔
|
||||
// *Parameter ✔
|
||||
// *Phi ✔ ✔
|
||||
// *Range ✔ ✔
|
||||
// *Return ✔
|
||||
// *RunDefers ✔
|
||||
// *Select ✔ ✔
|
||||
// *Send ✔
|
||||
// *Slice ✔ ✔
|
||||
// *Store ✔
|
||||
// *Type ✔ (type)
|
||||
// *TypeAssert ✔ ✔
|
||||
// *UnOp ✔ ✔
|
||||
//
|
||||
// Other key types in this package include: Program, Package, Function
|
||||
// and BasicBlock.
|
||||
//
|
||||
// The program representation constructed by this package is fully
|
||||
// resolved internally, i.e. it does not rely on the names of Values,
|
||||
// Packages, Functions, Types or BasicBlocks for the correct
|
||||
// interpretation of the program. Only the identities of objects and
|
||||
// the topology of the SSA and type graphs are semantically
|
||||
// significant. (There is one exception: Ids, used to identify field
|
||||
// and method names, contain strings.) Avoidance of name-based
|
||||
// operations simplifies the implementation of subsequent passes and
|
||||
// can make them very efficient. Many objects are nonetheless named
|
||||
// to aid in debugging, but it is not essential that the names be
|
||||
// either accurate or unambiguous. The public API exposes a number of
|
||||
// name-based maps for client convenience.
|
||||
//
|
||||
// The ssa/ssautil package provides various utilities that depend only
|
||||
// on the public API of this package.
|
||||
//
|
||||
// TODO(adonovan): Consider the exceptional control-flow implications
|
||||
// of defer and recover().
|
||||
//
|
||||
// TODO(adonovan): write a how-to document for all the various cases
|
||||
// of trying to determine corresponding elements across the four
|
||||
// domains of source locations, ast.Nodes, types.Objects,
|
||||
// ssa.Values/Instructions.
|
||||
//
|
||||
package ssa // import "honnef.co/go/tools/ssa"
|
343
vendor/honnef.co/go/tools/ssa/dom.go
vendored
Normal file
343
vendor/honnef.co/go/tools/ssa/dom.go
vendored
Normal file
@ -0,0 +1,343 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ssa
|
||||
|
||||
// This file defines algorithms related to dominance.
|
||||
|
||||
// Dominator tree construction ----------------------------------------
|
||||
//
|
||||
// We use the algorithm described in Lengauer & Tarjan. 1979. A fast
|
||||
// algorithm for finding dominators in a flowgraph.
|
||||
// http://doi.acm.org/10.1145/357062.357071
|
||||
//
|
||||
// We also apply the optimizations to SLT described in Georgiadis et
|
||||
// al, Finding Dominators in Practice, JGAA 2006,
|
||||
// http://jgaa.info/accepted/2006/GeorgiadisTarjanWerneck2006.10.1.pdf
|
||||
// to avoid the need for buckets of size > 1.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"os"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// Idom returns the block that immediately dominates b:
// its parent in the dominator tree, if any.
// Neither the entry node (b.Index==0) nor recover node
// (b==b.Parent().Recover()) have a parent.
//
func (b *BasicBlock) Idom() *BasicBlock { return b.dom.idom }

// Dominees returns the list of blocks that b immediately dominates:
// its children in the dominator tree.
//
func (b *BasicBlock) Dominees() []*BasicBlock { return b.dom.children }

// Dominates reports whether b dominates c.
// It answers in constant time using the pre/post numbering of the
// dominator tree: b dominates c iff c's interval nests within b's.
func (b *BasicBlock) Dominates(c *BasicBlock) bool {
	return b.dom.pre <= c.dom.pre && c.dom.post <= b.dom.post
}
|
||||
|
||||
// byDomPreorder sorts blocks by their dominator-tree preorder number.
type byDomPreorder []*BasicBlock

func (a byDomPreorder) Len() int           { return len(a) }
func (a byDomPreorder) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a byDomPreorder) Less(i, j int) bool { return a[i].dom.pre < a[j].dom.pre }
|
||||
|
||||
// DomPreorder returns a new slice containing the blocks of f in
|
||||
// dominator tree preorder.
|
||||
//
|
||||
func (f *Function) DomPreorder() []*BasicBlock {
|
||||
n := len(f.Blocks)
|
||||
order := make(byDomPreorder, n)
|
||||
copy(order, f.Blocks)
|
||||
sort.Sort(order)
|
||||
return order
|
||||
}
|
||||
|
||||
// domInfo contains a BasicBlock's dominance information.
type domInfo struct {
	idom      *BasicBlock   // immediate dominator (parent in domtree)
	children  []*BasicBlock // nodes immediately dominated by this one
	pre, post int32         // pre- and post-order numbering within domtree
}

// ltState holds the working state for Lengauer-Tarjan algorithm
// (during which domInfo.pre is repurposed for CFG DFS preorder number).
type ltState struct {
	// Each slice is indexed by b.Index.
	sdom     []*BasicBlock // b's semidominator
	parent   []*BasicBlock // b's parent in DFS traversal of CFG
	ancestor []*BasicBlock // b's ancestor with least sdom
}
|
||||
|
||||
// dfs implements the depth-first search part of the LT algorithm.
// It numbers the blocks reachable from v starting at i, records them
// in preorder, and returns the next unused number.
func (lt *ltState) dfs(v *BasicBlock, i int32, preorder []*BasicBlock) int32 {
	preorder[i] = v
	v.dom.pre = i // For now: DFS preorder of spanning tree of CFG
	i++
	// sdom[v] doubles as the "visited" marker during the DFS.
	lt.sdom[v.Index] = v
	lt.link(nil, v)
	for _, w := range v.Succs {
		if lt.sdom[w.Index] == nil {
			lt.parent[w.Index] = v
			i = lt.dfs(w, i, preorder)
		}
	}
	return i
}

// eval implements the EVAL part of the LT algorithm: it walks the
// ancestor chain of v and returns the node whose semidominator has the
// smallest preorder number.
func (lt *ltState) eval(v *BasicBlock) *BasicBlock {
	// TODO(adonovan): opt: do path compression per simple LT.
	u := v
	for ; lt.ancestor[v.Index] != nil; v = lt.ancestor[v.Index] {
		if lt.sdom[v.Index].dom.pre < lt.sdom[u.Index].dom.pre {
			u = v
		}
	}
	return u
}

// link implements the LINK part of the LT algorithm:
// it makes v the forest ancestor of w.
func (lt *ltState) link(v, w *BasicBlock) {
	lt.ancestor[w.Index] = v
}
|
||||
|
||||
// buildDomTree computes the dominator tree of f using the LT algorithm.
// Precondition: all blocks are reachable (e.g. optimizeBlocks has been run).
//
func buildDomTree(f *Function) {
	// The step numbers refer to the original LT paper; the
	// reordering is due to Georgiadis.

	// Clear any previous domInfo.
	for _, b := range f.Blocks {
		b.dom = domInfo{}
	}

	n := len(f.Blocks)
	// Allocate space for 5 contiguous [n]*BasicBlock arrays:
	// sdom, parent, ancestor, preorder, buckets.
	space := make([]*BasicBlock, 5*n)
	lt := ltState{
		sdom:     space[0:n],
		parent:   space[n : 2*n],
		ancestor: space[2*n : 3*n],
	}

	// Step 1. Number vertices by depth-first preorder.
	preorder := space[3*n : 4*n]
	root := f.Blocks[0]
	prenum := lt.dfs(root, 0, preorder)
	// NB: 'recover' here shadows the builtin; it is the function's
	// recover block, a second DFS root unreachable from the entry.
	recover := f.Recover
	if recover != nil {
		lt.dfs(recover, prenum, preorder)
	}

	buckets := space[4*n : 5*n]
	copy(buckets, preorder)

	// In reverse preorder...
	for i := int32(n) - 1; i > 0; i-- {
		w := preorder[i]

		// Step 3. Implicitly define the immediate dominator of each node.
		for v := buckets[i]; v != w; v = buckets[v.dom.pre] {
			u := lt.eval(v)
			if lt.sdom[u.Index].dom.pre < i {
				v.dom.idom = u
			} else {
				v.dom.idom = w
			}
		}

		// Step 2. Compute the semidominators of all nodes.
		lt.sdom[w.Index] = lt.parent[w.Index]
		for _, v := range w.Preds {
			u := lt.eval(v)
			if lt.sdom[u.Index].dom.pre < lt.sdom[w.Index].dom.pre {
				lt.sdom[w.Index] = lt.sdom[u.Index]
			}
		}

		lt.link(lt.parent[w.Index], w)

		if lt.parent[w.Index] == lt.sdom[w.Index] {
			w.dom.idom = lt.parent[w.Index]
		} else {
			buckets[i] = buckets[lt.sdom[w.Index].dom.pre]
			buckets[lt.sdom[w.Index].dom.pre] = w
		}
	}

	// The final 'Step 3' is now outside the loop.
	for v := buckets[0]; v != root; v = buckets[v.dom.pre] {
		v.dom.idom = root
	}

	// Step 4. Explicitly define the immediate dominator of each
	// node, in preorder.
	for _, w := range preorder[1:] {
		if w == root || w == recover {
			w.dom.idom = nil
		} else {
			if w.dom.idom != lt.sdom[w.Index] {
				w.dom.idom = w.dom.idom.dom.idom
			}
			// Calculate Children relation as inverse of Idom.
			w.dom.idom.dom.children = append(w.dom.idom.dom.children, w)
		}
	}

	// Number the tree to enable constant-time Dominates queries.
	pre, post := numberDomTree(root, 0, 0)
	if recover != nil {
		numberDomTree(recover, pre, post)
	}

	// printDomTreeDot(os.Stderr, f)        // debugging
	// printDomTreeText(os.Stderr, root, 0) // debugging

	if f.Prog.mode&SanityCheckFunctions != 0 {
		sanityCheckDomTree(f)
	}
}
|
||||
|
||||
// numberDomTree sets the pre- and post-order numbers of a depth-first
// traversal of the dominator tree rooted at v. These are used to
// answer dominance queries in constant time.
//
// It returns the next available pre and post numbers, so a second
// call (for the recover block's tree) can continue the numbering.
func numberDomTree(v *BasicBlock, pre, post int32) (int32, int32) {
	v.dom.pre = pre
	pre++
	for _, child := range v.dom.children {
		pre, post = numberDomTree(child, pre, post)
	}
	v.dom.post = post
	post++
	return pre, post
}
|
||||
|
||||
// Testing utilities ----------------------------------------
|
||||
|
||||
// sanityCheckDomTree checks the correctness of the dominator tree
|
||||
// computed by the LT algorithm by comparing against the dominance
|
||||
// relation computed by a naive Kildall-style forward dataflow
|
||||
// analysis (Algorithm 10.16 from the "Dragon" book).
|
||||
//
|
||||
func sanityCheckDomTree(f *Function) {
|
||||
n := len(f.Blocks)
|
||||
|
||||
// D[i] is the set of blocks that dominate f.Blocks[i],
|
||||
// represented as a bit-set of block indices.
|
||||
D := make([]big.Int, n)
|
||||
|
||||
one := big.NewInt(1)
|
||||
|
||||
// all is the set of all blocks; constant.
|
||||
var all big.Int
|
||||
all.Set(one).Lsh(&all, uint(n)).Sub(&all, one)
|
||||
|
||||
// Initialization.
|
||||
for i, b := range f.Blocks {
|
||||
if i == 0 || b == f.Recover {
|
||||
// A root is dominated only by itself.
|
||||
D[i].SetBit(&D[0], 0, 1)
|
||||
} else {
|
||||
// All other blocks are (initially) dominated
|
||||
// by every block.
|
||||
D[i].Set(&all)
|
||||
}
|
||||
}
|
||||
|
||||
// Iteration until fixed point.
|
||||
for changed := true; changed; {
|
||||
changed = false
|
||||
for i, b := range f.Blocks {
|
||||
if i == 0 || b == f.Recover {
|
||||
continue
|
||||
}
|
||||
// Compute intersection across predecessors.
|
||||
var x big.Int
|
||||
x.Set(&all)
|
||||
for _, pred := range b.Preds {
|
||||
x.And(&x, &D[pred.Index])
|
||||
}
|
||||
x.SetBit(&x, i, 1) // a block always dominates itself.
|
||||
if D[i].Cmp(&x) != 0 {
|
||||
D[i].Set(&x)
|
||||
changed = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Check the entire relation. O(n^2).
|
||||
// The Recover block (if any) must be treated specially so we skip it.
|
||||
ok := true
|
||||
for i := 0; i < n; i++ {
|
||||
for j := 0; j < n; j++ {
|
||||
b, c := f.Blocks[i], f.Blocks[j]
|
||||
if c == f.Recover {
|
||||
continue
|
||||
}
|
||||
actual := b.Dominates(c)
|
||||
expected := D[j].Bit(i) == 1
|
||||
if actual != expected {
|
||||
fmt.Fprintf(os.Stderr, "dominates(%s, %s)==%t, want %t\n", b, c, actual, expected)
|
||||
ok = false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
preorder := f.DomPreorder()
|
||||
for _, b := range f.Blocks {
|
||||
if got := preorder[b.dom.pre]; got != b {
|
||||
fmt.Fprintf(os.Stderr, "preorder[%d]==%s, want %s\n", b.dom.pre, got, b)
|
||||
ok = false
|
||||
}
|
||||
}
|
||||
|
||||
if !ok {
|
||||
panic("sanityCheckDomTree failed for " + f.String())
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Printing functions ----------------------------------------
|
||||
|
||||
// printDomTree prints the dominator tree as text, using indentation.
|
||||
//lint:ignore U1000 used during debugging
|
||||
func printDomTreeText(buf *bytes.Buffer, v *BasicBlock, indent int) {
|
||||
fmt.Fprintf(buf, "%*s%s\n", 4*indent, "", v)
|
||||
for _, child := range v.dom.children {
|
||||
printDomTreeText(buf, child, indent+1)
|
||||
}
|
||||
}
|
||||
|
||||
// printDomTreeDot prints the dominator tree of f in AT&T GraphViz
// (.dot) format. Solid edges are dominator-tree edges; dotted edges
// are CFG edges.
//lint:ignore U1000 used during debugging
func printDomTreeDot(buf *bytes.Buffer, f *Function) {
	fmt.Fprintln(buf, "//", f)
	fmt.Fprintln(buf, "digraph domtree {")
	for i, b := range f.Blocks {
		v := b.dom
		fmt.Fprintf(buf, "\tn%d [label=\"%s (%d, %d)\",shape=\"rectangle\"];\n", v.pre, b, v.pre, v.post)
		// TODO(adonovan): improve appearance of edges
		// belonging to both dominator tree and CFG.

		// Dominator tree edge.
		if i != 0 {
			fmt.Fprintf(buf, "\tn%d -> n%d [style=\"solid\",weight=100];\n", v.idom.dom.pre, v.pre)
		}
		// CFG edges.
		for _, pred := range b.Preds {
			fmt.Fprintf(buf, "\tn%d -> n%d [style=\"dotted\",weight=0];\n", pred.dom.pre, v.pre)
		}
	}
	fmt.Fprintln(buf, "}")
}
|
469
vendor/honnef.co/go/tools/ssa/emit.go
vendored
Normal file
469
vendor/honnef.co/go/tools/ssa/emit.go
vendored
Normal file
@ -0,0 +1,469 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ssa
|
||||
|
||||
// Helpers for emitting SSA instructions.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/token"
|
||||
"go/types"
|
||||
)
|
||||
|
||||
// emitNew emits to f a new (heap Alloc) instruction allocating an
|
||||
// object of type typ. pos is the optional source location.
|
||||
//
|
||||
func emitNew(f *Function, typ types.Type, pos token.Pos) *Alloc {
|
||||
v := &Alloc{Heap: true}
|
||||
v.setType(types.NewPointer(typ))
|
||||
v.setPos(pos)
|
||||
f.emit(v)
|
||||
return v
|
||||
}
|
||||
|
||||
// emitLoad emits to f an instruction to load the address addr into a
// new temporary, and returns the value so defined.
//
func emitLoad(f *Function, addr Value) *UnOp {
	// A load is represented as unary "*" applied to the address.
	v := &UnOp{Op: token.MUL, X: addr}
	v.setType(deref(addr.Type()))
	f.emit(v)
	return v
}
|
||||
|
||||
// emitDebugRef emits to f a DebugRef pseudo-instruction associating
// expression e with value v. isAddr indicates whether v is the
// address of e rather than its value.
//
// It is a no-op unless the function has debug info enabled.
func emitDebugRef(f *Function, e ast.Expr, v Value, isAddr bool) {
	if !f.debugInfo() {
		return // debugging not enabled
	}
	if v == nil || e == nil {
		panic("nil")
	}
	var obj types.Object
	e = unparen(e)
	if id, ok := e.(*ast.Ident); ok {
		if isBlankIdent(id) {
			return
		}
		obj = f.Pkg.objectOf(id)
		switch obj.(type) {
		case *types.Nil, *types.Const, *types.Builtin:
			// These denote no run-time value; nothing to reference.
			return
		}
	}
	f.emit(&DebugRef{
		X:      v,
		Expr:   e,
		IsAddr: isAddr,
		object: obj,
	})
}
|
||||
|
||||
// emitArith emits to f code to compute the binary operation op(x, y)
// where op is an eager shift, logical or arithmetic operation.
// (Use emitCompare() for comparisons and Builder.logicalBinop() for
// non-eager operations.)
//
func emitArith(f *Function, op token.Token, x, y Value, t types.Type, pos token.Pos) Value {
	switch op {
	case token.SHL, token.SHR:
		// Only the left operand takes the result type; the shift
		// count keeps (or is normalized to) an unsigned type.
		x = emitConv(f, x, t)
		// y may be signed or an 'untyped' constant.
		// TODO(adonovan): whence signed values?
		if b, ok := y.Type().Underlying().(*types.Basic); ok && b.Info()&types.IsUnsigned == 0 {
			y = emitConv(f, y, types.Typ[types.Uint64])
		}

	case token.ADD, token.SUB, token.MUL, token.QUO, token.REM, token.AND, token.OR, token.XOR, token.AND_NOT:
		x = emitConv(f, x, t)
		y = emitConv(f, y, t)

	default:
		panic("illegal op in emitArith: " + op.String())

	}
	v := &BinOp{
		Op: op,
		X:  x,
		Y:  y,
	}
	v.setPos(pos)
	v.setType(t)
	return f.emit(v)
}
|
||||
|
||||
// emitCompare emits to f code to compute the boolean result of
// the comparison 'x op y'.
//
func emitCompare(f *Function, op token.Token, x, y Value, pos token.Pos) Value {
	xt := x.Type().Underlying()
	yt := y.Type().Underlying()

	// Special case to optimise a tagless SwitchStmt so that
	// these are equivalent
	//   switch { case e: ...}
	//   switch true { case e: ... }
	//   if e==true { ... }
	// even in the case when e's type is an interface.
	// TODO(adonovan): opt: generalise to x==true, false!=y, etc.
	if x == vTrue && op == token.EQL {
		if yt, ok := yt.(*types.Basic); ok && yt.Info()&types.IsBoolean != 0 {
			return y
		}
	}

	// Bring both operands to a common type before comparing.
	if types.Identical(xt, yt) {
		// no conversion necessary
	} else if _, ok := xt.(*types.Interface); ok {
		y = emitConv(f, y, x.Type())
	} else if _, ok := yt.(*types.Interface); ok {
		x = emitConv(f, x, y.Type())
	} else if _, ok := x.(*Const); ok {
		x = emitConv(f, x, y.Type())
	} else if _, ok := y.(*Const); ok {
		y = emitConv(f, y, x.Type())
		//lint:ignore SA9003 no-op
	} else {
		// other cases, e.g. channels. No-op.
	}

	v := &BinOp{
		Op: op,
		X:  x,
		Y:  y,
	}
	v.setPos(pos)
	v.setType(tBool)
	return f.emit(v)
}
|
||||
|
||||
// isValuePreserving returns true if a conversion from ut_src to
|
||||
// ut_dst is value-preserving, i.e. just a change of type.
|
||||
// Precondition: neither argument is a named type.
|
||||
//
|
||||
func isValuePreserving(ut_src, ut_dst types.Type) bool {
|
||||
// Identical underlying types?
|
||||
if structTypesIdentical(ut_dst, ut_src) {
|
||||
return true
|
||||
}
|
||||
|
||||
switch ut_dst.(type) {
|
||||
case *types.Chan:
|
||||
// Conversion between channel types?
|
||||
_, ok := ut_src.(*types.Chan)
|
||||
return ok
|
||||
|
||||
case *types.Pointer:
|
||||
// Conversion between pointers with identical base types?
|
||||
_, ok := ut_src.(*types.Pointer)
|
||||
return ok
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// emitConv emits to f code to convert Value val to exactly type typ,
// and returns the converted value.  Implicit conversions are required
// by language assignability rules in assignments, parameter passing,
// etc.  Conversions cannot fail dynamically.
//
// The cases below are ordered from cheapest to most general; each
// earlier case short-circuits the later ones.
//
func emitConv(f *Function, val Value, typ types.Type) Value {
	t_src := val.Type()

	// Identical types?  Conversion is a no-op.
	if types.Identical(t_src, typ) {
		return val
	}

	ut_dst := typ.Underlying()
	ut_src := t_src.Underlying()

	// Just a change of type, but not value or representation?
	if isValuePreserving(ut_src, ut_dst) {
		c := &ChangeType{X: val}
		c.setType(typ)
		return f.emit(c)
	}

	// Conversion to, or construction of a value of, an interface type?
	if _, ok := ut_dst.(*types.Interface); ok {
		// Assignment from one interface type to another?
		if _, ok := ut_src.(*types.Interface); ok {
			c := &ChangeInterface{X: val}
			c.setType(typ)
			return f.emit(c)
		}

		// Untyped nil constant?  Return interface-typed nil constant.
		if ut_src == tUntypedNil {
			return nilConst(typ)
		}

		// Convert (non-nil) "untyped" literals to their default type
		// first, so the MakeInterface below boxes a concrete type.
		if t, ok := ut_src.(*types.Basic); ok && t.Info()&types.IsUntyped != 0 {
			val = emitConv(f, val, DefaultType(ut_src))
		}

		// Boxing makes the dynamic type's method set reachable.
		f.Pkg.Prog.needMethodsOf(val.Type())
		mi := &MakeInterface{X: val}
		mi.setType(typ)
		return f.emit(mi)
	}

	// Conversion of a compile-time constant value?
	if c, ok := val.(*Const); ok {
		if _, ok := ut_dst.(*types.Basic); ok || c.IsNil() {
			// Conversion of a compile-time constant to
			// another constant type results in a new
			// constant of the destination type and
			// (initially) the same abstract value.
			// We don't truncate the value yet.
			return NewConst(c.Value, typ)
		}

		// We're converting from constant to non-constant type,
		// e.g. string -> []byte/[]rune; fall through to Convert.
	}

	// A representation-changing conversion?
	// At least one of {ut_src,ut_dst} must be *Basic.
	// (The other may be []byte or []rune.)
	_, ok1 := ut_src.(*types.Basic)
	_, ok2 := ut_dst.(*types.Basic)
	if ok1 || ok2 {
		c := &Convert{X: val}
		c.setType(typ)
		return f.emit(c)
	}

	// No remaining legal conversion: indicates a builder bug.
	panic(fmt.Sprintf("in %s: cannot convert %s (%s) to %s", f, val, val.Type(), typ))
}
|
||||
|
||||
// emitStore emits to f an instruction to store value val at location
|
||||
// addr, applying implicit conversions as required by assignability rules.
|
||||
//
|
||||
func emitStore(f *Function, addr, val Value, pos token.Pos) *Store {
|
||||
s := &Store{
|
||||
Addr: addr,
|
||||
Val: emitConv(f, val, deref(addr.Type())),
|
||||
pos: pos,
|
||||
}
|
||||
f.emit(s)
|
||||
return s
|
||||
}
|
||||
|
||||
// emitJump emits to f a jump to target, and updates the control-flow graph.
|
||||
// Postcondition: f.currentBlock is nil.
|
||||
//
|
||||
func emitJump(f *Function, target *BasicBlock) {
|
||||
b := f.currentBlock
|
||||
b.emit(new(Jump))
|
||||
addEdge(b, target)
|
||||
f.currentBlock = nil
|
||||
}
|
||||
|
||||
// emitIf emits to f a conditional jump to tblock or fblock based on
|
||||
// cond, and updates the control-flow graph.
|
||||
// Postcondition: f.currentBlock is nil.
|
||||
//
|
||||
func emitIf(f *Function, cond Value, tblock, fblock *BasicBlock) {
|
||||
b := f.currentBlock
|
||||
b.emit(&If{Cond: cond})
|
||||
addEdge(b, tblock)
|
||||
addEdge(b, fblock)
|
||||
f.currentBlock = nil
|
||||
}
|
||||
|
||||
// emitExtract emits to f an instruction to extract the index'th
|
||||
// component of tuple. It returns the extracted value.
|
||||
//
|
||||
func emitExtract(f *Function, tuple Value, index int) Value {
|
||||
e := &Extract{Tuple: tuple, Index: index}
|
||||
e.setType(tuple.Type().(*types.Tuple).At(index).Type())
|
||||
return f.emit(e)
|
||||
}
|
||||
|
||||
// emitTypeAssert emits to f a type assertion value := x.(t) and
|
||||
// returns the value. x.Type() must be an interface.
|
||||
//
|
||||
func emitTypeAssert(f *Function, x Value, t types.Type, pos token.Pos) Value {
|
||||
a := &TypeAssert{X: x, AssertedType: t}
|
||||
a.setPos(pos)
|
||||
a.setType(t)
|
||||
return f.emit(a)
|
||||
}
|
||||
|
||||
// emitTypeTest emits to f a type test value,ok := x.(t) and returns
|
||||
// a (value, ok) tuple. x.Type() must be an interface.
|
||||
//
|
||||
func emitTypeTest(f *Function, x Value, t types.Type, pos token.Pos) Value {
|
||||
a := &TypeAssert{
|
||||
X: x,
|
||||
AssertedType: t,
|
||||
CommaOk: true,
|
||||
}
|
||||
a.setPos(pos)
|
||||
a.setType(types.NewTuple(
|
||||
newVar("value", t),
|
||||
varOk,
|
||||
))
|
||||
return f.emit(a)
|
||||
}
|
||||
|
||||
// emitTailCall emits to f a function call in tail position. The
|
||||
// caller is responsible for all fields of 'call' except its type.
|
||||
// Intended for wrapper methods.
|
||||
// Precondition: f does/will not use deferred procedure calls.
|
||||
// Postcondition: f.currentBlock is nil.
|
||||
//
|
||||
func emitTailCall(f *Function, call *Call) {
|
||||
tresults := f.Signature.Results()
|
||||
nr := tresults.Len()
|
||||
if nr == 1 {
|
||||
call.typ = tresults.At(0).Type()
|
||||
} else {
|
||||
call.typ = tresults
|
||||
}
|
||||
tuple := f.emit(call)
|
||||
var ret Return
|
||||
switch nr {
|
||||
case 0:
|
||||
// no-op
|
||||
case 1:
|
||||
ret.Results = []Value{tuple}
|
||||
default:
|
||||
for i := 0; i < nr; i++ {
|
||||
v := emitExtract(f, tuple, i)
|
||||
// TODO(adonovan): in principle, this is required:
|
||||
// v = emitConv(f, o.Type, f.Signature.Results[i].Type)
|
||||
// but in practice emitTailCall is only used when
|
||||
// the types exactly match.
|
||||
ret.Results = append(ret.Results, v)
|
||||
}
|
||||
}
|
||||
f.emit(&ret)
|
||||
f.currentBlock = nil
|
||||
}
|
||||
|
||||
// emitImplicitSelections emits to f code to apply the sequence of
|
||||
// implicit field selections specified by indices to base value v, and
|
||||
// returns the selected value.
|
||||
//
|
||||
// If v is the address of a struct, the result will be the address of
|
||||
// a field; if it is the value of a struct, the result will be the
|
||||
// value of a field.
|
||||
//
|
||||
func emitImplicitSelections(f *Function, v Value, indices []int) Value {
|
||||
for _, index := range indices {
|
||||
fld := deref(v.Type()).Underlying().(*types.Struct).Field(index)
|
||||
|
||||
if isPointer(v.Type()) {
|
||||
instr := &FieldAddr{
|
||||
X: v,
|
||||
Field: index,
|
||||
}
|
||||
instr.setType(types.NewPointer(fld.Type()))
|
||||
v = f.emit(instr)
|
||||
// Load the field's value iff indirectly embedded.
|
||||
if isPointer(fld.Type()) {
|
||||
v = emitLoad(f, v)
|
||||
}
|
||||
} else {
|
||||
instr := &Field{
|
||||
X: v,
|
||||
Field: index,
|
||||
}
|
||||
instr.setType(fld.Type())
|
||||
v = f.emit(instr)
|
||||
}
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// emitFieldSelection emits to f code to select the index'th field of v.
|
||||
//
|
||||
// If wantAddr, the input must be a pointer-to-struct and the result
|
||||
// will be the field's address; otherwise the result will be the
|
||||
// field's value.
|
||||
// Ident id is used for position and debug info.
|
||||
//
|
||||
func emitFieldSelection(f *Function, v Value, index int, wantAddr bool, id *ast.Ident) Value {
|
||||
fld := deref(v.Type()).Underlying().(*types.Struct).Field(index)
|
||||
if isPointer(v.Type()) {
|
||||
instr := &FieldAddr{
|
||||
X: v,
|
||||
Field: index,
|
||||
}
|
||||
instr.setPos(id.Pos())
|
||||
instr.setType(types.NewPointer(fld.Type()))
|
||||
v = f.emit(instr)
|
||||
// Load the field's value iff we don't want its address.
|
||||
if !wantAddr {
|
||||
v = emitLoad(f, v)
|
||||
}
|
||||
} else {
|
||||
instr := &Field{
|
||||
X: v,
|
||||
Field: index,
|
||||
}
|
||||
instr.setPos(id.Pos())
|
||||
instr.setType(fld.Type())
|
||||
v = f.emit(instr)
|
||||
}
|
||||
emitDebugRef(f, id, v, wantAddr)
|
||||
return v
|
||||
}
|
||||
|
||||
// zeroValue emits to f code to produce a zero value of type t,
|
||||
// and returns it.
|
||||
//
|
||||
func zeroValue(f *Function, t types.Type) Value {
|
||||
switch t.Underlying().(type) {
|
||||
case *types.Struct, *types.Array:
|
||||
return emitLoad(f, f.addLocal(t, token.NoPos))
|
||||
default:
|
||||
return zeroConst(t)
|
||||
}
|
||||
}
|
||||
|
||||
// createRecoverBlock emits to f a block of code to return after a
// recovered panic, and sets f.Recover to it.
//
// If f's result parameters are named, the code loads and returns
// their current values, otherwise it returns the zero values of their
// type.
//
// Idempotent.
//
func createRecoverBlock(f *Function) {
	if f.Recover != nil {
		return // already created
	}
	// Emission temporarily retargets f.currentBlock; save it so the
	// caller's block is restored afterwards.
	saved := f.currentBlock

	f.Recover = f.newBasicBlock("recover")
	f.currentBlock = f.Recover

	var results []Value
	if f.namedResults != nil {
		// Reload NRPs to form value tuple.
		for _, r := range f.namedResults {
			results = append(results, emitLoad(f, r))
		}
	} else {
		R := f.Signature.Results()
		for i, n := 0, R.Len(); i < n; i++ {
			T := R.At(i).Type()

			// Return zero value of each result type.
			results = append(results, zeroValue(f, T))
		}
	}
	f.emit(&Return{Results: results})

	f.currentBlock = saved
}
|
765
vendor/honnef.co/go/tools/ssa/func.go
vendored
Normal file
765
vendor/honnef.co/go/tools/ssa/func.go
vendored
Normal file
@ -0,0 +1,765 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ssa
|
||||
|
||||
// This file implements the Function and BasicBlock types.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/token"
|
||||
"go/types"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// addEdge adds a control-flow graph edge from from to to.
|
||||
func addEdge(from, to *BasicBlock) {
|
||||
from.Succs = append(from.Succs, to)
|
||||
to.Preds = append(to.Preds, from)
|
||||
}
|
||||
|
||||
// Parent returns the function that contains block b.
|
||||
func (b *BasicBlock) Parent() *Function { return b.parent }
|
||||
|
||||
// String returns a human-readable label of this block.
|
||||
// It is not guaranteed unique within the function.
|
||||
//
|
||||
func (b *BasicBlock) String() string {
|
||||
return fmt.Sprintf("%d", b.Index)
|
||||
}
|
||||
|
||||
// emit appends an instruction to the current basic block.
|
||||
// If the instruction defines a Value, it is returned.
|
||||
//
|
||||
func (b *BasicBlock) emit(i Instruction) Value {
|
||||
i.setBlock(b)
|
||||
b.Instrs = append(b.Instrs, i)
|
||||
v, _ := i.(Value)
|
||||
return v
|
||||
}
|
||||
|
||||
// predIndex returns the i such that b.Preds[i] == c or panics if
|
||||
// there is none.
|
||||
func (b *BasicBlock) predIndex(c *BasicBlock) int {
|
||||
for i, pred := range b.Preds {
|
||||
if pred == c {
|
||||
return i
|
||||
}
|
||||
}
|
||||
panic(fmt.Sprintf("no edge %s -> %s", c, b))
|
||||
}
|
||||
|
||||
// hasPhi returns true if b.Instrs contains φ-nodes.
|
||||
func (b *BasicBlock) hasPhi() bool {
|
||||
_, ok := b.Instrs[0].(*Phi)
|
||||
return ok
|
||||
}
|
||||
|
||||
func (b *BasicBlock) Phis() []Instruction {
|
||||
return b.phis()
|
||||
}
|
||||
|
||||
// phis returns the prefix of b.Instrs containing all the block's φ-nodes.
|
||||
func (b *BasicBlock) phis() []Instruction {
|
||||
for i, instr := range b.Instrs {
|
||||
if _, ok := instr.(*Phi); !ok {
|
||||
return b.Instrs[:i]
|
||||
}
|
||||
}
|
||||
return nil // unreachable in well-formed blocks
|
||||
}
|
||||
|
||||
// replacePred replaces all occurrences of p in b's predecessor list with q.
|
||||
// Ordinarily there should be at most one.
|
||||
//
|
||||
func (b *BasicBlock) replacePred(p, q *BasicBlock) {
|
||||
for i, pred := range b.Preds {
|
||||
if pred == p {
|
||||
b.Preds[i] = q
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// replaceSucc replaces all occurrences of p in b's successor list with q.
|
||||
// Ordinarily there should be at most one.
|
||||
//
|
||||
func (b *BasicBlock) replaceSucc(p, q *BasicBlock) {
|
||||
for i, succ := range b.Succs {
|
||||
if succ == p {
|
||||
b.Succs[i] = q
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (b *BasicBlock) RemovePred(p *BasicBlock) {
|
||||
b.removePred(p)
|
||||
}
|
||||
|
||||
// removePred removes all occurrences of p in b's
// predecessor list and φ-nodes.
// Ordinarily there should be at most one.
//
func (b *BasicBlock) removePred(p *BasicBlock) {
	phis := b.phis()

	// We must preserve edge order for φ-nodes: Preds[i] and each
	// phi.Edges[i] describe the same incoming edge, so they are
	// compacted in lockstep.
	j := 0
	for i, pred := range b.Preds {
		if pred != p {
			b.Preds[j] = b.Preds[i]
			// Strike out φ-edge too.
			for _, instr := range phis {
				phi := instr.(*Phi)
				phi.Edges[j] = phi.Edges[i]
			}
			j++
		}
	}
	// Nil out b.Preds[j:] and φ-edges[j:] to aid GC.
	for i := j; i < len(b.Preds); i++ {
		b.Preds[i] = nil
		for _, instr := range phis {
			instr.(*Phi).Edges[i] = nil
		}
	}
	b.Preds = b.Preds[:j]
	for _, instr := range phis {
		phi := instr.(*Phi)
		phi.Edges = phi.Edges[:j]
	}
}
|
||||
|
||||
// Destinations associated with unlabelled for/switch/select stmts.
// We push/pop one of these as we enter/leave each construct and for
// each BranchStmt we scan for the innermost target of the right type.
//
type targets struct {
	tail         *targets // rest of stack
	_break       *BasicBlock // where an unlabelled "break" jumps
	_continue    *BasicBlock // where an unlabelled "continue" jumps
	_fallthrough *BasicBlock // where "fallthrough" jumps
}
|
||||
|
||||
// Destinations associated with a labelled block.
// We populate these as labels are encountered in forward gotos or
// labelled statements.
//
type lblock struct {
	_goto     *BasicBlock // target of "goto label"
	_break    *BasicBlock // target of "break label"
	_continue *BasicBlock // target of "continue label"
}
|
||||
|
||||
// labelledBlock returns the branch target associated with the
|
||||
// specified label, creating it if needed.
|
||||
//
|
||||
func (f *Function) labelledBlock(label *ast.Ident) *lblock {
|
||||
lb := f.lblocks[label.Obj]
|
||||
if lb == nil {
|
||||
lb = &lblock{_goto: f.newBasicBlock(label.Name)}
|
||||
if f.lblocks == nil {
|
||||
f.lblocks = make(map[*ast.Object]*lblock)
|
||||
}
|
||||
f.lblocks[label.Obj] = lb
|
||||
}
|
||||
return lb
|
||||
}
|
||||
|
||||
// addParam adds a (non-escaping) parameter to f.Params of the
|
||||
// specified name, type and source position.
|
||||
//
|
||||
func (f *Function) addParam(name string, typ types.Type, pos token.Pos) *Parameter {
|
||||
v := &Parameter{
|
||||
name: name,
|
||||
typ: typ,
|
||||
pos: pos,
|
||||
parent: f,
|
||||
}
|
||||
f.Params = append(f.Params, v)
|
||||
return v
|
||||
}
|
||||
|
||||
func (f *Function) addParamObj(obj types.Object) *Parameter {
|
||||
name := obj.Name()
|
||||
if name == "" {
|
||||
name = fmt.Sprintf("arg%d", len(f.Params))
|
||||
}
|
||||
param := f.addParam(name, obj.Type(), obj.Pos())
|
||||
param.object = obj
|
||||
return param
|
||||
}
|
||||
|
||||
// addSpilledParam declares a parameter that is pre-spilled to the
|
||||
// stack; the function body will load/store the spilled location.
|
||||
// Subsequent lifting will eliminate spills where possible.
|
||||
//
|
||||
func (f *Function) addSpilledParam(obj types.Object) {
|
||||
param := f.addParamObj(obj)
|
||||
spill := &Alloc{Comment: obj.Name()}
|
||||
spill.setType(types.NewPointer(obj.Type()))
|
||||
spill.setPos(obj.Pos())
|
||||
f.objects[obj] = spill
|
||||
f.Locals = append(f.Locals, spill)
|
||||
f.emit(spill)
|
||||
f.emit(&Store{Addr: spill, Val: param})
|
||||
}
|
||||
|
||||
// startBody initializes the function prior to generating SSA code for its body.
|
||||
// Precondition: f.Type() already set.
|
||||
//
|
||||
func (f *Function) startBody() {
|
||||
f.currentBlock = f.newBasicBlock("entry")
|
||||
f.objects = make(map[types.Object]Value) // needed for some synthetics, e.g. init
|
||||
}
|
||||
|
||||
// createSyntacticParams populates f.Params and generates code (spills
// and named result locals) for all the parameters declared in the
// syntax.  In addition it populates the f.objects mapping.
//
// Preconditions:
// f.startBody() was called.
// Postcondition:
// len(f.Params) == len(f.Signature.Params) + (f.Signature.Recv() ? 1 : 0)
//
func (f *Function) createSyntacticParams(recv *ast.FieldList, functype *ast.FuncType) {
	// Receiver (at most one inner iteration).
	if recv != nil {
		for _, field := range recv.List {
			for _, n := range field.Names {
				f.addSpilledParam(f.Pkg.info.Defs[n])
			}
			// Anonymous receiver?  No need to spill.
			if field.Names == nil {
				f.addParamObj(f.Signature.Recv())
			}
		}
	}

	// Parameters.
	if functype.Params != nil {
		// NB: the inner loop's "n" deliberately shadows this one;
		// the anonymous-parameter branch below uses the outer n
		// (receiver offset) to index into the signature.
		n := len(f.Params) // 1 if has recv, 0 otherwise
		for _, field := range functype.Params.List {
			for _, n := range field.Names {
				f.addSpilledParam(f.Pkg.info.Defs[n])
			}
			// Anonymous parameter?  No need to spill.
			if field.Names == nil {
				f.addParamObj(f.Signature.Params().At(len(f.Params) - n))
			}
		}
	}

	// Named results.
	if functype.Results != nil {
		for _, field := range functype.Results.List {
			// Implicit "var" decl of locals for named results.
			for _, n := range field.Names {
				f.namedResults = append(f.namedResults, f.addLocalForIdent(n))
			}
		}
	}
}
|
||||
|
||||
// numberRegisters assigns numbers to all SSA registers
|
||||
// (value-defining Instructions) in f, to aid debugging.
|
||||
// (Non-Instruction Values are named at construction.)
|
||||
//
|
||||
func numberRegisters(f *Function) {
|
||||
v := 0
|
||||
for _, b := range f.Blocks {
|
||||
for _, instr := range b.Instrs {
|
||||
switch instr.(type) {
|
||||
case Value:
|
||||
instr.(interface {
|
||||
setNum(int)
|
||||
}).setNum(v)
|
||||
v++
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// buildReferrers populates the def/use information in all non-nil
|
||||
// Value.Referrers slice.
|
||||
// Precondition: all such slices are initially empty.
|
||||
func buildReferrers(f *Function) {
|
||||
var rands []*Value
|
||||
for _, b := range f.Blocks {
|
||||
for _, instr := range b.Instrs {
|
||||
rands = instr.Operands(rands[:0]) // recycle storage
|
||||
for _, rand := range rands {
|
||||
if r := *rand; r != nil {
|
||||
if ref := r.Referrers(); ref != nil {
|
||||
*ref = append(*ref, instr)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// finishBody() finalizes the function after SSA code generation of its body:
// it drops builder state, prunes heap-escaping locals, applies a time.Tick
// peephole rewrite, runs block optimization, referrer/dominator analysis,
// lifting, and register numbering.
func (f *Function) finishBody() {
	f.objects = nil
	f.currentBlock = nil
	f.lblocks = nil

	// Don't pin the AST in memory (except in debug mode).
	if n := f.syntax; n != nil && !f.debugInfo() {
		f.syntax = extentNode{n.Pos(), n.End()}
	}

	// Remove from f.Locals any Allocs that escape to the heap.
	j := 0
	for _, l := range f.Locals {
		if !l.Heap {
			f.Locals[j] = l
			j++
		}
	}
	// Nil out f.Locals[j:] to aid GC.
	for i := j; i < len(f.Locals); i++ {
		f.Locals[i] = nil
	}
	f.Locals = f.Locals[:j]

	// comma-ok receiving from a time.Tick channel will never return
	// ok == false, so any branching on the value of ok can be
	// replaced with an unconditional jump. This will primarily match
	// `for range time.Tick(x)` loops, but it can also match
	// user-written code.
	for _, block := range f.Blocks {
		if len(block.Instrs) < 3 {
			continue
		}
		if len(block.Succs) != 2 {
			continue
		}
		// Collect pointers into block.Instrs (skipping DebugRefs) so
		// the matched If can be overwritten in place below.
		var instrs []*Instruction
		for i, ins := range block.Instrs {
			if _, ok := ins.(*DebugRef); ok {
				continue
			}
			instrs = append(instrs, &block.Instrs[i])
		}

		// Match the 3-instruction pattern:
		//   t0 = <-ch  (ch produced by a direct call to time.Tick)
		//   t1 = extract t0 #1   (the ok flag)
		//   if t1 ...
		for i, ins := range instrs {
			unop, ok := (*ins).(*UnOp)
			if !ok || unop.Op != token.ARROW {
				continue
			}
			call, ok := unop.X.(*Call)
			if !ok {
				continue
			}
			if call.Common().IsInvoke() {
				continue
			}

			// OPT(dh): surely there is a more efficient way of doing
			// this, than using FullName. We should already have
			// resolved time.Tick somewhere?
			v, ok := call.Common().Value.(*Function)
			if !ok {
				continue
			}
			t, ok := v.Object().(*types.Func)
			if !ok {
				continue
			}
			if t.FullName() != "time.Tick" {
				continue
			}
			ex, ok := (*instrs[i+1]).(*Extract)
			if !ok || ex.Tuple != unop || ex.Index != 1 {
				continue
			}

			ifstmt, ok := (*instrs[i+2]).(*If)
			if !ok || ifstmt.Cond != ex {
				continue
			}

			// Replace the If with an unconditional Jump and drop the
			// now-unreachable false edge.
			*instrs[i+2] = NewJump(block)
			succ := block.Succs[1]
			block.Succs = block.Succs[0:1]
			succ.RemovePred(block)
		}
	}

	optimizeBlocks(f)

	buildReferrers(f)

	buildDomTree(f)

	if f.Prog.mode&NaiveForm == 0 {
		// For debugging pre-state of lifting pass:
		// numberRegisters(f)
		// f.WriteTo(os.Stderr)
		lift(f)
	}

	f.namedResults = nil // (used by lifting)

	numberRegisters(f)

	if f.Prog.mode&PrintFunctions != 0 {
		printMu.Lock()
		f.WriteTo(os.Stdout)
		printMu.Unlock()
	}

	if f.Prog.mode&SanityCheckFunctions != 0 {
		mustSanityCheck(f, nil)
	}
}
|
||||
|
||||
func (f *Function) RemoveNilBlocks() {
|
||||
f.removeNilBlocks()
|
||||
}
|
||||
|
||||
// removeNilBlocks eliminates nils from f.Blocks and updates each
|
||||
// BasicBlock.Index. Use this after any pass that may delete blocks.
|
||||
//
|
||||
func (f *Function) removeNilBlocks() {
|
||||
j := 0
|
||||
for _, b := range f.Blocks {
|
||||
if b != nil {
|
||||
b.Index = j
|
||||
f.Blocks[j] = b
|
||||
j++
|
||||
}
|
||||
}
|
||||
// Nil out f.Blocks[j:] to aid GC.
|
||||
for i := j; i < len(f.Blocks); i++ {
|
||||
f.Blocks[i] = nil
|
||||
}
|
||||
f.Blocks = f.Blocks[:j]
|
||||
}
|
||||
|
||||
// SetDebugMode sets the debug mode for package pkg. If true, all its
|
||||
// functions will include full debug info. This greatly increases the
|
||||
// size of the instruction stream, and causes Functions to depend upon
|
||||
// the ASTs, potentially keeping them live in memory for longer.
|
||||
//
|
||||
func (pkg *Package) SetDebugMode(debug bool) {
|
||||
// TODO(adonovan): do we want ast.File granularity?
|
||||
pkg.debug = debug
|
||||
}
|
||||
|
||||
// debugInfo reports whether debug info is wanted for this function.
|
||||
func (f *Function) debugInfo() bool {
|
||||
return f.Pkg != nil && f.Pkg.debug
|
||||
}
|
||||
|
||||
// addNamedLocal creates a local variable, adds it to function f and
|
||||
// returns it. Its name and type are taken from obj. Subsequent
|
||||
// calls to f.lookup(obj) will return the same local.
|
||||
//
|
||||
func (f *Function) addNamedLocal(obj types.Object) *Alloc {
|
||||
l := f.addLocal(obj.Type(), obj.Pos())
|
||||
l.Comment = obj.Name()
|
||||
f.objects[obj] = l
|
||||
return l
|
||||
}
|
||||
|
||||
func (f *Function) addLocalForIdent(id *ast.Ident) *Alloc {
|
||||
return f.addNamedLocal(f.Pkg.info.Defs[id])
|
||||
}
|
||||
|
||||
// addLocal creates an anonymous local variable of type typ, adds it
|
||||
// to function f and returns it. pos is the optional source location.
|
||||
//
|
||||
func (f *Function) addLocal(typ types.Type, pos token.Pos) *Alloc {
|
||||
v := &Alloc{}
|
||||
v.setType(types.NewPointer(typ))
|
||||
v.setPos(pos)
|
||||
f.Locals = append(f.Locals, v)
|
||||
f.emit(v)
|
||||
return v
|
||||
}
|
||||
|
||||
// lookup returns the address of the named variable identified by obj
// that is local to function f or one of its enclosing functions.
// If escaping, the reference comes from a potentially escaping pointer
// expression and the referent must be heap-allocated.
//
func (f *Function) lookup(obj types.Object, escaping bool) Value {
	if v, ok := f.objects[obj]; ok {
		if alloc, ok := v.(*Alloc); ok && escaping {
			alloc.Heap = true
		}
		return v // function-local var (address)
	}

	// Definition must be in an enclosing function;
	// plumb it through intervening closures.
	// Note: the recursive lookup always passes escaping=true, since a
	// variable captured by a closure must be heap-allocated.
	if f.parent == nil {
		panic("no ssa.Value for " + obj.String())
	}
	outer := f.parent.lookup(obj, true) // escaping
	v := &FreeVar{
		name:   obj.Name(),
		typ:    outer.Type(),
		pos:    outer.Pos(),
		outer:  outer,
		parent: f,
	}
	// Cache the FreeVar so subsequent lookups in f reuse it.
	f.objects[obj] = v
	f.FreeVars = append(f.FreeVars, v)
	return v
}
|
||||
|
||||
// emit emits the specified instruction to function f.
|
||||
func (f *Function) emit(instr Instruction) Value {
|
||||
return f.currentBlock.emit(instr)
|
||||
}
|
||||
|
||||
// RelString returns the full name of this function, qualified by
// package name, receiver type, etc.
//
// The specific formatting rules are not guaranteed and may change.
//
// Examples:
//      "math.IsNaN"                  // a package-level function
//      "(*bytes.Buffer).Bytes"       // a declared method or a wrapper
//      "(*bytes.Buffer).Bytes$thunk" // thunk (func wrapping method; receiver is param 0)
//      "(*bytes.Buffer).Bytes$bound" // bound (func wrapping method; receiver supplied by closure)
//      "main.main$1"                 // an anonymous function in main
//      "main.init#1"                 // a declared init function
//      "main.init"                   // the synthesized package initializer
//
// When these functions are referred to from within the same package
// (i.e. from == f.Pkg.Object), they are rendered without the package path.
// For example: "IsNaN", "(*Buffer).Bytes", etc.
//
// All non-synthetic functions have distinct package-qualified names.
// (But two methods may have the same name "(T).f" if one is a synthetic
// wrapper promoting a non-exported method "f" from another package; in
// that case, the strings are equal but the identifiers "f" are distinct.)
//
func (f *Function) RelString(from *types.Package) string {
	// Anonymous?
	if f.parent != nil {
		// An anonymous function's Name() looks like "parentName$1",
		// but its String() should include the type/package/etc.
		parent := f.parent.RelString(from)
		for i, anon := range f.parent.AnonFuncs {
			if anon == f {
				return fmt.Sprintf("%s$%d", parent, 1+i)
			}
		}

		return f.name // should never happen
	}

	// Method (declared or wrapper)?
	if recv := f.Signature.Recv(); recv != nil {
		return f.relMethod(from, recv.Type())
	}

	// Thunk?
	if f.method != nil {
		return f.relMethod(from, f.method.Recv())
	}

	// Bound?
	// (Identified by its single free variable — the receiver — and
	// the "$bound" naming convention.)
	if len(f.FreeVars) == 1 && strings.HasSuffix(f.name, "$bound") {
		return f.relMethod(from, f.FreeVars[0].Type())
	}

	// Package-level function?
	// Prefix with package name for cross-package references only.
	if p := f.pkg(); p != nil && p != from {
		return fmt.Sprintf("%s.%s", p.Path(), f.name)
	}

	// Unknown.
	return f.name
}
|
||||
|
||||
func (f *Function) relMethod(from *types.Package, recv types.Type) string {
|
||||
return fmt.Sprintf("(%s).%s", relType(recv, from), f.name)
|
||||
}
|
||||
|
||||
// writeSignature writes to buf the signature sig in declaration syntax.
|
||||
func writeSignature(buf *bytes.Buffer, from *types.Package, name string, sig *types.Signature, params []*Parameter) {
|
||||
buf.WriteString("func ")
|
||||
if recv := sig.Recv(); recv != nil {
|
||||
buf.WriteString("(")
|
||||
if n := params[0].Name(); n != "" {
|
||||
buf.WriteString(n)
|
||||
buf.WriteString(" ")
|
||||
}
|
||||
types.WriteType(buf, params[0].Type(), types.RelativeTo(from))
|
||||
buf.WriteString(") ")
|
||||
}
|
||||
buf.WriteString(name)
|
||||
types.WriteSignature(buf, sig, types.RelativeTo(from))
|
||||
}
|
||||
|
||||
func (f *Function) pkg() *types.Package {
|
||||
if f.Pkg != nil {
|
||||
return f.Pkg.Pkg
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var _ io.WriterTo = (*Function)(nil) // *Function implements io.Writer
|
||||
|
||||
func (f *Function) WriteTo(w io.Writer) (int64, error) {
|
||||
var buf bytes.Buffer
|
||||
WriteFunction(&buf, f)
|
||||
n, err := w.Write(buf.Bytes())
|
||||
return int64(n), err
|
||||
}
|
||||
|
||||
// WriteFunction writes to buf a human-readable "disassembly" of f:
// a header of "# key: value" metadata lines, the signature, then each
// basic block with its instructions laid out in two aligned columns.
func WriteFunction(buf *bytes.Buffer, f *Function) {
	fmt.Fprintf(buf, "# Name: %s\n", f.String())
	if f.Pkg != nil {
		fmt.Fprintf(buf, "# Package: %s\n", f.Pkg.Pkg.Path())
	}
	if syn := f.Synthetic; syn != "" {
		fmt.Fprintln(buf, "# Synthetic:", syn)
	}
	if pos := f.Pos(); pos.IsValid() {
		fmt.Fprintf(buf, "# Location: %s\n", f.Prog.Fset.Position(pos))
	}

	if f.parent != nil {
		fmt.Fprintf(buf, "# Parent: %s\n", f.parent.Name())
	}

	if f.Recover != nil {
		fmt.Fprintf(buf, "# Recover: %s\n", f.Recover)
	}

	// Types are rendered relative to this package throughout.
	from := f.pkg()

	if f.FreeVars != nil {
		buf.WriteString("# Free variables:\n")
		for i, fv := range f.FreeVars {
			fmt.Fprintf(buf, "# % 3d:\t%s %s\n", i, fv.Name(), relType(fv.Type(), from))
		}
	}

	if len(f.Locals) > 0 {
		buf.WriteString("# Locals:\n")
		for i, l := range f.Locals {
			fmt.Fprintf(buf, "# % 3d:\t%s %s\n", i, l.Name(), relType(deref(l.Type()), from))
		}
	}
	writeSignature(buf, from, f.Name(), f.Signature, f.Params)
	buf.WriteString(":\n")

	if f.Blocks == nil {
		buf.WriteString("\t(external)\n")
	}

	// NB. column calculations are confused by non-ASCII
	// characters and assume 8-space tabs.
	const punchcard = 80 // for old time's sake.
	const tabwidth = 8
	for _, b := range f.Blocks {
		if b == nil {
			// Corrupt CFG.
			fmt.Fprintf(buf, ".nil:\n")
			continue
		}
		// Block header: index left, "comment P:n S:n" right-aligned.
		n, _ := fmt.Fprintf(buf, "%d:", b.Index)
		bmsg := fmt.Sprintf("%s P:%d S:%d", b.Comment, len(b.Preds), len(b.Succs))
		fmt.Fprintf(buf, "%*s%s\n", punchcard-1-n-len(bmsg), "", bmsg)

		if false { // CFG debugging
			fmt.Fprintf(buf, "\t# CFG: %s --> %s --> %s\n", b.Preds, b, b.Succs)
		}
		for _, instr := range b.Instrs {
			buf.WriteString("\t")
			switch v := instr.(type) {
			case Value:
				// l tracks the remaining width available for
				// right-aligning the value's type.
				l := punchcard - tabwidth
				// Left-align the instruction.
				if name := v.Name(); name != "" {
					n, _ := fmt.Fprintf(buf, "%s = ", name)
					l -= n
				}
				n, _ := buf.WriteString(instr.String())
				l -= n
				// Right-align the type if there's space.
				if t := v.Type(); t != nil {
					buf.WriteByte(' ')
					ts := relType(t, from)
					l -= len(ts) + len(" ") // (spaces before and after type)
					if l > 0 {
						fmt.Fprintf(buf, "%*s", l, "")
					}
					buf.WriteString(ts)
				}
			case nil:
				// Be robust against bad transforms.
				buf.WriteString("<deleted>")
			default:
				buf.WriteString(instr.String())
			}
			buf.WriteString("\n")
		}
	}
	fmt.Fprintf(buf, "\n")
}
|
||||
|
||||
// newBasicBlock adds to f a new basic block and returns it. It does
|
||||
// not automatically become the current block for subsequent calls to emit.
|
||||
// comment is an optional string for more readable debugging output.
|
||||
//
|
||||
func (f *Function) newBasicBlock(comment string) *BasicBlock {
|
||||
b := &BasicBlock{
|
||||
Index: len(f.Blocks),
|
||||
Comment: comment,
|
||||
parent: f,
|
||||
}
|
||||
b.Succs = b.succs2[:0]
|
||||
f.Blocks = append(f.Blocks, b)
|
||||
return b
|
||||
}
|
||||
|
||||
// NewFunction returns a new synthetic Function instance belonging to
|
||||
// prog, with its name and signature fields set as specified.
|
||||
//
|
||||
// The caller is responsible for initializing the remaining fields of
|
||||
// the function object, e.g. Pkg, Params, Blocks.
|
||||
//
|
||||
// It is practically impossible for clients to construct well-formed
|
||||
// SSA functions/packages/programs directly, so we assume this is the
|
||||
// job of the Builder alone. NewFunction exists to provide clients a
|
||||
// little flexibility. For example, analysis tools may wish to
|
||||
// construct fake Functions for the root of the callgraph, a fake
|
||||
// "reflect" package, etc.
|
||||
//
|
||||
// TODO(adonovan): think harder about the API here.
|
||||
//
|
||||
func (prog *Program) NewFunction(name string, sig *types.Signature, provenance string) *Function {
|
||||
return &Function{Prog: prog, name: name, Signature: sig, Synthetic: provenance}
|
||||
}
|
||||
|
||||
// extentNode is a lightweight ast.Node that records only the start
// and end positions of a function's lexical extent, avoiding the need
// to pin the full AST in memory.
type extentNode [2]token.Pos

// Pos returns the start of the extent.
func (e extentNode) Pos() token.Pos { return e[0] }

// End returns the end of the extent.
func (e extentNode) End() token.Pos { return e[1] }
|
||||
|
||||
// Syntax returns an ast.Node whose Pos/End methods provide the
// lexical extent of the function if it was defined by Go source code
// (f.Synthetic==""), or nil otherwise.
//
// If f was built with debug information (see Package.SetDebugRef),
// the result is the *ast.FuncDecl or *ast.FuncLit that declared the
// function.  Otherwise, it is an opaque Node providing only position
// information; this avoids pinning the AST in memory.
//
func (f *Function) Syntax() ast.Node { return f.syntax }
|
7
vendor/honnef.co/go/tools/ssa/identical.go
vendored
Normal file
7
vendor/honnef.co/go/tools/ssa/identical.go
vendored
Normal file
@ -0,0 +1,7 @@
|
||||
// +build go1.8

package ssa

import "go/types"

// structTypesIdentical reports whether two struct types are identical;
// on go1.8+ struct tags are ignored (types.IdenticalIgnoreTags was
// added in Go 1.8). The pre-1.8 fallback lives in identical_17.go.
var structTypesIdentical = types.IdenticalIgnoreTags
|
7
vendor/honnef.co/go/tools/ssa/identical_17.go
vendored
Normal file
7
vendor/honnef.co/go/tools/ssa/identical_17.go
vendored
Normal file
@ -0,0 +1,7 @@
|
||||
// +build !go1.8

package ssa

import "go/types"

// structTypesIdentical reports whether two struct types are identical.
// Before Go 1.8 there is no tag-insensitive comparison available, so
// struct tags participate in the comparison (see identical.go for the
// go1.8+ variant).
var structTypesIdentical = types.Identical
|
657
vendor/honnef.co/go/tools/ssa/lift.go
vendored
Normal file
657
vendor/honnef.co/go/tools/ssa/lift.go
vendored
Normal file
@ -0,0 +1,657 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ssa
|
||||
|
||||
// This file defines the lifting pass which tries to "lift" Alloc
|
||||
// cells (new/local variables) into SSA registers, replacing loads
|
||||
// with the dominating stored value, eliminating loads and stores, and
|
||||
// inserting φ-nodes as needed.
|
||||
|
||||
// Cited papers and resources:
|
||||
//
|
||||
// Ron Cytron et al. 1991. Efficiently computing SSA form...
|
||||
// http://doi.acm.org/10.1145/115372.115320
|
||||
//
|
||||
// Cooper, Harvey, Kennedy. 2001. A Simple, Fast Dominance Algorithm.
|
||||
// Software Practice and Experience 2001, 4:1-10.
|
||||
// http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
|
||||
//
|
||||
// Daniel Berlin, llvmdev mailing list, 2012.
|
||||
// http://lists.cs.uiuc.edu/pipermail/llvmdev/2012-January/046638.html
|
||||
// (Be sure to expand the whole thread.)
|
||||
|
||||
// TODO(adonovan): opt: there are many optimizations worth evaluating, and
|
||||
// the conventional wisdom for SSA construction is that a simple
|
||||
// algorithm well engineered often beats those of better asymptotic
|
||||
// complexity on all but the most egregious inputs.
|
||||
//
|
||||
// Danny Berlin suggests that the Cooper et al. algorithm for
|
||||
// computing the dominance frontier is superior to Cytron et al.
|
||||
// Furthermore he recommends that rather than computing the DF for the
|
||||
// whole function then renaming all alloc cells, it may be cheaper to
|
||||
// compute the DF for each alloc cell separately and throw it away.
|
||||
//
|
||||
// Consider exploiting liveness information to avoid creating dead
|
||||
// φ-nodes which we then immediately remove.
|
||||
//
|
||||
// Also see many other "TODO: opt" suggestions in the code.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/token"
|
||||
"go/types"
|
||||
"math/big"
|
||||
"os"
|
||||
)
|
||||
|
||||
// If true, show diagnostic information at each step of lifting.
// Very verbose.  Intended only for debugging this package; diagnostics
// are written to os.Stderr.
const debugLifting = false
|
||||
|
||||
// domFrontier maps each block to the set of blocks in its dominance
// frontier.  The outer slice is conceptually a map keyed by
// Block.Index.  The inner slice is conceptually a set, possibly
// containing duplicates.
//
// TODO(adonovan): opt: measure impact of dups; consider a packed bit
// representation, e.g. big.Int, and bitwise parallel operations for
// the union step in the Children loop.
//
// domFrontier's methods mutate the slice's elements but not its
// length, so their receivers needn't be pointers.
//
type domFrontier [][]*BasicBlock

// add records v as a member of u's dominance frontier.
// Duplicates are permitted (see type comment).
func (df domFrontier) add(u, v *BasicBlock) {
	p := &df[u.Index]
	*p = append(*p, v)
}

// build builds the dominance frontier df for the dominator (sub)tree
// rooted at u, using the Cytron et al. algorithm.
//
// TODO(adonovan): opt: consider Berlin approach, computing pruned SSA
// by pruning the entire IDF computation, rather than merely pruning
// the DF -> IDF step.
func (df domFrontier) build(u *BasicBlock) {
	// Encounter each node u in postorder of dom tree:
	// children's frontiers must be complete before u's union step below.
	for _, child := range u.dom.children {
		df.build(child)
	}
	// Local rule: a successor not immediately dominated by u is in DF(u).
	for _, vb := range u.Succs {
		if v := vb.dom; v.idom != u {
			df.add(u, vb)
		}
	}
	// Up rule: inherit from children's frontiers anything u does not
	// immediately dominate.
	for _, w := range u.dom.children {
		for _, vb := range df[w.Index] {
			// TODO(adonovan): opt: use word-parallel bitwise union.
			if v := vb.dom; v.idom != u {
				df.add(u, vb)
			}
		}
	}
}
|
||||
|
||||
func buildDomFrontier(fn *Function) domFrontier {
|
||||
df := make(domFrontier, len(fn.Blocks))
|
||||
df.build(fn.Blocks[0])
|
||||
if fn.Recover != nil {
|
||||
df.build(fn.Recover)
|
||||
}
|
||||
return df
|
||||
}
|
||||
|
||||
func removeInstr(refs []Instruction, instr Instruction) []Instruction {
|
||||
i := 0
|
||||
for _, ref := range refs {
|
||||
if ref == instr {
|
||||
continue
|
||||
}
|
||||
refs[i] = ref
|
||||
i++
|
||||
}
|
||||
for j := i; j != len(refs); j++ {
|
||||
refs[j] = nil // aid GC
|
||||
}
|
||||
return refs[:i]
|
||||
}
|
||||
|
||||
// lift replaces local and new Allocs accessed only with
// load/store by SSA registers, inserting φ-nodes where necessary.
// The result is a program in classical pruned SSA form.
//
// Preconditions:
// - fn has no dead blocks (blockopt has run).
// - Def/use info (Operands and Referrers) is up-to-date.
// - The dominator tree is up-to-date.
//
func lift(fn *Function) {
	// TODO(adonovan): opt: lots of little optimizations may be
	// worthwhile here, especially if they cause us to avoid
	// buildDomFrontier.  For example:
	//
	// - Alloc never loaded?  Eliminate.
	// - Alloc never stored?  Replace all loads with a zero constant.
	// - Alloc stored once?  Replace loads with dominating store;
	//   don't forget that an Alloc is itself an effective store
	//   of zero.
	// - Alloc used only within a single block?
	//   Use degenerate algorithm avoiding φ-nodes.
	// - Consider synergy with scalar replacement of aggregates (SRA).
	//   e.g. *(&x.f) where x is an Alloc.
	//   Perhaps we'd get better results if we generated this as x.f
	//   i.e. Field(x, .f) instead of Load(FieldIndex(x, .f)).
	//   Unclear.
	//
	// But we will start with the simplest correct code.
	df := buildDomFrontier(fn)

	if debugLifting {
		title := false
		for i, blocks := range df {
			if blocks != nil {
				if !title {
					fmt.Fprintf(os.Stderr, "Dominance frontier of %s:\n", fn)
					title = true
				}
				fmt.Fprintf(os.Stderr, "\t%s: %s\n", fn.Blocks[i], blocks)
			}
		}
	}

	newPhis := make(newPhiMap)

	// During this pass we will replace some BasicBlock.Instrs
	// (allocs, loads and stores) with nil, keeping a count in
	// BasicBlock.gaps.  At the end we will reset Instrs to the
	// concatenation of all non-dead newPhis and non-nil Instrs
	// for the block, reusing the original array if space permits.

	// While we're here, we also eliminate 'rundefers'
	// instructions in functions that contain no 'defer'
	// instructions.
	usesDefer := false

	// A counter used to generate ~unique ids for Phi nodes, as an
	// aid to debugging.  We use large numbers to make them highly
	// visible.  All nodes are renumbered later.
	fresh := 1000

	// Determine which allocs we can lift and number them densely.
	// The renaming phase uses this numbering for compact maps.
	// An Alloc that cannot be lifted keeps index -1.
	numAllocs := 0
	for _, b := range fn.Blocks {
		b.gaps = 0
		b.rundefers = 0
		for _, instr := range b.Instrs {
			switch instr := instr.(type) {
			case *Alloc:
				index := -1
				if liftAlloc(df, instr, newPhis, &fresh) {
					index = numAllocs
					numAllocs++
				}
				instr.index = index
			case *Defer:
				usesDefer = true
			case *RunDefers:
				b.rundefers++
			}
		}
	}

	// renaming maps an alloc (keyed by index) to its replacement
	// value.  Initially the renaming contains nil, signifying the
	// zero constant of the appropriate type; we construct the
	// Const lazily at most once on each path through the domtree.
	// TODO(adonovan): opt: cache per-function not per subtree.
	renaming := make([]Value, numAllocs)

	// Renaming.
	rename(fn.Blocks[0], renaming, newPhis)

	// Eliminate dead φ-nodes.
	removeDeadPhis(fn.Blocks, newPhis)

	// Prepend remaining live φ-nodes to each block.
	for _, b := range fn.Blocks {
		nps := newPhis[b]
		j := len(nps)

		rundefersToKill := b.rundefers
		if usesDefer {
			rundefersToKill = 0
		}

		if j+b.gaps+rundefersToKill == 0 {
			continue // fast path: no new phis or gaps
		}

		// Compact nps + non-nil Instrs into a new slice.
		// TODO(adonovan): opt: compact in situ (rightwards)
		// if Instrs has sufficient space or slack.
		dst := make([]Instruction, len(b.Instrs)+j-b.gaps-rundefersToKill)
		// φ-nodes go first; j already indexes past them for the copy below.
		for i, np := range nps {
			dst[i] = np.phi
		}
		for _, instr := range b.Instrs {
			if instr == nil {
				continue // a gap left by the renaming pass
			}
			if !usesDefer {
				if _, ok := instr.(*RunDefers); ok {
					continue // rundefers is dead: fn has no defers
				}
			}
			dst[j] = instr
			j++
		}
		b.Instrs = dst
	}

	// Remove any fn.Locals that were lifted (index >= 0 marks lifted).
	j := 0
	for _, l := range fn.Locals {
		if l.index < 0 {
			fn.Locals[j] = l
			j++
		}
	}
	// Nil out fn.Locals[j:] to aid GC.
	for i := j; i < len(fn.Locals); i++ {
		fn.Locals[i] = nil
	}
	fn.Locals = fn.Locals[:j]
}
|
||||
|
||||
// removeDeadPhis removes φ-nodes not transitively needed by a
// non-Phi, non-DebugRef instruction.
func removeDeadPhis(blocks []*BasicBlock, newPhis newPhiMap) {
	// First pass: find the set of "live" φ-nodes: those reachable
	// from some non-Phi instruction.
	//
	// We compute reachability in reverse, starting from each φ,
	// rather than forwards, starting from each live non-Phi
	// instruction, because this way visits much less of the
	// Value graph.
	livePhis := make(map[*Phi]bool)
	for _, npList := range newPhis {
		for _, np := range npList {
			phi := np.phi
			if !livePhis[phi] && phiHasDirectReferrer(phi) {
				markLivePhi(livePhis, phi)
			}
		}
	}

	// Existing φ-nodes due to && and || operators
	// are all considered live (see Go issue 19622).
	for _, b := range blocks {
		for _, phi := range b.phis() {
			markLivePhi(livePhis, phi.(*Phi))
		}
	}

	// Second pass: eliminate unused phis from newPhis.
	// Live entries are compacted in place; dead ones are unlinked.
	for block, npList := range newPhis {
		j := 0
		for _, np := range npList {
			if livePhis[np.phi] {
				npList[j] = np
				j++
			} else {
				// discard it, first removing it from referrers
				for _, val := range np.phi.Edges {
					if refs := val.Referrers(); refs != nil {
						*refs = removeInstr(*refs, np.phi)
					}
				}
				// Detach the dead φ from its block.
				np.phi.block = nil
			}
		}
		newPhis[block] = npList[:j]
	}
}
|
||||
|
||||
// markLivePhi marks phi, and all φ-nodes transitively reachable via
|
||||
// its Operands, live.
|
||||
func markLivePhi(livePhis map[*Phi]bool, phi *Phi) {
|
||||
livePhis[phi] = true
|
||||
for _, rand := range phi.Operands(nil) {
|
||||
if q, ok := (*rand).(*Phi); ok {
|
||||
if !livePhis[q] {
|
||||
markLivePhi(livePhis, q)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// phiHasDirectReferrer reports whether phi is directly referred to by
|
||||
// a non-Phi instruction. Such instructions are the
|
||||
// roots of the liveness traversal.
|
||||
func phiHasDirectReferrer(phi *Phi) bool {
|
||||
for _, instr := range *phi.Referrers() {
|
||||
if _, ok := instr.(*Phi); !ok {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
type BlockSet struct{ big.Int } // (inherit methods from Int)
|
||||
|
||||
// add adds b to the set and returns true if the set changed.
|
||||
func (s *BlockSet) Add(b *BasicBlock) bool {
|
||||
i := b.Index
|
||||
if s.Bit(i) != 0 {
|
||||
return false
|
||||
}
|
||||
s.SetBit(&s.Int, i, 1)
|
||||
return true
|
||||
}
|
||||
|
||||
func (s *BlockSet) Has(b *BasicBlock) bool {
|
||||
return s.Bit(b.Index) == 1
|
||||
}
|
||||
|
||||
// take removes an arbitrary element from a set s and
|
||||
// returns its index, or returns -1 if empty.
|
||||
func (s *BlockSet) Take() int {
|
||||
l := s.BitLen()
|
||||
for i := 0; i < l; i++ {
|
||||
if s.Bit(i) == 1 {
|
||||
s.SetBit(&s.Int, i, 0)
|
||||
return i
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
// newPhi is a pair of a newly introduced φ-node and the lifted Alloc
// it replaces.
type newPhi struct {
	phi   *Phi   // the inserted φ-node
	alloc *Alloc // the Alloc cell this φ renames
}

// newPhiMap records for each basic block, the set of newPhis that
// must be prepended to the block.
type newPhiMap map[*BasicBlock][]newPhi
|
||||
|
||||
// liftAlloc determines whether alloc can be lifted into registers,
// and if so, it populates newPhis with all the φ-nodes it may require
// and returns true.
//
// fresh is a source of fresh ids for phi nodes.
//
func liftAlloc(df domFrontier, alloc *Alloc, newPhis newPhiMap, fresh *int) bool {
	// Don't lift aggregates into registers, because we don't have
	// a way to express their zero-constants.
	switch deref(alloc.Type()).Underlying().(type) {
	case *types.Array, *types.Struct:
		return false
	}

	// Don't lift named return values in functions that defer
	// calls that may recover from panic.
	if fn := alloc.Parent(); fn.Recover != nil {
		for _, nr := range fn.namedResults {
			if nr == alloc {
				return false
			}
		}
	}

	// Compute defblocks, the set of blocks containing a
	// definition of the alloc cell.
	var defblocks BlockSet
	for _, instr := range *alloc.Referrers() {
		// Bail out if we discover the alloc is not liftable;
		// the only operations permitted to use the alloc are
		// loads/stores into the cell, and DebugRef.
		switch instr := instr.(type) {
		case *Store:
			if instr.Val == alloc {
				return false // address used as value
			}
			if instr.Addr != alloc {
				panic("Alloc.Referrers is inconsistent")
			}
			defblocks.Add(instr.Block())
		case *UnOp:
			if instr.Op != token.MUL {
				return false // not a load
			}
			if instr.X != alloc {
				panic("Alloc.Referrers is inconsistent")
			}
		case *DebugRef:
			// ok
		default:
			return false // some other instruction
		}
	}
	// The Alloc itself counts as a (zero) definition of the cell.
	defblocks.Add(alloc.Block())

	if debugLifting {
		fmt.Fprintln(os.Stderr, "\tlifting ", alloc, alloc.Name())
	}

	fn := alloc.Parent()

	// Φ-insertion.
	//
	// What follows is the body of the main loop of the insert-φ
	// function described by Cytron et al, but instead of using
	// counter tricks, we just reset the 'hasAlready' and 'work'
	// sets each iteration.  These are bitmaps so it's pretty cheap.
	//
	// TODO(adonovan): opt: recycle slice storage for W,
	// hasAlready, defBlocks across liftAlloc calls.
	var hasAlready BlockSet

	// Initialize W and work to defblocks.
	// NB: 'work' aliases defblocks' storage; W gets its own copy via Set.
	var work BlockSet = defblocks // blocks seen
	var W BlockSet                // blocks to do
	W.Set(&defblocks.Int)

	// Traverse iterated dominance frontier, inserting φ-nodes.
	for i := W.Take(); i != -1; i = W.Take() {
		u := fn.Blocks[i]
		for _, v := range df[u.Index] {
			if hasAlready.Add(v) {
				// Create φ-node.
				// It will be prepended to v.Instrs later, if needed.
				phi := &Phi{
					Edges:   make([]Value, len(v.Preds)),
					Comment: alloc.Comment,
				}
				// This is merely a debugging aid:
				phi.setNum(*fresh)
				*fresh++

				phi.pos = alloc.Pos()
				phi.setType(deref(alloc.Type()))
				phi.block = v
				if debugLifting {
					fmt.Fprintf(os.Stderr, "\tplace %s = %s at block %s\n", phi.Name(), phi, v)
				}
				newPhis[v] = append(newPhis[v], newPhi{phi, alloc})

				// A new φ is itself a definition: propagate
				// along the iterated dominance frontier.
				if work.Add(v) {
					W.Add(v)
				}
			}
		}
	}

	return true
}
|
||||
|
||||
// replaceAll replaces all intraprocedural uses of x with y,
|
||||
// updating x.Referrers and y.Referrers.
|
||||
// Precondition: x.Referrers() != nil, i.e. x must be local to some function.
|
||||
//
|
||||
func replaceAll(x, y Value) {
|
||||
var rands []*Value
|
||||
pxrefs := x.Referrers()
|
||||
pyrefs := y.Referrers()
|
||||
for _, instr := range *pxrefs {
|
||||
rands = instr.Operands(rands[:0]) // recycle storage
|
||||
for _, rand := range rands {
|
||||
if *rand != nil {
|
||||
if *rand == x {
|
||||
*rand = y
|
||||
}
|
||||
}
|
||||
}
|
||||
if pyrefs != nil {
|
||||
*pyrefs = append(*pyrefs, instr) // dups ok
|
||||
}
|
||||
}
|
||||
*pxrefs = nil // x is now unreferenced
|
||||
}
|
||||
|
||||
// renamed returns the value to which alloc is being renamed,
|
||||
// constructing it lazily if it's the implicit zero initialization.
|
||||
//
|
||||
func renamed(renaming []Value, alloc *Alloc) Value {
|
||||
v := renaming[alloc.index]
|
||||
if v == nil {
|
||||
v = zeroConst(deref(alloc.Type()))
|
||||
renaming[alloc.index] = v
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// rename implements the (Cytron et al) SSA renaming algorithm, a
// preorder traversal of the dominator tree replacing all loads of
// Alloc cells with the value stored to that cell by the dominating
// store instruction.  For lifting, we need only consider loads,
// stores and φ-nodes.
//
// renaming is a map from *Alloc (keyed by index number) to its
// dominating stored value; newPhis[x] is the set of new φ-nodes to be
// prepended to block x.
//
func rename(u *BasicBlock, renaming []Value, newPhis newPhiMap) {
	// Each φ-node becomes the new name for its associated Alloc.
	for _, np := range newPhis[u] {
		phi := np.phi
		alloc := np.alloc
		renaming[alloc.index] = phi
	}

	// Rename loads and stores of allocs.
	// Deleted instructions are replaced by nil and counted in u.gaps;
	// the lift pass compacts them out later.
	for i, instr := range u.Instrs {
		switch instr := instr.(type) {
		case *Alloc:
			if instr.index >= 0 { // store of zero to Alloc cell
				// Replace dominated loads by the zero value.
				renaming[instr.index] = nil
				if debugLifting {
					fmt.Fprintf(os.Stderr, "\tkill alloc %s\n", instr)
				}
				// Delete the Alloc.
				u.Instrs[i] = nil
				u.gaps++
			}

		case *Store:
			if alloc, ok := instr.Addr.(*Alloc); ok && alloc.index >= 0 { // store to Alloc cell
				// Replace dominated loads by the stored value.
				renaming[alloc.index] = instr.Val
				if debugLifting {
					fmt.Fprintf(os.Stderr, "\tkill store %s; new value: %s\n",
						instr, instr.Val.Name())
				}
				// Remove the store from the referrer list of the stored value.
				if refs := instr.Val.Referrers(); refs != nil {
					*refs = removeInstr(*refs, instr)
				}
				// Delete the Store.
				u.Instrs[i] = nil
				u.gaps++
			}

		case *UnOp:
			if instr.Op == token.MUL {
				if alloc, ok := instr.X.(*Alloc); ok && alloc.index >= 0 { // load of Alloc cell
					newval := renamed(renaming, alloc)
					if debugLifting {
						fmt.Fprintf(os.Stderr, "\tupdate load %s = %s with %s\n",
							instr.Name(), instr, newval.Name())
					}
					// Replace all references to
					// the loaded value by the
					// dominating stored value.
					replaceAll(instr, newval)
					// Delete the Load.
					u.Instrs[i] = nil
					u.gaps++
				}
			}

		case *DebugRef:
			if alloc, ok := instr.X.(*Alloc); ok && alloc.index >= 0 { // ref of Alloc cell
				if instr.IsAddr {
					instr.X = renamed(renaming, alloc)
					instr.IsAddr = false

					// Add DebugRef to instr.X's referrers.
					if refs := instr.X.Referrers(); refs != nil {
						*refs = append(*refs, instr)
					}
				} else {
					// A source expression denotes the address
					// of an Alloc that was optimized away.
					instr.X = nil

					// Delete the DebugRef.
					u.Instrs[i] = nil
					u.gaps++
				}
			}
		}
	}

	// For each φ-node in a CFG successor, rename the edge.
	for _, v := range u.Succs {
		phis := newPhis[v]
		if len(phis) == 0 {
			continue
		}
		i := v.predIndex(u)
		for _, np := range phis {
			phi := np.phi
			alloc := np.alloc
			newval := renamed(renaming, alloc)
			if debugLifting {
				fmt.Fprintf(os.Stderr, "\tsetphi %s edge %s -> %s (#%d) (alloc=%s) := %s\n",
					phi.Name(), u, v, i, alloc.Name(), newval.Name())
			}
			phi.Edges[i] = newval
			if prefs := newval.Referrers(); prefs != nil {
				*prefs = append(*prefs, phi)
			}
		}
	}

	// Continue depth-first recursion over domtree, pushing a
	// fresh copy of the renaming map for each subtree.
	for i, v := range u.dom.children {
		r := renaming
		if i < len(u.dom.children)-1 {
			// On all but the final iteration, we must make
			// a copy to avoid destructive update.
			r = make([]Value, len(renaming))
			copy(r, renaming)
		}
		rename(v, r, newPhis)
	}

}
|
123
vendor/honnef.co/go/tools/ssa/lvalue.go
vendored
Normal file
123
vendor/honnef.co/go/tools/ssa/lvalue.go
vendored
Normal file
@ -0,0 +1,123 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ssa
|
||||
|
||||
// lvalues are the union of addressable expressions and map-index
|
||||
// expressions.
|
||||
|
||||
import (
|
||||
"go/ast"
|
||||
"go/token"
|
||||
"go/types"
|
||||
)
|
||||
|
||||
// An lvalue represents an assignable location that may appear on the
// left-hand side of an assignment.  This is a generalization of a
// pointer to permit updates to elements of maps.
//
type lvalue interface {
	store(fn *Function, v Value) // stores v into the location
	load(fn *Function) Value     // loads the contents of the location
	address(fn *Function) Value  // address of the location (may panic if not addressable)
	typ() types.Type             // returns the type of the location
}
|
||||
|
||||
// An address is an lvalue represented by a true pointer.
|
||||
type address struct {
|
||||
addr Value
|
||||
pos token.Pos // source position
|
||||
expr ast.Expr // source syntax of the value (not address) [debug mode]
|
||||
}
|
||||
|
||||
func (a *address) load(fn *Function) Value {
|
||||
load := emitLoad(fn, a.addr)
|
||||
load.pos = a.pos
|
||||
return load
|
||||
}
|
||||
|
||||
func (a *address) store(fn *Function, v Value) {
|
||||
store := emitStore(fn, a.addr, v, a.pos)
|
||||
if a.expr != nil {
|
||||
// store.Val is v, converted for assignability.
|
||||
emitDebugRef(fn, a.expr, store.Val, false)
|
||||
}
|
||||
}
|
||||
|
||||
func (a *address) address(fn *Function) Value {
|
||||
if a.expr != nil {
|
||||
emitDebugRef(fn, a.expr, a.addr, true)
|
||||
}
|
||||
return a.addr
|
||||
}
|
||||
|
||||
func (a *address) typ() types.Type {
|
||||
return deref(a.addr.Type())
|
||||
}
|
||||
|
||||
// An element is an lvalue represented by m[k], the location of an
|
||||
// element of a map or string. These locations are not addressable
|
||||
// since pointers cannot be formed from them, but they do support
|
||||
// load(), and in the case of maps, store().
|
||||
//
|
||||
type element struct {
|
||||
m, k Value // map or string
|
||||
t types.Type // map element type or string byte type
|
||||
pos token.Pos // source position of colon ({k:v}) or lbrack (m[k]=v)
|
||||
}
|
||||
|
||||
func (e *element) load(fn *Function) Value {
|
||||
l := &Lookup{
|
||||
X: e.m,
|
||||
Index: e.k,
|
||||
}
|
||||
l.setPos(e.pos)
|
||||
l.setType(e.t)
|
||||
return fn.emit(l)
|
||||
}
|
||||
|
||||
func (e *element) store(fn *Function, v Value) {
|
||||
up := &MapUpdate{
|
||||
Map: e.m,
|
||||
Key: e.k,
|
||||
Value: emitConv(fn, v, e.t),
|
||||
}
|
||||
up.pos = e.pos
|
||||
fn.emit(up)
|
||||
}
|
||||
|
||||
func (e *element) address(fn *Function) Value {
|
||||
panic("map/string elements are not addressable")
|
||||
}
|
||||
|
||||
func (e *element) typ() types.Type {
|
||||
return e.t
|
||||
}
|
||||
|
||||
// A blank is a dummy variable whose name is "_".
|
||||
// It is not reified: loads are illegal and stores are ignored.
|
||||
//
|
||||
type blank struct{}
|
||||
|
||||
func (bl blank) load(fn *Function) Value {
|
||||
panic("blank.load is illegal")
|
||||
}
|
||||
|
||||
func (bl blank) store(fn *Function, v Value) {
|
||||
s := &BlankStore{
|
||||
Val: v,
|
||||
}
|
||||
fn.emit(s)
|
||||
}
|
||||
|
||||
func (bl blank) address(fn *Function) Value {
|
||||
panic("blank var is not addressable")
|
||||
}
|
||||
|
||||
func (bl blank) typ() types.Type {
|
||||
// This should be the type of the blank Ident; the typechecker
|
||||
// doesn't provide this yet, but fortunately, we don't need it
|
||||
// yet either.
|
||||
panic("blank.typ is unimplemented")
|
||||
}
|
239
vendor/honnef.co/go/tools/ssa/methods.go
vendored
Normal file
239
vendor/honnef.co/go/tools/ssa/methods.go
vendored
Normal file
@ -0,0 +1,239 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ssa
|
||||
|
||||
// This file defines utilities for population of method sets.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/types"
|
||||
)
|
||||
|
||||
// MethodValue returns the Function implementing method sel, building
|
||||
// wrapper methods on demand. It returns nil if sel denotes an
|
||||
// abstract (interface) method.
|
||||
//
|
||||
// Precondition: sel.Kind() == MethodVal.
|
||||
//
|
||||
// Thread-safe.
|
||||
//
|
||||
// EXCLUSIVE_LOCKS_ACQUIRED(prog.methodsMu)
|
||||
//
|
||||
func (prog *Program) MethodValue(sel *types.Selection) *Function {
|
||||
if sel.Kind() != types.MethodVal {
|
||||
panic(fmt.Sprintf("MethodValue(%s) kind != MethodVal", sel))
|
||||
}
|
||||
T := sel.Recv()
|
||||
if isInterface(T) {
|
||||
return nil // abstract method
|
||||
}
|
||||
if prog.mode&LogSource != 0 {
|
||||
defer logStack("MethodValue %s %v", T, sel)()
|
||||
}
|
||||
|
||||
prog.methodsMu.Lock()
|
||||
defer prog.methodsMu.Unlock()
|
||||
|
||||
return prog.addMethod(prog.createMethodSet(T), sel)
|
||||
}
|
||||
|
||||
// LookupMethod returns the implementation of the method of type T
|
||||
// identified by (pkg, name). It returns nil if the method exists but
|
||||
// is abstract, and panics if T has no such method.
|
||||
//
|
||||
func (prog *Program) LookupMethod(T types.Type, pkg *types.Package, name string) *Function {
|
||||
sel := prog.MethodSets.MethodSet(T).Lookup(pkg, name)
|
||||
if sel == nil {
|
||||
panic(fmt.Sprintf("%s has no method %s", T, types.Id(pkg, name)))
|
||||
}
|
||||
return prog.MethodValue(sel)
|
||||
}
|
||||
|
||||
// methodSet contains the (concrete) methods of a non-interface type.
// It is populated lazily by addMethod, under prog.methodsMu.
type methodSet struct {
	mapping  map[string]*Function // populated lazily, keyed by selection id
	complete bool                 // mapping contains all methods
}
|
||||
|
||||
// Precondition: !isInterface(T).
|
||||
// EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu)
|
||||
func (prog *Program) createMethodSet(T types.Type) *methodSet {
|
||||
mset, ok := prog.methodSets.At(T).(*methodSet)
|
||||
if !ok {
|
||||
mset = &methodSet{mapping: make(map[string]*Function)}
|
||||
prog.methodSets.Set(T, mset)
|
||||
}
|
||||
return mset
|
||||
}
|
||||
|
||||
// addMethod returns the Function for the concrete method sel,
// computing it on first request and caching it in mset.  A wrapper
// function is synthesized when the selection requires promotion
// (embedded field traversal) or receiver indirection; otherwise the
// declared function is used directly.
//
// EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu)
func (prog *Program) addMethod(mset *methodSet, sel *types.Selection) *Function {
	if sel.Kind() == types.MethodExpr {
		panic(sel)
	}
	id := sel.Obj().Id()
	fn := mset.mapping[id]
	if fn == nil {
		obj := sel.Obj().(*types.Func)

		needsPromotion := len(sel.Index()) > 1
		needsIndirection := !isPointer(recvType(obj)) && isPointer(sel.Recv())
		if needsPromotion || needsIndirection {
			fn = makeWrapper(prog, sel)
		} else {
			fn = prog.declaredFunc(obj)
		}
		if fn.Signature.Recv() == nil {
			panic(fn) // missing receiver
		}
		mset.mapping[id] = fn
	}
	return fn
}
|
||||
|
||||
// RuntimeTypes returns a new unordered slice containing all
|
||||
// concrete types in the program for which a complete (non-empty)
|
||||
// method set is required at run-time.
|
||||
//
|
||||
// Thread-safe.
|
||||
//
|
||||
// EXCLUSIVE_LOCKS_ACQUIRED(prog.methodsMu)
|
||||
//
|
||||
func (prog *Program) RuntimeTypes() []types.Type {
|
||||
prog.methodsMu.Lock()
|
||||
defer prog.methodsMu.Unlock()
|
||||
|
||||
var res []types.Type
|
||||
prog.methodSets.Iterate(func(T types.Type, v interface{}) {
|
||||
if v.(*methodSet).complete {
|
||||
res = append(res, T)
|
||||
}
|
||||
})
|
||||
return res
|
||||
}
|
||||
|
||||
// declaredFunc returns the concrete function/method denoted by obj.
|
||||
// Panic ensues if there is none.
|
||||
//
|
||||
func (prog *Program) declaredFunc(obj *types.Func) *Function {
|
||||
if v := prog.packageLevelValue(obj); v != nil {
|
||||
return v.(*Function)
|
||||
}
|
||||
panic("no concrete method: " + obj.String())
|
||||
}
|
||||
|
||||
// needMethodsOf ensures that runtime type information (including the
// complete method set) is available for the specified type T and all
// its subcomponents.
//
// needMethodsOf must be called for at least every type that is an
// operand of some MakeInterface instruction, and for the type of
// every exported package member.
//
// Precondition: T is not a method signature (*Signature with Recv()!=nil).
//
// Thread-safe.  (Called via emitConv from multiple builder goroutines.)
//
// TODO(adonovan): make this faster.  It accounts for 20% of SSA build time.
//
// EXCLUSIVE_LOCKS_ACQUIRED(prog.methodsMu)
//
func (prog *Program) needMethodsOf(T types.Type) {
	// The mutex serializes access to prog's method-set tables;
	// needMethods does the real (recursive) work under the lock.
	prog.methodsMu.Lock()
	prog.needMethods(T, false)
	prog.methodsMu.Unlock()
}
|
||||
|
||||
// needMethods records that runtime type information for T is needed,
// creates T's concrete methods (unless skip), and recurses over T's
// subcomponents so their runtime type information is available too.
//
// Precondition: T is not a method signature (*Signature with Recv()!=nil).
// Recursive case: skip => don't create methods for T.
//
// EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu)
//
func (prog *Program) needMethods(T types.Type, skip bool) {
	// Each package maintains its own set of types it has visited.
	if prevSkip, ok := prog.runtimeTypes.At(T).(bool); ok {
		// needMethods(T) was previously called
		if !prevSkip || skip {
			return // already seen, with same or false 'skip' value
		}
	}
	prog.runtimeTypes.Set(T, skip)

	tmset := prog.MethodSets.MethodSet(T)

	if !skip && !isInterface(T) && tmset.Len() > 0 {
		// Create methods of T.
		mset := prog.createMethodSet(T)
		if !mset.complete {
			mset.complete = true
			n := tmset.Len()
			for i := 0; i < n; i++ {
				prog.addMethod(mset, tmset.At(i))
			}
		}
	}

	// Recursion over signatures of each method.
	for i := 0; i < tmset.Len(); i++ {
		sig := tmset.At(i).Type().(*types.Signature)
		prog.needMethods(sig.Params(), false)
		prog.needMethods(sig.Results(), false)
	}

	// Recursion over the structure of T itself.
	switch t := T.(type) {
	case *types.Basic:
		// nop

	case *types.Interface:
		// nop---handled by recursion over method set.

	case *types.Pointer:
		prog.needMethods(t.Elem(), false)

	case *types.Slice:
		prog.needMethods(t.Elem(), false)

	case *types.Chan:
		prog.needMethods(t.Elem(), false)

	case *types.Map:
		prog.needMethods(t.Key(), false)
		prog.needMethods(t.Elem(), false)

	case *types.Signature:
		if t.Recv() != nil {
			panic(fmt.Sprintf("Signature %s has Recv %s", t, t.Recv()))
		}
		prog.needMethods(t.Params(), false)
		prog.needMethods(t.Results(), false)

	case *types.Named:
		// A pointer-to-named type can be derived from a named
		// type via reflection.  It may have methods too.
		prog.needMethods(types.NewPointer(T), false)

		// Consider 'type T struct{S}' where S has methods.
		// Reflection provides no way to get from T to struct{S},
		// only to S, so the method set of struct{S} is unwanted,
		// so set 'skip' flag during recursion.
		prog.needMethods(t.Underlying(), true)

	case *types.Array:
		prog.needMethods(t.Elem(), false)

	case *types.Struct:
		for i, n := 0, t.NumFields(); i < n; i++ {
			prog.needMethods(t.Field(i).Type(), false)
		}

	case *types.Tuple:
		for i, n := 0, t.Len(); i < n; i++ {
			prog.needMethods(t.At(i).Type(), false)
		}

	default:
		// All types.Type variants are enumerated above.
		panic(T)
	}
}
|
100
vendor/honnef.co/go/tools/ssa/mode.go
vendored
Normal file
100
vendor/honnef.co/go/tools/ssa/mode.go
vendored
Normal file
@ -0,0 +1,100 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ssa
|
||||
|
||||
// This file defines the BuilderMode type and its command-line flag.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// BuilderMode is a bitmask of options for diagnostics and checking.
//
// *BuilderMode satisfies the flag.Value interface.  Example:
//
// 	var mode = ssa.BuilderMode(0)
// 	func init() { flag.Var(&mode, "build", ssa.BuilderModeDoc) }
//
type BuilderMode uint

const (
	PrintPackages        BuilderMode = 1 << iota // Print package inventory to stdout
	PrintFunctions                               // Print function SSA code to stdout
	LogSource                                    // Log source locations as SSA builder progresses
	SanityCheckFunctions                         // Perform sanity checking of function bodies
	NaiveForm                                    // Build naïve SSA form: don't replace local loads/stores with registers
	BuildSerially                                // Build packages serially, not in parallel.
	GlobalDebug                                  // Enable debug info for all packages
	BareInits                                    // Build init functions without guards or calls to dependent inits
)

// BuilderModeDoc documents the flag letters accepted by (*BuilderMode).Set.
const BuilderModeDoc = `Options controlling the SSA builder.
The value is a sequence of zero or more of these letters:
C	perform sanity [C]hecking of the SSA form.
D	include [D]ebug info for every function.
P	print [P]ackage inventory.
F	print [F]unction SSA code.
S	log [S]ource locations as SSA builder progresses.
L	build distinct packages seria[L]ly instead of in parallel.
N	build [N]aive SSA form: don't replace local loads/stores with registers.
I	build bare [I]nit functions: no init guards or calls to dependent inits.
`

// String renders the mode as its flag letters in the fixed order
// D, P, F, S, C, N, L.  (BareInits has no letter in this rendering.)
func (m BuilderMode) String() string {
	var buf bytes.Buffer
	for _, opt := range []struct {
		bit BuilderMode
		ch  byte
	}{
		{GlobalDebug, 'D'},
		{PrintPackages, 'P'},
		{PrintFunctions, 'F'},
		{LogSource, 'S'},
		{SanityCheckFunctions, 'C'},
		{NaiveForm, 'N'},
		{BuildSerially, 'L'},
	} {
		if m&opt.bit != 0 {
			buf.WriteByte(opt.ch)
		}
	}
	return buf.String()
}

// Set parses the flag characters in s and updates *m.
// On an unknown letter it returns an error and leaves *m unchanged.
func (m *BuilderMode) Set(s string) error {
	letters := map[rune]BuilderMode{
		'D': GlobalDebug,
		'P': PrintPackages,
		'F': PrintFunctions,
		'S': LogSource | BuildSerially, // source logging implies serial building
		'C': SanityCheckFunctions,
		'N': NaiveForm,
		'L': BuildSerially,
	}
	var mode BuilderMode
	for _, c := range s {
		bits, ok := letters[c]
		if !ok {
			return fmt.Errorf("unknown BuilderMode option: %q", c)
		}
		mode |= bits
	}
	*m = mode
	return nil
}

// Get returns m.
func (m BuilderMode) Get() interface{} { return m }
|
435
vendor/honnef.co/go/tools/ssa/print.go
vendored
Normal file
435
vendor/honnef.co/go/tools/ssa/print.go
vendored
Normal file
@ -0,0 +1,435 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ssa
|
||||
|
||||
// This file implements the String() methods for all Value and
|
||||
// Instruction types.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"go/types"
|
||||
"io"
|
||||
"reflect"
|
||||
"sort"
|
||||
|
||||
"golang.org/x/tools/go/types/typeutil"
|
||||
)
|
||||
|
||||
// relName returns the name of v relative to i.
// In most cases, this is identical to v.Name(), but references to
// Functions (including methods) and Globals use RelString and
// all types are displayed with relType, so that only cross-package
// references are package-qualified.
//
func relName(v Value, i Instruction) string {
	// Qualify relative to the package of the instruction's enclosing
	// function, when an instruction context is supplied.
	var from *types.Package
	if i != nil {
		from = i.Parent().pkg()
	}
	switch v := v.(type) {
	case Member: // *Function or *Global
		return v.RelString(from)
	case *Const:
		return v.RelString(from)
	}
	// All other values use their plain (unqualified) name.
	return v.Name()
}
|
||||
|
||||
func relType(t types.Type, from *types.Package) string {
|
||||
return types.TypeString(t, types.RelativeTo(from))
|
||||
}
|
||||
|
||||
// relString returns the name of member m, qualified by its package
// path when m belongs to a package other than from.
func relString(m Member, from *types.Package) string {
	// NB: not all globals have an Object (e.g. init$guard),
	// so use Package().Object not Object.Package().
	if pkg := m.Package().Pkg; pkg != nil && pkg != from {
		return fmt.Sprintf("%s.%s", pkg.Path(), m.Name())
	}
	return m.Name()
}
|
||||
|
||||
// Value.String()
|
||||
//
|
||||
// This method is provided only for debugging.
|
||||
// It never appears in disassembly, which uses Value.Name().
|
||||
|
||||
func (v *Parameter) String() string {
|
||||
from := v.Parent().pkg()
|
||||
return fmt.Sprintf("parameter %s : %s", v.Name(), relType(v.Type(), from))
|
||||
}
|
||||
|
||||
func (v *FreeVar) String() string {
|
||||
from := v.Parent().pkg()
|
||||
return fmt.Sprintf("freevar %s : %s", v.Name(), relType(v.Type(), from))
|
||||
}
|
||||
|
||||
func (v *Builtin) String() string {
|
||||
return fmt.Sprintf("builtin %s", v.Name())
|
||||
}
|
||||
|
||||
// Instruction.String()
|
||||
|
||||
func (v *Alloc) String() string {
|
||||
op := "local"
|
||||
if v.Heap {
|
||||
op = "new"
|
||||
}
|
||||
from := v.Parent().pkg()
|
||||
return fmt.Sprintf("%s %s (%s)", op, relType(deref(v.Type()), from), v.Comment)
|
||||
}
|
||||
|
||||
func (v *Phi) String() string {
|
||||
var b bytes.Buffer
|
||||
b.WriteString("phi [")
|
||||
for i, edge := range v.Edges {
|
||||
if i > 0 {
|
||||
b.WriteString(", ")
|
||||
}
|
||||
// Be robust against malformed CFG.
|
||||
if v.block == nil {
|
||||
b.WriteString("??")
|
||||
continue
|
||||
}
|
||||
block := -1
|
||||
if i < len(v.block.Preds) {
|
||||
block = v.block.Preds[i].Index
|
||||
}
|
||||
fmt.Fprintf(&b, "%d: ", block)
|
||||
edgeVal := "<nil>" // be robust
|
||||
if edge != nil {
|
||||
edgeVal = relName(edge, v)
|
||||
}
|
||||
b.WriteString(edgeVal)
|
||||
}
|
||||
b.WriteString("]")
|
||||
if v.Comment != "" {
|
||||
b.WriteString(" #")
|
||||
b.WriteString(v.Comment)
|
||||
}
|
||||
return b.String()
|
||||
}
|
||||
|
||||
func printCall(v *CallCommon, prefix string, instr Instruction) string {
|
||||
var b bytes.Buffer
|
||||
b.WriteString(prefix)
|
||||
if !v.IsInvoke() {
|
||||
b.WriteString(relName(v.Value, instr))
|
||||
} else {
|
||||
fmt.Fprintf(&b, "invoke %s.%s", relName(v.Value, instr), v.Method.Name())
|
||||
}
|
||||
b.WriteString("(")
|
||||
for i, arg := range v.Args {
|
||||
if i > 0 {
|
||||
b.WriteString(", ")
|
||||
}
|
||||
b.WriteString(relName(arg, instr))
|
||||
}
|
||||
if v.Signature().Variadic() {
|
||||
b.WriteString("...")
|
||||
}
|
||||
b.WriteString(")")
|
||||
return b.String()
|
||||
}
|
||||
|
||||
func (c *CallCommon) String() string {
|
||||
return printCall(c, "", nil)
|
||||
}
|
||||
|
||||
func (v *Call) String() string {
|
||||
return printCall(&v.Call, "", v)
|
||||
}
|
||||
|
||||
func (v *BinOp) String() string {
|
||||
return fmt.Sprintf("%s %s %s", relName(v.X, v), v.Op.String(), relName(v.Y, v))
|
||||
}
|
||||
|
||||
func (v *UnOp) String() string {
|
||||
return fmt.Sprintf("%s%s%s", v.Op, relName(v.X, v), commaOk(v.CommaOk))
|
||||
}
|
||||
|
||||
func printConv(prefix string, v, x Value) string {
|
||||
from := v.Parent().pkg()
|
||||
return fmt.Sprintf("%s %s <- %s (%s)",
|
||||
prefix,
|
||||
relType(v.Type(), from),
|
||||
relType(x.Type(), from),
|
||||
relName(x, v.(Instruction)))
|
||||
}
|
||||
|
||||
func (v *ChangeType) String() string { return printConv("changetype", v, v.X) }
|
||||
func (v *Convert) String() string { return printConv("convert", v, v.X) }
|
||||
func (v *ChangeInterface) String() string { return printConv("change interface", v, v.X) }
|
||||
func (v *MakeInterface) String() string { return printConv("make", v, v.X) }
|
||||
|
||||
func (v *MakeClosure) String() string {
|
||||
var b bytes.Buffer
|
||||
fmt.Fprintf(&b, "make closure %s", relName(v.Fn, v))
|
||||
if v.Bindings != nil {
|
||||
b.WriteString(" [")
|
||||
for i, c := range v.Bindings {
|
||||
if i > 0 {
|
||||
b.WriteString(", ")
|
||||
}
|
||||
b.WriteString(relName(c, v))
|
||||
}
|
||||
b.WriteString("]")
|
||||
}
|
||||
return b.String()
|
||||
}
|
||||
|
||||
func (v *MakeSlice) String() string {
|
||||
from := v.Parent().pkg()
|
||||
return fmt.Sprintf("make %s %s %s",
|
||||
relType(v.Type(), from),
|
||||
relName(v.Len, v),
|
||||
relName(v.Cap, v))
|
||||
}
|
||||
|
||||
func (v *Slice) String() string {
|
||||
var b bytes.Buffer
|
||||
b.WriteString("slice ")
|
||||
b.WriteString(relName(v.X, v))
|
||||
b.WriteString("[")
|
||||
if v.Low != nil {
|
||||
b.WriteString(relName(v.Low, v))
|
||||
}
|
||||
b.WriteString(":")
|
||||
if v.High != nil {
|
||||
b.WriteString(relName(v.High, v))
|
||||
}
|
||||
if v.Max != nil {
|
||||
b.WriteString(":")
|
||||
b.WriteString(relName(v.Max, v))
|
||||
}
|
||||
b.WriteString("]")
|
||||
return b.String()
|
||||
}
|
||||
|
||||
func (v *MakeMap) String() string {
|
||||
res := ""
|
||||
if v.Reserve != nil {
|
||||
res = relName(v.Reserve, v)
|
||||
}
|
||||
from := v.Parent().pkg()
|
||||
return fmt.Sprintf("make %s %s", relType(v.Type(), from), res)
|
||||
}
|
||||
|
||||
func (v *MakeChan) String() string {
|
||||
from := v.Parent().pkg()
|
||||
return fmt.Sprintf("make %s %s", relType(v.Type(), from), relName(v.Size, v))
|
||||
}
|
||||
|
||||
func (v *FieldAddr) String() string {
|
||||
st := deref(v.X.Type()).Underlying().(*types.Struct)
|
||||
// Be robust against a bad index.
|
||||
name := "?"
|
||||
if 0 <= v.Field && v.Field < st.NumFields() {
|
||||
name = st.Field(v.Field).Name()
|
||||
}
|
||||
return fmt.Sprintf("&%s.%s [#%d]", relName(v.X, v), name, v.Field)
|
||||
}
|
||||
|
||||
func (v *Field) String() string {
|
||||
st := v.X.Type().Underlying().(*types.Struct)
|
||||
// Be robust against a bad index.
|
||||
name := "?"
|
||||
if 0 <= v.Field && v.Field < st.NumFields() {
|
||||
name = st.Field(v.Field).Name()
|
||||
}
|
||||
return fmt.Sprintf("%s.%s [#%d]", relName(v.X, v), name, v.Field)
|
||||
}
|
||||
|
||||
func (v *IndexAddr) String() string {
|
||||
return fmt.Sprintf("&%s[%s]", relName(v.X, v), relName(v.Index, v))
|
||||
}
|
||||
|
||||
func (v *Index) String() string {
|
||||
return fmt.Sprintf("%s[%s]", relName(v.X, v), relName(v.Index, v))
|
||||
}
|
||||
|
||||
func (v *Lookup) String() string {
|
||||
return fmt.Sprintf("%s[%s]%s", relName(v.X, v), relName(v.Index, v), commaOk(v.CommaOk))
|
||||
}
|
||||
|
||||
func (v *Range) String() string {
|
||||
return "range " + relName(v.X, v)
|
||||
}
|
||||
|
||||
func (v *Next) String() string {
|
||||
return "next " + relName(v.Iter, v)
|
||||
}
|
||||
|
||||
func (v *TypeAssert) String() string {
|
||||
from := v.Parent().pkg()
|
||||
return fmt.Sprintf("typeassert%s %s.(%s)", commaOk(v.CommaOk), relName(v.X, v), relType(v.AssertedType, from))
|
||||
}
|
||||
|
||||
func (v *Extract) String() string {
|
||||
return fmt.Sprintf("extract %s #%d", relName(v.Tuple, v), v.Index)
|
||||
}
|
||||
|
||||
func (s *Jump) String() string {
|
||||
// Be robust against malformed CFG.
|
||||
block := -1
|
||||
if s.block != nil && len(s.block.Succs) == 1 {
|
||||
block = s.block.Succs[0].Index
|
||||
}
|
||||
return fmt.Sprintf("jump %d", block)
|
||||
}
|
||||
|
||||
func (s *If) String() string {
|
||||
// Be robust against malformed CFG.
|
||||
tblock, fblock := -1, -1
|
||||
if s.block != nil && len(s.block.Succs) == 2 {
|
||||
tblock = s.block.Succs[0].Index
|
||||
fblock = s.block.Succs[1].Index
|
||||
}
|
||||
return fmt.Sprintf("if %s goto %d else %d", relName(s.Cond, s), tblock, fblock)
|
||||
}
|
||||
|
||||
func (s *Go) String() string {
|
||||
return printCall(&s.Call, "go ", s)
|
||||
}
|
||||
|
||||
func (s *Panic) String() string {
|
||||
return "panic " + relName(s.X, s)
|
||||
}
|
||||
|
||||
func (s *Return) String() string {
|
||||
var b bytes.Buffer
|
||||
b.WriteString("return")
|
||||
for i, r := range s.Results {
|
||||
if i == 0 {
|
||||
b.WriteString(" ")
|
||||
} else {
|
||||
b.WriteString(", ")
|
||||
}
|
||||
b.WriteString(relName(r, s))
|
||||
}
|
||||
return b.String()
|
||||
}
|
||||
|
||||
func (*RunDefers) String() string {
|
||||
return "rundefers"
|
||||
}
|
||||
|
||||
func (s *Send) String() string {
|
||||
return fmt.Sprintf("send %s <- %s", relName(s.Chan, s), relName(s.X, s))
|
||||
}
|
||||
|
||||
func (s *Defer) String() string {
|
||||
return printCall(&s.Call, "defer ", s)
|
||||
}
|
||||
|
||||
func (s *Select) String() string {
|
||||
var b bytes.Buffer
|
||||
for i, st := range s.States {
|
||||
if i > 0 {
|
||||
b.WriteString(", ")
|
||||
}
|
||||
if st.Dir == types.RecvOnly {
|
||||
b.WriteString("<-")
|
||||
b.WriteString(relName(st.Chan, s))
|
||||
} else {
|
||||
b.WriteString(relName(st.Chan, s))
|
||||
b.WriteString("<-")
|
||||
b.WriteString(relName(st.Send, s))
|
||||
}
|
||||
}
|
||||
non := ""
|
||||
if !s.Blocking {
|
||||
non = "non"
|
||||
}
|
||||
return fmt.Sprintf("select %sblocking [%s]", non, b.String())
|
||||
}
|
||||
|
||||
func (s *Store) String() string {
|
||||
return fmt.Sprintf("*%s = %s", relName(s.Addr, s), relName(s.Val, s))
|
||||
}
|
||||
|
||||
func (s *BlankStore) String() string {
|
||||
return fmt.Sprintf("_ = %s", relName(s.Val, s))
|
||||
}
|
||||
|
||||
func (s *MapUpdate) String() string {
|
||||
return fmt.Sprintf("%s[%s] = %s", relName(s.Map, s), relName(s.Key, s), relName(s.Value, s))
|
||||
}
|
||||
|
||||
func (s *DebugRef) String() string {
|
||||
p := s.Parent().Prog.Fset.Position(s.Pos())
|
||||
var descr interface{}
|
||||
if s.object != nil {
|
||||
descr = s.object // e.g. "var x int"
|
||||
} else {
|
||||
descr = reflect.TypeOf(s.Expr) // e.g. "*ast.CallExpr"
|
||||
}
|
||||
var addr string
|
||||
if s.IsAddr {
|
||||
addr = "address of "
|
||||
}
|
||||
return fmt.Sprintf("; %s%s @ %d:%d is %s", addr, descr, p.Line, p.Column, s.X.Name())
|
||||
}
|
||||
|
||||
func (p *Package) String() string {
|
||||
return "package " + p.Pkg.Path()
|
||||
}
|
||||
|
||||
var _ io.WriterTo = (*Package)(nil) // *Package implements io.Writer
|
||||
|
||||
func (p *Package) WriteTo(w io.Writer) (int64, error) {
|
||||
var buf bytes.Buffer
|
||||
WritePackage(&buf, p)
|
||||
n, err := w.Write(buf.Bytes())
|
||||
return int64(n), err
|
||||
}
|
||||
|
||||
// WritePackage writes to buf a human-readable summary of p.
|
||||
func WritePackage(buf *bytes.Buffer, p *Package) {
|
||||
fmt.Fprintf(buf, "%s:\n", p)
|
||||
|
||||
var names []string
|
||||
maxname := 0
|
||||
for name := range p.Members {
|
||||
if l := len(name); l > maxname {
|
||||
maxname = l
|
||||
}
|
||||
names = append(names, name)
|
||||
}
|
||||
|
||||
from := p.Pkg
|
||||
sort.Strings(names)
|
||||
for _, name := range names {
|
||||
switch mem := p.Members[name].(type) {
|
||||
case *NamedConst:
|
||||
fmt.Fprintf(buf, " const %-*s %s = %s\n",
|
||||
maxname, name, mem.Name(), mem.Value.RelString(from))
|
||||
|
||||
case *Function:
|
||||
fmt.Fprintf(buf, " func %-*s %s\n",
|
||||
maxname, name, relType(mem.Type(), from))
|
||||
|
||||
case *Type:
|
||||
fmt.Fprintf(buf, " type %-*s %s\n",
|
||||
maxname, name, relType(mem.Type().Underlying(), from))
|
||||
for _, meth := range typeutil.IntuitiveMethodSet(mem.Type(), &p.Prog.MethodSets) {
|
||||
fmt.Fprintf(buf, " %s\n", types.SelectionString(meth, types.RelativeTo(from)))
|
||||
}
|
||||
|
||||
case *Global:
|
||||
fmt.Fprintf(buf, " var %-*s %s\n",
|
||||
maxname, name, relType(mem.Type().(*types.Pointer).Elem(), from))
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Fprintf(buf, "\n")
|
||||
}
|
||||
|
||||
// commaOk returns the ",ok" suffix for instructions in comma-ok form,
// or the empty string otherwise.
func commaOk(x bool) string {
	if !x {
		return ""
	}
	return ",ok"
}
|
535
vendor/honnef.co/go/tools/ssa/sanity.go
vendored
Normal file
535
vendor/honnef.co/go/tools/ssa/sanity.go
vendored
Normal file
@ -0,0 +1,535 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ssa
|
||||
|
||||
// An optional pass for sanity-checking invariants of the SSA representation.
|
||||
// Currently it checks CFG invariants but little at the instruction level.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/types"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type sanity struct {
|
||||
reporter io.Writer
|
||||
fn *Function
|
||||
block *BasicBlock
|
||||
instrs map[Instruction]struct{}
|
||||
insane bool
|
||||
}
|
||||
|
||||
// sanityCheck performs integrity checking of the SSA representation
|
||||
// of the function fn and returns true if it was valid. Diagnostics
|
||||
// are written to reporter if non-nil, os.Stderr otherwise. Some
|
||||
// diagnostics are only warnings and do not imply a negative result.
|
||||
//
|
||||
// Sanity-checking is intended to facilitate the debugging of code
|
||||
// transformation passes.
|
||||
//
|
||||
func sanityCheck(fn *Function, reporter io.Writer) bool {
|
||||
if reporter == nil {
|
||||
reporter = os.Stderr
|
||||
}
|
||||
return (&sanity{reporter: reporter}).checkFunction(fn)
|
||||
}
|
||||
|
||||
// mustSanityCheck is like sanityCheck but panics instead of returning
|
||||
// a negative result.
|
||||
//
|
||||
func mustSanityCheck(fn *Function, reporter io.Writer) {
|
||||
if !sanityCheck(fn, reporter) {
|
||||
fn.WriteTo(os.Stderr)
|
||||
panic("SanityCheck failed")
|
||||
}
|
||||
}
|
||||
|
||||
func (s *sanity) diagnostic(prefix, format string, args ...interface{}) {
|
||||
fmt.Fprintf(s.reporter, "%s: function %s", prefix, s.fn)
|
||||
if s.block != nil {
|
||||
fmt.Fprintf(s.reporter, ", block %s", s.block)
|
||||
}
|
||||
io.WriteString(s.reporter, ": ")
|
||||
fmt.Fprintf(s.reporter, format, args...)
|
||||
io.WriteString(s.reporter, "\n")
|
||||
}
|
||||
|
||||
func (s *sanity) errorf(format string, args ...interface{}) {
|
||||
s.insane = true
|
||||
s.diagnostic("Error", format, args...)
|
||||
}
|
||||
|
||||
func (s *sanity) warnf(format string, args ...interface{}) {
|
||||
s.diagnostic("Warning", format, args...)
|
||||
}
|
||||
|
||||
// findDuplicate returns an arbitrary basic block that appeared more
|
||||
// than once in blocks, or nil if all were unique.
|
||||
func findDuplicate(blocks []*BasicBlock) *BasicBlock {
|
||||
if len(blocks) < 2 {
|
||||
return nil
|
||||
}
|
||||
if blocks[0] == blocks[1] {
|
||||
return blocks[0]
|
||||
}
|
||||
// Slow path:
|
||||
m := make(map[*BasicBlock]bool)
|
||||
for _, b := range blocks {
|
||||
if m[b] {
|
||||
return b
|
||||
}
|
||||
m[b] = true
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *sanity) checkInstr(idx int, instr Instruction) {
|
||||
switch instr := instr.(type) {
|
||||
case *If, *Jump, *Return, *Panic:
|
||||
s.errorf("control flow instruction not at end of block")
|
||||
case *Phi:
|
||||
if idx == 0 {
|
||||
// It suffices to apply this check to just the first phi node.
|
||||
if dup := findDuplicate(s.block.Preds); dup != nil {
|
||||
s.errorf("phi node in block with duplicate predecessor %s", dup)
|
||||
}
|
||||
} else {
|
||||
prev := s.block.Instrs[idx-1]
|
||||
if _, ok := prev.(*Phi); !ok {
|
||||
s.errorf("Phi instruction follows a non-Phi: %T", prev)
|
||||
}
|
||||
}
|
||||
if ne, np := len(instr.Edges), len(s.block.Preds); ne != np {
|
||||
s.errorf("phi node has %d edges but %d predecessors", ne, np)
|
||||
|
||||
} else {
|
||||
for i, e := range instr.Edges {
|
||||
if e == nil {
|
||||
s.errorf("phi node '%s' has no value for edge #%d from %s", instr.Comment, i, s.block.Preds[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
case *Alloc:
|
||||
if !instr.Heap {
|
||||
found := false
|
||||
for _, l := range s.fn.Locals {
|
||||
if l == instr {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
s.errorf("local alloc %s = %s does not appear in Function.Locals", instr.Name(), instr)
|
||||
}
|
||||
}
|
||||
|
||||
case *BinOp:
|
||||
case *Call:
|
||||
case *ChangeInterface:
|
||||
case *ChangeType:
|
||||
case *Convert:
|
||||
if _, ok := instr.X.Type().Underlying().(*types.Basic); !ok {
|
||||
if _, ok := instr.Type().Underlying().(*types.Basic); !ok {
|
||||
s.errorf("convert %s -> %s: at least one type must be basic", instr.X.Type(), instr.Type())
|
||||
}
|
||||
}
|
||||
|
||||
case *Defer:
|
||||
case *Extract:
|
||||
case *Field:
|
||||
case *FieldAddr:
|
||||
case *Go:
|
||||
case *Index:
|
||||
case *IndexAddr:
|
||||
case *Lookup:
|
||||
case *MakeChan:
|
||||
case *MakeClosure:
|
||||
numFree := len(instr.Fn.(*Function).FreeVars)
|
||||
numBind := len(instr.Bindings)
|
||||
if numFree != numBind {
|
||||
s.errorf("MakeClosure has %d Bindings for function %s with %d free vars",
|
||||
numBind, instr.Fn, numFree)
|
||||
|
||||
}
|
||||
if recv := instr.Type().(*types.Signature).Recv(); recv != nil {
|
||||
s.errorf("MakeClosure's type includes receiver %s", recv.Type())
|
||||
}
|
||||
|
||||
case *MakeInterface:
|
||||
case *MakeMap:
|
||||
case *MakeSlice:
|
||||
case *MapUpdate:
|
||||
case *Next:
|
||||
case *Range:
|
||||
case *RunDefers:
|
||||
case *Select:
|
||||
case *Send:
|
||||
case *Slice:
|
||||
case *Store:
|
||||
case *TypeAssert:
|
||||
case *UnOp:
|
||||
case *DebugRef:
|
||||
case *BlankStore:
|
||||
case *Sigma:
|
||||
// TODO(adonovan): implement checks.
|
||||
default:
|
||||
panic(fmt.Sprintf("Unknown instruction type: %T", instr))
|
||||
}
|
||||
|
||||
if call, ok := instr.(CallInstruction); ok {
|
||||
if call.Common().Signature() == nil {
|
||||
s.errorf("nil signature: %s", call)
|
||||
}
|
||||
}
|
||||
|
||||
// Check that value-defining instructions have valid types
|
||||
// and a valid referrer list.
|
||||
if v, ok := instr.(Value); ok {
|
||||
t := v.Type()
|
||||
if t == nil {
|
||||
s.errorf("no type: %s = %s", v.Name(), v)
|
||||
} else if t == tRangeIter {
|
||||
// not a proper type; ignore.
|
||||
} else if b, ok := t.Underlying().(*types.Basic); ok && b.Info()&types.IsUntyped != 0 {
|
||||
s.errorf("instruction has 'untyped' result: %s = %s : %s", v.Name(), v, t)
|
||||
}
|
||||
s.checkReferrerList(v)
|
||||
}
|
||||
|
||||
// Untyped constants are legal as instruction Operands(),
|
||||
// for example:
|
||||
// _ = "foo"[0]
|
||||
// or:
|
||||
// if wordsize==64 {...}
|
||||
|
||||
// All other non-Instruction Values can be found via their
|
||||
// enclosing Function or Package.
|
||||
}
|
||||
|
||||
func (s *sanity) checkFinalInstr(instr Instruction) {
|
||||
switch instr := instr.(type) {
|
||||
case *If:
|
||||
if nsuccs := len(s.block.Succs); nsuccs != 2 {
|
||||
s.errorf("If-terminated block has %d successors; expected 2", nsuccs)
|
||||
return
|
||||
}
|
||||
if s.block.Succs[0] == s.block.Succs[1] {
|
||||
s.errorf("If-instruction has same True, False target blocks: %s", s.block.Succs[0])
|
||||
return
|
||||
}
|
||||
|
||||
case *Jump:
|
||||
if nsuccs := len(s.block.Succs); nsuccs != 1 {
|
||||
s.errorf("Jump-terminated block has %d successors; expected 1", nsuccs)
|
||||
return
|
||||
}
|
||||
|
||||
case *Return:
|
||||
if nsuccs := len(s.block.Succs); nsuccs != 0 {
|
||||
s.errorf("Return-terminated block has %d successors; expected none", nsuccs)
|
||||
return
|
||||
}
|
||||
if na, nf := len(instr.Results), s.fn.Signature.Results().Len(); nf != na {
|
||||
s.errorf("%d-ary return in %d-ary function", na, nf)
|
||||
}
|
||||
|
||||
case *Panic:
|
||||
if nsuccs := len(s.block.Succs); nsuccs != 0 {
|
||||
s.errorf("Panic-terminated block has %d successors; expected none", nsuccs)
|
||||
return
|
||||
}
|
||||
|
||||
default:
|
||||
s.errorf("non-control flow instruction at end of block")
|
||||
}
|
||||
}
|
||||
|
||||
func (s *sanity) checkBlock(b *BasicBlock, index int) {
|
||||
s.block = b
|
||||
|
||||
if b.Index != index {
|
||||
s.errorf("block has incorrect Index %d", b.Index)
|
||||
}
|
||||
if b.parent != s.fn {
|
||||
s.errorf("block has incorrect parent %s", b.parent)
|
||||
}
|
||||
|
||||
// Check all blocks are reachable.
|
||||
// (The entry block is always implicitly reachable,
|
||||
// as is the Recover block, if any.)
|
||||
if (index > 0 && b != b.parent.Recover) && len(b.Preds) == 0 {
|
||||
s.warnf("unreachable block")
|
||||
if b.Instrs == nil {
|
||||
// Since this block is about to be pruned,
|
||||
// tolerating transient problems in it
|
||||
// simplifies other optimizations.
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Check predecessor and successor relations are dual,
|
||||
// and that all blocks in CFG belong to same function.
|
||||
for _, a := range b.Preds {
|
||||
found := false
|
||||
for _, bb := range a.Succs {
|
||||
if bb == b {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
s.errorf("expected successor edge in predecessor %s; found only: %s", a, a.Succs)
|
||||
}
|
||||
if a.parent != s.fn {
|
||||
s.errorf("predecessor %s belongs to different function %s", a, a.parent)
|
||||
}
|
||||
}
|
||||
for _, c := range b.Succs {
|
||||
found := false
|
||||
for _, bb := range c.Preds {
|
||||
if bb == b {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
s.errorf("expected predecessor edge in successor %s; found only: %s", c, c.Preds)
|
||||
}
|
||||
if c.parent != s.fn {
|
||||
s.errorf("successor %s belongs to different function %s", c, c.parent)
|
||||
}
|
||||
}
|
||||
|
||||
// Check each instruction is sane.
|
||||
n := len(b.Instrs)
|
||||
if n == 0 {
|
||||
s.errorf("basic block contains no instructions")
|
||||
}
|
||||
var rands [10]*Value // reuse storage
|
||||
for j, instr := range b.Instrs {
|
||||
if instr == nil {
|
||||
s.errorf("nil instruction at index %d", j)
|
||||
continue
|
||||
}
|
||||
if b2 := instr.Block(); b2 == nil {
|
||||
s.errorf("nil Block() for instruction at index %d", j)
|
||||
continue
|
||||
} else if b2 != b {
|
||||
s.errorf("wrong Block() (%s) for instruction at index %d ", b2, j)
|
||||
continue
|
||||
}
|
||||
if j < n-1 {
|
||||
s.checkInstr(j, instr)
|
||||
} else {
|
||||
s.checkFinalInstr(instr)
|
||||
}
|
||||
|
||||
// Check Instruction.Operands.
|
||||
operands:
|
||||
for i, op := range instr.Operands(rands[:0]) {
|
||||
if op == nil {
|
||||
s.errorf("nil operand pointer %d of %s", i, instr)
|
||||
continue
|
||||
}
|
||||
val := *op
|
||||
if val == nil {
|
||||
continue // a nil operand is ok
|
||||
}
|
||||
|
||||
// Check that "untyped" types only appear on constant operands.
|
||||
if _, ok := (*op).(*Const); !ok {
|
||||
if basic, ok := (*op).Type().(*types.Basic); ok {
|
||||
if basic.Info()&types.IsUntyped != 0 {
|
||||
s.errorf("operand #%d of %s is untyped: %s", i, instr, basic)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Check that Operands that are also Instructions belong to same function.
|
||||
// TODO(adonovan): also check their block dominates block b.
|
||||
if val, ok := val.(Instruction); ok {
|
||||
if val.Block() == nil {
|
||||
s.errorf("operand %d of %s is an instruction (%s) that belongs to no block", i, instr, val)
|
||||
} else if val.Parent() != s.fn {
|
||||
s.errorf("operand %d of %s is an instruction (%s) from function %s", i, instr, val, val.Parent())
|
||||
}
|
||||
}
|
||||
|
||||
// Check that each function-local operand of
|
||||
// instr refers back to instr. (NB: quadratic)
|
||||
switch val := val.(type) {
|
||||
case *Const, *Global, *Builtin:
|
||||
continue // not local
|
||||
case *Function:
|
||||
if val.parent == nil {
|
||||
continue // only anon functions are local
|
||||
}
|
||||
}
|
||||
|
||||
// TODO(adonovan): check val.Parent() != nil <=> val.Referrers() is defined.
|
||||
|
||||
if refs := val.Referrers(); refs != nil {
|
||||
for _, ref := range *refs {
|
||||
if ref == instr {
|
||||
continue operands
|
||||
}
|
||||
}
|
||||
s.errorf("operand %d of %s (%s) does not refer to us", i, instr, val)
|
||||
} else {
|
||||
s.errorf("operand %d of %s (%s) has no referrers", i, instr, val)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *sanity) checkReferrerList(v Value) {
|
||||
refs := v.Referrers()
|
||||
if refs == nil {
|
||||
s.errorf("%s has missing referrer list", v.Name())
|
||||
return
|
||||
}
|
||||
for i, ref := range *refs {
|
||||
if _, ok := s.instrs[ref]; !ok {
|
||||
s.errorf("%s.Referrers()[%d] = %s is not an instruction belonging to this function", v.Name(), i, ref)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// checkFunction verifies the invariants of fn visible in this file:
// parent links of params/locals/free vars, the Synthetic/Syntax
// correspondence, referrer lists, the Blocks slice, the Recover block,
// and AnonFuncs parentage. It reports problems via s.errorf/s.warnf
// and returns true iff no error was recorded.
func (s *sanity) checkFunction(fn *Function) bool {
	// TODO(adonovan): check Function invariants:
	// - check params match signature
	// - check transient fields are nil
	// - warn if any fn.Locals do not appear among block instructions.
	s.fn = fn
	if fn.Prog == nil {
		s.errorf("nil Prog")
	}

	_ = fn.String()            // must not crash
	_ = fn.RelString(fn.pkg()) // must not crash

	// All functions have a package, except delegates (which are
	// shared across packages, or duplicated as weak symbols in a
	// separate-compilation model), and error.Error.
	if fn.Pkg == nil {
		if strings.HasPrefix(fn.Synthetic, "wrapper ") ||
			strings.HasPrefix(fn.Synthetic, "bound ") ||
			strings.HasPrefix(fn.Synthetic, "thunk ") ||
			strings.HasSuffix(fn.name, "Error") {
			// ok
		} else {
			s.errorf("nil Pkg")
		}
	}
	// A function comes from source iff it is non-synthetic, and
	// exactly those functions must carry syntax.
	if src, syn := fn.Synthetic == "", fn.Syntax() != nil; src != syn {
		s.errorf("got fromSource=%t, hasSyntax=%t; want same values", src, syn)
	}
	for i, l := range fn.Locals {
		if l.Parent() != fn {
			s.errorf("Local %s at index %d has wrong parent", l.Name(), i)
		}
		if l.Heap {
			s.errorf("Local %s at index %d has Heap flag set", l.Name(), i)
		}
	}
	// Build the set of valid referrers.
	s.instrs = make(map[Instruction]struct{})
	for _, b := range fn.Blocks {
		for _, instr := range b.Instrs {
			s.instrs[instr] = struct{}{}
		}
	}
	for i, p := range fn.Params {
		if p.Parent() != fn {
			s.errorf("Param %s at index %d has wrong parent", p.Name(), i)
		}
		// Check common suffix of Signature and Params match type.
		if sig := fn.Signature; sig != nil {
			j := i - len(fn.Params) + sig.Params().Len() // index within sig.Params
			if j < 0 {
				continue
			}
			if !types.Identical(p.Type(), sig.Params().At(j).Type()) {
				s.errorf("Param %s at index %d has wrong type (%s, versus %s in Signature)", p.Name(), i, p.Type(), sig.Params().At(j).Type())

			}
		}

		s.checkReferrerList(p)
	}
	for i, fv := range fn.FreeVars {
		if fv.Parent() != fn {
			s.errorf("FreeVar %s at index %d has wrong parent", fv.Name(), i)
		}
		s.checkReferrerList(fv)
	}

	if fn.Blocks != nil && len(fn.Blocks) == 0 {
		// Function _had_ blocks (so it's not external) but
		// they were "optimized" away, even the entry block.
		s.errorf("Blocks slice is non-nil but empty")
	}
	for i, b := range fn.Blocks {
		if b == nil {
			s.warnf("nil *BasicBlock at f.Blocks[%d]", i)
			continue
		}
		s.checkBlock(b, i)
	}
	// The Recover block, if any, must be one of the function's blocks.
	if fn.Recover != nil && fn.Blocks[fn.Recover.Index] != fn.Recover {
		s.errorf("Recover block is not in Blocks slice")
	}

	s.block = nil
	for i, anon := range fn.AnonFuncs {
		if anon.Parent() != fn {
			s.errorf("AnonFuncs[%d]=%s but %s.Parent()=%s", i, anon, anon, anon.Parent())
		}
	}
	s.fn = nil
	return !s.insane
}
|
||||
|
||||
// sanityCheckPackage checks invariants of packages upon creation.
// It does not require that the package is built.
// Unlike sanityCheck (for functions), it just panics at the first error.
func sanityCheckPackage(pkg *Package) {
	if pkg.Pkg == nil {
		panic(fmt.Sprintf("Package %s has no Object", pkg))
	}
	_ = pkg.String() // must not crash

	// Each member must agree with its map key on name, and (when it
	// has a typechecker object) on name and position.
	for name, mem := range pkg.Members {
		if name != mem.Name() {
			panic(fmt.Sprintf("%s: %T.Name() = %s, want %s",
				pkg.Pkg.Path(), mem, mem.Name(), name))
		}
		obj := mem.Object()
		if obj == nil {
			// This check is sound because fields
			// {Global,Function}.object have type
			// types.Object. (If they were declared as
			// *types.{Var,Func}, we'd have a non-empty
			// interface containing a nil pointer.)

			continue // not all members have typechecker objects
		}
		if obj.Name() != name {
			if obj.Name() == "init" && strings.HasPrefix(mem.Name(), "init#") {
				// Ok. The name of a declared init function varies between
				// its types.Func ("init") and its ssa.Function ("init#%d").
			} else {
				panic(fmt.Sprintf("%s: %T.Object().Name() = %s, want %s",
					pkg.Pkg.Path(), mem, obj.Name(), name))
			}
		}
		if obj.Pos() != mem.Pos() {
			panic(fmt.Sprintf("%s Pos=%d obj.Pos=%d", mem, mem.Pos(), obj.Pos()))
		}
	}
}
|
293
vendor/honnef.co/go/tools/ssa/source.go
vendored
Normal file
293
vendor/honnef.co/go/tools/ssa/source.go
vendored
Normal file
@ -0,0 +1,293 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ssa
|
||||
|
||||
// This file defines utilities for working with source positions
|
||||
// or source-level named entities ("objects").
|
||||
|
||||
// TODO(adonovan): test that {Value,Instruction}.Pos() positions match
|
||||
// the originating syntax, as specified.
|
||||
|
||||
import (
|
||||
"go/ast"
|
||||
"go/token"
|
||||
"go/types"
|
||||
)
|
||||
|
||||
// EnclosingFunction returns the function that contains the syntax
// node denoted by path.
//
// Syntax associated with package-level variable specifications is
// enclosed by the package's init() function.
//
// Returns nil if not found; reasons might include:
//    - the node is not enclosed by any function.
//    - the node is within an anonymous function (FuncLit) and
//      its SSA function has not been created yet
//      (pkg.Build() has not yet been called).
//
func EnclosingFunction(pkg *Package, path []ast.Node) *Function {
	// Start with package-level function...
	fn := findEnclosingPackageLevelFunction(pkg, path)
	if fn == nil {
		return nil // not in any function
	}

	// ...then walk down the nested anonymous functions.
	// path is ordered innermost-first, so path[n-1-i] walks it
	// outermost-first; each FuncLit encountered is matched to the
	// anonymous SSA function whose Pos is the literal's "func" token.
	n := len(path)
outer:
	for i := range path {
		if lit, ok := path[n-1-i].(*ast.FuncLit); ok {
			for _, anon := range fn.AnonFuncs {
				if anon.Pos() == lit.Type.Func {
					fn = anon
					continue outer
				}
			}
			// SSA function not found:
			// - package not yet built, or maybe
			// - builder skipped FuncLit in dead block
			//   (in principle; but currently the Builder
			//   generates even dead FuncLits).
			return nil
		}
	}
	return fn
}
|
||||
|
||||
// HasEnclosingFunction returns true if the AST node denoted by path
|
||||
// is contained within the declaration of some function or
|
||||
// package-level variable.
|
||||
//
|
||||
// Unlike EnclosingFunction, the behaviour of this function does not
|
||||
// depend on whether SSA code for pkg has been built, so it can be
|
||||
// used to quickly reject check inputs that will cause
|
||||
// EnclosingFunction to fail, prior to SSA building.
|
||||
//
|
||||
func HasEnclosingFunction(pkg *Package, path []ast.Node) bool {
|
||||
return findEnclosingPackageLevelFunction(pkg, path) != nil
|
||||
}
|
||||
|
||||
// findEnclosingPackageLevelFunction returns the Function
// corresponding to the package-level function enclosing path,
// or nil if path is not inside any function.
//
func findEnclosingPackageLevelFunction(pkg *Package, path []ast.Node) *Function {
	if n := len(path); n >= 2 { // [... {Gen,Func}Decl File]
		switch decl := path[n-2].(type) {
		case *ast.GenDecl:
			if decl.Tok == token.VAR && n >= 3 {
				// Package-level 'var' initializer.
				return pkg.init
			}

		case *ast.FuncDecl:
			if decl.Recv == nil && decl.Name.Name == "init" {
				// Explicit init() function.
				// Scan pkg.init for the call to the synthetic
				// function whose position matches this decl.
				for _, b := range pkg.init.Blocks {
					for _, instr := range b.Instrs {
						if instr, ok := instr.(*Call); ok {
							if callee, ok := instr.Call.Value.(*Function); ok && callee.Pkg == pkg && callee.Pos() == decl.Name.NamePos {
								return callee
							}
						}
					}
				}
				// Hack: return non-nil when SSA is not yet
				// built so that HasEnclosingFunction works.
				return pkg.init
			}
			// Declared function/method.
			return findNamedFunc(pkg, decl.Name.NamePos)
		}
	}
	return nil // not in any function
}
|
||||
|
||||
// findNamedFunc returns the named function whose FuncDecl.Ident is at
// position pos, or nil if none is found.
//
func findNamedFunc(pkg *Package, pos token.Pos) *Function {
	// Look at all package members and method sets of named types.
	// Not very efficient.
	for _, mem := range pkg.Members {
		switch mem := mem.(type) {
		case *Function:
			// Package-level function declaration.
			if mem.Pos() == pos {
				return mem
			}
		case *Type:
			// Method of a named type: search the pointer method set,
			// which includes both value and pointer receivers.
			mset := pkg.Prog.MethodSets.MethodSet(types.NewPointer(mem.Type()))
			for i, n := 0, mset.Len(); i < n; i++ {
				// Don't call Program.Method: avoid creating wrappers.
				obj := mset.At(i).Obj().(*types.Func)
				if obj.Pos() == pos {
					return pkg.values[obj].(*Function)
				}
			}
		}
	}
	return nil
}
|
||||
|
||||
// ValueForExpr returns the SSA Value that corresponds to non-constant
|
||||
// expression e.
|
||||
//
|
||||
// It returns nil if no value was found, e.g.
|
||||
// - the expression is not lexically contained within f;
|
||||
// - f was not built with debug information; or
|
||||
// - e is a constant expression. (For efficiency, no debug
|
||||
// information is stored for constants. Use
|
||||
// go/types.Info.Types[e].Value instead.)
|
||||
// - e is a reference to nil or a built-in function.
|
||||
// - the value was optimised away.
|
||||
//
|
||||
// If e is an addressable expression used in an lvalue context,
|
||||
// value is the address denoted by e, and isAddr is true.
|
||||
//
|
||||
// The types of e (or &e, if isAddr) and the result are equal
|
||||
// (modulo "untyped" bools resulting from comparisons).
|
||||
//
|
||||
// (Tip: to find the ssa.Value given a source position, use
|
||||
// astutil.PathEnclosingInterval to locate the ast.Node, then
|
||||
// EnclosingFunction to locate the Function, then ValueForExpr to find
|
||||
// the ssa.Value.)
|
||||
//
|
||||
func (f *Function) ValueForExpr(e ast.Expr) (value Value, isAddr bool) {
|
||||
if f.debugInfo() { // (opt)
|
||||
e = unparen(e)
|
||||
for _, b := range f.Blocks {
|
||||
for _, instr := range b.Instrs {
|
||||
if ref, ok := instr.(*DebugRef); ok {
|
||||
if ref.Expr == e {
|
||||
return ref.X, ref.IsAddr
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// --- Lookup functions for source-level named entities (types.Objects) ---
|
||||
|
||||
// Package returns the SSA Package corresponding to the specified
|
||||
// type-checker package object.
|
||||
// It returns nil if no such SSA package has been created.
|
||||
//
|
||||
func (prog *Program) Package(obj *types.Package) *Package {
|
||||
return prog.packages[obj]
|
||||
}
|
||||
|
||||
// packageLevelValue returns the package-level value corresponding to
|
||||
// the specified named object, which may be a package-level const
|
||||
// (*Const), var (*Global) or func (*Function) of some package in
|
||||
// prog. It returns nil if the object is not found.
|
||||
//
|
||||
func (prog *Program) packageLevelValue(obj types.Object) Value {
|
||||
if pkg, ok := prog.packages[obj.Pkg()]; ok {
|
||||
return pkg.values[obj]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// FuncValue returns the concrete Function denoted by the source-level
|
||||
// named function obj, or nil if obj denotes an interface method.
|
||||
//
|
||||
// TODO(adonovan): check the invariant that obj.Type() matches the
|
||||
// result's Signature, both in the params/results and in the receiver.
|
||||
//
|
||||
func (prog *Program) FuncValue(obj *types.Func) *Function {
|
||||
fn, _ := prog.packageLevelValue(obj).(*Function)
|
||||
return fn
|
||||
}
|
||||
|
||||
// ConstValue returns the SSA Value denoted by the source-level named
|
||||
// constant obj.
|
||||
//
|
||||
func (prog *Program) ConstValue(obj *types.Const) *Const {
|
||||
// TODO(adonovan): opt: share (don't reallocate)
|
||||
// Consts for const objects and constant ast.Exprs.
|
||||
|
||||
// Universal constant? {true,false,nil}
|
||||
if obj.Parent() == types.Universe {
|
||||
return NewConst(obj.Val(), obj.Type())
|
||||
}
|
||||
// Package-level named constant?
|
||||
if v := prog.packageLevelValue(obj); v != nil {
|
||||
return v.(*Const)
|
||||
}
|
||||
return NewConst(obj.Val(), obj.Type())
|
||||
}
|
||||
|
||||
// VarValue returns the SSA Value that corresponds to a specific
// identifier denoting the source-level named variable obj.
//
// VarValue returns nil if a local variable was not found, perhaps
// because its package was not built, the debug information was not
// requested during SSA construction, or the value was optimized away.
//
// ref is the path to an ast.Ident (e.g. from PathEnclosingInterval),
// and that ident must resolve to obj.
//
// pkg is the package enclosing the reference.  (A reference to a var
// always occurs within a function, so we need to know where to find it.)
//
// If the identifier is a field selector and its base expression is
// non-addressable, then VarValue returns the value of that field.
// For example:
//    func f() struct {x int}
//    f().x  // VarValue(x) returns a *Field instruction of type int
//
// All other identifiers denote addressable locations (variables).
// For them, VarValue may return either the variable's address or its
// value, even when the expression is evaluated only for its value; the
// situation is reported by isAddr, the second component of the result.
//
// If !isAddr, the returned value is the one associated with the
// specific identifier.  For example,
//       var x int    // VarValue(x) returns Const 0 here
//       x = 1        // VarValue(x) returns Const 1 here
//
// It is not specified whether the value or the address is returned in
// any particular case, as it may depend upon optimizations performed
// during SSA code generation, such as registerization, constant
// folding, avoidance of materialization of subexpressions, etc.
//
func (prog *Program) VarValue(obj *types.Var, pkg *Package, ref []ast.Node) (value Value, isAddr bool) {
	// All references to a var are local to some function, possibly init.
	fn := EnclosingFunction(pkg, ref)
	if fn == nil {
		return // e.g. def of struct field; SSA not built?
	}

	id := ref[0].(*ast.Ident)

	// Defining ident of a parameter?
	if id.Pos() == obj.Pos() {
		for _, param := range fn.Params {
			if param.Object() == obj {
				return param, false
			}
		}
	}

	// Other ident?
	// Search the function's DebugRef instructions for one whose
	// position matches the identifier's.
	for _, b := range fn.Blocks {
		for _, instr := range b.Instrs {
			if dr, ok := instr.(*DebugRef); ok {
				if dr.Pos() == id.Pos() {
					return dr.X, dr.IsAddr
				}
			}
		}
	}

	// Defining ident of package-level var?
	if v := prog.packageLevelValue(obj); v != nil {
		return v.(*Global), true
	}

	return // e.g. debug info not requested, or var optimized away
}
|
1745
vendor/honnef.co/go/tools/ssa/ssa.go
vendored
Normal file
1745
vendor/honnef.co/go/tools/ssa/ssa.go
vendored
Normal file
File diff suppressed because it is too large
Load Diff
271
vendor/honnef.co/go/tools/ssa/testmain.go
vendored
Normal file
271
vendor/honnef.co/go/tools/ssa/testmain.go
vendored
Normal file
@ -0,0 +1,271 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ssa
|
||||
|
||||
// CreateTestMainPackage synthesizes a main package that runs all the
|
||||
// tests of the supplied packages.
|
||||
// It is closely coupled to $GOROOT/src/cmd/go/test.go and $GOROOT/src/testing.
|
||||
//
|
||||
// TODO(adonovan): throws this all away now that x/tools/go/packages
|
||||
// provides access to the actual synthetic test main files.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/parser"
|
||||
"go/types"
|
||||
"log"
|
||||
"os"
|
||||
"strings"
|
||||
"text/template"
|
||||
)
|
||||
|
||||
// FindTests returns the Test, Benchmark, and Example functions
// (as defined by "go test") defined in the specified package,
// and its TestMain function, if any.
//
// Deprecated: use x/tools/go/packages to access synthetic testmain packages.
func FindTests(pkg *Package) (tests, benchmarks, examples []*Function, main *Function) {
	prog := pkg.Prog

	// The first two of these may be nil: if the program doesn't import "testing",
	// it can't contain any tests, but it may yet contain Examples.
	var testSig *types.Signature                              // func(*testing.T)
	var benchmarkSig *types.Signature                         // func(*testing.B)
	var exampleSig = types.NewSignature(nil, nil, nil, false) // func()

	// Obtain the types from the parameters of testing.MainStart.
	if testingPkg := prog.ImportedPackage("testing"); testingPkg != nil {
		mainStart := testingPkg.Func("MainStart")
		params := mainStart.Signature.Params()
		testSig = funcField(params.At(1).Type())
		benchmarkSig = funcField(params.At(2).Type())

		// Does the package define this function?
		//   func TestMain(*testing.M)
		if f := pkg.Func("TestMain"); f != nil {
			sig := f.Type().(*types.Signature)
			starM := mainStart.Signature.Results().At(0).Type() // *testing.M
			if sig.Results().Len() == 0 &&
				sig.Params().Len() == 1 &&
				types.Identical(sig.Params().At(0).Type(), starM) {
				main = f
			}
		}
	}

	// TODO(adonovan): use a stable order, e.g. lexical.
	for _, mem := range pkg.Members {
		// Candidates are exported functions declared in a *_test.go file.
		if f, ok := mem.(*Function); ok &&
			ast.IsExported(f.Name()) &&
			strings.HasSuffix(prog.Fset.Position(f.Pos()).Filename, "_test.go") {

			switch {
			case testSig != nil && isTestSig(f, "Test", testSig):
				tests = append(tests, f)
			case benchmarkSig != nil && isTestSig(f, "Benchmark", benchmarkSig):
				benchmarks = append(benchmarks, f)
			case isTestSig(f, "Example", exampleSig):
				examples = append(examples, f)
			default:
				continue
			}
		}
	}
	return
}
|
||||
|
||||
// Like isTest, but checks the signature too.
|
||||
func isTestSig(f *Function, prefix string, sig *types.Signature) bool {
|
||||
return isTest(f.Name(), prefix) && types.Identical(f.Signature, sig)
|
||||
}
|
||||
|
||||
// funcField, given the type of one of the three slice parameters of
// testing.Main (e.g. []testing.InternalTest), returns the function
// type of the struct's second field (e.g. func(*testing.T)).
// It panics if slice does not have that shape.
func funcField(slice types.Type) *types.Signature {
	return slice.(*types.Slice).Elem().Underlying().(*types.Struct).Field(1).Type().(*types.Signature)
}
|
||||
|
||||
// isTest tells whether name looks like a test (or benchmark, according to prefix).
// It is a Test (say) if there is a character after Test that is not a lower-case letter.
// We don't want TesticularCancer.
// Plundered from $GOROOT/src/cmd/go/test.go
func isTest(name, prefix string) bool {
	if !strings.HasPrefix(name, prefix) {
		return false
	}
	rest := name[len(prefix):]
	// "Test" alone is ok; otherwise the remainder must start with an
	// exported (non-lower-case) character.
	return rest == "" || ast.IsExported(rest)
}
|
||||
|
||||
// CreateTestMainPackage creates and returns a synthetic "testmain"
// package for the specified package if it defines tests, benchmarks or
// executable examples, or nil otherwise.  The new package is named
// "main" and provides a function named "main" that runs the tests,
// similar to the one that would be created by the 'go test' tool.
//
// Subsequent calls to prog.AllPackages include the new package.
// The package pkg must belong to the program prog.
//
// Deprecated: use x/tools/go/packages to access synthetic testmain packages.
func (prog *Program) CreateTestMainPackage(pkg *Package) *Package {
	if pkg.Prog != prog {
		log.Fatal("Package does not belong to Program")
	}

	// Template data
	var data struct {
		Pkg                         *Package
		Tests, Benchmarks, Examples []*Function
		Main                        *Function
		Go18                        bool
	}
	data.Pkg = pkg

	// Enumerate tests.
	data.Tests, data.Benchmarks, data.Examples, data.Main = FindTests(pkg)
	if data.Main == nil &&
		data.Tests == nil && data.Benchmarks == nil && data.Examples == nil {
		return nil
	}

	// Synthesize source for testmain package.
	path := pkg.Pkg.Path() + "$testmain"
	tmpl := testmainTmpl
	if testingPkg := prog.ImportedPackage("testing"); testingPkg != nil {
		// In Go 1.8, testing.MainStart's first argument is an interface, not a func.
		data.Go18 = types.IsInterface(testingPkg.Func("MainStart").Signature.Params().At(0).Type())
	} else {
		// The program does not import "testing", but FindTests
		// returned non-nil, which must mean there were Examples
		// but no Test, Benchmark, or TestMain functions.

		// We'll simply call them from testmain.main; this will
		// ensure they don't panic, but will not check any
		// "Output:" comments.
		// (We should not execute an Example that has no
		// "Output:" comment, but it's impossible to tell here.)
		tmpl = examplesOnlyTmpl
	}
	var buf bytes.Buffer
	if err := tmpl.Execute(&buf, data); err != nil {
		log.Fatalf("internal error expanding template for %s: %v", path, err)
	}
	if false { // debugging
		fmt.Fprintln(os.Stderr, buf.String())
	}

	// Parse and type-check the testmain package.
	f, err := parser.ParseFile(prog.Fset, path+".go", &buf, parser.Mode(0))
	if err != nil {
		log.Fatalf("internal error parsing %s: %v", path, err)
	}
	conf := types.Config{
		DisableUnusedImportCheck: true,
		Importer:                 importer{pkg},
	}
	files := []*ast.File{f}
	info := &types.Info{
		Types:      make(map[ast.Expr]types.TypeAndValue),
		Defs:       make(map[*ast.Ident]types.Object),
		Uses:       make(map[*ast.Ident]types.Object),
		Implicits:  make(map[ast.Node]types.Object),
		Scopes:     make(map[ast.Node]*types.Scope),
		Selections: make(map[*ast.SelectorExpr]*types.Selection),
	}
	testmainPkg, err := conf.Check(path, prog.Fset, files, info)
	if err != nil {
		log.Fatalf("internal error type-checking %s: %v", path, err)
	}

	// Create and build SSA code.
	testmain := prog.CreatePackage(testmainPkg, files, info, false)
	testmain.SetDebugMode(false)
	testmain.Build()
	testmain.Func("main").Synthetic = "test main function"
	testmain.Func("init").Synthetic = "package initializer"
	return testmain
}
|
||||
|
||||
// importer is an implementation of types.Importer for an already
// loaded SSA program: imports are resolved against the program's
// existing packages rather than loaded from disk.
type importer struct {
	pkg *Package // package under test; may be non-importable
}
|
||||
|
||||
func (imp importer) Import(path string) (*types.Package, error) {
|
||||
if p := imp.pkg.Prog.ImportedPackage(path); p != nil {
|
||||
return p.Pkg, nil
|
||||
}
|
||||
if path == imp.pkg.Pkg.Path() {
|
||||
return imp.pkg.Pkg, nil
|
||||
}
|
||||
return nil, fmt.Errorf("not found") // can't happen
|
||||
}
|
||||
|
||||
// testmainTmpl is the source template for the synthetic testmain
// package when the program imports "testing". The {{if .Go18}} branch
// supplies the testDeps implementation required by the Go 1.8+
// testing.MainStart signature; otherwise a plain match function is
// used. NOTE(review): the template text below is runtime data — its
// exact whitespace was lost in extraction; verify against the
// upstream file before relying on byte-exact output.
var testmainTmpl = template.Must(template.New("testmain").Parse(`
package main

import "io"
import "os"
import "testing"
import p {{printf "%q" .Pkg.Pkg.Path}}

{{if .Go18}}
type deps struct{}

func (deps) ImportPath() string { return "" }
func (deps) MatchString(pat, str string) (bool, error) { return true, nil }
func (deps) StartCPUProfile(io.Writer) error { return nil }
func (deps) StartTestLog(io.Writer) {}
func (deps) StopCPUProfile() {}
func (deps) StopTestLog() error { return nil }
func (deps) WriteHeapProfile(io.Writer) error { return nil }
func (deps) WriteProfileTo(string, io.Writer, int) error { return nil }

var match deps
{{else}}
func match(_, _ string) (bool, error) { return true, nil }
{{end}}

func main() {
	tests := []testing.InternalTest{
{{range .Tests}}
		{ {{printf "%q" .Name}}, p.{{.Name}} },
{{end}}
	}
	benchmarks := []testing.InternalBenchmark{
{{range .Benchmarks}}
		{ {{printf "%q" .Name}}, p.{{.Name}} },
{{end}}
	}
	examples := []testing.InternalExample{
{{range .Examples}}
		{Name: {{printf "%q" .Name}}, F: p.{{.Name}}},
{{end}}
	}
	m := testing.MainStart(match, tests, benchmarks, examples)
{{with .Main}}
	p.{{.Name}}(m)
{{else}}
	os.Exit(m.Run())
{{end}}
}

`))
|
||||
|
||||
// examplesOnlyTmpl is used instead of testmainTmpl when the program
// does not import "testing": main simply calls each Example function
// directly (no output checking). NOTE(review): runtime template text;
// exact whitespace lost in extraction — verify against upstream.
var examplesOnlyTmpl = template.Must(template.New("examples").Parse(`
package main

import p {{printf "%q" .Pkg.Pkg.Path}}

func main() {
{{range .Examples}}
	p.{{.Name}}()
{{end}}
}
`))
|
119
vendor/honnef.co/go/tools/ssa/util.go
vendored
Normal file
119
vendor/honnef.co/go/tools/ssa/util.go
vendored
Normal file
@ -0,0 +1,119 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ssa
|
||||
|
||||
// This file defines a number of miscellaneous utility functions.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/token"
|
||||
"go/types"
|
||||
"io"
|
||||
"os"
|
||||
|
||||
"golang.org/x/tools/go/ast/astutil"
|
||||
)
|
||||
|
||||
//// AST utilities
|
||||
|
||||
// unparen returns e with any enclosing parentheses stripped (delegates to astutil.Unparen).
func unparen(e ast.Expr) ast.Expr { return astutil.Unparen(e) }
|
||||
|
||||
// isBlankIdent returns true iff e is an Ident with name "_".
|
||||
// They have no associated types.Object, and thus no type.
|
||||
//
|
||||
func isBlankIdent(e ast.Expr) bool {
|
||||
id, ok := e.(*ast.Ident)
|
||||
return ok && id.Name == "_"
|
||||
}
|
||||
|
||||
//// Type utilities. Some of these belong in go/types.
|
||||
|
||||
// isPointer returns true for types whose underlying type is a pointer.
|
||||
func isPointer(typ types.Type) bool {
|
||||
_, ok := typ.Underlying().(*types.Pointer)
|
||||
return ok
|
||||
}
|
||||
|
||||
// isInterface reports whether T is an interface type (delegates to types.IsInterface).
func isInterface(T types.Type) bool { return types.IsInterface(T) }
|
||||
|
||||
// deref returns a pointer's element type; otherwise it returns typ.
|
||||
func deref(typ types.Type) types.Type {
|
||||
if p, ok := typ.Underlying().(*types.Pointer); ok {
|
||||
return p.Elem()
|
||||
}
|
||||
return typ
|
||||
}
|
||||
|
||||
// recvType returns the receiver type of method obj.
// It panics if obj's type is not a *types.Signature with a receiver
// (i.e. obj must be a method, not a plain function).
func recvType(obj *types.Func) types.Type {
	return obj.Type().(*types.Signature).Recv().Type()
}
|
||||
|
||||
// DefaultType returns the default "typed" type for an "untyped" type;
|
||||
// it returns the incoming type for all other types. The default type
|
||||
// for untyped nil is untyped nil.
|
||||
//
|
||||
// Exported to ssa/interp.
|
||||
//
|
||||
// TODO(adonovan): use go/types.DefaultType after 1.8.
|
||||
//
|
||||
func DefaultType(typ types.Type) types.Type {
|
||||
if t, ok := typ.(*types.Basic); ok {
|
||||
k := t.Kind()
|
||||
switch k {
|
||||
case types.UntypedBool:
|
||||
k = types.Bool
|
||||
case types.UntypedInt:
|
||||
k = types.Int
|
||||
case types.UntypedRune:
|
||||
k = types.Rune
|
||||
case types.UntypedFloat:
|
||||
k = types.Float64
|
||||
case types.UntypedComplex:
|
||||
k = types.Complex128
|
||||
case types.UntypedString:
|
||||
k = types.String
|
||||
}
|
||||
typ = types.Typ[k]
|
||||
}
|
||||
return typ
|
||||
}
|
||||
|
||||
// logStack prints the formatted "start" message to stderr and
// returns a closure that prints the corresponding "end" message.
// Call using 'defer logStack(...)()' to show builder stack on panic.
// Don't forget trailing parens!
//
func logStack(format string, args ...interface{}) func() {
	msg := fmt.Sprintf(format, args...)
	fmt.Fprintln(os.Stderr, msg)
	return func() {
		fmt.Fprintln(os.Stderr, msg+" end")
	}
}
|
||||
|
||||
// newVar creates a 'var' for use in a types.Tuple.
// The variable has no position and no owning package.
func newVar(name string, typ types.Type) *types.Var {
	return types.NewParam(token.NoPos, nil, name, typ)
}
|
||||
|
||||
// anonVar creates an anonymous 'var' (empty name) for use in a types.Tuple.
func anonVar(typ types.Type) *types.Var {
	return newVar("", typ)
}
|
||||
|
||||
// lenResults is the single-int result tuple shared by every Builtin created by makeLen.
var lenResults = types.NewTuple(anonVar(tInt))
|
||||
|
||||
// makeLen returns the len builtin specialized to type func(T)int.
// A fresh Builtin is allocated per call; only the parameter tuple
// differs between instances (the result tuple is shared).
func makeLen(T types.Type) *Builtin {
	lenParams := types.NewTuple(anonVar(T))
	return &Builtin{
		name: "len",
		sig:  types.NewSignature(nil, lenParams, lenResults, false),
	}
}
|
290
vendor/honnef.co/go/tools/ssa/wrappers.go
vendored
Normal file
290
vendor/honnef.co/go/tools/ssa/wrappers.go
vendored
Normal file
@ -0,0 +1,290 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ssa
|
||||
|
||||
// This file defines synthesis of Functions that delegate to declared
|
||||
// methods; they come in three kinds:
|
||||
//
|
||||
// (1) wrappers: methods that wrap declared methods, performing
|
||||
// implicit pointer indirections and embedded field selections.
|
||||
//
|
||||
// (2) thunks: funcs that wrap declared methods. Like wrappers,
|
||||
// thunks perform indirections and field selections. The thunk's
|
||||
// first parameter is used as the receiver for the method call.
|
||||
//
|
||||
// (3) bounds: funcs that wrap declared methods. The bound's sole
|
||||
// free variable, supplied by a closure, is used as the receiver
|
||||
// for the method call. No indirections or field selections are
|
||||
// performed since they can be done before the call.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"go/types"
|
||||
)
|
||||
|
||||
// -- wrappers -----------------------------------------------------------

// makeWrapper returns a synthetic method that delegates to the
// declared method denoted by meth.Obj(), first performing any
// necessary pointer indirections or field selections implied by meth.
//
// The resulting method's receiver type is meth.Recv().
//
// This function is versatile but quite subtle!  Consider the
// following axes of variation when making changes:
//   - optional receiver indirection
//   - optional implicit field selections
//   - meth.Obj() may denote a concrete or an interface method
//   - the result may be a thunk or a wrapper.
//
// EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu)
func makeWrapper(prog *Program, sel *types.Selection) *Function {
	obj := sel.Obj().(*types.Func)       // the declared function
	sig := sel.Type().(*types.Signature) // type of this wrapper

	var recv *types.Var // wrapper's receiver or thunk's params[0]
	name := obj.Name()
	var description string
	var start int // first regular param
	if sel.Kind() == types.MethodExpr {
		// Method expression (T.f): the receiver becomes the first
		// regular parameter of the synthesized thunk.
		name += "$thunk"
		description = "thunk"
		recv = sig.Params().At(0)
		start = 1
	} else {
		description = "wrapper"
		recv = sig.Recv()
	}

	description = fmt.Sprintf("%s for %s", description, sel.Obj())
	if prog.mode&LogSource != 0 {
		defer logStack("make %s to (%s)", description, recv.Type())()
	}
	fn := &Function{
		name:      name,
		method:    sel,
		object:    obj,
		Signature: sig,
		Synthetic: description,
		Prog:      prog,
		pos:       obj.Pos(),
	}
	fn.startBody()
	fn.addSpilledParam(recv)
	createParams(fn, start)

	indices := sel.Index()

	var v Value = fn.Locals[0] // spilled receiver
	if isPointer(sel.Recv()) {
		v = emitLoad(fn, v)

		// For simple indirection wrappers, perform an informative nil-check:
		// "value method (T).f called using nil *T pointer"
		if len(indices) == 1 && !isPointer(recvType(obj)) {
			var c Call
			c.Call.Value = &Builtin{
				name: "ssa:wrapnilchk",
				sig: types.NewSignature(nil,
					types.NewTuple(anonVar(sel.Recv()), anonVar(tString), anonVar(tString)),
					types.NewTuple(anonVar(sel.Recv())), false),
			}
			c.Call.Args = []Value{
				v,
				stringConst(deref(sel.Recv()).String()),
				stringConst(sel.Obj().Name()),
			}
			c.setType(v.Type())
			v = fn.emit(&c)
		}
	}

	// Invariant: v is a pointer, either
	//   value of *A receiver param, or
	//   address of A spilled receiver.

	// We use pointer arithmetic (FieldAddr possibly followed by
	// Load) in preference to value extraction (Field possibly
	// preceded by Load).

	v = emitImplicitSelections(fn, v, indices[:len(indices)-1])

	// Invariant: v is a pointer, either
	//   value of implicit *C field, or
	//   address of implicit C field.

	var c Call
	if r := recvType(obj); !isInterface(r) { // concrete method
		if !isPointer(r) {
			v = emitLoad(fn, v)
		}
		c.Call.Value = prog.declaredFunc(obj)
		c.Call.Args = append(c.Call.Args, v)
	} else {
		// Interface method: dynamic dispatch through the loaded value.
		c.Call.Method = obj
		c.Call.Value = emitLoad(fn, v)
	}
	// Forward the remaining regular parameters unchanged.
	for _, arg := range fn.Params[1:] {
		c.Call.Args = append(c.Call.Args, arg)
	}
	emitTailCall(fn, &c)
	fn.finishBody()
	return fn
}
|
||||
|
||||
// createParams creates parameters for wrapper method fn based on its
|
||||
// Signature.Params, which do not include the receiver.
|
||||
// start is the index of the first regular parameter to use.
|
||||
//
|
||||
func createParams(fn *Function, start int) {
|
||||
tparams := fn.Signature.Params()
|
||||
for i, n := start, tparams.Len(); i < n; i++ {
|
||||
fn.addParamObj(tparams.At(i))
|
||||
}
|
||||
}
|
||||
|
||||
// -- bounds -----------------------------------------------------------

// makeBound returns a bound method wrapper (or "bound"), a synthetic
// function that delegates to a concrete or interface method denoted
// by obj.  The resulting function has no receiver, but has one free
// variable which will be used as the method's receiver in the
// tail-call.
//
// Use MakeClosure with such a wrapper to construct a bound method
// closure.  e.g.:
//
//	type T int          or:  type T interface { meth() }
//	func (t T) meth()
//	var t T
//	f := t.meth
//	f() // calls t.meth()
//
// f is a closure of a synthetic wrapper defined as if by:
//
//	f := func() { return t.meth() }
//
// Unlike makeWrapper, makeBound need perform no indirection or field
// selections because that can be done before the closure is
// constructed.
//
// EXCLUSIVE_LOCKS_ACQUIRED(meth.Prog.methodsMu)
func makeBound(prog *Program, obj *types.Func) *Function {
	// Bounds are memoized per method object under methodsMu.
	prog.methodsMu.Lock()
	defer prog.methodsMu.Unlock()
	fn, ok := prog.bounds[obj]
	if !ok {
		description := fmt.Sprintf("bound method wrapper for %s", obj)
		if prog.mode&LogSource != 0 {
			defer logStack("%s", description)()
		}
		fn = &Function{
			name:      obj.Name() + "$bound",
			object:    obj,
			Signature: changeRecv(obj.Type().(*types.Signature), nil), // drop receiver
			Synthetic: description,
			Prog:      prog,
			pos:       obj.Pos(),
		}

		// The receiver is supplied by the closure as the sole free variable.
		fv := &FreeVar{name: "recv", typ: recvType(obj), parent: fn}
		fn.FreeVars = []*FreeVar{fv}
		fn.startBody()
		createParams(fn, 0)
		var c Call

		if !isInterface(recvType(obj)) { // concrete
			c.Call.Value = prog.declaredFunc(obj)
			c.Call.Args = []Value{fv}
		} else {
			// Interface method: dynamic dispatch on the free variable.
			c.Call.Value = fv
			c.Call.Method = obj
		}
		for _, arg := range fn.Params {
			c.Call.Args = append(c.Call.Args, arg)
		}
		emitTailCall(fn, &c)
		fn.finishBody()

		prog.bounds[obj] = fn
	}
	return fn
}
|
||||
|
||||
// -- thunks -----------------------------------------------------------

// makeThunk returns a thunk, a synthetic function that delegates to a
// concrete or interface method denoted by sel.Obj().  The resulting
// function has no receiver, but has an additional (first) regular
// parameter.
//
// Precondition: sel.Kind() == types.MethodExpr.
//
//	type T int          or:  type T interface { meth() }
//	func (t T) meth()
//	f := T.meth
//	var t T
//	f(t) // calls t.meth()
//
// f is a synthetic wrapper defined as if by:
//
//	f := func(t T) { return t.meth() }
//
// TODO(adonovan): opt: currently the stub is created even when used
// directly in a function call: C.f(i, 0).  This is less efficient
// than inlining the stub.
//
// EXCLUSIVE_LOCKS_ACQUIRED(meth.Prog.methodsMu)
func makeThunk(prog *Program, sel *types.Selection) *Function {
	if sel.Kind() != types.MethodExpr {
		panic(sel)
	}

	// Build a hashable stand-in for the selection (see selectionKey).
	key := selectionKey{
		kind:     sel.Kind(),
		recv:     sel.Recv(),
		obj:      sel.Obj(),
		index:    fmt.Sprint(sel.Index()),
		indirect: sel.Indirect(),
	}

	prog.methodsMu.Lock()
	defer prog.methodsMu.Unlock()

	// Canonicalize key.recv to avoid constructing duplicate thunks.
	canonRecv, ok := prog.canon.At(key.recv).(types.Type)
	if !ok {
		canonRecv = key.recv
		prog.canon.Set(key.recv, canonRecv)
	}
	key.recv = canonRecv

	// Memoize per canonical key; the heavy lifting is in makeWrapper.
	fn, ok := prog.thunks[key]
	if !ok {
		fn = makeWrapper(prog, sel)
		if fn.Signature.Recv() != nil {
			panic(fn) // unexpected receiver
		}
		prog.thunks[key] = fn
	}
	return fn
}
|
||||
|
||||
func changeRecv(s *types.Signature, recv *types.Var) *types.Signature {
|
||||
return types.NewSignature(recv, s.Params(), s.Results(), s.Variadic())
|
||||
}
|
||||
|
||||
// selectionKey is like types.Selection but a usable map key.
type selectionKey struct {
	kind     types.SelectionKind
	recv     types.Type // canonicalized via Program.canon
	obj      types.Object
	index    string // fmt.Sprint of the selection's index path, so it is comparable
	indirect bool   // whether the selection involves a pointer indirection
}
|
5
vendor/honnef.co/go/tools/ssa/write.go
vendored
Normal file
5
vendor/honnef.co/go/tools/ssa/write.go
vendored
Normal file
@ -0,0 +1,5 @@
|
||||
package ssa
|
||||
|
||||
// NewJump returns a new unconditional Jump instruction whose parent is
// the given basic block.
func NewJump(parent *BasicBlock) *Jump {
	return &Jump{anInstruction{parent}}
}
|
58
vendor/honnef.co/go/tools/ssautil/ssautil.go
vendored
Normal file
58
vendor/honnef.co/go/tools/ssautil/ssautil.go
vendored
Normal file
@ -0,0 +1,58 @@
|
||||
package ssautil
|
||||
|
||||
import (
|
||||
"honnef.co/go/tools/ssa"
|
||||
)
|
||||
|
||||
func Reachable(from, to *ssa.BasicBlock) bool {
|
||||
if from == to {
|
||||
return true
|
||||
}
|
||||
if from.Dominates(to) {
|
||||
return true
|
||||
}
|
||||
|
||||
found := false
|
||||
Walk(from, func(b *ssa.BasicBlock) bool {
|
||||
if b == to {
|
||||
found = true
|
||||
return false
|
||||
}
|
||||
return true
|
||||
})
|
||||
return found
|
||||
}
|
||||
|
||||
func Walk(b *ssa.BasicBlock, fn func(*ssa.BasicBlock) bool) {
|
||||
seen := map[*ssa.BasicBlock]bool{}
|
||||
wl := []*ssa.BasicBlock{b}
|
||||
for len(wl) > 0 {
|
||||
b := wl[len(wl)-1]
|
||||
wl = wl[:len(wl)-1]
|
||||
if seen[b] {
|
||||
continue
|
||||
}
|
||||
seen[b] = true
|
||||
if !fn(b) {
|
||||
continue
|
||||
}
|
||||
wl = append(wl, b.Succs...)
|
||||
}
|
||||
}
|
||||
|
||||
// Vararg collects the values stored into the variadic argument slice x.
// It succeeds only when x slices an *ssa.Alloc whose Comment is
// "varargs" (the allocation the SSA builder synthesizes for a call's
// variadic arguments); for any other slice it returns (nil, false).
func Vararg(x *ssa.Slice) ([]ssa.Value, bool) {
	var out []ssa.Value
	slice, ok := x.X.(*ssa.Alloc)
	if !ok || slice.Comment != "varargs" {
		return nil, false
	}
	for _, ref := range *slice.Referrers() {
		idx, ok := ref.(*ssa.IndexAddr)
		if !ok {
			continue
		}
		// NOTE(review): this assumes every IndexAddr on the varargs
		// alloc has exactly one referrer and that it is a *ssa.Store;
		// builder-generated varargs code appears to guarantee this, but
		// the expression would panic otherwise — TODO confirm.
		v := (*idx.Referrers())[0].(*ssa.Store).Val
		out = append(out, v)
	}
	return out, true
}
|
525
vendor/honnef.co/go/tools/staticcheck/analysis.go
vendored
Normal file
525
vendor/honnef.co/go/tools/staticcheck/analysis.go
vendored
Normal file
@ -0,0 +1,525 @@
|
||||
package staticcheck
|
||||
|
||||
import (
|
||||
"flag"
|
||||
|
||||
"honnef.co/go/tools/facts"
|
||||
"honnef.co/go/tools/internal/passes/buildssa"
|
||||
"honnef.co/go/tools/lint/lintutil"
|
||||
|
||||
"golang.org/x/tools/go/analysis"
|
||||
"golang.org/x/tools/go/analysis/passes/inspect"
|
||||
)
|
||||
|
||||
func newFlagSet() flag.FlagSet {
|
||||
fs := flag.NewFlagSet("", flag.PanicOnError)
|
||||
fs.Var(lintutil.NewVersionFlag(), "go", "Target Go version")
|
||||
return *fs
|
||||
}
|
||||
|
||||
var Analyzers = map[string]*analysis.Analyzer{
|
||||
"SA1000": {
|
||||
Name: "SA1000",
|
||||
Run: callChecker(checkRegexpRules),
|
||||
Doc: Docs["SA1000"].String(),
|
||||
Requires: []*analysis.Analyzer{buildssa.Analyzer, valueRangesAnalyzer},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
"SA1001": {
|
||||
Name: "SA1001",
|
||||
Run: CheckTemplate,
|
||||
Doc: Docs["SA1001"].String(),
|
||||
Requires: []*analysis.Analyzer{inspect.Analyzer},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
"SA1002": {
|
||||
Name: "SA1002",
|
||||
Run: callChecker(checkTimeParseRules),
|
||||
Doc: Docs["SA1002"].String(),
|
||||
Requires: []*analysis.Analyzer{buildssa.Analyzer, valueRangesAnalyzer},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
"SA1003": {
|
||||
Name: "SA1003",
|
||||
Run: callChecker(checkEncodingBinaryRules),
|
||||
Doc: Docs["SA1003"].String(),
|
||||
Requires: []*analysis.Analyzer{buildssa.Analyzer, valueRangesAnalyzer},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
"SA1004": {
|
||||
Name: "SA1004",
|
||||
Run: CheckTimeSleepConstant,
|
||||
Doc: Docs["SA1004"].String(),
|
||||
Requires: []*analysis.Analyzer{inspect.Analyzer},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
"SA1005": {
|
||||
Name: "SA1005",
|
||||
Run: CheckExec,
|
||||
Doc: Docs["SA1005"].String(),
|
||||
Requires: []*analysis.Analyzer{inspect.Analyzer},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
"SA1006": {
|
||||
Name: "SA1006",
|
||||
Run: CheckUnsafePrintf,
|
||||
Doc: Docs["SA1006"].String(),
|
||||
Requires: []*analysis.Analyzer{inspect.Analyzer},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
"SA1007": {
|
||||
Name: "SA1007",
|
||||
Run: callChecker(checkURLsRules),
|
||||
Doc: Docs["SA1007"].String(),
|
||||
Requires: []*analysis.Analyzer{buildssa.Analyzer, valueRangesAnalyzer},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
"SA1008": {
|
||||
Name: "SA1008",
|
||||
Run: CheckCanonicalHeaderKey,
|
||||
Doc: Docs["SA1008"].String(),
|
||||
Requires: []*analysis.Analyzer{inspect.Analyzer},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
"SA1010": {
|
||||
Name: "SA1010",
|
||||
Run: callChecker(checkRegexpFindAllRules),
|
||||
Doc: Docs["SA1010"].String(),
|
||||
Requires: []*analysis.Analyzer{buildssa.Analyzer, valueRangesAnalyzer},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
"SA1011": {
|
||||
Name: "SA1011",
|
||||
Run: callChecker(checkUTF8CutsetRules),
|
||||
Doc: Docs["SA1011"].String(),
|
||||
Requires: []*analysis.Analyzer{buildssa.Analyzer, valueRangesAnalyzer},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
"SA1012": {
|
||||
Name: "SA1012",
|
||||
Run: CheckNilContext,
|
||||
Doc: Docs["SA1012"].String(),
|
||||
Requires: []*analysis.Analyzer{inspect.Analyzer},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
"SA1013": {
|
||||
Name: "SA1013",
|
||||
Run: CheckSeeker,
|
||||
Doc: Docs["SA1013"].String(),
|
||||
Requires: []*analysis.Analyzer{inspect.Analyzer},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
"SA1014": {
|
||||
Name: "SA1014",
|
||||
Run: callChecker(checkUnmarshalPointerRules),
|
||||
Doc: Docs["SA1014"].String(),
|
||||
Requires: []*analysis.Analyzer{buildssa.Analyzer, valueRangesAnalyzer},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
"SA1015": {
|
||||
Name: "SA1015",
|
||||
Run: CheckLeakyTimeTick,
|
||||
Doc: Docs["SA1015"].String(),
|
||||
Requires: []*analysis.Analyzer{buildssa.Analyzer},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
"SA1016": {
|
||||
Name: "SA1016",
|
||||
Run: CheckUntrappableSignal,
|
||||
Doc: Docs["SA1016"].String(),
|
||||
Requires: []*analysis.Analyzer{inspect.Analyzer},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
"SA1017": {
|
||||
Name: "SA1017",
|
||||
Run: callChecker(checkUnbufferedSignalChanRules),
|
||||
Doc: Docs["SA1017"].String(),
|
||||
Requires: []*analysis.Analyzer{buildssa.Analyzer, valueRangesAnalyzer},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
"SA1018": {
|
||||
Name: "SA1018",
|
||||
Run: callChecker(checkStringsReplaceZeroRules),
|
||||
Doc: Docs["SA1018"].String(),
|
||||
Requires: []*analysis.Analyzer{buildssa.Analyzer, valueRangesAnalyzer},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
"SA1019": {
|
||||
Name: "SA1019",
|
||||
Run: CheckDeprecated,
|
||||
Doc: Docs["SA1019"].String(),
|
||||
Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Deprecated},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
"SA1020": {
|
||||
Name: "SA1020",
|
||||
Run: callChecker(checkListenAddressRules),
|
||||
Doc: Docs["SA1020"].String(),
|
||||
Requires: []*analysis.Analyzer{buildssa.Analyzer, valueRangesAnalyzer},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
"SA1021": {
|
||||
Name: "SA1021",
|
||||
Run: callChecker(checkBytesEqualIPRules),
|
||||
Doc: Docs["SA1021"].String(),
|
||||
Requires: []*analysis.Analyzer{buildssa.Analyzer, valueRangesAnalyzer},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
"SA1023": {
|
||||
Name: "SA1023",
|
||||
Run: CheckWriterBufferModified,
|
||||
Doc: Docs["SA1023"].String(),
|
||||
Requires: []*analysis.Analyzer{buildssa.Analyzer},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
"SA1024": {
|
||||
Name: "SA1024",
|
||||
Run: callChecker(checkUniqueCutsetRules),
|
||||
Doc: Docs["SA1024"].String(),
|
||||
Requires: []*analysis.Analyzer{buildssa.Analyzer, valueRangesAnalyzer},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
"SA1025": {
|
||||
Name: "SA1025",
|
||||
Run: CheckTimerResetReturnValue,
|
||||
Doc: Docs["SA1025"].String(),
|
||||
Requires: []*analysis.Analyzer{buildssa.Analyzer},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
"SA1026": {
|
||||
Name: "SA1026",
|
||||
Run: callChecker(checkUnsupportedMarshal),
|
||||
Doc: Docs["SA1026"].String(),
|
||||
Requires: []*analysis.Analyzer{buildssa.Analyzer, valueRangesAnalyzer},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
"SA1027": {
|
||||
Name: "SA1027",
|
||||
Run: callChecker(checkAtomicAlignment),
|
||||
Doc: Docs["SA1027"].String(),
|
||||
Requires: []*analysis.Analyzer{buildssa.Analyzer, valueRangesAnalyzer},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
|
||||
"SA2000": {
|
||||
Name: "SA2000",
|
||||
Run: CheckWaitgroupAdd,
|
||||
Doc: Docs["SA2000"].String(),
|
||||
Requires: []*analysis.Analyzer{inspect.Analyzer},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
"SA2001": {
|
||||
Name: "SA2001",
|
||||
Run: CheckEmptyCriticalSection,
|
||||
Doc: Docs["SA2001"].String(),
|
||||
Requires: []*analysis.Analyzer{inspect.Analyzer},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
"SA2002": {
|
||||
Name: "SA2002",
|
||||
Run: CheckConcurrentTesting,
|
||||
Doc: Docs["SA2002"].String(),
|
||||
Requires: []*analysis.Analyzer{buildssa.Analyzer},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
"SA2003": {
|
||||
Name: "SA2003",
|
||||
Run: CheckDeferLock,
|
||||
Doc: Docs["SA2003"].String(),
|
||||
Requires: []*analysis.Analyzer{buildssa.Analyzer},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
|
||||
"SA3000": {
|
||||
Name: "SA3000",
|
||||
Run: CheckTestMainExit,
|
||||
Doc: Docs["SA3000"].String(),
|
||||
Requires: []*analysis.Analyzer{inspect.Analyzer},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
"SA3001": {
|
||||
Name: "SA3001",
|
||||
Run: CheckBenchmarkN,
|
||||
Doc: Docs["SA3001"].String(),
|
||||
Requires: []*analysis.Analyzer{inspect.Analyzer},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
|
||||
"SA4000": {
|
||||
Name: "SA4000",
|
||||
Run: CheckLhsRhsIdentical,
|
||||
Doc: Docs["SA4000"].String(),
|
||||
Requires: []*analysis.Analyzer{inspect.Analyzer, facts.TokenFile, facts.Generated},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
"SA4001": {
|
||||
Name: "SA4001",
|
||||
Run: CheckIneffectiveCopy,
|
||||
Doc: Docs["SA4001"].String(),
|
||||
Requires: []*analysis.Analyzer{inspect.Analyzer},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
"SA4002": {
|
||||
Name: "SA4002",
|
||||
Run: CheckDiffSizeComparison,
|
||||
Doc: Docs["SA4002"].String(),
|
||||
Requires: []*analysis.Analyzer{buildssa.Analyzer, valueRangesAnalyzer},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
"SA4003": {
|
||||
Name: "SA4003",
|
||||
Run: CheckExtremeComparison,
|
||||
Doc: Docs["SA4003"].String(),
|
||||
Requires: []*analysis.Analyzer{inspect.Analyzer},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
"SA4004": {
|
||||
Name: "SA4004",
|
||||
Run: CheckIneffectiveLoop,
|
||||
Doc: Docs["SA4004"].String(),
|
||||
Requires: []*analysis.Analyzer{inspect.Analyzer},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
"SA4006": {
|
||||
Name: "SA4006",
|
||||
Run: CheckUnreadVariableValues,
|
||||
Doc: Docs["SA4006"].String(),
|
||||
Requires: []*analysis.Analyzer{buildssa.Analyzer, facts.Generated},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
"SA4008": {
|
||||
Name: "SA4008",
|
||||
Run: CheckLoopCondition,
|
||||
Doc: Docs["SA4008"].String(),
|
||||
Requires: []*analysis.Analyzer{buildssa.Analyzer},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
"SA4009": {
|
||||
Name: "SA4009",
|
||||
Run: CheckArgOverwritten,
|
||||
Doc: Docs["SA4009"].String(),
|
||||
Requires: []*analysis.Analyzer{buildssa.Analyzer},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
"SA4010": {
|
||||
Name: "SA4010",
|
||||
Run: CheckIneffectiveAppend,
|
||||
Doc: Docs["SA4010"].String(),
|
||||
Requires: []*analysis.Analyzer{buildssa.Analyzer},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
"SA4011": {
|
||||
Name: "SA4011",
|
||||
Run: CheckScopedBreak,
|
||||
Doc: Docs["SA4011"].String(),
|
||||
Requires: []*analysis.Analyzer{inspect.Analyzer},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
"SA4012": {
|
||||
Name: "SA4012",
|
||||
Run: CheckNaNComparison,
|
||||
Doc: Docs["SA4012"].String(),
|
||||
Requires: []*analysis.Analyzer{buildssa.Analyzer},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
"SA4013": {
|
||||
Name: "SA4013",
|
||||
Run: CheckDoubleNegation,
|
||||
Doc: Docs["SA4013"].String(),
|
||||
Requires: []*analysis.Analyzer{inspect.Analyzer},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
"SA4014": {
|
||||
Name: "SA4014",
|
||||
Run: CheckRepeatedIfElse,
|
||||
Doc: Docs["SA4014"].String(),
|
||||
Requires: []*analysis.Analyzer{inspect.Analyzer},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
"SA4015": {
|
||||
Name: "SA4015",
|
||||
Run: callChecker(checkMathIntRules),
|
||||
Doc: Docs["SA4015"].String(),
|
||||
Requires: []*analysis.Analyzer{buildssa.Analyzer, valueRangesAnalyzer},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
"SA4016": {
|
||||
Name: "SA4016",
|
||||
Run: CheckSillyBitwiseOps,
|
||||
Doc: Docs["SA4016"].String(),
|
||||
Requires: []*analysis.Analyzer{buildssa.Analyzer, facts.TokenFile},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
"SA4017": {
|
||||
Name: "SA4017",
|
||||
Run: CheckPureFunctions,
|
||||
Doc: Docs["SA4017"].String(),
|
||||
Requires: []*analysis.Analyzer{buildssa.Analyzer, facts.Purity},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
"SA4018": {
|
||||
Name: "SA4018",
|
||||
Run: CheckSelfAssignment,
|
||||
Doc: Docs["SA4018"].String(),
|
||||
Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated, facts.TokenFile},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
"SA4019": {
|
||||
Name: "SA4019",
|
||||
Run: CheckDuplicateBuildConstraints,
|
||||
Doc: Docs["SA4019"].String(),
|
||||
Requires: []*analysis.Analyzer{facts.Generated},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
"SA4020": {
|
||||
Name: "SA4020",
|
||||
Run: CheckUnreachableTypeCases,
|
||||
Doc: Docs["SA4020"].String(),
|
||||
Requires: []*analysis.Analyzer{inspect.Analyzer},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
"SA4021": {
|
||||
Name: "SA4021",
|
||||
Run: CheckSingleArgAppend,
|
||||
Doc: Docs["SA4021"].String(),
|
||||
Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated, facts.TokenFile},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
|
||||
"SA5000": {
|
||||
Name: "SA5000",
|
||||
Run: CheckNilMaps,
|
||||
Doc: Docs["SA5000"].String(),
|
||||
Requires: []*analysis.Analyzer{buildssa.Analyzer},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
"SA5001": {
|
||||
Name: "SA5001",
|
||||
Run: CheckEarlyDefer,
|
||||
Doc: Docs["SA5001"].String(),
|
||||
Requires: []*analysis.Analyzer{inspect.Analyzer},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
"SA5002": {
|
||||
Name: "SA5002",
|
||||
Run: CheckInfiniteEmptyLoop,
|
||||
Doc: Docs["SA5002"].String(),
|
||||
Requires: []*analysis.Analyzer{inspect.Analyzer},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
"SA5003": {
|
||||
Name: "SA5003",
|
||||
Run: CheckDeferInInfiniteLoop,
|
||||
Doc: Docs["SA5003"].String(),
|
||||
Requires: []*analysis.Analyzer{inspect.Analyzer},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
"SA5004": {
|
||||
Name: "SA5004",
|
||||
Run: CheckLoopEmptyDefault,
|
||||
Doc: Docs["SA5004"].String(),
|
||||
Requires: []*analysis.Analyzer{inspect.Analyzer},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
"SA5005": {
|
||||
Name: "SA5005",
|
||||
Run: CheckCyclicFinalizer,
|
||||
Doc: Docs["SA5005"].String(),
|
||||
Requires: []*analysis.Analyzer{buildssa.Analyzer},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
"SA5007": {
|
||||
Name: "SA5007",
|
||||
Run: CheckInfiniteRecursion,
|
||||
Doc: Docs["SA5007"].String(),
|
||||
Requires: []*analysis.Analyzer{buildssa.Analyzer},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
"SA5008": {
|
||||
Name: "SA5008",
|
||||
Run: CheckStructTags,
|
||||
Doc: Docs["SA5008"].String(),
|
||||
Requires: []*analysis.Analyzer{inspect.Analyzer},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
"SA5009": {
|
||||
Name: "SA5009",
|
||||
Run: callChecker(checkPrintfRules),
|
||||
Doc: Docs["SA5009"].String(),
|
||||
Requires: []*analysis.Analyzer{buildssa.Analyzer, valueRangesAnalyzer},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
|
||||
"SA6000": {
|
||||
Name: "SA6000",
|
||||
Run: callChecker(checkRegexpMatchLoopRules),
|
||||
Doc: Docs["SA6000"].String(),
|
||||
Requires: []*analysis.Analyzer{buildssa.Analyzer, valueRangesAnalyzer},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
"SA6001": {
|
||||
Name: "SA6001",
|
||||
Run: CheckMapBytesKey,
|
||||
Doc: Docs["SA6001"].String(),
|
||||
Requires: []*analysis.Analyzer{buildssa.Analyzer},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
"SA6002": {
|
||||
Name: "SA6002",
|
||||
Run: callChecker(checkSyncPoolValueRules),
|
||||
Doc: Docs["SA6002"].String(),
|
||||
Requires: []*analysis.Analyzer{buildssa.Analyzer, valueRangesAnalyzer},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
"SA6003": {
|
||||
Name: "SA6003",
|
||||
Run: CheckRangeStringRunes,
|
||||
Doc: Docs["SA6003"].String(),
|
||||
Requires: []*analysis.Analyzer{buildssa.Analyzer},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
"SA6005": {
|
||||
Name: "SA6005",
|
||||
Run: CheckToLowerToUpperComparison,
|
||||
Doc: Docs["SA6005"].String(),
|
||||
Requires: []*analysis.Analyzer{inspect.Analyzer},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
|
||||
"SA9001": {
|
||||
Name: "SA9001",
|
||||
Run: CheckDubiousDeferInChannelRangeLoop,
|
||||
Doc: Docs["SA9001"].String(),
|
||||
Requires: []*analysis.Analyzer{inspect.Analyzer},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
"SA9002": {
|
||||
Name: "SA9002",
|
||||
Run: CheckNonOctalFileMode,
|
||||
Doc: Docs["SA9002"].String(),
|
||||
Requires: []*analysis.Analyzer{inspect.Analyzer},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
"SA9003": {
|
||||
Name: "SA9003",
|
||||
Run: CheckEmptyBranch,
|
||||
Doc: Docs["SA9003"].String(),
|
||||
Requires: []*analysis.Analyzer{buildssa.Analyzer, facts.TokenFile, facts.Generated},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
"SA9004": {
|
||||
Name: "SA9004",
|
||||
Run: CheckMissingEnumTypesInDeclaration,
|
||||
Doc: Docs["SA9004"].String(),
|
||||
Requires: []*analysis.Analyzer{inspect.Analyzer},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
// Filtering generated code because it may include empty structs generated from data models.
|
||||
"SA9005": {
|
||||
Name: "SA9005",
|
||||
Run: callChecker(checkNoopMarshal),
|
||||
Doc: Docs["SA9005"].String(),
|
||||
Requires: []*analysis.Analyzer{buildssa.Analyzer, valueRangesAnalyzer, facts.Generated, facts.TokenFile},
|
||||
Flags: newFlagSet(),
|
||||
},
|
||||
}
|
21
vendor/honnef.co/go/tools/staticcheck/buildtag.go
vendored
Normal file
21
vendor/honnef.co/go/tools/staticcheck/buildtag.go
vendored
Normal file
@ -0,0 +1,21 @@
|
||||
package staticcheck
|
||||
|
||||
import (
|
||||
"go/ast"
|
||||
"strings"
|
||||
|
||||
. "honnef.co/go/tools/lint/lintdsl"
|
||||
)
|
||||
|
||||
func buildTags(f *ast.File) [][]string {
|
||||
var out [][]string
|
||||
for _, line := range strings.Split(Preamble(f), "\n") {
|
||||
if !strings.HasPrefix(line, "+build ") {
|
||||
continue
|
||||
}
|
||||
line = strings.TrimSpace(strings.TrimPrefix(line, "+build "))
|
||||
fields := strings.Fields(line)
|
||||
out = append(out, fields)
|
||||
}
|
||||
return out
|
||||
}
|
764
vendor/honnef.co/go/tools/staticcheck/doc.go
vendored
Normal file
764
vendor/honnef.co/go/tools/staticcheck/doc.go
vendored
Normal file
@ -0,0 +1,764 @@
|
||||
package staticcheck
|
||||
|
||||
import "honnef.co/go/tools/lint"
|
||||
|
||||
var Docs = map[string]*lint.Documentation{
|
||||
"SA1000": &lint.Documentation{
|
||||
Title: `Invalid regular expression`,
|
||||
Since: "2017.1",
|
||||
},
|
||||
|
||||
"SA1001": &lint.Documentation{
|
||||
Title: `Invalid template`,
|
||||
Since: "2017.1",
|
||||
},
|
||||
|
||||
"SA1002": &lint.Documentation{
|
||||
Title: `Invalid format in time.Parse`,
|
||||
Since: "2017.1",
|
||||
},
|
||||
|
||||
"SA1003": &lint.Documentation{
|
||||
Title: `Unsupported argument to functions in encoding/binary`,
|
||||
Text: `The encoding/binary package can only serialize types with known sizes.
|
||||
This precludes the use of the int and uint types, as their sizes
|
||||
differ on different architectures. Furthermore, it doesn't support
|
||||
serializing maps, channels, strings, or functions.
|
||||
|
||||
Before Go 1.8, bool wasn't supported, either.`,
|
||||
Since: "2017.1",
|
||||
},
|
||||
|
||||
"SA1004": &lint.Documentation{
|
||||
Title: `Suspiciously small untyped constant in time.Sleep`,
|
||||
Text: `The time.Sleep function takes a time.Duration as its only argument.
|
||||
Durations are expressed in nanoseconds. Thus, calling time.Sleep(1)
|
||||
will sleep for 1 nanosecond. This is a common source of bugs, as sleep
|
||||
functions in other languages often accept seconds or milliseconds.
|
||||
|
||||
The time package provides constants such as time.Second to express
|
||||
large durations. These can be combined with arithmetic to express
|
||||
arbitrary durations, for example '5 * time.Second' for 5 seconds.
|
||||
|
||||
If you truly meant to sleep for a tiny amount of time, use
|
||||
'n * time.Nanosecond' to signal to staticcheck that you did mean to sleep
|
||||
for some amount of nanoseconds.`,
|
||||
Since: "2017.1",
|
||||
},
|
||||
|
||||
"SA1005": &lint.Documentation{
|
||||
Title: `Invalid first argument to exec.Command`,
|
||||
Text: `os/exec runs programs directly (using variants of the fork and exec
|
||||
system calls on Unix systems). This shouldn't be confused with running
|
||||
a command in a shell. The shell will allow for features such as input
|
||||
redirection, pipes, and general scripting. The shell is also
|
||||
responsible for splitting the user's input into a program name and its
|
||||
arguments. For example, the equivalent to
|
||||
|
||||
ls / /tmp
|
||||
|
||||
would be
|
||||
|
||||
exec.Command("ls", "/", "/tmp")
|
||||
|
||||
If you want to run a command in a shell, consider using something like
|
||||
the following – but be aware that not all systems, particularly
|
||||
Windows, will have a /bin/sh program:
|
||||
|
||||
exec.Command("/bin/sh", "-c", "ls | grep Awesome")`,
|
||||
Since: "2017.1",
|
||||
},
|
||||
|
||||
"SA1006": &lint.Documentation{
|
||||
Title: `Printf with dynamic first argument and no further arguments`,
|
||||
Text: `Using fmt.Printf with a dynamic first argument can lead to unexpected
|
||||
output. The first argument is a format string, where certain character
|
||||
combinations have special meaning. If, for example, a user were to
|
||||
enter a string such as
|
||||
|
||||
Interest rate: 5%
|
||||
|
||||
and you printed it with
|
||||
|
||||
fmt.Printf(s)
|
||||
|
||||
it would lead to the following output:
|
||||
|
||||
Interest rate: 5%!(NOVERB).
|
||||
|
||||
Similarly, forming the first parameter via string concatenation with
|
||||
user input should be avoided for the same reason. When printing user
|
||||
input, either use a variant of fmt.Print, or use the %s Printf verb
|
||||
and pass the string as an argument.`,
|
||||
Since: "2017.1",
|
||||
},
|
||||
|
||||
"SA1007": &lint.Documentation{
|
||||
Title: `Invalid URL in net/url.Parse`,
|
||||
Since: "2017.1",
|
||||
},
|
||||
|
||||
"SA1008": &lint.Documentation{
|
||||
Title: `Non-canonical key in http.Header map`,
|
||||
Text: `Keys in http.Header maps are canonical, meaning they follow a specific
|
||||
combination of uppercase and lowercase letters. Methods such as
|
||||
http.Header.Add and http.Header.Del convert inputs into this canonical
|
||||
form before manipulating the map.
|
||||
|
||||
When manipulating http.Header maps directly, as opposed to using the
|
||||
provided methods, care should be taken to stick to canonical form in
|
||||
order to avoid inconsistencies. The following piece of code
|
||||
demonstrates one such inconsistency:
|
||||
|
||||
h := http.Header{}
|
||||
h["etag"] = []string{"1234"}
|
||||
h.Add("etag", "5678")
|
||||
fmt.Println(h)
|
||||
|
||||
// Output:
|
||||
// map[Etag:[5678] etag:[1234]]
|
||||
|
||||
The easiest way of obtaining the canonical form of a key is to use
|
||||
http.CanonicalHeaderKey.`,
|
||||
Since: "2017.1",
|
||||
},
|
||||
|
||||
"SA1010": &lint.Documentation{
|
||||
Title: `(*regexp.Regexp).FindAll called with n == 0, which will always return zero results`,
|
||||
Text: `If n >= 0, the function returns at most n matches/submatches. To
|
||||
return all results, specify a negative number.`,
|
||||
Since: "2017.1",
|
||||
},
|
||||
|
||||
"SA1011": &lint.Documentation{
|
||||
Title: `Various methods in the strings package expect valid UTF-8, but invalid input is provided`,
|
||||
Since: "2017.1",
|
||||
},
|
||||
|
||||
"SA1012": &lint.Documentation{
|
||||
Title: `A nil context.Context is being passed to a function, consider using context.TODO instead`,
|
||||
Since: "2017.1",
|
||||
},
|
||||
|
||||
"SA1013": &lint.Documentation{
|
||||
Title: `io.Seeker.Seek is being called with the whence constant as the first argument, but it should be the second`,
|
||||
Since: "2017.1",
|
||||
},
|
||||
|
||||
"SA1014": &lint.Documentation{
|
||||
Title: `Non-pointer value passed to Unmarshal or Decode`,
|
||||
Since: "2017.1",
|
||||
},
|
||||
|
||||
"SA1015": &lint.Documentation{
|
||||
Title: `Using time.Tick in a way that will leak. Consider using time.NewTicker, and only use time.Tick in tests, commands and endless functions`,
|
||||
Since: "2017.1",
|
||||
},
|
||||
|
||||
"SA1016": &lint.Documentation{
|
||||
Title: `Trapping a signal that cannot be trapped`,
|
||||
Text: `Not all signals can be intercepted by a process. Speficially, on
|
||||
UNIX-like systems, the syscall.SIGKILL and syscall.SIGSTOP signals are
|
||||
never passed to the process, but instead handled directly by the
|
||||
kernel. It is therefore pointless to try and handle these signals.`,
|
||||
Since: "2017.1",
|
||||
},
|
||||
|
||||
"SA1017": &lint.Documentation{
|
||||
Title: `Channels used with os/signal.Notify should be buffered`,
|
||||
Text: `The os/signal package uses non-blocking channel sends when delivering
|
||||
signals. If the receiving end of the channel isn't ready and the
|
||||
channel is either unbuffered or full, the signal will be dropped. To
|
||||
avoid missing signals, the channel should be buffered and of the
|
||||
appropriate size. For a channel used for notification of just one
|
||||
signal value, a buffer of size 1 is sufficient.`,
|
||||
Since: "2017.1",
|
||||
},
|
||||
|
||||
"SA1018": &lint.Documentation{
|
||||
Title: `strings.Replace called with n == 0, which does nothing`,
|
||||
Text: `With n == 0, zero instances will be replaced. To replace all
|
||||
instances, use a negative number, or use strings.ReplaceAll.`,
|
||||
Since: "2017.1",
|
||||
},
|
||||
|
||||
"SA1019": &lint.Documentation{
|
||||
Title: `Using a deprecated function, variable, constant or field`,
|
||||
Since: "2017.1",
|
||||
},
|
||||
|
||||
"SA1020": &lint.Documentation{
|
||||
Title: `Using an invalid host:port pair with a net.Listen-related function`,
|
||||
Since: "2017.1",
|
||||
},
|
||||
|
||||
"SA1021": &lint.Documentation{
|
||||
Title: `Using bytes.Equal to compare two net.IP`,
|
||||
Text: `A net.IP stores an IPv4 or IPv6 address as a slice of bytes. The
|
||||
length of the slice for an IPv4 address, however, can be either 4 or
|
||||
16 bytes long, using different ways of representing IPv4 addresses. In
|
||||
order to correctly compare two net.IPs, the net.IP.Equal method should
|
||||
be used, as it takes both representations into account.`,
|
||||
Since: "2017.1",
|
||||
},
|
||||
|
||||
"SA1023": &lint.Documentation{
|
||||
Title: `Modifying the buffer in an io.Writer implementation`,
|
||||
Text: `Write must not modify the slice data, even temporarily.`,
|
||||
Since: "2017.1",
|
||||
},
|
||||
|
||||
"SA1024": &lint.Documentation{
|
||||
Title: `A string cutset contains duplicate characters`,
|
||||
Text: `The strings.TrimLeft and strings.TrimRight functions take cutsets, not
|
||||
prefixes. A cutset is treated as a set of characters to remove from a
|
||||
string. For example,
|
||||
|
||||
strings.TrimLeft("42133word", "1234"))
|
||||
|
||||
will result in the string "word" – any characters that are 1, 2, 3 or
|
||||
4 are cut from the left of the string.
|
||||
|
||||
In order to remove one string from another, use strings.TrimPrefix instead.`,
|
||||
Since: "2017.1",
|
||||
},
|
||||
|
||||
"SA1025": &lint.Documentation{
|
||||
Title: `It is not possible to use (*time.Timer).Reset's return value correctly`,
|
||||
Since: "2019.1",
|
||||
},
|
||||
|
||||
"SA1026": &lint.Documentation{
|
||||
Title: `Cannot marshal channels or functions`,
|
||||
Since: "2019.2",
|
||||
},
|
||||
|
||||
"SA1027": &lint.Documentation{
|
||||
Title: `Atomic access to 64-bit variable must be 64-bit aligned`,
|
||||
Text: `On ARM, x86-32, and 32-bit MIPS, it is the caller's responsibility to
|
||||
arrange for 64-bit alignment of 64-bit words accessed atomically. The
|
||||
first word in a variable or in an allocated struct, array, or slice
|
||||
can be relied upon to be 64-bit aligned.
|
||||
|
||||
You can use the structlayout tool to inspect the alignment of fields
|
||||
in a struct.`,
|
||||
Since: "2019.2",
|
||||
},
|
||||
|
||||
"SA2000": &lint.Documentation{
|
||||
Title: `sync.WaitGroup.Add called inside the goroutine, leading to a race condition`,
|
||||
Since: "2017.1",
|
||||
},
|
||||
|
||||
"SA2001": &lint.Documentation{
|
||||
Title: `Empty critical section, did you mean to defer the unlock?`,
|
||||
Text: `Empty critical sections of the kind
|
||||
|
||||
mu.Lock()
|
||||
mu.Unlock()
|
||||
|
||||
are very often a typo, and the following was intended instead:
|
||||
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
|
||||
Do note that sometimes empty critical sections can be useful, as a
|
||||
form of signaling to wait on another goroutine. Many times, there are
|
||||
simpler ways of achieving the same effect. When that isn't the case,
|
||||
the code should be amply commented to avoid confusion. Combining such
|
||||
comments with a //lint:ignore directive can be used to suppress this
|
||||
rare false positive.`,
|
||||
Since: "2017.1",
|
||||
},
|
||||
|
||||
"SA2002": &lint.Documentation{
|
||||
Title: `Called testing.T.FailNow or SkipNow in a goroutine, which isn't allowed`,
|
||||
Since: "2017.1",
|
||||
},
|
||||
|
||||
"SA2003": &lint.Documentation{
|
||||
Title: `Deferred Lock right after locking, likely meant to defer Unlock instead`,
|
||||
Since: "2017.1",
|
||||
},
|
||||
|
||||
"SA3000": &lint.Documentation{
|
||||
Title: `TestMain doesn't call os.Exit, hiding test failures`,
|
||||
Text: `Test executables (and in turn 'go test') exit with a non-zero status
|
||||
code if any tests failed. When specifying your own TestMain function,
|
||||
it is your responsibility to arrange for this, by calling os.Exit with
|
||||
the correct code. The correct code is returned by (*testing.M).Run, so
|
||||
the usual way of implementing TestMain is to end it with
|
||||
os.Exit(m.Run()).`,
|
||||
Since: "2017.1",
|
||||
},
|
||||
|
||||
"SA3001": &lint.Documentation{
|
||||
Title: `Assigning to b.N in benchmarks distorts the results`,
|
||||
Text: `The testing package dynamically sets b.N to improve the reliability of
|
||||
benchmarks and uses it in computations to determine the duration of a
|
||||
single operation. Benchmark code must not alter b.N as this would
|
||||
falsify results.`,
|
||||
Since: "2017.1",
|
||||
},
|
||||
|
||||
"SA4000": &lint.Documentation{
|
||||
Title: `Boolean expression has identical expressions on both sides`,
|
||||
Since: "2017.1",
|
||||
},
|
||||
|
||||
"SA4001": &lint.Documentation{
|
||||
Title: `&*x gets simplified to x, it does not copy x`,
|
||||
Since: "2017.1",
|
||||
},
|
||||
|
||||
"SA4002": &lint.Documentation{
|
||||
Title: `Comparing strings with known different sizes has predictable results`,
|
||||
Since: "2017.1",
|
||||
},
|
||||
|
||||
"SA4003": &lint.Documentation{
|
||||
Title: `Comparing unsigned values against negative values is pointless`,
|
||||
Since: "2017.1",
|
||||
},
|
||||
|
||||
"SA4004": &lint.Documentation{
|
||||
Title: `The loop exits unconditionally after one iteration`,
|
||||
Since: "2017.1",
|
||||
},
|
||||
|
||||
"SA4005": &lint.Documentation{
|
||||
Title: `Field assignment that will never be observed. Did you mean to use a pointer receiver?`,
|
||||
Since: "2017.1",
|
||||
},
|
||||
|
||||
"SA4006": &lint.Documentation{
|
||||
Title: `A value assigned to a variable is never read before being overwritten. Forgotten error check or dead code?`,
|
||||
Since: "2017.1",
|
||||
},
|
||||
|
||||
"SA4008": &lint.Documentation{
|
||||
Title: `The variable in the loop condition never changes, are you incrementing the wrong variable?`,
|
||||
Since: "2017.1",
|
||||
},
|
||||
|
||||
"SA4009": &lint.Documentation{
|
||||
Title: `A function argument is overwritten before its first use`,
|
||||
Since: "2017.1",
|
||||
},
|
||||
|
||||
"SA4010": &lint.Documentation{
|
||||
Title: `The result of append will never be observed anywhere`,
|
||||
Since: "2017.1",
|
||||
},
|
||||
|
||||
"SA4011": &lint.Documentation{
|
||||
Title: `Break statement with no effect. Did you mean to break out of an outer loop?`,
|
||||
Since: "2017.1",
|
||||
},
|
||||
|
||||
"SA4012": &lint.Documentation{
|
||||
Title: `Comparing a value against NaN even though no value is equal to NaN`,
|
||||
Since: "2017.1",
|
||||
},
|
||||
|
||||
"SA4013": &lint.Documentation{
|
||||
Title: `Negating a boolean twice (!!b) is the same as writing b. This is either redundant, or a typo.`,
|
||||
Since: "2017.1",
|
||||
},
|
||||
|
||||
"SA4014": &lint.Documentation{
|
||||
Title: `An if/else if chain has repeated conditions and no side-effects; if the condition didn't match the first time, it won't match the second time, either`,
|
||||
Since: "2017.1",
|
||||
},
|
||||
|
||||
"SA4015": &lint.Documentation{
|
||||
Title: `Calling functions like math.Ceil on floats converted from integers doesn't do anything useful`,
|
||||
Since: "2017.1",
|
||||
},
|
||||
|
||||
"SA4016": &lint.Documentation{
|
||||
Title: `Certain bitwise operations, such as x ^ 0, do not do anything useful`,
|
||||
Since: "2017.1",
|
||||
},
|
||||
|
||||
"SA4017": &lint.Documentation{
|
||||
Title: `A pure function's return value is discarded, making the call pointless`,
|
||||
Since: "2017.1",
|
||||
},
|
||||
|
||||
"SA4018": &lint.Documentation{
|
||||
Title: `Self-assignment of variables`,
|
||||
Since: "2017.1",
|
||||
},
|
||||
|
||||
"SA4019": &lint.Documentation{
|
||||
Title: `Multiple, identical build constraints in the same file`,
|
||||
Since: "2017.1",
|
||||
},
|
||||
|
||||
"SA4020": &lint.Documentation{
|
||||
Title: `Unreachable case clause in a type switch`,
|
||||
Text: `In a type switch like the following
|
||||
|
||||
type T struct{}
|
||||
func (T) Read(b []byte) (int, error) { return 0, nil }
|
||||
|
||||
var v interface{} = T{}
|
||||
|
||||
switch v.(type) {
|
||||
case io.Reader:
|
||||
// ...
|
||||
case T:
|
||||
// unreachable
|
||||
}
|
||||
|
||||
the second case clause can never be reached because T implements
|
||||
io.Reader and case clauses are evaluated in source order.
|
||||
|
||||
Another example:
|
||||
|
||||
type T struct{}
|
||||
func (T) Read(b []byte) (int, error) { return 0, nil }
|
||||
func (T) Close() error { return nil }
|
||||
|
||||
var v interface{} = T{}
|
||||
|
||||
switch v.(type) {
|
||||
case io.Reader:
|
||||
// ...
|
||||
case io.ReadCloser:
|
||||
// unreachable
|
||||
}
|
||||
|
||||
Even though T has a Close method and thus implements io.ReadCloser,
|
||||
io.Reader will always match first. The method set of io.Reader is a
|
||||
subset of io.ReadCloser. Thus it is impossible to match the second
|
||||
case without matching the first case.
|
||||
|
||||
|
||||
Structurally equivalent interfaces
|
||||
|
||||
A special case of the previous example are structurally identical
|
||||
interfaces. Given these declarations
|
||||
|
||||
type T error
|
||||
type V error
|
||||
|
||||
func doSomething() error {
|
||||
err, ok := doAnotherThing()
|
||||
if ok {
|
||||
return T(err)
|
||||
}
|
||||
|
||||
return U(err)
|
||||
}
|
||||
|
||||
the following type switch will have an unreachable case clause:
|
||||
|
||||
switch doSomething().(type) {
|
||||
case T:
|
||||
// ...
|
||||
case V:
|
||||
// unreachable
|
||||
}
|
||||
|
||||
T will always match before V because they are structurally equivalent
|
||||
and therefore doSomething()'s return value implements both.`,
|
||||
Since: "2019.2",
|
||||
},
|
||||
|
||||
"SA4021": &lint.Documentation{
|
||||
Title: `x = append(y) is equivalent to x = y`,
|
||||
Since: "2019.2",
|
||||
},
|
||||
|
||||
"SA5000": &lint.Documentation{
|
||||
Title: `Assignment to nil map`,
|
||||
Since: "2017.1",
|
||||
},
|
||||
|
||||
"SA5001": &lint.Documentation{
|
||||
Title: `Defering Close before checking for a possible error`,
|
||||
Since: "2017.1",
|
||||
},
|
||||
|
||||
"SA5002": &lint.Documentation{
|
||||
Title: `The empty for loop (for {}) spins and can block the scheduler`,
|
||||
Since: "2017.1",
|
||||
},
|
||||
|
||||
"SA5003": &lint.Documentation{
|
||||
Title: `Defers in infinite loops will never execute`,
|
||||
Text: `Defers are scoped to the surrounding function, not the surrounding
|
||||
block. In a function that never returns, i.e. one containing an
|
||||
infinite loop, defers will never execute.`,
|
||||
Since: "2017.1",
|
||||
},
|
||||
|
||||
"SA5004": &lint.Documentation{
|
||||
Title: `for { select { ... with an empty default branch spins`,
|
||||
Since: "2017.1",
|
||||
},
|
||||
|
||||
"SA5005": &lint.Documentation{
|
||||
Title: `The finalizer references the finalized object, preventing garbage collection`,
|
||||
Text: `A finalizer is a function associated with an object that runs when the
|
||||
garbage collector is ready to collect said object, that is when the
|
||||
object is no longer referenced by anything.
|
||||
|
||||
If the finalizer references the object, however, it will always remain
|
||||
as the final reference to that object, preventing the garbage
|
||||
collector from collecting the object. The finalizer will never run,
|
||||
and the object will never be collected, leading to a memory leak. That
|
||||
is why the finalizer should instead use its first argument to operate
|
||||
on the object. That way, the number of references can temporarily go
|
||||
to zero before the object is being passed to the finalizer.`,
|
||||
Since: "2017.1",
|
||||
},
|
||||
|
||||
"SA5006": &lint.Documentation{
|
||||
Title: `Slice index out of bounds`,
|
||||
Since: "2017.1",
|
||||
},
|
||||
|
||||
"SA5007": &lint.Documentation{
|
||||
Title: `Infinite recursive call`,
|
||||
Text: `A function that calls itself recursively needs to have an exit
|
||||
condition. Otherwise it will recurse forever, until the system runs
|
||||
out of memory.
|
||||
|
||||
This issue can be caused by simple bugs such as forgetting to add an
|
||||
exit condition. It can also happen "on purpose". Some languages have
|
||||
tail call optimization which makes certain infinite recursive calls
|
||||
safe to use. Go, however, does not implement TCO, and as such a loop
|
||||
should be used instead.`,
|
||||
Since: "2017.1",
|
||||
},
|
||||
|
||||
"SA5008": &lint.Documentation{
|
||||
Title: `Invalid struct tag`,
|
||||
Since: "2019.2",
|
||||
},
|
||||
|
||||
"SA5009": &lint.Documentation{
|
||||
Title: `Invalid Printf call`,
|
||||
Since: "2019.2",
|
||||
},
|
||||
|
||||
"SA6000": &lint.Documentation{
|
||||
Title: `Using regexp.Match or related in a loop, should use regexp.Compile`,
|
||||
Since: "2017.1",
|
||||
},
|
||||
|
||||
"SA6001": &lint.Documentation{
|
||||
Title: `Missing an optimization opportunity when indexing maps by byte slices`,
|
||||
|
||||
Text: `Map keys must be comparable, which precludes the use of byte slices.
|
||||
This usually leads to using string keys and converting byte slices to
|
||||
strings.
|
||||
|
||||
Normally, a conversion of a byte slice to a string needs to copy the data and
|
||||
causes allocations. The compiler, however, recognizes m[string(b)] and
|
||||
uses the data of b directly, without copying it, because it knows that
|
||||
the data can't change during the map lookup. This leads to the
|
||||
counter-intuitive situation that
|
||||
|
||||
k := string(b)
|
||||
println(m[k])
|
||||
println(m[k])
|
||||
|
||||
will be less efficient than
|
||||
|
||||
println(m[string(b)])
|
||||
println(m[string(b)])
|
||||
|
||||
because the first version needs to copy and allocate, while the second
|
||||
one does not.
|
||||
|
||||
For some history on this optimization, check out commit
|
||||
f5f5a8b6209f84961687d993b93ea0d397f5d5bf in the Go repository.`,
|
||||
Since: "2017.1",
|
||||
},
|
||||
|
||||
"SA6002": &lint.Documentation{
|
||||
Title: `Storing non-pointer values in sync.Pool allocates memory`,
|
||||
Text: `A sync.Pool is used to avoid unnecessary allocations and reduce the
|
||||
amount of work the garbage collector has to do.
|
||||
|
||||
When passing a value that is not a pointer to a function that accepts
|
||||
an interface, the value needs to be placed on the heap, which means an
|
||||
additional allocation. Slices are a common thing to put in sync.Pools,
|
||||
and they're structs with 3 fields (length, capacity, and a pointer to
|
||||
an array). In order to avoid the extra allocation, one should store a
|
||||
pointer to the slice instead.
|
||||
|
||||
See the comments on https://go-review.googlesource.com/c/go/+/24371
|
||||
that discuss this problem.`,
|
||||
Since: "2017.1",
|
||||
},
|
||||
|
||||
"SA6003": &lint.Documentation{
|
||||
Title: `Converting a string to a slice of runes before ranging over it`,
|
||||
Text: `You may want to loop over the runes in a string. Instead of converting
|
||||
the string to a slice of runes and looping over that, you can loop
|
||||
over the string itself. That is,
|
||||
|
||||
for _, r := range s {}
|
||||
|
||||
and
|
||||
|
||||
for _, r := range []rune(s) {}
|
||||
|
||||
will yield the same values. The first version, however, will be faster
|
||||
and avoid unnecessary memory allocations.
|
||||
|
||||
Do note that if you are interested in the indices, ranging over a
|
||||
string and over a slice of runes will yield different indices. The
|
||||
first one yields byte offsets, while the second one yields indices in
|
||||
the slice of runes.`,
|
||||
Since: "2017.1",
|
||||
},
|
||||
|
||||
"SA6005": &lint.Documentation{
|
||||
Title: `Inefficient string comparison with strings.ToLower or strings.ToUpper`,
|
||||
Text: `Converting two strings to the same case and comparing them like so
|
||||
|
||||
if strings.ToLower(s1) == strings.ToLower(s2) {
|
||||
...
|
||||
}
|
||||
|
||||
is significantly more expensive than comparing them with
|
||||
strings.EqualFold(s1, s2). This is due to memory usage as well as
|
||||
computational complexity.
|
||||
|
||||
strings.ToLower will have to allocate memory for the new strings, as
|
||||
well as convert both strings fully, even if they differ on the very
|
||||
first byte. strings.EqualFold, on the other hand, compares the strings
|
||||
one character at a time. It doesn't need to create two intermediate
|
||||
strings and can return as soon as the first non-matching character has
|
||||
been found.
|
||||
|
||||
For a more in-depth explanation of this issue, see
|
||||
https://blog.digitalocean.com/how-to-efficiently-compare-strings-in-go/`,
|
||||
Since: "2019.2",
|
||||
},
|
||||
|
||||
"SA9001": &lint.Documentation{
|
||||
Title: `Defers in range loops may not run when you expect them to`,
|
||||
Since: "2017.1",
|
||||
},
|
||||
|
||||
"SA9002": &lint.Documentation{
|
||||
Title: `Using a non-octal os.FileMode that looks like it was meant to be in octal.`,
|
||||
Since: "2017.1",
|
||||
},
|
||||
|
||||
"SA9003": &lint.Documentation{
|
||||
Title: `Empty body in an if or else branch`,
|
||||
Since: "2017.1",
|
||||
},
|
||||
|
||||
"SA9004": &lint.Documentation{
|
||||
Title: `Only the first constant has an explicit type`,
|
||||
|
||||
Text: `In a constant declaration such as the following:
|
||||
|
||||
const (
|
||||
First byte = 1
|
||||
Second = 2
|
||||
)
|
||||
|
||||
the constant Second does not have the same type as the constant First.
|
||||
This construct shouldn't be confused with
|
||||
|
||||
const (
|
||||
First byte = iota
|
||||
Second
|
||||
)
|
||||
|
||||
where First and Second do indeed have the same type. The type is only
|
||||
passed on when no explicit value is assigned to the constant.
|
||||
|
||||
When declaring enumerations with explicit values it is therefore
|
||||
important not to write
|
||||
|
||||
const (
|
||||
EnumFirst EnumType = 1
|
||||
EnumSecond = 2
|
||||
EnumThird = 3
|
||||
)
|
||||
|
||||
This discrepancy in types can cause various confusing behaviors and
|
||||
bugs.
|
||||
|
||||
|
||||
Wrong type in variable declarations
|
||||
|
||||
The most obvious issue with such incorrect enumerations expresses
|
||||
itself as a compile error:
|
||||
|
||||
package pkg
|
||||
|
||||
const (
|
||||
EnumFirst uint8 = 1
|
||||
EnumSecond = 2
|
||||
)
|
||||
|
||||
func fn(useFirst bool) {
|
||||
x := EnumSecond
|
||||
if useFirst {
|
||||
x = EnumFirst
|
||||
}
|
||||
}
|
||||
|
||||
fails to compile with
|
||||
|
||||
./const.go:11:5: cannot use EnumFirst (type uint8) as type int in assignment
|
||||
|
||||
|
||||
Losing method sets
|
||||
|
||||
A more subtle issue occurs with types that have methods and optional
|
||||
interfaces. Consider the following:
|
||||
|
||||
package main
|
||||
|
||||
import "fmt"
|
||||
|
||||
type Enum int
|
||||
|
||||
func (e Enum) String() string {
|
||||
return "an enum"
|
||||
}
|
||||
|
||||
const (
|
||||
EnumFirst Enum = 1
|
||||
EnumSecond = 2
|
||||
)
|
||||
|
||||
func main() {
|
||||
fmt.Println(EnumFirst)
|
||||
fmt.Println(EnumSecond)
|
||||
}
|
||||
|
||||
This code will output
|
||||
|
||||
an enum
|
||||
2
|
||||
|
||||
as EnumSecond has no explicit type, and thus defaults to int.`,
|
||||
Since: "2019.1",
|
||||
},
|
||||
|
||||
"SA9005": &lint.Documentation{
|
||||
Title: `Trying to marshal a struct with no public fields nor custom marshaling`,
|
||||
Text: `The encoding/json and encoding/xml packages only operate on exported
|
||||
fields in structs, not unexported ones. It is usually an error to try
|
||||
to (un)marshal structs that only consist of unexported fields.
|
||||
|
||||
This check will not flag calls involving types that define custom
|
||||
marshaling behavior, e.g. via MarshalJSON methods. It will also not
|
||||
flag empty structs.`,
|
||||
Since: "2019.2",
|
||||
},
|
||||
}
|
25
vendor/honnef.co/go/tools/staticcheck/knowledge.go
vendored
Normal file
25
vendor/honnef.co/go/tools/staticcheck/knowledge.go
vendored
Normal file
@ -0,0 +1,25 @@
|
||||
package staticcheck
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
|
||||
"golang.org/x/tools/go/analysis"
|
||||
"honnef.co/go/tools/internal/passes/buildssa"
|
||||
"honnef.co/go/tools/ssa"
|
||||
"honnef.co/go/tools/staticcheck/vrp"
|
||||
)
|
||||
|
||||
var valueRangesAnalyzer = &analysis.Analyzer{
|
||||
Name: "vrp",
|
||||
Doc: "calculate value ranges of functions",
|
||||
Run: func(pass *analysis.Pass) (interface{}, error) {
|
||||
m := map[*ssa.Function]vrp.Ranges{}
|
||||
for _, ssafn := range pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).SrcFuncs {
|
||||
vr := vrp.BuildGraph(ssafn).Solve()
|
||||
m[ssafn] = vr
|
||||
}
|
||||
return m, nil
|
||||
},
|
||||
Requires: []*analysis.Analyzer{buildssa.Analyzer},
|
||||
ResultType: reflect.TypeOf(map[*ssa.Function]vrp.Ranges{}),
|
||||
}
|
3360
vendor/honnef.co/go/tools/staticcheck/lint.go
vendored
Normal file
3360
vendor/honnef.co/go/tools/staticcheck/lint.go
vendored
Normal file
File diff suppressed because it is too large
Load Diff
321
vendor/honnef.co/go/tools/staticcheck/rules.go
vendored
Normal file
321
vendor/honnef.co/go/tools/staticcheck/rules.go
vendored
Normal file
@ -0,0 +1,321 @@
|
||||
package staticcheck
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/constant"
|
||||
"go/types"
|
||||
"net"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
|
||||
"golang.org/x/tools/go/analysis"
|
||||
. "honnef.co/go/tools/lint/lintdsl"
|
||||
"honnef.co/go/tools/ssa"
|
||||
"honnef.co/go/tools/staticcheck/vrp"
|
||||
)
|
||||
|
||||
const (
|
||||
MsgInvalidHostPort = "invalid port or service name in host:port pair"
|
||||
MsgInvalidUTF8 = "argument is not a valid UTF-8 encoded string"
|
||||
MsgNonUniqueCutset = "cutset contains duplicate characters"
|
||||
)
|
||||
|
||||
type Call struct {
|
||||
Pass *analysis.Pass
|
||||
Instr ssa.CallInstruction
|
||||
Args []*Argument
|
||||
|
||||
Parent *ssa.Function
|
||||
|
||||
invalids []string
|
||||
}
|
||||
|
||||
func (c *Call) Invalid(msg string) {
|
||||
c.invalids = append(c.invalids, msg)
|
||||
}
|
||||
|
||||
type Argument struct {
|
||||
Value Value
|
||||
invalids []string
|
||||
}
|
||||
|
||||
func (arg *Argument) Invalid(msg string) {
|
||||
arg.invalids = append(arg.invalids, msg)
|
||||
}
|
||||
|
||||
type Value struct {
|
||||
Value ssa.Value
|
||||
Range vrp.Range
|
||||
}
|
||||
|
||||
type CallCheck func(call *Call)
|
||||
|
||||
func extractConsts(v ssa.Value) []*ssa.Const {
|
||||
switch v := v.(type) {
|
||||
case *ssa.Const:
|
||||
return []*ssa.Const{v}
|
||||
case *ssa.MakeInterface:
|
||||
return extractConsts(v.X)
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func ValidateRegexp(v Value) error {
|
||||
for _, c := range extractConsts(v.Value) {
|
||||
if c.Value == nil {
|
||||
continue
|
||||
}
|
||||
if c.Value.Kind() != constant.String {
|
||||
continue
|
||||
}
|
||||
s := constant.StringVal(c.Value)
|
||||
if _, err := regexp.Compile(s); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func ValidateTimeLayout(v Value) error {
|
||||
for _, c := range extractConsts(v.Value) {
|
||||
if c.Value == nil {
|
||||
continue
|
||||
}
|
||||
if c.Value.Kind() != constant.String {
|
||||
continue
|
||||
}
|
||||
s := constant.StringVal(c.Value)
|
||||
s = strings.Replace(s, "_", " ", -1)
|
||||
s = strings.Replace(s, "Z", "-", -1)
|
||||
_, err := time.Parse(s, s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func ValidateURL(v Value) error {
|
||||
for _, c := range extractConsts(v.Value) {
|
||||
if c.Value == nil {
|
||||
continue
|
||||
}
|
||||
if c.Value.Kind() != constant.String {
|
||||
continue
|
||||
}
|
||||
s := constant.StringVal(c.Value)
|
||||
_, err := url.Parse(s)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%q is not a valid URL: %s", s, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func IntValue(v Value, z vrp.Z) bool {
|
||||
r, ok := v.Range.(vrp.IntInterval)
|
||||
if !ok || !r.IsKnown() {
|
||||
return false
|
||||
}
|
||||
if r.Lower != r.Upper {
|
||||
return false
|
||||
}
|
||||
if r.Lower.Cmp(z) == 0 {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func InvalidUTF8(v Value) bool {
|
||||
for _, c := range extractConsts(v.Value) {
|
||||
if c.Value == nil {
|
||||
continue
|
||||
}
|
||||
if c.Value.Kind() != constant.String {
|
||||
continue
|
||||
}
|
||||
s := constant.StringVal(c.Value)
|
||||
if !utf8.ValidString(s) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func UnbufferedChannel(v Value) bool {
|
||||
r, ok := v.Range.(vrp.ChannelInterval)
|
||||
if !ok || !r.IsKnown() {
|
||||
return false
|
||||
}
|
||||
if r.Size.Lower.Cmp(vrp.NewZ(0)) == 0 &&
|
||||
r.Size.Upper.Cmp(vrp.NewZ(0)) == 0 {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func Pointer(v Value) bool {
|
||||
switch v.Value.Type().Underlying().(type) {
|
||||
case *types.Pointer, *types.Interface:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func ConvertedFromInt(v Value) bool {
|
||||
conv, ok := v.Value.(*ssa.Convert)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
b, ok := conv.X.Type().Underlying().(*types.Basic)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
if (b.Info() & types.IsInteger) == 0 {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func validEncodingBinaryType(pass *analysis.Pass, typ types.Type) bool {
|
||||
typ = typ.Underlying()
|
||||
switch typ := typ.(type) {
|
||||
case *types.Basic:
|
||||
switch typ.Kind() {
|
||||
case types.Uint8, types.Uint16, types.Uint32, types.Uint64,
|
||||
types.Int8, types.Int16, types.Int32, types.Int64,
|
||||
types.Float32, types.Float64, types.Complex64, types.Complex128, types.Invalid:
|
||||
return true
|
||||
case types.Bool:
|
||||
return IsGoVersion(pass, 8)
|
||||
}
|
||||
return false
|
||||
case *types.Struct:
|
||||
n := typ.NumFields()
|
||||
for i := 0; i < n; i++ {
|
||||
if !validEncodingBinaryType(pass, typ.Field(i).Type()) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
case *types.Array:
|
||||
return validEncodingBinaryType(pass, typ.Elem())
|
||||
case *types.Interface:
|
||||
// we can't determine if it's a valid type or not
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func CanBinaryMarshal(pass *analysis.Pass, v Value) bool {
|
||||
typ := v.Value.Type().Underlying()
|
||||
if ttyp, ok := typ.(*types.Pointer); ok {
|
||||
typ = ttyp.Elem().Underlying()
|
||||
}
|
||||
if ttyp, ok := typ.(interface {
|
||||
Elem() types.Type
|
||||
}); ok {
|
||||
if _, ok := ttyp.(*types.Pointer); !ok {
|
||||
typ = ttyp.Elem()
|
||||
}
|
||||
}
|
||||
|
||||
return validEncodingBinaryType(pass, typ)
|
||||
}
|
||||
|
||||
func RepeatZeroTimes(name string, arg int) CallCheck {
|
||||
return func(call *Call) {
|
||||
arg := call.Args[arg]
|
||||
if IntValue(arg.Value, vrp.NewZ(0)) {
|
||||
arg.Invalid(fmt.Sprintf("calling %s with n == 0 will return no results, did you mean -1?", name))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func validateServiceName(s string) bool {
|
||||
if len(s) < 1 || len(s) > 15 {
|
||||
return false
|
||||
}
|
||||
if s[0] == '-' || s[len(s)-1] == '-' {
|
||||
return false
|
||||
}
|
||||
if strings.Contains(s, "--") {
|
||||
return false
|
||||
}
|
||||
hasLetter := false
|
||||
for _, r := range s {
|
||||
if (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') {
|
||||
hasLetter = true
|
||||
continue
|
||||
}
|
||||
if r >= '0' && r <= '9' {
|
||||
continue
|
||||
}
|
||||
return false
|
||||
}
|
||||
return hasLetter
|
||||
}
|
||||
|
||||
func validatePort(s string) bool {
|
||||
n, err := strconv.ParseInt(s, 10, 64)
|
||||
if err != nil {
|
||||
return validateServiceName(s)
|
||||
}
|
||||
return n >= 0 && n <= 65535
|
||||
}
|
||||
|
||||
func ValidHostPort(v Value) bool {
|
||||
for _, k := range extractConsts(v.Value) {
|
||||
if k.Value == nil {
|
||||
continue
|
||||
}
|
||||
if k.Value.Kind() != constant.String {
|
||||
continue
|
||||
}
|
||||
s := constant.StringVal(k.Value)
|
||||
_, port, err := net.SplitHostPort(s)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
// TODO(dh): check hostname
|
||||
if !validatePort(port) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// ConvertedFrom reports whether value v was converted from type typ.
|
||||
func ConvertedFrom(v Value, typ string) bool {
|
||||
change, ok := v.Value.(*ssa.ChangeType)
|
||||
return ok && IsType(change.X.Type(), typ)
|
||||
}
|
||||
|
||||
// UniqueStringCutset reports whether every constant string that may flow
// into v contains no duplicate runes. Cutset arguments (e.g. to
// strings.Trim) with repeated runes suggest the caller confused the
// cutset with a literal prefix/suffix string. Non-constant and
// non-string values are skipped.
func UniqueStringCutset(v Value) bool {
	for _, c := range extractConsts(v.Value) {
		if c.Value == nil {
			continue
		}
		if c.Value.Kind() != constant.String {
			continue
		}
		s := constant.StringVal(c.Value)
		rs := runeSlice(s)
		if len(rs) < 2 {
			// Zero or one rune can't contain a duplicate.
			continue
		}
		// Sort so that duplicates become adjacent.
		sort.Sort(rs)
		for i, r := range rs[1:] {
			// rs[i] is the element immediately preceding r.
			if rs[i] == r {
				return false
			}
		}
	}
	return true
}
|
58
vendor/honnef.co/go/tools/staticcheck/structtag.go
vendored
Normal file
58
vendor/honnef.co/go/tools/staticcheck/structtag.go
vendored
Normal file
@ -0,0 +1,58 @@
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Copyright 2019 Dominik Honnef. All rights reserved.
|
||||
|
||||
package staticcheck
|
||||
|
||||
import "strconv"
|
||||
|
||||
// parseStructTag parses a struct field tag in the conventional
// `key:"value" key2:"value2"` format and returns a map from key to all
// of its values (keys may repeat). Malformed trailing input is silently
// ignored; only an unquotable value produces an error.
func parseStructTag(tag string) (map[string][]string, error) {
	// FIXME(dh): detect missing closing quote
	result := make(map[string][]string)

	rest := tag
	for rest != "" {
		// Drop leading spaces.
		skip := 0
		for skip < len(rest) && rest[skip] == ' ' {
			skip++
		}
		rest = rest[skip:]
		if len(rest) == 0 {
			break
		}

		// Scan the key up to the colon. A space, quote, or control
		// character is a syntax error; strictly speaking control chars
		// include [0x7f, 0x9f] too, but inspecting bytes (not runes)
		// only catches 0x7f, matching the reference implementation.
		end := 0
		for end < len(rest) {
			c := rest[end]
			if c <= ' ' || c == ':' || c == '"' || c == 0x7f {
				break
			}
			end++
		}
		if end == 0 || end+1 >= len(rest) || rest[end] != ':' || rest[end+1] != '"' {
			break
		}
		key := rest[:end]
		rest = rest[end+1:]

		// Scan the quoted value, honoring backslash escapes.
		end = 1
		for end < len(rest) && rest[end] != '"' {
			if rest[end] == '\\' {
				end++
			}
			end++
		}
		if end >= len(rest) {
			// Unterminated quote: stop parsing.
			break
		}
		quoted := rest[:end+1]
		rest = rest[end+1:]

		val, err := strconv.Unquote(quoted)
		if err != nil {
			return nil, err
		}
		result[key] = append(result[key], val)
	}
	return result, nil
}
|
73
vendor/honnef.co/go/tools/staticcheck/vrp/channel.go
vendored
Normal file
73
vendor/honnef.co/go/tools/staticcheck/vrp/channel.go
vendored
Normal file
@ -0,0 +1,73 @@
|
||||
package vrp
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"honnef.co/go/tools/ssa"
|
||||
)
|
||||
|
||||
// ChannelInterval is the range of a channel value, tracking the possible
// buffer sizes as an integer interval.
type ChannelInterval struct {
	Size IntInterval
}

// Union merges two channel ranges. A non-ChannelInterval operand is
// treated as empty; an empty or unknown side yields the other side
// unchanged.
func (c ChannelInterval) Union(other Range) Range {
	i, ok := other.(ChannelInterval)
	if !ok {
		i = ChannelInterval{EmptyIntInterval}
	}
	if c.Size.Empty() || !c.Size.IsKnown() {
		return i
	}
	if i.Size.Empty() || !i.Size.IsKnown() {
		return c
	}
	return ChannelInterval{
		Size: c.Size.Union(i.Size).(IntInterval),
	}
}

// String returns the buffer-size interval's textual form.
func (c ChannelInterval) String() string {
	return c.Size.String()
}

// IsKnown reports whether the buffer-size interval carries information.
func (c ChannelInterval) IsKnown() bool {
	return c.Size.IsKnown()
}
|
||||
|
||||
// MakeChannelConstraint models y = make(chan T, Buffer).
type MakeChannelConstraint struct {
	aConstraint
	Buffer ssa.Value
}

// ChannelChangeTypeConstraint models a channel type conversion
// y = (chan T)(X); the range passes through unchanged.
type ChannelChangeTypeConstraint struct {
	aConstraint
	X ssa.Value
}

// NewMakeChannelConstraint returns a constraint binding y to a channel
// made with the given buffer-size operand.
func NewMakeChannelConstraint(buffer, y ssa.Value) Constraint {
	return &MakeChannelConstraint{NewConstraint(y), buffer}
}

// NewChannelChangeTypeConstraint returns a pass-through constraint for a
// channel type conversion from x to y.
func NewChannelChangeTypeConstraint(x, y ssa.Value) Constraint {
	return &ChannelChangeTypeConstraint{NewConstraint(y), x}
}

func (c *MakeChannelConstraint) Operands() []ssa.Value       { return []ssa.Value{c.Buffer} }
func (c *ChannelChangeTypeConstraint) Operands() []ssa.Value { return []ssa.Value{c.X} }

func (c *MakeChannelConstraint) String() string {
	return fmt.Sprintf("%s = make(chan, %s)", c.Y().Name(), c.Buffer.Name())
}
func (c *ChannelChangeTypeConstraint) String() string {
	return fmt.Sprintf("%s = changetype(%s)", c.Y().Name(), c.X.Name())
}

// Eval computes the channel's buffer-size range from the make() size
// operand; an unknown operand yields [0, ∞), and negative lower bounds
// are clamped to zero since buffer sizes cannot be negative.
func (c *MakeChannelConstraint) Eval(g *Graph) Range {
	i, ok := g.Range(c.Buffer).(IntInterval)
	if !ok {
		return ChannelInterval{NewIntInterval(NewZ(0), PInfinity)}
	}
	if i.Lower.Sign() == -1 {
		i.Lower = NewZ(0)
	}
	return ChannelInterval{i}
}

// Eval passes through the range of the conversion's operand.
func (c *ChannelChangeTypeConstraint) Eval(g *Graph) Range { return g.Range(c.X) }
|
476
vendor/honnef.co/go/tools/staticcheck/vrp/int.go
vendored
Normal file
476
vendor/honnef.co/go/tools/staticcheck/vrp/int.go
vendored
Normal file
@ -0,0 +1,476 @@
|
||||
package vrp
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/token"
|
||||
"go/types"
|
||||
"math/big"
|
||||
|
||||
"honnef.co/go/tools/ssa"
|
||||
)
|
||||
|
||||
// Zs is a sortable slice of Z values (sort.Interface).
type Zs []Z

func (zs Zs) Len() int {
	return len(zs)
}

func (zs Zs) Less(i int, j int) bool {
	return zs[i].Cmp(zs[j]) == -1
}

func (zs Zs) Swap(i int, j int) {
	zs[i], zs[j] = zs[j], zs[i]
}

// Z is an arbitrary-precision integer extended with positive and
// negative infinity. infinity is -1, 0, or +1; when it is non-zero,
// integer is ignored (and may be nil).
type Z struct {
	infinity int8
	integer  *big.Int
}

// NewZ returns the finite Z with value n.
func NewZ(n int64) Z {
	return NewBigZ(big.NewInt(n))
}

// NewBigZ wraps n as a finite Z. The caller must not mutate n afterwards.
func NewBigZ(n *big.Int) Z {
	return Z{integer: n}
}

// Infinite reports whether z1 is +∞ or -∞.
func (z1 Z) Infinite() bool {
	return z1.infinity != 0
}

// Add returns z1 + z2, panicking on the undefined form ∞ + (-∞)
// (which, after the sign normalization below, surfaces as a Sub panic).
func (z1 Z) Add(z2 Z) Z {
	if z2.Sign() == -1 {
		// Normalize: adding a negative is subtracting its magnitude.
		return z1.Sub(z2.Negate())
	}
	if z1 == NInfinity {
		return NInfinity
	}
	if z1 == PInfinity {
		return PInfinity
	}
	if z2 == PInfinity {
		return PInfinity
	}

	if !z1.Infinite() && !z2.Infinite() {
		n := &big.Int{}
		n.Add(z1.integer, z2.integer)
		return NewBigZ(n)
	}

	panic(fmt.Sprintf("%s + %s is not defined", z1, z2))
}

// Sub returns z1 - z2, panicking on undefined infinite forms such as
// -∞ - (-∞).
func (z1 Z) Sub(z2 Z) Z {
	if z2.Sign() == -1 {
		// Normalize: subtracting a negative is adding its magnitude.
		return z1.Add(z2.Negate())
	}
	if !z1.Infinite() && !z2.Infinite() {
		n := &big.Int{}
		n.Sub(z1.integer, z2.integer)
		return NewBigZ(n)
	}

	if z1 != PInfinity && z2 == PInfinity {
		return NInfinity
	}
	if z1.Infinite() && !z2.Infinite() {
		// ±∞ minus a finite value keeps its sign.
		return Z{infinity: z1.infinity}
	}
	if z1 == PInfinity && z2 == PInfinity {
		return PInfinity
	}
	panic(fmt.Sprintf("%s - %s is not defined", z1, z2))
}

// Mul returns z1 * z2. Zero times anything (including ∞) is defined as
// zero here; otherwise infinities follow the sign rule.
func (z1 Z) Mul(z2 Z) Z {
	if (z1.integer != nil && z1.integer.Sign() == 0) ||
		(z2.integer != nil && z2.integer.Sign() == 0) {
		return NewBigZ(&big.Int{})
	}

	if z1.infinity != 0 || z2.infinity != 0 {
		return Z{infinity: int8(z1.Sign() * z2.Sign())}
	}

	n := &big.Int{}
	n.Mul(z1.integer, z2.integer)
	return NewBigZ(n)
}

// Negate returns -z1.
func (z1 Z) Negate() Z {
	if z1.infinity == 1 {
		return NInfinity
	}
	if z1.infinity == -1 {
		return PInfinity
	}
	n := &big.Int{}
	n.Neg(z1.integer)
	return NewBigZ(n)
}

// Sign returns -1, 0, or +1; infinities report their direction.
func (z1 Z) Sign() int {
	if z1.infinity != 0 {
		return int(z1.infinity)
	}
	return z1.integer.Sign()
}

func (z1 Z) String() string {
	if z1 == NInfinity {
		return "-∞"
	}
	if z1 == PInfinity {
		return "∞"
	}
	return fmt.Sprintf("%d", z1.integer)
}

// Cmp compares z1 and z2, returning -1, 0, or +1. Equal infinities
// compare equal; otherwise infinities dominate finite values.
func (z1 Z) Cmp(z2 Z) int {
	if z1.infinity == z2.infinity && z1.infinity != 0 {
		return 0
	}
	if z1 == PInfinity {
		return 1
	}
	if z1 == NInfinity {
		return -1
	}
	if z2 == NInfinity {
		return 1
	}
	if z2 == PInfinity {
		return -1
	}
	return z1.integer.Cmp(z2.integer)
}

// MaxZ returns the largest of its arguments; it panics on zero arguments.
func MaxZ(zs ...Z) Z {
	if len(zs) == 0 {
		panic("Max called with no arguments")
	}
	if len(zs) == 1 {
		return zs[0]
	}
	ret := zs[0]
	for _, z := range zs[1:] {
		if z.Cmp(ret) == 1 {
			ret = z
		}
	}
	return ret
}

// MinZ returns the smallest of its arguments; it panics on zero arguments.
func MinZ(zs ...Z) Z {
	if len(zs) == 0 {
		panic("Min called with no arguments")
	}
	if len(zs) == 1 {
		return zs[0]
	}
	ret := zs[0]
	for _, z := range zs[1:] {
		if z.Cmp(ret) == -1 {
			ret = z
		}
	}
	return ret
}

// NInfinity and PInfinity are the two infinite Z values.
var NInfinity = Z{infinity: -1}
var PInfinity = Z{infinity: 1}

// EmptyIntInterval is the canonical empty interval: known, with an
// inverted bound pair [∞, -∞].
var EmptyIntInterval = IntInterval{true, PInfinity, NInfinity}
|
||||
|
||||
// InfinityFor returns the widest interval for v's type: [0, ∞) for
// unsigned basic types, (-∞, ∞) otherwise.
func InfinityFor(v ssa.Value) IntInterval {
	if b, ok := v.Type().Underlying().(*types.Basic); ok {
		if (b.Info() & types.IsUnsigned) != 0 {
			return NewIntInterval(NewZ(0), PInfinity)
		}
	}
	return NewIntInterval(NInfinity, PInfinity)
}

// IntInterval is a closed integer interval [Lower, Upper] over Z.
// known distinguishes "no information yet" (bottom) from a real interval.
type IntInterval struct {
	known bool
	Lower Z
	Upper Z
}

// NewIntInterval returns [l, u], collapsing inverted bounds to the empty
// interval.
func NewIntInterval(l, u Z) IntInterval {
	if u.Cmp(l) == -1 {
		return EmptyIntInterval
	}
	return IntInterval{known: true, Lower: l, Upper: u}
}

// IsKnown reports whether the interval carries information.
func (i IntInterval) IsKnown() bool {
	return i.known
}

// Empty reports whether the interval is the canonical empty interval.
func (i IntInterval) Empty() bool {
	return i.Lower == PInfinity && i.Upper == NInfinity
}

// IsMaxRange reports whether the interval is (-∞, ∞).
func (i IntInterval) IsMaxRange() bool {
	return i.Lower == NInfinity && i.Upper == PInfinity
}

// Intersection returns i1 ∩ i2; an unknown side is treated as the
// identity, and disjoint intervals yield the empty interval.
func (i1 IntInterval) Intersection(i2 IntInterval) IntInterval {
	if !i1.IsKnown() {
		return i2
	}
	if !i2.IsKnown() {
		return i1
	}
	if i1.Empty() || i2.Empty() {
		return EmptyIntInterval
	}
	i3 := NewIntInterval(MaxZ(i1.Lower, i2.Lower), MinZ(i1.Upper, i2.Upper))
	if i3.Lower.Cmp(i3.Upper) == 1 {
		return EmptyIntInterval
	}
	return i3
}

// Union returns the smallest interval containing both i1 and other.
// A non-IntInterval operand is treated as empty.
func (i1 IntInterval) Union(other Range) Range {
	i2, ok := other.(IntInterval)
	if !ok {
		i2 = EmptyIntInterval
	}
	if i1.Empty() || !i1.IsKnown() {
		return i2
	}
	if i2.Empty() || !i2.IsKnown() {
		return i1
	}
	return NewIntInterval(MinZ(i1.Lower, i2.Lower), MaxZ(i1.Upper, i2.Upper))
}

// Add returns the interval of x+y for x ∈ i1, y ∈ i2.
func (i1 IntInterval) Add(i2 IntInterval) IntInterval {
	if i1.Empty() || i2.Empty() {
		return EmptyIntInterval
	}
	l1, u1, l2, u2 := i1.Lower, i1.Upper, i2.Lower, i2.Upper
	return NewIntInterval(l1.Add(l2), u1.Add(u2))
}

// Sub returns the interval of x-y for x ∈ i1, y ∈ i2.
func (i1 IntInterval) Sub(i2 IntInterval) IntInterval {
	if i1.Empty() || i2.Empty() {
		return EmptyIntInterval
	}
	l1, u1, l2, u2 := i1.Lower, i1.Upper, i2.Lower, i2.Upper
	return NewIntInterval(l1.Sub(u2), u1.Sub(l2))
}

// Mul returns the interval of x*y for x ∈ i1, y ∈ i2, taking the min and
// max over all four corner products (signs may flip the ordering).
func (i1 IntInterval) Mul(i2 IntInterval) IntInterval {
	if i1.Empty() || i2.Empty() {
		return EmptyIntInterval
	}
	x1, x2 := i1.Lower, i1.Upper
	y1, y2 := i2.Lower, i2.Upper
	return NewIntInterval(
		MinZ(x1.Mul(y1), x1.Mul(y2), x2.Mul(y1), x2.Mul(y2)),
		MaxZ(x1.Mul(y1), x1.Mul(y2), x2.Mul(y1), x2.Mul(y2)),
	)
}

// String renders unknown as [⊥, ⊥], empty as {}, and otherwise [l, u].
func (i1 IntInterval) String() string {
	if !i1.IsKnown() {
		return "[⊥, ⊥]"
	}
	if i1.Empty() {
		return "{}"
	}
	return fmt.Sprintf("[%s, %s]", i1.Lower, i1.Upper)
}
|
||||
|
||||
// IntArithmeticConstraint models y = A op B for an integer arithmetic
// operation; Fn is the interval-arithmetic implementation of Op.
type IntArithmeticConstraint struct {
	aConstraint
	A  ssa.Value
	B  ssa.Value
	Op token.Token
	Fn func(IntInterval, IntInterval) IntInterval
}

// Thin wrappers so each operation is a distinct constraint type.
type IntAddConstraint struct{ *IntArithmeticConstraint }
type IntSubConstraint struct{ *IntArithmeticConstraint }
type IntMulConstraint struct{ *IntArithmeticConstraint }

// IntConversionConstraint models an integer conversion y = T(X).
type IntConversionConstraint struct {
	aConstraint
	X ssa.Value
}

// IntIntersectionConstraint narrows A's range based on a comparison
// A Op B taken on one branch; B's range is resolved lazily (a "future").
type IntIntersectionConstraint struct {
	aConstraint
	ranges   Ranges
	A        ssa.Value
	B        ssa.Value
	Op       token.Token
	I        IntInterval
	resolved bool
}

// IntIntervalConstraint pins y to a fixed interval (e.g. a constant).
type IntIntervalConstraint struct {
	aConstraint
	I IntInterval
}

func NewIntArithmeticConstraint(a, b, y ssa.Value, op token.Token, fn func(IntInterval, IntInterval) IntInterval) *IntArithmeticConstraint {
	return &IntArithmeticConstraint{NewConstraint(y), a, b, op, fn}
}
func NewIntAddConstraint(a, b, y ssa.Value) Constraint {
	return &IntAddConstraint{NewIntArithmeticConstraint(a, b, y, token.ADD, IntInterval.Add)}
}
func NewIntSubConstraint(a, b, y ssa.Value) Constraint {
	return &IntSubConstraint{NewIntArithmeticConstraint(a, b, y, token.SUB, IntInterval.Sub)}
}
func NewIntMulConstraint(a, b, y ssa.Value) Constraint {
	return &IntMulConstraint{NewIntArithmeticConstraint(a, b, y, token.MUL, IntInterval.Mul)}
}
func NewIntConversionConstraint(x, y ssa.Value) Constraint {
	return &IntConversionConstraint{NewConstraint(y), x}
}
func NewIntIntersectionConstraint(a, b ssa.Value, op token.Token, ranges Ranges, y ssa.Value) Constraint {
	return &IntIntersectionConstraint{
		aConstraint: NewConstraint(y),
		ranges:      ranges,
		A:           a,
		B:           b,
		Op:          op,
	}
}
func NewIntIntervalConstraint(i IntInterval, y ssa.Value) Constraint {
	return &IntIntervalConstraint{NewConstraint(y), i}
}

func (c *IntArithmeticConstraint) Operands() []ssa.Value   { return []ssa.Value{c.A, c.B} }
func (c *IntConversionConstraint) Operands() []ssa.Value   { return []ssa.Value{c.X} }
func (c *IntIntersectionConstraint) Operands() []ssa.Value { return []ssa.Value{c.A} }
func (s *IntIntervalConstraint) Operands() []ssa.Value     { return nil }

func (c *IntArithmeticConstraint) String() string {
	return fmt.Sprintf("%s = %s %s %s", c.Y().Name(), c.A.Name(), c.Op, c.B.Name())
}
func (c *IntConversionConstraint) String() string {
	return fmt.Sprintf("%s = %s(%s)", c.Y().Name(), c.Y().Type(), c.X.Name())
}
func (c *IntIntersectionConstraint) String() string {
	return fmt.Sprintf("%s = %s %s %s (%t branch)", c.Y().Name(), c.A.Name(), c.Op, c.B.Name(), c.Y().(*ssa.Sigma).Branch)
}
func (c *IntIntervalConstraint) String() string { return fmt.Sprintf("%s = %s", c.Y().Name(), c.I) }

// Eval applies the interval-arithmetic function to both operand ranges;
// unknown inputs yield an unknown result.
func (c *IntArithmeticConstraint) Eval(g *Graph) Range {
	i1, i2 := g.Range(c.A).(IntInterval), g.Range(c.B).(IntInterval)
	if !i1.IsKnown() || !i2.IsKnown() {
		return IntInterval{}
	}
	return c.Fn(i1, i2)
}

// Eval narrows the source range to what fits in the destination type for
// the widening-conversion cases; all other conversions pass the source
// range through unchanged.
func (c *IntConversionConstraint) Eval(g *Graph) Range {
	s := &types.StdSizes{
		// XXX is it okay to assume the largest word size, or do we
		// need to be platform specific?
		WordSize: 8,
		MaxAlign: 1,
	}
	fromI := g.Range(c.X).(IntInterval)
	toI := g.Range(c.Y()).(IntInterval)
	fromT := c.X.Type().Underlying().(*types.Basic)
	toT := c.Y().Type().Underlying().(*types.Basic)
	fromB := s.Sizeof(c.X.Type())
	toB := s.Sizeof(c.Y().Type())

	if !fromI.IsKnown() {
		return toI
	}
	if !toI.IsKnown() {
		return fromI
	}

	// uint<N> -> sint/uint<M>, M > N: [max(0, l1), min(2**N-1, u2)]
	if (fromT.Info()&types.IsUnsigned != 0) &&
		toB > fromB {

		n := big.NewInt(1)
		n.Lsh(n, uint(fromB*8))
		n.Sub(n, big.NewInt(1))
		return NewIntInterval(
			MaxZ(NewZ(0), fromI.Lower),
			MinZ(NewBigZ(n), toI.Upper),
		)
	}

	// sint<N> -> sint<M>, M > N; [max(-∞, l1), min(2**N-1, u2)]
	if (fromT.Info()&types.IsUnsigned == 0) &&
		(toT.Info()&types.IsUnsigned == 0) &&
		toB > fromB {

		n := big.NewInt(1)
		n.Lsh(n, uint(fromB*8))
		n.Sub(n, big.NewInt(1))
		return NewIntInterval(
			MaxZ(NInfinity, fromI.Lower),
			MinZ(NewBigZ(n), toI.Upper),
		)
	}

	return fromI
}

// Eval intersects A's current range with the interval derived from the
// branch condition (c.I, computed in Resolve).
func (c *IntIntersectionConstraint) Eval(g *Graph) Range {
	xi := g.Range(c.A).(IntInterval)
	if !xi.IsKnown() {
		return c.I
	}
	return xi.Intersection(c.I)
}

// Eval returns the fixed interval.
func (c *IntIntervalConstraint) Eval(*Graph) Range { return c.I }

// Futures reports the values whose ranges must be known before Resolve
// can run; here, the comparison's right-hand side.
func (c *IntIntersectionConstraint) Futures() []ssa.Value {
	return []ssa.Value{c.B}
}

// Resolve derives the narrowing interval c.I from B's resolved range and
// the comparison operator. An unresolvable B, or a != comparison, falls
// back to the type's full range.
func (c *IntIntersectionConstraint) Resolve() {
	r, ok := c.ranges[c.B].(IntInterval)
	if !ok {
		c.I = InfinityFor(c.Y())
		return
	}

	switch c.Op {
	case token.EQL:
		c.I = r
	case token.GTR:
		c.I = NewIntInterval(r.Lower.Add(NewZ(1)), PInfinity)
	case token.GEQ:
		c.I = NewIntInterval(r.Lower, PInfinity)
	case token.LSS:
		// TODO(dh): do we need 0 instead of NInfinity for uints?
		c.I = NewIntInterval(NInfinity, r.Upper.Sub(NewZ(1)))
	case token.LEQ:
		c.I = NewIntInterval(NInfinity, r.Upper)
	case token.NEQ:
		c.I = InfinityFor(c.Y())
	default:
		panic("unsupported op " + c.Op.String())
	}
}

func (c *IntIntersectionConstraint) IsKnown() bool {
	return c.I.IsKnown()
}

func (c *IntIntersectionConstraint) MarkUnresolved() {
	c.resolved = false
}

func (c *IntIntersectionConstraint) MarkResolved() {
	c.resolved = true
}

func (c *IntIntersectionConstraint) IsResolved() bool {
	return c.resolved
}
|
273
vendor/honnef.co/go/tools/staticcheck/vrp/slice.go
vendored
Normal file
273
vendor/honnef.co/go/tools/staticcheck/vrp/slice.go
vendored
Normal file
@ -0,0 +1,273 @@
|
||||
package vrp
|
||||
|
||||
// TODO(dh): most of the constraints have implementations identical to
|
||||
// that of strings. Consider reusing them.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/types"
|
||||
|
||||
"honnef.co/go/tools/ssa"
|
||||
)
|
||||
|
||||
// SliceInterval is the range of a slice value, tracking the possible
// lengths as an integer interval.
type SliceInterval struct {
	Length IntInterval
}

// Union merges two slice ranges; a non-SliceInterval operand is treated
// as empty, and an empty or unknown side yields the other side.
func (s SliceInterval) Union(other Range) Range {
	i, ok := other.(SliceInterval)
	if !ok {
		i = SliceInterval{EmptyIntInterval}
	}
	if s.Length.Empty() || !s.Length.IsKnown() {
		return i
	}
	if i.Length.Empty() || !i.Length.IsKnown() {
		return s
	}
	return SliceInterval{
		Length: s.Length.Union(i.Length).(IntInterval),
	}
}
func (s SliceInterval) String() string { return s.Length.String() }
func (s SliceInterval) IsKnown() bool  { return s.Length.IsKnown() }

// SliceAppendConstraint models y = append(A, B...).
type SliceAppendConstraint struct {
	aConstraint
	A ssa.Value
	B ssa.Value
}

// SliceSliceConstraint models y = X[Lower:Upper] on a slice; nil bounds
// mean the default 0 / len(X).
type SliceSliceConstraint struct {
	aConstraint
	X     ssa.Value
	Lower ssa.Value
	Upper ssa.Value
}

// ArraySliceConstraint models y = X[Lower:Upper] on an array or *array.
type ArraySliceConstraint struct {
	aConstraint
	X     ssa.Value
	Lower ssa.Value
	Upper ssa.Value
}

// SliceIntersectionConstraint narrows X's length range by a fixed
// interval derived from a branch condition.
type SliceIntersectionConstraint struct {
	aConstraint
	X ssa.Value
	I IntInterval
}

// SliceLengthConstraint models y = len(X).
type SliceLengthConstraint struct {
	aConstraint
	X ssa.Value
}

// MakeSliceConstraint models y = make([]T, Size).
type MakeSliceConstraint struct {
	aConstraint
	Size ssa.Value
}

// SliceIntervalConstraint pins y's length to a fixed interval.
type SliceIntervalConstraint struct {
	aConstraint
	I IntInterval
}

func NewSliceAppendConstraint(a, b, y ssa.Value) Constraint {
	return &SliceAppendConstraint{NewConstraint(y), a, b}
}
func NewSliceSliceConstraint(x, lower, upper, y ssa.Value) Constraint {
	return &SliceSliceConstraint{NewConstraint(y), x, lower, upper}
}
func NewArraySliceConstraint(x, lower, upper, y ssa.Value) Constraint {
	return &ArraySliceConstraint{NewConstraint(y), x, lower, upper}
}
func NewSliceIntersectionConstraint(x ssa.Value, i IntInterval, y ssa.Value) Constraint {
	return &SliceIntersectionConstraint{NewConstraint(y), x, i}
}
func NewSliceLengthConstraint(x, y ssa.Value) Constraint {
	return &SliceLengthConstraint{NewConstraint(y), x}
}
func NewMakeSliceConstraint(size, y ssa.Value) Constraint {
	return &MakeSliceConstraint{NewConstraint(y), size}
}
func NewSliceIntervalConstraint(i IntInterval, y ssa.Value) Constraint {
	return &SliceIntervalConstraint{NewConstraint(y), i}
}

func (c *SliceAppendConstraint) Operands() []ssa.Value { return []ssa.Value{c.A, c.B} }
func (c *SliceSliceConstraint) Operands() []ssa.Value {
	ops := []ssa.Value{c.X}
	if c.Lower != nil {
		ops = append(ops, c.Lower)
	}
	if c.Upper != nil {
		ops = append(ops, c.Upper)
	}
	return ops
}
func (c *ArraySliceConstraint) Operands() []ssa.Value {
	ops := []ssa.Value{c.X}
	if c.Lower != nil {
		ops = append(ops, c.Lower)
	}
	if c.Upper != nil {
		ops = append(ops, c.Upper)
	}
	return ops
}
func (c *SliceIntersectionConstraint) Operands() []ssa.Value { return []ssa.Value{c.X} }
func (c *SliceLengthConstraint) Operands() []ssa.Value       { return []ssa.Value{c.X} }
func (c *MakeSliceConstraint) Operands() []ssa.Value         { return []ssa.Value{c.Size} }
func (s *SliceIntervalConstraint) Operands() []ssa.Value     { return nil }

func (c *SliceAppendConstraint) String() string {
	return fmt.Sprintf("%s = append(%s, %s)", c.Y().Name(), c.A.Name(), c.B.Name())
}
func (c *SliceSliceConstraint) String() string {
	var lname, uname string
	if c.Lower != nil {
		lname = c.Lower.Name()
	}
	if c.Upper != nil {
		uname = c.Upper.Name()
	}
	return fmt.Sprintf("%s[%s:%s]", c.X.Name(), lname, uname)
}
func (c *ArraySliceConstraint) String() string {
	var lname, uname string
	if c.Lower != nil {
		lname = c.Lower.Name()
	}
	if c.Upper != nil {
		uname = c.Upper.Name()
	}
	return fmt.Sprintf("%s[%s:%s]", c.X.Name(), lname, uname)
}
func (c *SliceIntersectionConstraint) String() string {
	return fmt.Sprintf("%s = %s.%t ⊓ %s", c.Y().Name(), c.X.Name(), c.Y().(*ssa.Sigma).Branch, c.I)
}
func (c *SliceLengthConstraint) String() string {
	return fmt.Sprintf("%s = len(%s)", c.Y().Name(), c.X.Name())
}
func (c *MakeSliceConstraint) String() string {
	return fmt.Sprintf("%s = make(slice, %s)", c.Y().Name(), c.Size.Name())
}
func (c *SliceIntervalConstraint) String() string { return fmt.Sprintf("%s = %s", c.Y().Name(), c.I) }

// Eval computes the appended slice's length as the sum of both operand
// lengths; B may be a string (append([]byte, s...)).
func (c *SliceAppendConstraint) Eval(g *Graph) Range {
	l1 := g.Range(c.A).(SliceInterval).Length
	var l2 IntInterval
	switch r := g.Range(c.B).(type) {
	case SliceInterval:
		l2 = r.Length
	case StringInterval:
		l2 = r.Length
	default:
		return SliceInterval{}
	}
	if !l1.IsKnown() || !l2.IsKnown() {
		return SliceInterval{}
	}
	return SliceInterval{
		Length: l1.Add(l2),
	}
}

// Eval computes the length range of X[lo:hi] from the bound ranges,
// defaulting lo to 0 and hi to len(X), and clamping negative candidate
// lengths to zero.
func (c *SliceSliceConstraint) Eval(g *Graph) Range {
	lr := NewIntInterval(NewZ(0), NewZ(0))
	if c.Lower != nil {
		lr = g.Range(c.Lower).(IntInterval)
	}
	ur := g.Range(c.X).(SliceInterval).Length
	if c.Upper != nil {
		ur = g.Range(c.Upper).(IntInterval)
	}
	if !lr.IsKnown() || !ur.IsKnown() {
		return SliceInterval{}
	}

	// All four corner differences hi-lo; min/max below bound the result.
	ls := []Z{
		ur.Lower.Sub(lr.Lower),
		ur.Upper.Sub(lr.Lower),
		ur.Lower.Sub(lr.Upper),
		ur.Upper.Sub(lr.Upper),
	}
	// TODO(dh): if we don't truncate lengths to 0 we might be able to
	// easily detect slices with high < low. we'd need to treat -∞
	// specially, though.
	for i, l := range ls {
		if l.Sign() == -1 {
			ls[i] = NewZ(0)
		}
	}

	return SliceInterval{
		Length: NewIntInterval(MinZ(ls...), MaxZ(ls...)),
	}
}

// Eval is the array analogue of SliceSliceConstraint.Eval; the default
// upper bound is the array type's fixed length.
func (c *ArraySliceConstraint) Eval(g *Graph) Range {
	lr := NewIntInterval(NewZ(0), NewZ(0))
	if c.Lower != nil {
		lr = g.Range(c.Lower).(IntInterval)
	}
	var l int64
	switch typ := c.X.Type().(type) {
	case *types.Array:
		l = typ.Len()
	case *types.Pointer:
		l = typ.Elem().(*types.Array).Len()
	}
	ur := NewIntInterval(NewZ(l), NewZ(l))
	if c.Upper != nil {
		ur = g.Range(c.Upper).(IntInterval)
	}
	if !lr.IsKnown() || !ur.IsKnown() {
		return SliceInterval{}
	}

	ls := []Z{
		ur.Lower.Sub(lr.Lower),
		ur.Upper.Sub(lr.Lower),
		ur.Lower.Sub(lr.Upper),
		ur.Upper.Sub(lr.Upper),
	}
	// TODO(dh): if we don't truncate lengths to 0 we might be able to
	// easily detect slices with high < low. we'd need to treat -∞
	// specially, though.
	for i, l := range ls {
		if l.Sign() == -1 {
			ls[i] = NewZ(0)
		}
	}

	return SliceInterval{
		Length: NewIntInterval(MinZ(ls...), MaxZ(ls...)),
	}
}

// Eval intersects X's length range with the branch-derived interval.
func (c *SliceIntersectionConstraint) Eval(g *Graph) Range {
	xi := g.Range(c.X).(SliceInterval)
	if !xi.IsKnown() {
		return c.I
	}
	return SliceInterval{
		Length: xi.Length.Intersection(c.I),
	}
}

// Eval returns X's length range; unknown lengths default to [0, ∞).
func (c *SliceLengthConstraint) Eval(g *Graph) Range {
	i := g.Range(c.X).(SliceInterval).Length
	if !i.IsKnown() {
		return NewIntInterval(NewZ(0), PInfinity)
	}
	return i
}

// Eval derives the made slice's length from the size operand; unknown
// sizes yield [0, ∞), and negative lower bounds are clamped to zero.
func (c *MakeSliceConstraint) Eval(g *Graph) Range {
	i, ok := g.Range(c.Size).(IntInterval)
	if !ok {
		return SliceInterval{NewIntInterval(NewZ(0), PInfinity)}
	}
	if i.Lower.Sign() == -1 {
		i.Lower = NewZ(0)
	}
	return SliceInterval{i}
}

// Eval returns the fixed length interval.
func (c *SliceIntervalConstraint) Eval(*Graph) Range { return SliceInterval{c.I} }
|
258
vendor/honnef.co/go/tools/staticcheck/vrp/string.go
vendored
Normal file
258
vendor/honnef.co/go/tools/staticcheck/vrp/string.go
vendored
Normal file
@ -0,0 +1,258 @@
|
||||
package vrp
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/token"
|
||||
"go/types"
|
||||
|
||||
"honnef.co/go/tools/ssa"
|
||||
)
|
||||
|
||||
type StringInterval struct {
|
||||
Length IntInterval
|
||||
}
|
||||
|
||||
func (s StringInterval) Union(other Range) Range {
|
||||
i, ok := other.(StringInterval)
|
||||
if !ok {
|
||||
i = StringInterval{EmptyIntInterval}
|
||||
}
|
||||
if s.Length.Empty() || !s.Length.IsKnown() {
|
||||
return i
|
||||
}
|
||||
if i.Length.Empty() || !i.Length.IsKnown() {
|
||||
return s
|
||||
}
|
||||
return StringInterval{
|
||||
Length: s.Length.Union(i.Length).(IntInterval),
|
||||
}
|
||||
}
|
||||
|
||||
func (s StringInterval) String() string {
|
||||
return s.Length.String()
|
||||
}
|
||||
|
||||
func (s StringInterval) IsKnown() bool {
|
||||
return s.Length.IsKnown()
|
||||
}
|
||||
|
||||
type StringSliceConstraint struct {
|
||||
aConstraint
|
||||
X ssa.Value
|
||||
Lower ssa.Value
|
||||
Upper ssa.Value
|
||||
}
|
||||
|
||||
type StringIntersectionConstraint struct {
|
||||
aConstraint
|
||||
ranges Ranges
|
||||
A ssa.Value
|
||||
B ssa.Value
|
||||
Op token.Token
|
||||
I IntInterval
|
||||
resolved bool
|
||||
}
|
||||
|
||||
type StringConcatConstraint struct {
|
||||
aConstraint
|
||||
A ssa.Value
|
||||
B ssa.Value
|
||||
}
|
||||
|
||||
type StringLengthConstraint struct {
|
||||
aConstraint
|
||||
X ssa.Value
|
||||
}
|
||||
|
||||
type StringIntervalConstraint struct {
|
||||
aConstraint
|
||||
I IntInterval
|
||||
}
|
||||
|
||||
func NewStringSliceConstraint(x, lower, upper, y ssa.Value) Constraint {
|
||||
return &StringSliceConstraint{NewConstraint(y), x, lower, upper}
|
||||
}
|
||||
func NewStringIntersectionConstraint(a, b ssa.Value, op token.Token, ranges Ranges, y ssa.Value) Constraint {
|
||||
return &StringIntersectionConstraint{
|
||||
aConstraint: NewConstraint(y),
|
||||
ranges: ranges,
|
||||
A: a,
|
||||
B: b,
|
||||
Op: op,
|
||||
}
|
||||
}
|
||||
func NewStringConcatConstraint(a, b, y ssa.Value) Constraint {
|
||||
return &StringConcatConstraint{NewConstraint(y), a, b}
|
||||
}
|
||||
func NewStringLengthConstraint(x ssa.Value, y ssa.Value) Constraint {
|
||||
return &StringLengthConstraint{NewConstraint(y), x}
|
||||
}
|
||||
func NewStringIntervalConstraint(i IntInterval, y ssa.Value) Constraint {
|
||||
return &StringIntervalConstraint{NewConstraint(y), i}
|
||||
}
|
||||
|
||||
func (c *StringSliceConstraint) Operands() []ssa.Value {
|
||||
vs := []ssa.Value{c.X}
|
||||
if c.Lower != nil {
|
||||
vs = append(vs, c.Lower)
|
||||
}
|
||||
if c.Upper != nil {
|
||||
vs = append(vs, c.Upper)
|
||||
}
|
||||
return vs
|
||||
}
|
||||
func (c *StringIntersectionConstraint) Operands() []ssa.Value { return []ssa.Value{c.A} }
|
||||
func (c StringConcatConstraint) Operands() []ssa.Value { return []ssa.Value{c.A, c.B} }
|
||||
func (c *StringLengthConstraint) Operands() []ssa.Value { return []ssa.Value{c.X} }
|
||||
func (s *StringIntervalConstraint) Operands() []ssa.Value { return nil }
|
||||
|
||||
func (c *StringSliceConstraint) String() string {
|
||||
var lname, uname string
|
||||
if c.Lower != nil {
|
||||
lname = c.Lower.Name()
|
||||
}
|
||||
if c.Upper != nil {
|
||||
uname = c.Upper.Name()
|
||||
}
|
||||
return fmt.Sprintf("%s[%s:%s]", c.X.Name(), lname, uname)
|
||||
}
|
||||
func (c *StringIntersectionConstraint) String() string {
|
||||
return fmt.Sprintf("%s = %s %s %s (%t branch)", c.Y().Name(), c.A.Name(), c.Op, c.B.Name(), c.Y().(*ssa.Sigma).Branch)
|
||||
}
|
||||
func (c StringConcatConstraint) String() string {
|
||||
return fmt.Sprintf("%s = %s + %s", c.Y().Name(), c.A.Name(), c.B.Name())
|
||||
}
|
||||
func (c *StringLengthConstraint) String() string {
|
||||
return fmt.Sprintf("%s = len(%s)", c.Y().Name(), c.X.Name())
|
||||
}
|
||||
func (c *StringIntervalConstraint) String() string { return fmt.Sprintf("%s = %s", c.Y().Name(), c.I) }
|
||||
|
||||
func (c *StringSliceConstraint) Eval(g *Graph) Range {
|
||||
lr := NewIntInterval(NewZ(0), NewZ(0))
|
||||
if c.Lower != nil {
|
||||
lr = g.Range(c.Lower).(IntInterval)
|
||||
}
|
||||
ur := g.Range(c.X).(StringInterval).Length
|
||||
if c.Upper != nil {
|
||||
ur = g.Range(c.Upper).(IntInterval)
|
||||
}
|
||||
if !lr.IsKnown() || !ur.IsKnown() {
|
||||
return StringInterval{}
|
||||
}
|
||||
|
||||
ls := []Z{
|
||||
ur.Lower.Sub(lr.Lower),
|
||||
ur.Upper.Sub(lr.Lower),
|
||||
ur.Lower.Sub(lr.Upper),
|
||||
ur.Upper.Sub(lr.Upper),
|
||||
}
|
||||
// TODO(dh): if we don't truncate lengths to 0 we might be able to
|
||||
// easily detect slices with high < low. we'd need to treat -∞
|
||||
// specially, though.
|
||||
for i, l := range ls {
|
||||
if l.Sign() == -1 {
|
||||
ls[i] = NewZ(0)
|
||||
}
|
||||
}
|
||||
|
||||
return StringInterval{
|
||||
Length: NewIntInterval(MinZ(ls...), MaxZ(ls...)),
|
||||
}
|
||||
}
|
||||
// Eval intersects the incoming range of A with the interval I that was
// derived (by Resolve) from the comparison this constraint models. A
// may carry either a string range, in which case its length is used,
// or a plain integer range.
func (c *StringIntersectionConstraint) Eval(g *Graph) Range {
	var l IntInterval
	switch r := g.Range(c.A).(type) {
	case StringInterval:
		l = r.Length
	case IntInterval:
		l = r
	}

	if !l.IsKnown() {
		// Nothing known about A yet; fall back to the interval derived
		// from the comparison alone.
		return StringInterval{c.I}
	}
	return StringInterval{
		Length: l.Intersection(c.I),
	}
}
|
||||
func (c StringConcatConstraint) Eval(g *Graph) Range {
|
||||
i1, i2 := g.Range(c.A).(StringInterval), g.Range(c.B).(StringInterval)
|
||||
if !i1.Length.IsKnown() || !i2.Length.IsKnown() {
|
||||
return StringInterval{}
|
||||
}
|
||||
return StringInterval{
|
||||
Length: i1.Length.Add(i2.Length),
|
||||
}
|
||||
}
|
||||
func (c *StringLengthConstraint) Eval(g *Graph) Range {
|
||||
i := g.Range(c.X).(StringInterval).Length
|
||||
if !i.IsKnown() {
|
||||
return NewIntInterval(NewZ(0), PInfinity)
|
||||
}
|
||||
return i
|
||||
}
|
||||
// Eval returns the constant interval I; it does not consult the graph.
func (c *StringIntervalConstraint) Eval(*Graph) Range { return StringInterval{c.I} }
|
||||
|
||||
// Futures returns the values whose ranges must become available before
// this constraint can be resolved: the right-hand side B of the
// comparison.
func (c *StringIntersectionConstraint) Futures() []ssa.Value {
	return []ssa.Value{c.B}
}
|
||||
|
||||
// Resolve derives the interval I implied by the comparison "A op B",
// once B's range is available in c.ranges. Two cases are handled:
// comparing two strings (B's length interval bounds A's length) and
// comparing two integers/lengths directly. If B's range isn't of the
// expected kind, I falls back to the maximally permissive [0, ∞).
func (c *StringIntersectionConstraint) Resolve() {
	if (c.A.Type().Underlying().(*types.Basic).Info() & types.IsString) != 0 {
		// comparing two strings
		r, ok := c.ranges[c.B].(StringInterval)
		if !ok {
			c.I = NewIntInterval(NewZ(0), PInfinity)
			return
		}
		switch c.Op {
		case token.EQL:
			c.I = r.Length
		case token.GTR, token.GEQ:
			c.I = NewIntInterval(r.Length.Lower, PInfinity)
		case token.LSS, token.LEQ:
			c.I = NewIntInterval(NewZ(0), r.Length.Upper)
		case token.NEQ:
			// != excludes only a single value and therefore derives no
			// narrowing; c.I is deliberately left unset.
		default:
			panic("unsupported op " + c.Op.String())
		}
	} else {
		r, ok := c.ranges[c.B].(IntInterval)
		if !ok {
			c.I = NewIntInterval(NewZ(0), PInfinity)
			return
		}
		// comparing two lengths
		switch c.Op {
		case token.EQL:
			c.I = r
		case token.GTR:
			// Strictly greater: lower bound is r.Lower + 1.
			c.I = NewIntInterval(r.Lower.Add(NewZ(1)), PInfinity)
		case token.GEQ:
			c.I = NewIntInterval(r.Lower, PInfinity)
		case token.LSS:
			// Strictly less: upper bound is r.Upper - 1.
			c.I = NewIntInterval(NInfinity, r.Upper.Sub(NewZ(1)))
		case token.LEQ:
			c.I = NewIntInterval(NInfinity, r.Upper)
		case token.NEQ:
			// See the string case above: no narrowing for !=.
		default:
			panic("unsupported op " + c.Op.String())
		}
	}
}
|
||||
|
||||
// IsKnown reports whether a usable interval has been derived yet.
func (c *StringIntersectionConstraint) IsKnown() bool {
	return c.I.IsKnown()
}
|
||||
|
||||
// MarkUnresolved flags the constraint so Resolve will run (again) once
// its futures become available.
func (c *StringIntersectionConstraint) MarkUnresolved() {
	c.resolved = false
}
|
||||
|
||||
// MarkResolved records that Resolve has been run.
func (c *StringIntersectionConstraint) MarkResolved() {
	c.resolved = true
}
|
||||
|
||||
// IsResolved reports whether Resolve has run since the constraint was
// last marked unresolved.
func (c *StringIntersectionConstraint) IsResolved() bool {
	return c.resolved
}
|
1056
vendor/honnef.co/go/tools/staticcheck/vrp/vrp.go
vendored
Normal file
1056
vendor/honnef.co/go/tools/staticcheck/vrp/vrp.go
vendored
Normal file
File diff suppressed because it is too large
Load Diff
111
vendor/honnef.co/go/tools/stylecheck/analysis.go
vendored
Normal file
111
vendor/honnef.co/go/tools/stylecheck/analysis.go
vendored
Normal file
@ -0,0 +1,111 @@
|
||||
package stylecheck
|
||||
|
||||
import (
|
||||
"flag"
|
||||
|
||||
"golang.org/x/tools/go/analysis"
|
||||
"golang.org/x/tools/go/analysis/passes/inspect"
|
||||
"honnef.co/go/tools/config"
|
||||
"honnef.co/go/tools/facts"
|
||||
"honnef.co/go/tools/internal/passes/buildssa"
|
||||
"honnef.co/go/tools/lint/lintutil"
|
||||
)
|
||||
|
||||
func newFlagSet() flag.FlagSet {
|
||||
fs := flag.NewFlagSet("", flag.PanicOnError)
|
||||
fs.Var(lintutil.NewVersionFlag(), "go", "Target Go version")
|
||||
return *fs
|
||||
}
|
||||
|
||||
// Analyzers maps each stylecheck check ID to its analysis.Analyzer.
// Every analyzer shares the flag set produced by newFlagSet and pulls
// its documentation from the Docs table in doc.go; Requires lists the
// facts and results each check consumes.
var Analyzers = map[string]*analysis.Analyzer{
	"ST1000": {
		Name:     "ST1000",
		Run:      CheckPackageComment,
		Doc:      Docs["ST1000"].String(),
		Requires: []*analysis.Analyzer{},
		Flags:    newFlagSet(),
	},
	"ST1001": {
		Name:     "ST1001",
		Run:      CheckDotImports,
		Doc:      Docs["ST1001"].String(),
		Requires: []*analysis.Analyzer{facts.Generated, config.Analyzer},
		Flags:    newFlagSet(),
	},
	"ST1003": {
		Name:     "ST1003",
		Run:      CheckNames,
		Doc:      Docs["ST1003"].String(),
		Requires: []*analysis.Analyzer{facts.Generated, config.Analyzer},
		Flags:    newFlagSet(),
	},
	"ST1005": {
		Name:     "ST1005",
		Run:      CheckErrorStrings,
		Doc:      Docs["ST1005"].String(),
		Requires: []*analysis.Analyzer{buildssa.Analyzer},
		Flags:    newFlagSet(),
	},
	"ST1006": {
		Name:     "ST1006",
		Run:      CheckReceiverNames,
		Doc:      Docs["ST1006"].String(),
		Requires: []*analysis.Analyzer{buildssa.Analyzer, facts.Generated},
		Flags:    newFlagSet(),
	},
	"ST1008": {
		Name:     "ST1008",
		Run:      CheckErrorReturn,
		Doc:      Docs["ST1008"].String(),
		Requires: []*analysis.Analyzer{buildssa.Analyzer},
		Flags:    newFlagSet(),
	},
	// NOTE(review): ST1011 declares no Requires, unlike its siblings —
	// presumably because CheckTimeNames only walks the AST; confirm.
	"ST1011": {
		Name:  "ST1011",
		Run:   CheckTimeNames,
		Doc:   Docs["ST1011"].String(),
		Flags: newFlagSet(),
	},
	"ST1012": {
		Name:     "ST1012",
		Run:      CheckErrorVarNames,
		Doc:      Docs["ST1012"].String(),
		Requires: []*analysis.Analyzer{config.Analyzer},
		Flags:    newFlagSet(),
	},
	"ST1013": {
		Name:     "ST1013",
		Run:      CheckHTTPStatusCodes,
		Doc:      Docs["ST1013"].String(),
		Requires: []*analysis.Analyzer{facts.Generated, facts.TokenFile, config.Analyzer},
		Flags:    newFlagSet(),
	},
	"ST1015": {
		Name:     "ST1015",
		Run:      CheckDefaultCaseOrder,
		Doc:      Docs["ST1015"].String(),
		Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated, facts.TokenFile},
		Flags:    newFlagSet(),
	},
	"ST1016": {
		Name:     "ST1016",
		Run:      CheckReceiverNamesIdentical,
		Doc:      Docs["ST1016"].String(),
		Requires: []*analysis.Analyzer{buildssa.Analyzer},
		Flags:    newFlagSet(),
	},
	"ST1017": {
		Name:     "ST1017",
		Run:      CheckYodaConditions,
		Doc:      Docs["ST1017"].String(),
		Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated, facts.TokenFile},
		Flags:    newFlagSet(),
	},
	"ST1018": {
		Name:     "ST1018",
		Run:      CheckInvisibleCharacters,
		Doc:      Docs["ST1018"].String(),
		Requires: []*analysis.Analyzer{inspect.Analyzer},
		Flags:    newFlagSet(),
	},
}
|
154
vendor/honnef.co/go/tools/stylecheck/doc.go
vendored
Normal file
154
vendor/honnef.co/go/tools/stylecheck/doc.go
vendored
Normal file
@ -0,0 +1,154 @@
|
||||
package stylecheck
|
||||
|
||||
import "honnef.co/go/tools/lint"
|
||||
|
||||
var Docs = map[string]*lint.Documentation{
|
||||
"ST1000": &lint.Documentation{
|
||||
Title: `Incorrect or missing package comment`,
|
||||
Text: `Packages must have a package comment that is formatted according to
|
||||
the guidelines laid out in
|
||||
https://github.com/golang/go/wiki/CodeReviewComments#package-comments.`,
|
||||
Since: "2019.1",
|
||||
NonDefault: true,
|
||||
},
|
||||
|
||||
"ST1001": &lint.Documentation{
|
||||
Title: `Dot imports are discouraged`,
|
||||
Text: `Dot imports that aren't in external test packages are discouraged.
|
||||
|
||||
The dot_import_whitelist option can be used to whitelist certain
|
||||
imports.
|
||||
|
||||
Quoting Go Code Review Comments:
|
||||
|
||||
The import . form can be useful in tests that, due to circular
|
||||
dependencies, cannot be made part of the package being tested:
|
||||
|
||||
package foo_test
|
||||
|
||||
import (
|
||||
"bar/testutil" // also imports "foo"
|
||||
. "foo"
|
||||
)
|
||||
|
||||
In this case, the test file cannot be in package foo because it
|
||||
uses bar/testutil, which imports foo. So we use the 'import .'
|
||||
form to let the file pretend to be part of package foo even though
|
||||
it is not. Except for this one case, do not use import . in your
|
||||
programs. It makes the programs much harder to read because it is
|
||||
unclear whether a name like Quux is a top-level identifier in the
|
||||
current package or in an imported package.`,
|
||||
Since: "2019.1",
|
||||
Options: []string{"dot_import_whitelist"},
|
||||
},
|
||||
|
||||
"ST1003": &lint.Documentation{
|
||||
Title: `Poorly chosen identifier`,
|
||||
Text: `Identifiers, such as variable and package names, follow certain rules.
|
||||
|
||||
See the following links for details:
|
||||
|
||||
- https://golang.org/doc/effective_go.html#package-names
|
||||
- https://golang.org/doc/effective_go.html#mixed-caps
|
||||
- https://github.com/golang/go/wiki/CodeReviewComments#initialisms
|
||||
- https://github.com/golang/go/wiki/CodeReviewComments#variable-names`,
|
||||
Since: "2019.1",
|
||||
NonDefault: true,
|
||||
Options: []string{"initialisms"},
|
||||
},
|
||||
|
||||
"ST1005": &lint.Documentation{
|
||||
Title: `Incorrectly formatted error string`,
|
||||
Text: `Error strings follow a set of guidelines to ensure uniformity and good
|
||||
composability.
|
||||
|
||||
Quoting Go Code Review Comments:
|
||||
|
||||
Error strings should not be capitalized (unless beginning with
|
||||
proper nouns or acronyms) or end with punctuation, since they are
|
||||
usually printed following other context. That is, use
|
||||
fmt.Errorf("something bad") not fmt.Errorf("Something bad"), so
|
||||
that log.Printf("Reading %s: %v", filename, err) formats without a
|
||||
spurious capital letter mid-message.`,
|
||||
Since: "2019.1",
|
||||
},
|
||||
|
||||
"ST1006": &lint.Documentation{
|
||||
Title: `Poorly chosen receiver name`,
|
||||
Text: `Quoting Go Code Review Comments:
|
||||
|
||||
The name of a method's receiver should be a reflection of its
|
||||
identity; often a one or two letter abbreviation of its type
|
||||
suffices (such as "c" or "cl" for "Client"). Don't use generic
|
||||
names such as "me", "this" or "self", identifiers typical of
|
||||
object-oriented languages that place more emphasis on methods as
|
||||
opposed to functions. The name need not be as descriptive as that
|
||||
of a method argument, as its role is obvious and serves no
|
||||
documentary purpose. It can be very short as it will appear on
|
||||
almost every line of every method of the type; familiarity admits
|
||||
brevity. Be consistent, too: if you call the receiver "c" in one
|
||||
method, don't call it "cl" in another.`,
|
||||
Since: "2019.1",
|
||||
},
|
||||
|
||||
"ST1008": &lint.Documentation{
|
||||
Title: `A function's error value should be its last return value`,
|
||||
Text: `A function's error value should be its last return value.`,
|
||||
Since: `2019.1`,
|
||||
},
|
||||
|
||||
"ST1011": &lint.Documentation{
|
||||
Title: `Poorly chosen name for variable of type time.Duration`,
|
||||
Text: `time.Duration values represent an amount of time, which is represented
|
||||
as a count of nanoseconds. An expression like 5 * time.Microsecond
|
||||
yields the value 5000. It is therefore not appropriate to suffix a
|
||||
variable of type time.Duration with any time unit, such as Msec or
|
||||
Milli.`,
|
||||
Since: `2019.1`,
|
||||
},
|
||||
|
||||
"ST1012": &lint.Documentation{
|
||||
Title: `Poorly chosen name for error variable`,
|
||||
Text: `Error variables that are part of an API should be called errFoo or
|
||||
ErrFoo.`,
|
||||
Since: "2019.1",
|
||||
},
|
||||
|
||||
"ST1013": &lint.Documentation{
|
||||
Title: `Should use constants for HTTP error codes, not magic numbers`,
|
||||
Text: `HTTP has a tremendous number of status codes. While some of those are
|
||||
well known (200, 400, 404, 500), most of them are not. The net/http
|
||||
package provides constants for all status codes that are part of the
|
||||
various specifications. It is recommended to use these constants
|
||||
instead of hard-coding magic numbers, to vastly improve the
|
||||
readability of your code.`,
|
||||
Since: "2019.1",
|
||||
Options: []string{"http_status_code_whitelist"},
|
||||
},
|
||||
|
||||
"ST1015": &lint.Documentation{
|
||||
Title: `A switch's default case should be the first or last case`,
|
||||
Since: "2019.1",
|
||||
},
|
||||
|
||||
"ST1016": &lint.Documentation{
|
||||
Title: `Use consistent method receiver names`,
|
||||
Since: "2019.1",
|
||||
NonDefault: true,
|
||||
},
|
||||
|
||||
"ST1017": &lint.Documentation{
|
||||
Title: `Don't use Yoda conditions`,
|
||||
Text: `Yoda conditions are conditions of the kind 'if 42 == x', where the
|
||||
literal is on the left side of the comparison. These are a common
|
||||
idiom in languages in which assignment is an expression, to avoid bugs
|
||||
of the kind 'if (x = 42)'. In Go, which doesn't allow for this kind of
|
||||
bug, we prefer the more idiomatic 'if x == 42'.`,
|
||||
Since: "2019.2",
|
||||
},
|
||||
|
||||
"ST1018": &lint.Documentation{
|
||||
Title: `Avoid zero-width and control characters in string literals`,
|
||||
Since: "2019.2",
|
||||
},
|
||||
}
|
629
vendor/honnef.co/go/tools/stylecheck/lint.go
vendored
Normal file
629
vendor/honnef.co/go/tools/stylecheck/lint.go
vendored
Normal file
@ -0,0 +1,629 @@
|
||||
package stylecheck // import "honnef.co/go/tools/stylecheck"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/constant"
|
||||
"go/token"
|
||||
"go/types"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
|
||||
"honnef.co/go/tools/config"
|
||||
"honnef.co/go/tools/internal/passes/buildssa"
|
||||
. "honnef.co/go/tools/lint/lintdsl"
|
||||
"honnef.co/go/tools/ssa"
|
||||
|
||||
"golang.org/x/tools/go/analysis"
|
||||
"golang.org/x/tools/go/analysis/passes/inspect"
|
||||
"golang.org/x/tools/go/ast/inspector"
|
||||
"golang.org/x/tools/go/types/typeutil"
|
||||
)
|
||||
|
||||
func CheckPackageComment(pass *analysis.Pass) (interface{}, error) {
|
||||
// - At least one file in a non-main package should have a package comment
|
||||
//
|
||||
// - The comment should be of the form
|
||||
// "Package x ...". This has a slight potential for false
|
||||
// positives, as multiple files can have package comments, in
|
||||
// which case they get appended. But that doesn't happen a lot in
|
||||
// the real world.
|
||||
|
||||
if pass.Pkg.Name() == "main" {
|
||||
return nil, nil
|
||||
}
|
||||
hasDocs := false
|
||||
for _, f := range pass.Files {
|
||||
if IsInTest(pass, f) {
|
||||
continue
|
||||
}
|
||||
if f.Doc != nil && len(f.Doc.List) > 0 {
|
||||
hasDocs = true
|
||||
prefix := "Package " + f.Name.Name + " "
|
||||
if !strings.HasPrefix(strings.TrimSpace(f.Doc.Text()), prefix) {
|
||||
ReportNodef(pass, f.Doc, `package comment should be of the form "%s..."`, prefix)
|
||||
}
|
||||
f.Doc.Text()
|
||||
}
|
||||
}
|
||||
|
||||
if !hasDocs {
|
||||
for _, f := range pass.Files {
|
||||
if IsInTest(pass, f) {
|
||||
continue
|
||||
}
|
||||
ReportNodef(pass, f, "at least one file in a package should have a package comment")
|
||||
}
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func CheckDotImports(pass *analysis.Pass) (interface{}, error) {
|
||||
for _, f := range pass.Files {
|
||||
imports:
|
||||
for _, imp := range f.Imports {
|
||||
path := imp.Path.Value
|
||||
path = path[1 : len(path)-1]
|
||||
for _, w := range config.For(pass).DotImportWhitelist {
|
||||
if w == path {
|
||||
continue imports
|
||||
}
|
||||
}
|
||||
|
||||
if imp.Name != nil && imp.Name.Name == "." && !IsInTest(pass, f) {
|
||||
ReportNodefFG(pass, imp, "should not use dot imports")
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// CheckBlankImports flags blank imports (import _ "foo") outside of
// main and test packages unless they carry a justifying comment.
func CheckBlankImports(pass *analysis.Pass) (interface{}, error) {
	fset := pass.Fset
	for _, f := range pass.Files {
		if IsInMain(pass, f) || IsInTest(pass, f) {
			continue
		}

		// Collect imports of the form `import _ "foo"`, i.e. with no
		// parentheses, as their comment will be associated with the
		// (paren-free) GenDecl, not the import spec itself.
		//
		// We don't directly process the GenDecl so that we can
		// correctly handle the following:
		//
		//     import _ "foo"
		//     import _ "bar"
		//
		// where only the first import should get flagged.
		skip := map[ast.Spec]bool{}
		ast.Inspect(f, func(node ast.Node) bool {
			switch node := node.(type) {
			case *ast.File:
				// Descend into the file to reach its declarations.
				return true
			case *ast.GenDecl:
				if node.Tok != token.IMPORT {
					return false
				}
				if node.Lparen == token.NoPos && node.Doc != nil {
					// Paren-free, commented import: remember its sole
					// spec so it isn't flagged below.
					skip[node.Specs[0]] = true
				}
				return false
			}
			return false
		})
		for i, imp := range f.Imports {
			pos := fset.Position(imp.Pos())

			if !IsBlank(imp.Name) {
				continue
			}
			// Only flag the first blank import in a group of imports,
			// or don't flag any of them, if the first one is
			// commented
			if i > 0 {
				prev := f.Imports[i-1]
				prevPos := fset.Position(prev.Pos())
				// Adjacent lines with a blank predecessor mean we're
				// inside a run of blank imports; skip the followers.
				if pos.Line-1 == prevPos.Line && IsBlank(prev.Name) {
					continue
				}
			}

			if imp.Doc == nil && imp.Comment == nil && !skip[imp] {
				ReportNodef(pass, imp, "a blank import should be only in a main or test package, or have a comment justifying it")
			}
		}
	}
	return nil, nil
}
|
||||
|
||||
// CheckIncDec suggests replacing "x += 1" / "x -= 1" with the
// idiomatic "x++" / "x--".
func CheckIncDec(pass *analysis.Pass) (interface{}, error) {
	// TODO(dh): this can be noisy for function bodies that look like this:
	// 	x += 3
	// 	...
	// 	x += 2
	// 	...
	// 	x += 1
	fn := func(node ast.Node) {
		assign := node.(*ast.AssignStmt)
		if assign.Tok != token.ADD_ASSIGN && assign.Tok != token.SUB_ASSIGN {
			return
		}
		// Only single-assignment statements whose RHS is the literal 1
		// qualify for the ++/-- rewrite.
		if (len(assign.Lhs) != 1 || len(assign.Rhs) != 1) ||
			!IsIntLiteral(assign.Rhs[0], "1") {
			return
		}

		suffix := ""
		switch assign.Tok {
		case token.ADD_ASSIGN:
			suffix = "++"
		case token.SUB_ASSIGN:
			suffix = "--"
		}

		ReportNodef(pass, assign, "should replace %s with %s%s", Render(pass, assign), Render(pass, assign.Lhs[0]), suffix)
	}
	pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.AssignStmt)(nil)}, fn)
	return nil, nil
}
|
||||
|
||||
func CheckErrorReturn(pass *analysis.Pass) (interface{}, error) {
|
||||
fnLoop:
|
||||
for _, fn := range pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).SrcFuncs {
|
||||
sig := fn.Type().(*types.Signature)
|
||||
rets := sig.Results()
|
||||
if rets == nil || rets.Len() < 2 {
|
||||
continue
|
||||
}
|
||||
|
||||
if rets.At(rets.Len()-1).Type() == types.Universe.Lookup("error").Type() {
|
||||
// Last return type is error. If the function also returns
|
||||
// errors in other positions, that's fine.
|
||||
continue
|
||||
}
|
||||
for i := rets.Len() - 2; i >= 0; i-- {
|
||||
if rets.At(i).Type() == types.Universe.Lookup("error").Type() {
|
||||
pass.Reportf(rets.At(i).Pos(), "error should be returned as the last argument")
|
||||
continue fnLoop
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// CheckUnexportedReturn checks that exported functions on exported
// types do not return unexported types (the universe error type is
// exempt).
func CheckUnexportedReturn(pass *analysis.Pass) (interface{}, error) {
	for _, fn := range pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).SrcFuncs {
		// Skip compiler-synthesized functions and closures.
		if fn.Synthetic != "" || fn.Parent() != nil {
			continue
		}
		if !ast.IsExported(fn.Name()) || IsInMain(pass, fn) || IsInTest(pass, fn) {
			continue
		}
		sig := fn.Type().(*types.Signature)
		// Methods on unexported receivers are themselves unexported API.
		if sig.Recv() != nil && !ast.IsExported(Dereference(sig.Recv().Type()).(*types.Named).Obj().Name()) {
			continue
		}
		res := sig.Results()
		for i := 0; i < res.Len(); i++ {
			if named, ok := DereferenceR(res.At(i).Type()).(*types.Named); ok &&
				!ast.IsExported(named.Obj().Name()) &&
				named != types.Universe.Lookup("error").Type() {
				pass.Reportf(fn.Pos(), "should not return unexported type")
			}
		}
	}
	return nil, nil
}
|
||||
|
||||
// CheckReceiverNames implements ST1006: receiver names should not be
// generic ("self", "this") nor the blank identifier.
func CheckReceiverNames(pass *analysis.Pass) (interface{}, error) {
	ssapkg := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).Pkg
	for _, m := range ssapkg.Members {
		if T, ok := m.Object().(*types.TypeName); ok && !T.IsAlias() {
			ms := typeutil.IntuitiveMethodSet(T.Type(), nil)
			for _, sel := range ms {
				fn := sel.Obj().(*types.Func)
				recv := fn.Type().(*types.Signature).Recv()
				if Dereference(recv.Type()) != T.Type() {
					// skip embedded methods
					continue
				}
				if recv.Name() == "self" || recv.Name() == "this" {
					ReportfFG(pass, recv.Pos(), `receiver name should be a reflection of its identity; don't use generic names such as "this" or "self"`)
				}
				if recv.Name() == "_" {
					ReportfFG(pass, recv.Pos(), "receiver name should not be an underscore, omit the name if it is unused")
				}
			}
		}
	}
	return nil, nil
}
|
||||
|
||||
// CheckReceiverNamesIdentical implements ST1016: all methods of a type
// should use the same receiver name. Unnamed and underscore receivers
// are ignored; the diagnostic is reported at the first method seen.
func CheckReceiverNamesIdentical(pass *analysis.Pass) (interface{}, error) {
	ssapkg := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).Pkg
	for _, m := range ssapkg.Members {
		// Count how often each receiver name occurs on this type.
		names := map[string]int{}

		var firstFn *types.Func
		if T, ok := m.Object().(*types.TypeName); ok && !T.IsAlias() {
			ms := typeutil.IntuitiveMethodSet(T.Type(), nil)
			for _, sel := range ms {
				fn := sel.Obj().(*types.Func)
				recv := fn.Type().(*types.Signature).Recv()
				if Dereference(recv.Type()) != T.Type() {
					// skip embedded methods
					continue
				}
				if firstFn == nil {
					firstFn = fn
				}
				if recv.Name() != "" && recv.Name() != "_" {
					names[recv.Name()]++
				}
			}
		}

		if len(names) > 1 {
			var seen []string
			for name, count := range names {
				seen = append(seen, fmt.Sprintf("%dx %q", count, name))
			}

			pass.Reportf(firstFn.Pos(), "methods on the same type should have the same receiver name (seen %s)", strings.Join(seen, ", "))
		}
	}
	return nil, nil
}
|
||||
|
||||
// CheckContextFirstArg reports functions that take a context.Context
// in any position other than the first parameter.
func CheckContextFirstArg(pass *analysis.Pass) (interface{}, error) {
	// TODO(dh): this check doesn't apply to test helpers. Example from the stdlib:
	// 	func helperCommandContext(t *testing.T, ctx context.Context, s ...string) (cmd *exec.Cmd) {
fnLoop:
	for _, fn := range pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).SrcFuncs {
		// Skip compiler-synthesized functions and closures.
		if fn.Synthetic != "" || fn.Parent() != nil {
			continue
		}
		params := fn.Signature.Params()
		if params.Len() < 2 {
			continue
		}
		if types.TypeString(params.At(0).Type(), nil) == "context.Context" {
			// Context already comes first; nothing to report.
			continue
		}
		for i := 1; i < params.Len(); i++ {
			param := params.At(i)
			if types.TypeString(param.Type(), nil) == "context.Context" {
				pass.Reportf(param.Pos(), "context.Context should be the first argument of a function")
				continue fnLoop
			}
		}
	}
	return nil, nil
}
|
||||
|
||||
// CheckErrorStrings implements ST1005: strings passed to errors.New
// and fmt.Errorf should not end in punctuation or a newline, and
// should not start with a capitalized word — unless that word looks
// like an initialism, a multi-word identifier, or the name of a
// function or type declared in the same package.
func CheckErrorStrings(pass *analysis.Pass) (interface{}, error) {
	// Names of package-level types and functions, used below to avoid
	// flagging error strings that start with an identifier.
	objNames := map[*ssa.Package]map[string]bool{}
	ssapkg := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).Pkg
	objNames[ssapkg] = map[string]bool{}
	for _, m := range ssapkg.Members {
		if typ, ok := m.(*ssa.Type); ok {
			objNames[ssapkg][typ.Name()] = true
		}
	}
	for _, fn := range pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).SrcFuncs {
		objNames[fn.Package()][fn.Name()] = true
	}

	for _, fn := range pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).SrcFuncs {
		if IsInTest(pass, fn) {
			// We don't care about malformed error messages in tests;
			// they're usually for direct human consumption, not part
			// of an API
			continue
		}
		for _, block := range fn.Blocks {
		instrLoop:
			for _, ins := range block.Instrs {
				call, ok := ins.(*ssa.Call)
				if !ok {
					continue
				}
				if !IsCallTo(call.Common(), "errors.New") && !IsCallTo(call.Common(), "fmt.Errorf") {
					continue
				}

				// Only constant message strings can be checked.
				k, ok := call.Common().Args[0].(*ssa.Const)
				if !ok {
					continue
				}

				s := constant.StringVal(k.Value)
				if len(s) == 0 {
					continue
				}
				switch s[len(s)-1] {
				case '.', ':', '!', '\n':
					pass.Reportf(call.Pos(), "error strings should not end with punctuation or a newline")
				}
				idx := strings.IndexByte(s, ' ')
				if idx == -1 {
					// single word error message, probably not a real
					// error but something used in tests or during
					// debugging
					continue
				}
				word := s[:idx]
				first, n := utf8.DecodeRuneInString(word)
				if !unicode.IsUpper(first) {
					continue
				}
				for _, c := range word[n:] {
					if unicode.IsUpper(c) {
						// Word is probably an initialism or
						// multi-word function name
						continue instrLoop
					}
				}

				word = strings.TrimRightFunc(word, func(r rune) bool { return unicode.IsPunct(r) })
				if objNames[fn.Package()][word] {
					// Word is probably the name of a function or type in this package
					continue
				}
				// First word in error starts with a capital
				// letter, and the word doesn't contain any other
				// capitals, making it unlikely to be an
				// initialism or multi-word function name.
				//
				// It could still be a proper noun, though.

				pass.Reportf(call.Pos(), "error strings should not be capitalized")
			}
		}
	}
	return nil, nil
}
|
||||
|
||||
// CheckTimeNames implements ST1011: variables and fields of type
// time.Duration (or *time.Duration) should not carry unit-specific
// name suffixes such as Sec, Msec or Milli.
func CheckTimeNames(pass *analysis.Pass) (interface{}, error) {
	suffixes := []string{
		"Sec", "Secs", "Seconds",
		"Msec", "Msecs",
		"Milli", "Millis", "Milliseconds",
		"Usec", "Usecs", "Microseconds",
		"MS", "Ms",
	}
	// fn reports every identifier of duration type whose name ends in
	// one of the suffixes above.
	fn := func(T types.Type, names []*ast.Ident) {
		if !IsType(T, "time.Duration") && !IsType(T, "*time.Duration") {
			return
		}
		for _, name := range names {
			for _, suffix := range suffixes {
				if strings.HasSuffix(name.Name, suffix) {
					ReportNodef(pass, name, "var %s is of type %v; don't use unit-specific suffix %q", name.Name, T, suffix)
					// Report at most once per identifier.
					break
				}
			}
		}
	}
	for _, f := range pass.Files {
		ast.Inspect(f, func(node ast.Node) bool {
			switch node := node.(type) {
			case *ast.ValueSpec:
				T := pass.TypesInfo.TypeOf(node.Type)
				fn(T, node.Names)
			case *ast.FieldList:
				// Covers struct fields as well as function parameters
				// and results.
				for _, field := range node.List {
					T := pass.TypesInfo.TypeOf(field.Type)
					fn(T, field.Names)
				}
			}
			return true
		})
	}
	return nil, nil
}
|
||||
|
||||
// CheckErrorVarNames implements ST1012: package-level error variables
// created via errors.New or fmt.Errorf should be named errFoo
// (unexported) or ErrFoo (exported).
func CheckErrorVarNames(pass *analysis.Pass) (interface{}, error) {
	for _, f := range pass.Files {
		for _, decl := range f.Decls {
			gen, ok := decl.(*ast.GenDecl)
			if !ok || gen.Tok != token.VAR {
				continue
			}
			for _, spec := range gen.Specs {
				spec := spec.(*ast.ValueSpec)
				// Only handle specs where names and values pair up 1:1.
				if len(spec.Names) != len(spec.Values) {
					continue
				}

				for i, name := range spec.Names {
					val := spec.Values[i]
					if !IsCallToAST(pass, val, "errors.New") && !IsCallToAST(pass, val, "fmt.Errorf") {
						continue
					}

					prefix := "err"
					if name.IsExported() {
						prefix = "Err"
					}
					if !strings.HasPrefix(name.Name, prefix) {
						ReportNodef(pass, name, "error var %s should have name of the form %sFoo", name.Name, prefix)
					}
				}
			}
		}
	}
	return nil, nil
}
|
||||
|
||||
// httpStatusCodes maps numeric HTTP status codes to the names of the
// corresponding net/http constants; used by CheckHTTPStatusCodes to
// suggest the symbolic constant for a magic number.
var httpStatusCodes = map[int]string{
	100: "StatusContinue",
	101: "StatusSwitchingProtocols",
	102: "StatusProcessing",
	200: "StatusOK",
	201: "StatusCreated",
	202: "StatusAccepted",
	203: "StatusNonAuthoritativeInfo",
	204: "StatusNoContent",
	205: "StatusResetContent",
	206: "StatusPartialContent",
	207: "StatusMultiStatus",
	208: "StatusAlreadyReported",
	226: "StatusIMUsed",
	300: "StatusMultipleChoices",
	301: "StatusMovedPermanently",
	302: "StatusFound",
	303: "StatusSeeOther",
	304: "StatusNotModified",
	305: "StatusUseProxy",
	307: "StatusTemporaryRedirect",
	308: "StatusPermanentRedirect",
	400: "StatusBadRequest",
	401: "StatusUnauthorized",
	402: "StatusPaymentRequired",
	403: "StatusForbidden",
	404: "StatusNotFound",
	405: "StatusMethodNotAllowed",
	406: "StatusNotAcceptable",
	407: "StatusProxyAuthRequired",
	408: "StatusRequestTimeout",
	409: "StatusConflict",
	410: "StatusGone",
	411: "StatusLengthRequired",
	412: "StatusPreconditionFailed",
	413: "StatusRequestEntityTooLarge",
	414: "StatusRequestURITooLong",
	415: "StatusUnsupportedMediaType",
	416: "StatusRequestedRangeNotSatisfiable",
	417: "StatusExpectationFailed",
	418: "StatusTeapot",
	422: "StatusUnprocessableEntity",
	423: "StatusLocked",
	424: "StatusFailedDependency",
	426: "StatusUpgradeRequired",
	428: "StatusPreconditionRequired",
	429: "StatusTooManyRequests",
	431: "StatusRequestHeaderFieldsTooLarge",
	451: "StatusUnavailableForLegalReasons",
	500: "StatusInternalServerError",
	501: "StatusNotImplemented",
	502: "StatusBadGateway",
	503: "StatusServiceUnavailable",
	504: "StatusGatewayTimeout",
	505: "StatusHTTPVersionNotSupported",
	506: "StatusVariantAlsoNegotiates",
	507: "StatusInsufficientStorage",
	508: "StatusLoopDetected",
	510: "StatusNotExtended",
	511: "StatusNetworkAuthenticationRequired",
}
|
||||
|
||||
// CheckHTTPStatusCodes implements ST1013: status-code arguments to
// selected net/http functions should use the http.StatusXxx constants
// instead of numeric literals, unless whitelisted via the
// http_status_code_whitelist config option.
func CheckHTTPStatusCodes(pass *analysis.Pass) (interface{}, error) {
	whitelist := map[string]bool{}
	for _, code := range config.For(pass).HTTPStatusCodeWhitelist {
		whitelist[code] = true
	}
	fn := func(node ast.Node) bool {
		if node == nil {
			return true
		}
		call, ok := node.(*ast.CallExpr)
		if !ok {
			return true
		}

		// arg is the index of the status-code argument of the call.
		var arg int
		switch CallNameAST(pass, call) {
		case "net/http.Error":
			arg = 2
		case "net/http.Redirect":
			arg = 3
		case "net/http.StatusText":
			arg = 0
		case "net/http.RedirectHandler":
			arg = 1
		default:
			return true
		}
		lit, ok := call.Args[arg].(*ast.BasicLit)
		if !ok {
			return true
		}
		// The whitelist is matched against the literal's source text.
		if whitelist[lit.Value] {
			return true
		}

		n, err := strconv.Atoi(lit.Value)
		if err != nil {
			return true
		}
		s, ok := httpStatusCodes[n]
		if !ok {
			return true
		}
		ReportNodefFG(pass, lit, "should use constant http.%s instead of numeric literal %d", s, n)
		return true
	}
	// OPT(dh): replace with inspector
	for _, f := range pass.Files {
		ast.Inspect(f, fn)
	}
	return nil, nil
}
|
||||
|
||||
func CheckDefaultCaseOrder(pass *analysis.Pass) (interface{}, error) {
|
||||
fn := func(node ast.Node) {
|
||||
stmt := node.(*ast.SwitchStmt)
|
||||
list := stmt.Body.List
|
||||
for i, c := range list {
|
||||
if c.(*ast.CaseClause).List == nil && i != 0 && i != len(list)-1 {
|
||||
ReportNodefFG(pass, c, "default case should be first or last in switch statement")
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.SwitchStmt)(nil)}, fn)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func CheckYodaConditions(pass *analysis.Pass) (interface{}, error) {
|
||||
fn := func(node ast.Node) {
|
||||
cond := node.(*ast.BinaryExpr)
|
||||
if cond.Op != token.EQL && cond.Op != token.NEQ {
|
||||
return
|
||||
}
|
||||
if _, ok := cond.X.(*ast.BasicLit); !ok {
|
||||
return
|
||||
}
|
||||
if _, ok := cond.Y.(*ast.BasicLit); ok {
|
||||
// Don't flag lit == lit conditions, just in case
|
||||
return
|
||||
}
|
||||
ReportNodefFG(pass, cond, "don't use Yoda conditions")
|
||||
}
|
||||
pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.BinaryExpr)(nil)}, fn)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func CheckInvisibleCharacters(pass *analysis.Pass) (interface{}, error) {
|
||||
fn := func(node ast.Node) {
|
||||
lit := node.(*ast.BasicLit)
|
||||
if lit.Kind != token.STRING {
|
||||
return
|
||||
}
|
||||
for _, r := range lit.Value {
|
||||
if unicode.Is(unicode.Cf, r) {
|
||||
ReportNodef(pass, lit, "string literal contains the Unicode format character %U, consider using the %q escape sequence", r, r)
|
||||
} else if unicode.Is(unicode.Cc, r) && r != '\n' && r != '\t' && r != '\r' {
|
||||
ReportNodef(pass, lit, "string literal contains the Unicode control character %U, consider using the %q escape sequence", r, r)
|
||||
}
|
||||
}
|
||||
}
|
||||
pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder([]ast.Node{(*ast.BasicLit)(nil)}, fn)
|
||||
return nil, nil
|
||||
}
|
264
vendor/honnef.co/go/tools/stylecheck/names.go
vendored
Normal file
264
vendor/honnef.co/go/tools/stylecheck/names.go
vendored
Normal file
@ -0,0 +1,264 @@
|
||||
// Copyright (c) 2013 The Go Authors. All rights reserved.
|
||||
// Copyright (c) 2018 Dominik Honnef. All rights reserved.
|
||||
|
||||
package stylecheck
|
||||
|
||||
import (
|
||||
"go/ast"
|
||||
"go/token"
|
||||
"strings"
|
||||
"unicode"
|
||||
|
||||
"golang.org/x/tools/go/analysis"
|
||||
"honnef.co/go/tools/config"
|
||||
. "honnef.co/go/tools/lint/lintdsl"
|
||||
)
|
||||
|
||||
// knownNameExceptions is a set of names that are known to be exempt from naming checks.
|
||||
// This is usually because they are constrained by having to match names in the
|
||||
// standard library.
|
||||
var knownNameExceptions = map[string]bool{
|
||||
"LastInsertId": true, // must match database/sql
|
||||
"kWh": true,
|
||||
}
|
||||
|
||||
func CheckNames(pass *analysis.Pass) (interface{}, error) {
|
||||
// A large part of this function is copied from
|
||||
// github.com/golang/lint, Copyright (c) 2013 The Go Authors,
|
||||
// licensed under the BSD 3-clause license.
|
||||
|
||||
allCaps := func(s string) bool {
|
||||
for _, r := range s {
|
||||
if !((r >= 'A' && r <= 'Z') || (r >= '0' && r <= '9') || r == '_') {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
check := func(id *ast.Ident, thing string, initialisms map[string]bool) {
|
||||
if id.Name == "_" {
|
||||
return
|
||||
}
|
||||
if knownNameExceptions[id.Name] {
|
||||
return
|
||||
}
|
||||
|
||||
// Handle two common styles from other languages that don't belong in Go.
|
||||
if len(id.Name) >= 5 && allCaps(id.Name) && strings.Contains(id.Name, "_") {
|
||||
ReportfFG(pass, id.Pos(), "should not use ALL_CAPS in Go names; use CamelCase instead")
|
||||
return
|
||||
}
|
||||
|
||||
should := lintName(id.Name, initialisms)
|
||||
if id.Name == should {
|
||||
return
|
||||
}
|
||||
|
||||
if len(id.Name) > 2 && strings.Contains(id.Name[1:len(id.Name)-1], "_") {
|
||||
ReportfFG(pass, id.Pos(), "should not use underscores in Go names; %s %s should be %s", thing, id.Name, should)
|
||||
return
|
||||
}
|
||||
ReportfFG(pass, id.Pos(), "%s %s should be %s", thing, id.Name, should)
|
||||
}
|
||||
checkList := func(fl *ast.FieldList, thing string, initialisms map[string]bool) {
|
||||
if fl == nil {
|
||||
return
|
||||
}
|
||||
for _, f := range fl.List {
|
||||
for _, id := range f.Names {
|
||||
check(id, thing, initialisms)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
il := config.For(pass).Initialisms
|
||||
initialisms := make(map[string]bool, len(il))
|
||||
for _, word := range il {
|
||||
initialisms[word] = true
|
||||
}
|
||||
for _, f := range pass.Files {
|
||||
// Package names need slightly different handling than other names.
|
||||
if !strings.HasSuffix(f.Name.Name, "_test") && strings.Contains(f.Name.Name, "_") {
|
||||
ReportfFG(pass, f.Pos(), "should not use underscores in package names")
|
||||
}
|
||||
if strings.IndexFunc(f.Name.Name, unicode.IsUpper) != -1 {
|
||||
ReportfFG(pass, f.Pos(), "should not use MixedCaps in package name; %s should be %s", f.Name.Name, strings.ToLower(f.Name.Name))
|
||||
}
|
||||
|
||||
ast.Inspect(f, func(node ast.Node) bool {
|
||||
switch v := node.(type) {
|
||||
case *ast.AssignStmt:
|
||||
if v.Tok != token.DEFINE {
|
||||
return true
|
||||
}
|
||||
for _, exp := range v.Lhs {
|
||||
if id, ok := exp.(*ast.Ident); ok {
|
||||
check(id, "var", initialisms)
|
||||
}
|
||||
}
|
||||
case *ast.FuncDecl:
|
||||
// Functions with no body are defined elsewhere (in
|
||||
// assembly, or via go:linkname). These are likely to
|
||||
// be something very low level (such as the runtime),
|
||||
// where our rules don't apply.
|
||||
if v.Body == nil {
|
||||
return true
|
||||
}
|
||||
|
||||
if IsInTest(pass, v) && (strings.HasPrefix(v.Name.Name, "Example") || strings.HasPrefix(v.Name.Name, "Test") || strings.HasPrefix(v.Name.Name, "Benchmark")) {
|
||||
return true
|
||||
}
|
||||
|
||||
thing := "func"
|
||||
if v.Recv != nil {
|
||||
thing = "method"
|
||||
}
|
||||
|
||||
if !isTechnicallyExported(v) {
|
||||
check(v.Name, thing, initialisms)
|
||||
}
|
||||
|
||||
checkList(v.Type.Params, thing+" parameter", initialisms)
|
||||
checkList(v.Type.Results, thing+" result", initialisms)
|
||||
case *ast.GenDecl:
|
||||
if v.Tok == token.IMPORT {
|
||||
return true
|
||||
}
|
||||
var thing string
|
||||
switch v.Tok {
|
||||
case token.CONST:
|
||||
thing = "const"
|
||||
case token.TYPE:
|
||||
thing = "type"
|
||||
case token.VAR:
|
||||
thing = "var"
|
||||
}
|
||||
for _, spec := range v.Specs {
|
||||
switch s := spec.(type) {
|
||||
case *ast.TypeSpec:
|
||||
check(s.Name, thing, initialisms)
|
||||
case *ast.ValueSpec:
|
||||
for _, id := range s.Names {
|
||||
check(id, thing, initialisms)
|
||||
}
|
||||
}
|
||||
}
|
||||
case *ast.InterfaceType:
|
||||
// Do not check interface method names.
|
||||
// They are often constrainted by the method names of concrete types.
|
||||
for _, x := range v.Methods.List {
|
||||
ft, ok := x.Type.(*ast.FuncType)
|
||||
if !ok { // might be an embedded interface name
|
||||
continue
|
||||
}
|
||||
checkList(ft.Params, "interface method parameter", initialisms)
|
||||
checkList(ft.Results, "interface method result", initialisms)
|
||||
}
|
||||
case *ast.RangeStmt:
|
||||
if v.Tok == token.ASSIGN {
|
||||
return true
|
||||
}
|
||||
if id, ok := v.Key.(*ast.Ident); ok {
|
||||
check(id, "range var", initialisms)
|
||||
}
|
||||
if id, ok := v.Value.(*ast.Ident); ok {
|
||||
check(id, "range var", initialisms)
|
||||
}
|
||||
case *ast.StructType:
|
||||
for _, f := range v.Fields.List {
|
||||
for _, id := range f.Names {
|
||||
check(id, "struct field", initialisms)
|
||||
}
|
||||
}
|
||||
}
|
||||
return true
|
||||
})
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// lintName returns a different name if it should be different.
|
||||
func lintName(name string, initialisms map[string]bool) (should string) {
|
||||
// A large part of this function is copied from
|
||||
// github.com/golang/lint, Copyright (c) 2013 The Go Authors,
|
||||
// licensed under the BSD 3-clause license.
|
||||
|
||||
// Fast path for simple cases: "_" and all lowercase.
|
||||
if name == "_" {
|
||||
return name
|
||||
}
|
||||
if strings.IndexFunc(name, func(r rune) bool { return !unicode.IsLower(r) }) == -1 {
|
||||
return name
|
||||
}
|
||||
|
||||
// Split camelCase at any lower->upper transition, and split on underscores.
|
||||
// Check each word for common initialisms.
|
||||
runes := []rune(name)
|
||||
w, i := 0, 0 // index of start of word, scan
|
||||
for i+1 <= len(runes) {
|
||||
eow := false // whether we hit the end of a word
|
||||
if i+1 == len(runes) {
|
||||
eow = true
|
||||
} else if runes[i+1] == '_' && i+1 != len(runes)-1 {
|
||||
// underscore; shift the remainder forward over any run of underscores
|
||||
eow = true
|
||||
n := 1
|
||||
for i+n+1 < len(runes) && runes[i+n+1] == '_' {
|
||||
n++
|
||||
}
|
||||
|
||||
// Leave at most one underscore if the underscore is between two digits
|
||||
if i+n+1 < len(runes) && unicode.IsDigit(runes[i]) && unicode.IsDigit(runes[i+n+1]) {
|
||||
n--
|
||||
}
|
||||
|
||||
copy(runes[i+1:], runes[i+n+1:])
|
||||
runes = runes[:len(runes)-n]
|
||||
} else if unicode.IsLower(runes[i]) && !unicode.IsLower(runes[i+1]) {
|
||||
// lower->non-lower
|
||||
eow = true
|
||||
}
|
||||
i++
|
||||
if !eow {
|
||||
continue
|
||||
}
|
||||
|
||||
// [w,i) is a word.
|
||||
word := string(runes[w:i])
|
||||
if u := strings.ToUpper(word); initialisms[u] {
|
||||
// Keep consistent case, which is lowercase only at the start.
|
||||
if w == 0 && unicode.IsLower(runes[w]) {
|
||||
u = strings.ToLower(u)
|
||||
}
|
||||
// All the common initialisms are ASCII,
|
||||
// so we can replace the bytes exactly.
|
||||
// TODO(dh): this won't be true once we allow custom initialisms
|
||||
copy(runes[w:], []rune(u))
|
||||
} else if w > 0 && strings.ToLower(word) == word {
|
||||
// already all lowercase, and not the first word, so uppercase the first character.
|
||||
runes[w] = unicode.ToUpper(runes[w])
|
||||
}
|
||||
w = i
|
||||
}
|
||||
return string(runes)
|
||||
}
|
||||
|
||||
func isTechnicallyExported(f *ast.FuncDecl) bool {
|
||||
if f.Recv != nil || f.Doc == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
const export = "//export "
|
||||
const linkname = "//go:linkname "
|
||||
for _, c := range f.Doc.List {
|
||||
if strings.HasPrefix(c.Text, export) && len(c.Text) == len(export)+len(f.Name.Name) && c.Text[len(export):] == f.Name.Name {
|
||||
return true
|
||||
}
|
||||
|
||||
if strings.HasPrefix(c.Text, linkname) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
54
vendor/honnef.co/go/tools/unused/edge.go
vendored
Normal file
54
vendor/honnef.co/go/tools/unused/edge.go
vendored
Normal file
@ -0,0 +1,54 @@
|
||||
package unused
|
||||
|
||||
//go:generate stringer -type edgeKind
|
||||
type edgeKind uint64
|
||||
|
||||
func (e edgeKind) is(o edgeKind) bool {
|
||||
return e&o != 0
|
||||
}
|
||||
|
||||
const (
|
||||
edgeAlias edgeKind = 1 << iota
|
||||
edgeBlankField
|
||||
edgeAnonymousStruct
|
||||
edgeCgoExported
|
||||
edgeConstGroup
|
||||
edgeElementType
|
||||
edgeEmbeddedInterface
|
||||
edgeExportedConstant
|
||||
edgeExportedField
|
||||
edgeExportedFunction
|
||||
edgeExportedMethod
|
||||
edgeExportedType
|
||||
edgeExportedVariable
|
||||
edgeExtendsExportedFields
|
||||
edgeExtendsExportedMethodSet
|
||||
edgeFieldAccess
|
||||
edgeFunctionArgument
|
||||
edgeFunctionResult
|
||||
edgeFunctionSignature
|
||||
edgeImplements
|
||||
edgeInstructionOperand
|
||||
edgeInterfaceCall
|
||||
edgeInterfaceMethod
|
||||
edgeKeyType
|
||||
edgeLinkname
|
||||
edgeMainFunction
|
||||
edgeNamedType
|
||||
edgeNetRPCRegister
|
||||
edgeNoCopySentinel
|
||||
edgeProvidesMethod
|
||||
edgeReceiver
|
||||
edgeRuntimeFunction
|
||||
edgeSignature
|
||||
edgeStructConversion
|
||||
edgeTestSink
|
||||
edgeTupleElement
|
||||
edgeType
|
||||
edgeTypeName
|
||||
edgeUnderlyingType
|
||||
edgePointerType
|
||||
edgeUnsafeConversion
|
||||
edgeUsedConstant
|
||||
edgeVarDecl
|
||||
)
|
109
vendor/honnef.co/go/tools/unused/edgekind_string.go
vendored
Normal file
109
vendor/honnef.co/go/tools/unused/edgekind_string.go
vendored
Normal file
@ -0,0 +1,109 @@
|
||||
// Code generated by "stringer -type edgeKind"; DO NOT EDIT.
|
||||
|
||||
package unused
|
||||
|
||||
import "strconv"
|
||||
|
||||
func _() {
|
||||
// An "invalid array index" compiler error signifies that the constant values have changed.
|
||||
// Re-run the stringer command to generate them again.
|
||||
var x [1]struct{}
|
||||
_ = x[edgeAlias-1]
|
||||
_ = x[edgeBlankField-2]
|
||||
_ = x[edgeAnonymousStruct-4]
|
||||
_ = x[edgeCgoExported-8]
|
||||
_ = x[edgeConstGroup-16]
|
||||
_ = x[edgeElementType-32]
|
||||
_ = x[edgeEmbeddedInterface-64]
|
||||
_ = x[edgeExportedConstant-128]
|
||||
_ = x[edgeExportedField-256]
|
||||
_ = x[edgeExportedFunction-512]
|
||||
_ = x[edgeExportedMethod-1024]
|
||||
_ = x[edgeExportedType-2048]
|
||||
_ = x[edgeExportedVariable-4096]
|
||||
_ = x[edgeExtendsExportedFields-8192]
|
||||
_ = x[edgeExtendsExportedMethodSet-16384]
|
||||
_ = x[edgeFieldAccess-32768]
|
||||
_ = x[edgeFunctionArgument-65536]
|
||||
_ = x[edgeFunctionResult-131072]
|
||||
_ = x[edgeFunctionSignature-262144]
|
||||
_ = x[edgeImplements-524288]
|
||||
_ = x[edgeInstructionOperand-1048576]
|
||||
_ = x[edgeInterfaceCall-2097152]
|
||||
_ = x[edgeInterfaceMethod-4194304]
|
||||
_ = x[edgeKeyType-8388608]
|
||||
_ = x[edgeLinkname-16777216]
|
||||
_ = x[edgeMainFunction-33554432]
|
||||
_ = x[edgeNamedType-67108864]
|
||||
_ = x[edgeNetRPCRegister-134217728]
|
||||
_ = x[edgeNoCopySentinel-268435456]
|
||||
_ = x[edgeProvidesMethod-536870912]
|
||||
_ = x[edgeReceiver-1073741824]
|
||||
_ = x[edgeRuntimeFunction-2147483648]
|
||||
_ = x[edgeSignature-4294967296]
|
||||
_ = x[edgeStructConversion-8589934592]
|
||||
_ = x[edgeTestSink-17179869184]
|
||||
_ = x[edgeTupleElement-34359738368]
|
||||
_ = x[edgeType-68719476736]
|
||||
_ = x[edgeTypeName-137438953472]
|
||||
_ = x[edgeUnderlyingType-274877906944]
|
||||
_ = x[edgePointerType-549755813888]
|
||||
_ = x[edgeUnsafeConversion-1099511627776]
|
||||
_ = x[edgeUsedConstant-2199023255552]
|
||||
_ = x[edgeVarDecl-4398046511104]
|
||||
}
|
||||
|
||||
const _edgeKind_name = "edgeAliasedgeBlankFieldedgeAnonymousStructedgeCgoExportededgeConstGroupedgeElementTypeedgeEmbeddedInterfaceedgeExportedConstantedgeExportedFieldedgeExportedFunctionedgeExportedMethodedgeExportedTypeedgeExportedVariableedgeExtendsExportedFieldsedgeExtendsExportedMethodSetedgeFieldAccessedgeFunctionArgumentedgeFunctionResultedgeFunctionSignatureedgeImplementsedgeInstructionOperandedgeInterfaceCalledgeInterfaceMethodedgeKeyTypeedgeLinknameedgeMainFunctionedgeNamedTypeedgeNetRPCRegisteredgeNoCopySentineledgeProvidesMethodedgeReceiveredgeRuntimeFunctionedgeSignatureedgeStructConversionedgeTestSinkedgeTupleElementedgeTypeedgeTypeNameedgeUnderlyingTypeedgePointerTypeedgeUnsafeConversionedgeUsedConstantedgeVarDecl"
|
||||
|
||||
var _edgeKind_map = map[edgeKind]string{
|
||||
1: _edgeKind_name[0:9],
|
||||
2: _edgeKind_name[9:23],
|
||||
4: _edgeKind_name[23:42],
|
||||
8: _edgeKind_name[42:57],
|
||||
16: _edgeKind_name[57:71],
|
||||
32: _edgeKind_name[71:86],
|
||||
64: _edgeKind_name[86:107],
|
||||
128: _edgeKind_name[107:127],
|
||||
256: _edgeKind_name[127:144],
|
||||
512: _edgeKind_name[144:164],
|
||||
1024: _edgeKind_name[164:182],
|
||||
2048: _edgeKind_name[182:198],
|
||||
4096: _edgeKind_name[198:218],
|
||||
8192: _edgeKind_name[218:243],
|
||||
16384: _edgeKind_name[243:271],
|
||||
32768: _edgeKind_name[271:286],
|
||||
65536: _edgeKind_name[286:306],
|
||||
131072: _edgeKind_name[306:324],
|
||||
262144: _edgeKind_name[324:345],
|
||||
524288: _edgeKind_name[345:359],
|
||||
1048576: _edgeKind_name[359:381],
|
||||
2097152: _edgeKind_name[381:398],
|
||||
4194304: _edgeKind_name[398:417],
|
||||
8388608: _edgeKind_name[417:428],
|
||||
16777216: _edgeKind_name[428:440],
|
||||
33554432: _edgeKind_name[440:456],
|
||||
67108864: _edgeKind_name[456:469],
|
||||
134217728: _edgeKind_name[469:487],
|
||||
268435456: _edgeKind_name[487:505],
|
||||
536870912: _edgeKind_name[505:523],
|
||||
1073741824: _edgeKind_name[523:535],
|
||||
2147483648: _edgeKind_name[535:554],
|
||||
4294967296: _edgeKind_name[554:567],
|
||||
8589934592: _edgeKind_name[567:587],
|
||||
17179869184: _edgeKind_name[587:599],
|
||||
34359738368: _edgeKind_name[599:615],
|
||||
68719476736: _edgeKind_name[615:623],
|
||||
137438953472: _edgeKind_name[623:635],
|
||||
274877906944: _edgeKind_name[635:653],
|
||||
549755813888: _edgeKind_name[653:668],
|
||||
1099511627776: _edgeKind_name[668:688],
|
||||
2199023255552: _edgeKind_name[688:704],
|
||||
4398046511104: _edgeKind_name[704:715],
|
||||
}
|
||||
|
||||
func (i edgeKind) String() string {
|
||||
if str, ok := _edgeKind_map[i]; ok {
|
||||
return str
|
||||
}
|
||||
return "edgeKind(" + strconv.FormatInt(int64(i), 10) + ")"
|
||||
}
|
82
vendor/honnef.co/go/tools/unused/implements.go
vendored
Normal file
82
vendor/honnef.co/go/tools/unused/implements.go
vendored
Normal file
@ -0,0 +1,82 @@
|
||||
package unused
|
||||
|
||||
import "go/types"
|
||||
|
||||
// lookupMethod returns the index of and method with matching package and name, or (-1, nil).
|
||||
func lookupMethod(T *types.Interface, pkg *types.Package, name string) (int, *types.Func) {
|
||||
if name != "_" {
|
||||
for i := 0; i < T.NumMethods(); i++ {
|
||||
m := T.Method(i)
|
||||
if sameId(m, pkg, name) {
|
||||
return i, m
|
||||
}
|
||||
}
|
||||
}
|
||||
return -1, nil
|
||||
}
|
||||
|
||||
func sameId(obj types.Object, pkg *types.Package, name string) bool {
|
||||
// spec:
|
||||
// "Two identifiers are different if they are spelled differently,
|
||||
// or if they appear in different packages and are not exported.
|
||||
// Otherwise, they are the same."
|
||||
if name != obj.Name() {
|
||||
return false
|
||||
}
|
||||
// obj.Name == name
|
||||
if obj.Exported() {
|
||||
return true
|
||||
}
|
||||
// not exported, so packages must be the same (pkg == nil for
|
||||
// fields in Universe scope; this can only happen for types
|
||||
// introduced via Eval)
|
||||
if pkg == nil || obj.Pkg() == nil {
|
||||
return pkg == obj.Pkg()
|
||||
}
|
||||
// pkg != nil && obj.pkg != nil
|
||||
return pkg.Path() == obj.Pkg().Path()
|
||||
}
|
||||
|
||||
func (g *Graph) implements(V types.Type, T *types.Interface, msV *types.MethodSet) ([]*types.Selection, bool) {
|
||||
// fast path for common case
|
||||
if T.Empty() {
|
||||
return nil, true
|
||||
}
|
||||
|
||||
if ityp, _ := V.Underlying().(*types.Interface); ityp != nil {
|
||||
// TODO(dh): is this code reachable?
|
||||
for i := 0; i < T.NumMethods(); i++ {
|
||||
m := T.Method(i)
|
||||
_, obj := lookupMethod(ityp, m.Pkg(), m.Name())
|
||||
switch {
|
||||
case obj == nil:
|
||||
return nil, false
|
||||
case !types.Identical(obj.Type(), m.Type()):
|
||||
return nil, false
|
||||
}
|
||||
}
|
||||
return nil, true
|
||||
}
|
||||
|
||||
// A concrete type implements T if it implements all methods of T.
|
||||
var sels []*types.Selection
|
||||
for i := 0; i < T.NumMethods(); i++ {
|
||||
m := T.Method(i)
|
||||
sel := msV.Lookup(m.Pkg(), m.Name())
|
||||
if sel == nil {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
f, _ := sel.Obj().(*types.Func)
|
||||
if f == nil {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
if !types.Identical(f.Type(), m.Type()) {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
sels = append(sels, sel)
|
||||
}
|
||||
return sels, true
|
||||
}
|
1964
vendor/honnef.co/go/tools/unused/unused.go
vendored
Normal file
1964
vendor/honnef.co/go/tools/unused/unused.go
vendored
Normal file
File diff suppressed because it is too large
Load Diff
46
vendor/honnef.co/go/tools/version/buildinfo.go
vendored
Normal file
46
vendor/honnef.co/go/tools/version/buildinfo.go
vendored
Normal file
@ -0,0 +1,46 @@
|
||||
// +build go1.12
|
||||
|
||||
package version
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"runtime/debug"
|
||||
)
|
||||
|
||||
func printBuildInfo() {
|
||||
if info, ok := debug.ReadBuildInfo(); ok {
|
||||
fmt.Println("Main module:")
|
||||
printModule(&info.Main)
|
||||
fmt.Println("Dependencies:")
|
||||
for _, dep := range info.Deps {
|
||||
printModule(dep)
|
||||
}
|
||||
} else {
|
||||
fmt.Println("Built without Go modules")
|
||||
}
|
||||
}
|
||||
|
||||
func buildInfoVersion() (string, bool) {
|
||||
info, ok := debug.ReadBuildInfo()
|
||||
if !ok {
|
||||
return "", false
|
||||
}
|
||||
if info.Main.Version == "(devel)" {
|
||||
return "", false
|
||||
}
|
||||
return info.Main.Version, true
|
||||
}
|
||||
|
||||
func printModule(m *debug.Module) {
|
||||
fmt.Printf("\t%s", m.Path)
|
||||
if m.Version != "(devel)" {
|
||||
fmt.Printf("@%s", m.Version)
|
||||
}
|
||||
if m.Sum != "" {
|
||||
fmt.Printf(" (sum: %s)", m.Sum)
|
||||
}
|
||||
if m.Replace != nil {
|
||||
fmt.Printf(" (replace: %s)", m.Replace.Path)
|
||||
}
|
||||
fmt.Println()
|
||||
}
|
6
vendor/honnef.co/go/tools/version/buildinfo111.go
vendored
Normal file
6
vendor/honnef.co/go/tools/version/buildinfo111.go
vendored
Normal file
@ -0,0 +1,6 @@
|
||||
// +build !go1.12
|
||||
|
||||
package version
|
||||
|
||||
func printBuildInfo() {}
|
||||
func buildInfoVersion() (string, bool) { return "", false }
|
42
vendor/honnef.co/go/tools/version/version.go
vendored
Normal file
42
vendor/honnef.co/go/tools/version/version.go
vendored
Normal file
@ -0,0 +1,42 @@
|
||||
package version
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
const Version = "2019.2.3"
|
||||
|
||||
// version returns a version descriptor and reports whether the
|
||||
// version is a known release.
|
||||
func version() (string, bool) {
|
||||
if Version != "devel" {
|
||||
return Version, true
|
||||
}
|
||||
v, ok := buildInfoVersion()
|
||||
if ok {
|
||||
return v, false
|
||||
}
|
||||
return "devel", false
|
||||
}
|
||||
|
||||
func Print() {
|
||||
v, release := version()
|
||||
|
||||
if release {
|
||||
fmt.Printf("%s %s\n", filepath.Base(os.Args[0]), v)
|
||||
} else if v == "devel" {
|
||||
fmt.Printf("%s (no version)\n", filepath.Base(os.Args[0]))
|
||||
} else {
|
||||
fmt.Printf("%s (devel, %s)\n", filepath.Base(os.Args[0]), v)
|
||||
}
|
||||
}
|
||||
|
||||
func Verbose() {
|
||||
Print()
|
||||
fmt.Println()
|
||||
fmt.Println("Compiled with Go version:", runtime.Version())
|
||||
printBuildInfo()
|
||||
}
|
Reference in New Issue
Block a user