Enable all static-checks in golangci-lint

* Enable all static-checks in golangci-lint
* Update golangci-lint version
* Fix issues found by golangci-lint

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
This commit is contained in:
Madhu Rajanna 2019-06-10 12:18:41 +05:30
parent 47d4e44af0
commit a38986fce0
21 changed files with 85 additions and 87 deletions

View File

@ -21,7 +21,7 @@ go: 1.11.x
env:
global:
- GOLANGCI_VERSION="v1.16.0"
- GOLANGCI_VERSION="v1.17.0"
- TEST_COVERAGE=stdout
- GO_METALINTER_THREADS=1
- GO_COVER_DIR=_output

View File

@ -63,7 +63,7 @@ func init() {
}
func getType() string {
if vtype == nil || len(*vtype) == 0 {
if vtype == nil || *vtype == "" {
a0 := path.Base(os.Args[0])
if strings.Contains(a0, rbdType) {
return rbdType
@ -78,7 +78,7 @@ func getType() string {
func getDriverName() string {
// was explicitly passed a driver name
if driverName != nil && len(*driverName) != 0 {
if driverName != nil && *driverName != "" {
return *driverName
}
// select driver name based on volume type
@ -96,7 +96,7 @@ func main() {
var cp util.CachePersister
driverType := getType()
if len(driverType) == 0 {
if driverType == "" {
klog.Fatalln("driver type not specified")
}
@ -108,12 +108,12 @@ func main() {
klog.Infof("Starting driver type: %v with name: %v", driverType, dname)
switch driverType {
case rbdType:
rbd.PluginFolder = rbd.PluginFolder + dname
rbd.PluginFolder += dname
driver := rbd.NewDriver()
driver.Run(dname, *nodeID, *endpoint, *instanceID, *containerized)
case cephfsType:
cephfs.PluginFolder = cephfs.PluginFolder + dname
cephfs.PluginFolder += dname
if *metadataStorage != "" {
cp, err = util.CreatePersistanceStorage(
cephfs.PluginFolder, *metadataStorage, dname)

View File

@ -20,17 +20,17 @@ var (
)
func deployCephfsPlugin() {
//deploy provisioner
// deploy provisioner
framework.RunKubectlOrDie("create", "-f", cephfsDirPath+cephfsProvisioner)
framework.RunKubectlOrDie("apply", "-f", cephfsDirPath+cephfsProvisionerRBAC)
//deploy nodeplugin
// deploy nodeplugin
framework.RunKubectlOrDie("create", "-f", cephfsDirPath+cephfsNodePlugin)
framework.RunKubectlOrDie("apply", "-f", cephfsDirPath+cephfsNodePluginRBAC)
}
var _ = Describe("cephfs", func() {
f := framework.NewDefaultFramework("cephfs")
//deploy cephfs CSI
// deploy cephfs CSI
BeforeEach(func() {
createFileSystem(f.ClientSet)
createConfigMap(f.ClientSet, f)

View File

@ -36,7 +36,7 @@ func deployCommon() {
func createFileSystem(c kubernetes.Interface) {
commonPath := fmt.Sprintf("%s/%s", rookURL, "filesystem-test.yaml")
framework.RunKubectlOrDie("create", "-f", commonPath)
opt := metav1.ListOptions{
opt := &metav1.ListOptions{
LabelSelector: "app=rook-ceph-mds",
}
err := checkCephPods(rookNS, c, 1, deployTimeout, opt)
@ -72,7 +72,7 @@ func deployOperator(c kubernetes.Interface) {
func deployCluster(c kubernetes.Interface) {
opPath := fmt.Sprintf("%s/%s", rookURL, "cluster-test.yaml")
framework.RunKubectlOrDie("create", "-f", opPath)
opt := metav1.ListOptions{
opt := &metav1.ListOptions{
LabelSelector: "app=rook-ceph-mon",
}
err := checkCephPods(rookNS, c, 1, deployTimeout, opt)
@ -82,7 +82,7 @@ func deployCluster(c kubernetes.Interface) {
func deployToolBox(c kubernetes.Interface) {
opPath := fmt.Sprintf("%s/%s", rookURL, "toolbox.yaml")
framework.RunKubectlOrDie("create", "-f", opPath)
opt := metav1.ListOptions{
opt := &metav1.ListOptions{
LabelSelector: "app=rook-ceph-tools",
}
@ -106,7 +106,7 @@ func tearDownRook() {
framework.Cleanup(opPath, rookNS, "app=rook-ceph-tools")
opPath = fmt.Sprintf("%s/%s", rookURL, "operator.yaml")
//TODO need to add selector for cleanup validation
// TODO need to add selector for cleanup validation
framework.Cleanup(opPath, rookNS)
commonPath := fmt.Sprintf("%s/%s", rookURL, "common.yaml")
framework.RunKubectlOrDie("delete", "-f", commonPath)

View File

@ -32,14 +32,14 @@ func init() {
fmt.Println("timeout for deploytimeout ", deployTimeout)
}
//BeforeSuite deploys the rook-operator and ceph cluster
// BeforeSuite deploys the rook-operator and ceph cluster
var _ = BeforeSuite(func() {
if rookRequired {
deployRook()
}
})
//AfterSuite removes the rook-operator and ceph cluster
// AfterSuite removes the rook-operator and ceph cluster
var _ = AfterSuite(func() {
if rookRequired {
tearDownRook()

View File

@ -24,14 +24,14 @@ func deployRBDPlugin() {
// deploy provisioner
framework.RunKubectlOrDie("create", "-f", rbdDirPath+rbdProvisioner)
framework.RunKubectlOrDie("apply", "-f", rbdDirPath+rbdProvisionerRBAC)
//deploy nodeplugin
// deploy nodeplugin
framework.RunKubectlOrDie("create", "-f", rbdDirPath+rbdNodePlugin)
framework.RunKubectlOrDie("apply", "-f", rbdDirPath+rbdNodePluginRBAC)
}
var _ = Describe("RBD", func() {
f := framework.NewDefaultFramework("rbd")
//deploy RBD CSI
// deploy RBD CSI
BeforeEach(func() {
createRBDPool()
createConfigMap(f.ClientSet, f)

View File

@ -80,7 +80,7 @@ func waitForDeploymentComplete(name, ns string, c clientset.Interface, t int) er
return false, err
}
//TODO need to check rolling update
// TODO need to check rolling update
// When the deployment status and its underlying resources reach the
// desired state, we're done
@ -137,8 +137,8 @@ func getMons(ns string, c kubernetes.Interface) []string {
svcList, err := c.CoreV1().Services(ns).List(opt)
Expect(err).Should(BeNil())
services := make([]string, 0)
for _, svc := range svcList.Items {
s := fmt.Sprintf("%s.%s.svc.cluster.local:%d", svc.Name, svc.Namespace, svc.Spec.Ports[0].Port)
for i := range svcList.Items {
s := fmt.Sprintf("%s.%s.svc.cluster.local:%d", svcList.Items[i].Name, svcList.Items[i].Namespace, svcList.Items[i].Spec.Ports[0].Port)
services = append(services, s)
}
return services
@ -160,7 +160,7 @@ func createCephfsStorageClass(c kubernetes.Interface, f *framework.Framework) {
sc.Parameters["pool"] = "myfs-data0"
sc.Parameters["fsName"] = "myfs"
fsID := execCommandInToolBox(f, "ceph fsid")
//remove new line present in fsID
// remove new line present in fsID
fsID = strings.Trim(fsID, "\n")
sc.Parameters["clusterID"] = fsID
@ -175,7 +175,7 @@ func createRBDStorageClass(c kubernetes.Interface, f *framework.Framework) {
sc.Parameters["pool"] = "replicapool"
fsID := execCommandInToolBox(f, "ceph fsid")
//remove new line present in fsID
// remove new line present in fsID
fsID = strings.Trim(fsID, "\n")
sc.Parameters["clusterID"] = fsID
@ -190,9 +190,9 @@ func createConfigMap(c kubernetes.Interface, f *framework.Framework) {
Expect(err).Should(BeNil())
fsID := execCommandInToolBox(f, "ceph fsid")
//remove new line present in fsID
// remove new line present in fsID
fsID = strings.Trim(fsID, "\n")
//get mon list
// get mon list
mons := getMons(rookNS, c)
conmap := []struct {
Clusterid string `json:"clusterID"`
@ -213,7 +213,7 @@ func createConfigMap(c kubernetes.Interface, f *framework.Framework) {
func getSecret(path string) v1.Secret {
sc := v1.Secret{}
err := unmarshal(path, &sc)
//discard corruptInputError
// discard corruptInputError
if err != nil {
if _, ok := err.(base64.CorruptInputError); !ok {
Expect(err).Should(BeNil())
@ -367,11 +367,11 @@ func createApp(c kubernetes.Interface, app *v1.Pod, timeout int) error {
return waitForPodInRunningState(app.Name, app.Namespace, c, timeout)
}
func getPodName(ns string, c kubernetes.Interface, opt metav1.ListOptions) string {
func getPodName(ns string, c kubernetes.Interface, opt *metav1.ListOptions) string {
ticker := time.NewTicker(1 * time.Second)
//TODO add stop logic
// TODO add stop logic
for range ticker.C {
podList, err := c.CoreV1().Pods(ns).List(opt)
podList, err := c.CoreV1().Pods(ns).List(*opt)
framework.ExpectNoError(err)
Expect(podList.Items).NotTo(BeNil())
Expect(err).Should(BeNil())
@ -439,12 +439,12 @@ func unmarshal(fileName string, obj interface{}) error {
return err
}
func checkCephPods(ns string, c kubernetes.Interface, count int, t int, opt metav1.ListOptions) error {
func checkCephPods(ns string, c kubernetes.Interface, count, t int, opt *metav1.ListOptions) error {
timeout := time.Duration(t) * time.Minute
start := time.Now()
return wait.PollImmediate(poll, timeout, func() (bool, error) {
podList, err := c.CoreV1().Pods(ns).List(opt)
podList, err := c.CoreV1().Pods(ns).List(*opt)
if err != nil {
return false, err
}

View File

@ -139,7 +139,7 @@ func (fs *Driver) Run(driverName, nodeID, endpoint, volumeMounter, mountCacheDir
if mountCacheDir != "" {
if err := remountCachedVolumes(); err != nil {
klog.Warningf("failed to remount cached volumes: %v", err)
//ignore remount fail
// ignore remount fail
}
}
// Initialize default library driver

View File

@ -34,7 +34,7 @@ var (
volumeMountCacheMtx sync.Mutex
)
func initVolumeMountCache(driverName string, mountCacheDir string) {
func initVolumeMountCache(driverName, mountCacheDir string) {
volumeMountCache.volumes = make(map[string]volumeMountCacheEntry)
volumeMountCache.nodeCacheStore.BasePath = mountCacheDir
@ -159,7 +159,7 @@ func cleanupMountPoint(mountPoint string) error {
err := execCommandErr("umount", mountPoint)
if err != nil {
klog.Infof("mount-cache: failed to umount %s %v", mountPoint, err)
//ignore error return err
// ignore error return err
}
}
}
@ -201,7 +201,7 @@ func genVolumeMountCacheFileName(volID string) string {
return cachePath
}
func (mc *volumeMountCacheMap) isEnable() bool {
//if mount cache dir unset, disable state
// if mount cache dir unset, disable state
return mc.nodeCacheStore.BasePath != ""
}
@ -246,7 +246,7 @@ func (mc *volumeMountCacheMap) nodeUnStageVolume(volID string) error {
return mc.nodeCacheStore.Delete(genVolumeMountCacheFileName(volID))
}
func (mc *volumeMountCacheMap) nodePublishVolume(volID string, targetPath string, readOnly bool) error {
func (mc *volumeMountCacheMap) nodePublishVolume(volID, targetPath string, readOnly bool) error {
if !mc.isEnable() {
return nil
}
@ -261,7 +261,7 @@ func (mc *volumeMountCacheMap) nodePublishVolume(volID string, targetPath string
return mc.updateNodeCache(volID)
}
func (mc *volumeMountCacheMap) nodeUnPublishVolume(volID string, targetPath string) error {
func (mc *volumeMountCacheMap) nodeUnPublishVolume(volID, targetPath string) error {
if !mc.isEnable() {
return nil
}

View File

@ -178,7 +178,7 @@ func mountKernel(mountPoint string, cr *credentials, volOptions *volumeOptions)
}
optionsStr := fmt.Sprintf("name=%s,secret=%s", cr.id, cr.key)
if volOptions.FsName != "" {
optionsStr = optionsStr + fmt.Sprintf(",mds_namespace=%s", volOptions.FsName)
optionsStr += fmt.Sprintf(",mds_namespace=%s", volOptions.FsName)
}
args = append(args, "-o", optionsStr)
@ -202,7 +202,7 @@ func bindMount(from, to string, readOnly bool, mntOptions []string) error {
}
if readOnly {
mntOptionSli = mntOptionSli + ",remount"
mntOptionSli += ",remount"
if err := execCommandErr("mount", "-o", mntOptionSli, to); err != nil {
return fmt.Errorf("failed read-only remount of %s: %v", to, err)
}

View File

@ -39,7 +39,7 @@ func (cs *DefaultControllerServer) ControllerUnpublishVolume(ctx context.Context
return nil, status.Error(codes.Unimplemented, "")
}
//ControllerExpandVolume expand volume
// ControllerExpandVolume expands the volume
func (cs *DefaultControllerServer) ControllerExpandVolume(ctx context.Context, req *csi.ControllerExpandVolumeRequest) (*csi.ControllerExpandVolumeResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}

View File

@ -37,7 +37,7 @@ type CSIDriver struct {
// NewCSIDriver Creates a NewCSIDriver object. Assumes vendor
// version is equal to driver version & does not support optional
// driver plugin info manifest field. Refer to CSI spec for more details.
func NewCSIDriver(name string, v string, nodeID string) *CSIDriver {
func NewCSIDriver(name, v, nodeID string) *CSIDriver {
if name == "" {
klog.Errorf("Driver name missing")
return nil
@ -48,7 +48,7 @@ func NewCSIDriver(name string, v string, nodeID string) *CSIDriver {
return nil
}
// TODO version format and validation
if len(v) == 0 {
if v == "" {
klog.Errorf("Version argument missing")
return nil
}

View File

@ -64,11 +64,11 @@ func NewDefaultControllerServer(d *CSIDriver) *DefaultControllerServer {
}
// NewControllerServiceCapability returns controller capabilities
func NewControllerServiceCapability(cap csi.ControllerServiceCapability_RPC_Type) *csi.ControllerServiceCapability {
func NewControllerServiceCapability(ctrlCap csi.ControllerServiceCapability_RPC_Type) *csi.ControllerServiceCapability {
return &csi.ControllerServiceCapability{
Type: &csi.ControllerServiceCapability_Rpc{
Rpc: &csi.ControllerServiceCapability_RPC{
Type: cap,
Type: ctrlCap,
},
},
}

View File

@ -46,17 +46,17 @@ func (cs *ControllerServer) validateVolumeReq(req *csi.CreateVolumeRequest) erro
return err
}
// Check sanity of request Name, Volume Capabilities
if len(req.Name) == 0 {
if req.Name == "" {
return status.Error(codes.InvalidArgument, "volume Name cannot be empty")
}
if req.VolumeCapabilities == nil {
return status.Error(codes.InvalidArgument, "volume Capabilities cannot be empty")
}
options := req.GetParameters()
if value, ok := options["clusterID"]; !ok || len(value) == 0 {
if value, ok := options["clusterID"]; !ok || value == "" {
return status.Error(codes.InvalidArgument, "missing or empty cluster ID to provision volume from")
}
if value, ok := options["pool"]; !ok || len(value) == 0 {
if value, ok := options["pool"]; !ok || value == "" {
return status.Error(codes.InvalidArgument, "missing or empty pool name to provision volume from")
}
return nil
@ -194,7 +194,7 @@ func (cs *ControllerServer) checkSnapshot(req *csi.CreateVolumeRequest, rbdVol *
}
snapshotID := snapshot.GetSnapshotId()
if len(snapshotID) == 0 {
if snapshotID == "" {
return status.Error(codes.InvalidArgument, "volume Snapshot ID cannot be empty")
}
@ -401,10 +401,10 @@ func (cs *ControllerServer) validateSnapshotReq(req *csi.CreateSnapshotRequest)
}
// Check sanity of request Snapshot Name, Source Volume Id
if len(req.Name) == 0 {
if req.Name == "" {
return status.Error(codes.InvalidArgument, "snapshot Name cannot be empty")
}
if len(req.SourceVolumeId) == 0 {
if req.SourceVolumeId == "" {
return status.Error(codes.InvalidArgument, "source Volume ID cannot be empty")
}
@ -458,7 +458,7 @@ func (cs *ControllerServer) doSnapshot(rbdSnap *rbdSnapshot, secret map[string]s
}
// DeleteSnapshot deletes the snapshot in backend and removes the
//snapshot metadata from store
// snapshot metadata from store
func (cs *ControllerServer) DeleteSnapshot(ctx context.Context, req *csi.DeleteSnapshotRequest) (*csi.DeleteSnapshotResponse, error) {
if err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT); err != nil {
klog.Warningf("invalid delete snapshot req: %v", protosanitizer.StripSecrets(req))
@ -466,7 +466,7 @@ func (cs *ControllerServer) DeleteSnapshot(ctx context.Context, req *csi.DeleteS
}
snapshotID := req.GetSnapshotId()
if len(snapshotID) == 0 {
if snapshotID == "" {
return nil, status.Error(codes.InvalidArgument, "snapshot ID cannot be empty")
}

View File

@ -50,7 +50,7 @@ func init() {
}
// Search /sys/bus for rbd device that matches given pool and image.
func getRbdDevFromImageAndPool(pool string, image string) (string, bool) {
func getRbdDevFromImageAndPool(pool, image string) (string, bool) {
// /sys/bus/rbd/devices/X/name and /sys/bus/rbd/devices/X/pool
sysPath := "/sys/bus/rbd/devices"
if dirs, err := ioutil.ReadDir(sysPath); err == nil {
@ -123,7 +123,7 @@ func getMaxNbds() (int, error) {
// but older versions of list-mapped don't.
// The implementation below peeks at the command line of nbd bound processes
// to figure out any mapped images.
func getNbdDevFromImageAndPool(pool string, image string) (string, bool) {
func getNbdDevFromImageAndPool(pool, image string) (string, bool) {
// nbd module exports the pid of serving process in sysfs
basePath := "/sys/block/nbd"
// Do not change imgPath format - some tools like rbd-nbd are strict about it.

View File

@ -55,7 +55,7 @@ type cephStoragePoolSummary struct {
// GetPoolID searches a list of pools in a cluster and returns the ID of the pool that matches
// the passed in poolName parameter
func GetPoolID(monitors string, adminID string, key string, poolName string) (int64, error) {
func GetPoolID(monitors, adminID, key, poolName string) (int64, error) {
// ceph <options> -f json osd lspools
// JSON out: [{"poolnum":<int64>,"poolname":<string>}]
@ -90,7 +90,7 @@ func GetPoolID(monitors string, adminID string, key string, poolName string) (in
// GetPoolName lists all pools in a ceph cluster, and matches the pool whose pool ID is equal to
// the requested poolID parameter
func GetPoolName(monitors string, adminID string, key string, poolID int64) (string, error) {
func GetPoolName(monitors, adminID, key string, poolID int64) (string, error) {
// ceph <options> -f json osd lspools
// [{"poolnum":1,"poolname":"replicapool"}]

View File

@ -92,7 +92,7 @@ func (k8scm *K8sCMCache) getMetadataCM(resourceID string) (*v1.ConfigMap, error)
return cm, nil
}
//ForAll list the metadata in configmaps and filters outs based on the pattern
// ForAll lists the metadata in configmaps and filters out entries based on the pattern
func (k8scm *K8sCMCache) ForAll(pattern string, destObj interface{}, f ForAllFunc) error {
listOpts := metav1.ListOptions{LabelSelector: fmt.Sprintf("%s=%s", csiMetadataLabelAttr, cmLabel)}
cms, err := k8scm.Client.CoreV1().ConfigMaps(k8scm.Namespace).List(listOpts)
@ -100,9 +100,9 @@ func (k8scm *K8sCMCache) ForAll(pattern string, destObj interface{}, f ForAllFun
return errors.Wrap(err, "k8s-cm-cache: failed to list metadata configmaps")
}
for _, cm := range cms.Items {
data := cm.Data[cmDataKey]
match, err := regexp.MatchString(pattern, cm.ObjectMeta.Name)
for i := range cms.Items {
data := cms.Items[i].Data[cmDataKey]
match, err := regexp.MatchString(pattern, cms.Items[i].ObjectMeta.Name)
if err != nil {
continue
}
@ -110,9 +110,9 @@ func (k8scm *K8sCMCache) ForAll(pattern string, destObj interface{}, f ForAllFun
continue
}
if err = json.Unmarshal([]byte(data), destObj); err != nil {
return errors.Wrapf(err, "k8s-cm-cache: JSON unmarshaling failed for configmap %s", cm.ObjectMeta.Name)
return errors.Wrapf(err, "k8s-cm-cache: JSON unmarshaling failed for configmap %s", cms.Items[i].ObjectMeta.Name)
}
if err = f(cm.ObjectMeta.Name); err != nil {
if err = f(cms.Items[i].ObjectMeta.Name); err != nil {
return err
}
}

View File

@ -49,7 +49,7 @@ func (nc *NodeCache) EnsureCacheDirectory(cacheDir string) error {
return nil
}
//ForAll list the metadata in Nodecache and filters outs based on the pattern
// ForAll lists the metadata in Nodecache and filters out entries based on the pattern
func (nc *NodeCache) ForAll(pattern string, destObj interface{}, f ForAllFunc) error {
err := nc.EnsureCacheDirectory(nc.CacheDir)
if err != nil {
@ -59,9 +59,9 @@ func (nc *NodeCache) ForAll(pattern string, destObj interface{}, f ForAllFunc) e
if err != nil {
return errors.Wrapf(err, "node-cache: failed to read %s folder", nc.BasePath)
}
path := path.Join(nc.BasePath, nc.CacheDir)
cachePath := path.Join(nc.BasePath, nc.CacheDir)
for _, file := range files {
err = decodeObj(path, pattern, file, destObj)
err = decodeObj(cachePath, pattern, file, destObj)
if err == errDec {
continue
} else if err == nil {
@ -75,7 +75,7 @@ func (nc *NodeCache) ForAll(pattern string, destObj interface{}, f ForAllFunc) e
return nil
}
func decodeObj(filepath, pattern string, file os.FileInfo, destObj interface{}) error {
func decodeObj(fpath, pattern string, file os.FileInfo, destObj interface{}) error {
match, err := regexp.MatchString(pattern, file.Name())
if err != nil || !match {
return errDec
@ -84,7 +84,7 @@ func decodeObj(filepath, pattern string, file os.FileInfo, destObj interface{})
return errDec
}
// #nosec
fp, err := os.Open(path.Join(filepath, file.Name()))
fp, err := os.Open(path.Join(fpath, file.Name()))
if err != nil {
klog.Infof("node-cache: open file: %s err %v", file.Name(), err)
return errDec

View File

@ -39,7 +39,7 @@ func RoundUpToMiB(size int64) int64 {
return roundUpSize(requestBytes, MiB)
}
func roundUpSize(volumeSizeBytes int64, allocationUnitBytes int64) int64 {
func roundUpSize(volumeSizeBytes, allocationUnitBytes int64) int64 {
roundedUp := volumeSizeBytes / allocationUnitBytes
if volumeSizeBytes%allocationUnitBytes > 0 {
roundedUp++
@ -74,7 +74,7 @@ func createPersistentStorage(persistentStoragePath string) error {
// ValidateDriverName validates the driver name
func ValidateDriverName(driverName string) error {
if len(driverName) == 0 {
if driverName == "" {
return errors.New("driver name is empty")
}

View File

@ -139,7 +139,7 @@ func (ci *CSIIdentifier) DecomposeCSIID(composedCSIID string) (err error) {
ci.LocationID = int64(binary.BigEndian.Uint64(buf64))
// 16 for poolID encoding and 1 for '-' separator
bytesToProcess -= 17
nextFieldStartIdx = nextFieldStartIdx + 17
nextFieldStartIdx += 17
// has to be an exact match
if bytesToProcess != uuidSize {

View File

@ -107,6 +107,18 @@ linters-settings:
line-length: 180
# tab width in spaces. Default to 1.
tab-width: 1
gocritic:
enabled-tags:
- performance
- style
- experimental
disabled-checks:
- sloppyReassign
- elseif
- unslice
- wrapperFunc
- unnamedResult
- dupImport # https://github.com/go-critic/go-critic/issues/845
unused:
# treat code as a program (not a library) and report unused exported
# identifiers; default is false.
@ -131,23 +143,9 @@ linters-settings:
max-func-lines: 30
linters:
enable:
- megacheck
- govet
- golint
- stylecheck
- interfacer
- unconvert
- gofmt
- gocyclo
- maligned
- lll
- nakedret
enable-all: false
enable-all: true
disable:
- prealloc
disable-all: false
presets:
- bugs
- unused
fast: false
- dupl
- gochecknoinits
- gochecknoglobals