cleanup: append pointer instead of value to avoid copying lock value

This commit resolves the govet issue:
`copylocks: call of append copies lock value ... contains sync.Mutex`

Embedding DoNotCopy in a struct is a convention used to signal that
the struct must not be shallow-copied. This is not enforced by a
language feature; it is a special case in the vet checker, whose
copylocks analysis reports copies of values containing a sync.Mutex.

For more details, see https://golang.org/issues/8005
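
To illustrate the pattern the commit applies, here is a minimal sketch (not from the ceph-csi tree; the `Response` type is hypothetical) of how appending a dereferenced pointer triggers the copylocks report while appending the pointer does not:

```go
package main

import "sync"

// Response stands in for a generated protobuf message whose internal
// state contains a sync.Mutex (the DoNotCopy convention).
type Response struct {
	mu   sync.Mutex
	Name string
}

func collect(resp *Response) {
	byValue := make([]Response, 0)
	// go vet: "copylocks: call of append copies lock value ... contains sync.Mutex"
	byValue = append(byValue, *resp)
	_ = byValue

	byPointer := make([]*Response, 0)
	// Only the pointer is copied; the mutex stays in place.
	byPointer = append(byPointer, resp)
	_ = byPointer
}

func main() {
	collect(&Response{Name: "snapshot-1"})
}
```

The diff below applies exactly this switch: the slices, the return type, and the appends move from `csi.CreateSnapshotResponse` values to `*csi.CreateSnapshotResponse` pointers.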

Signed-off-by: Praveen M <m.praveen@ibm.com>
Praveen M authored 2024-07-10 12:00:03 +05:30; committed by mergify[bot]
parent 69ef70e25b
commit d376271376
2 changed files with 8 additions and 8 deletions


@@ -178,7 +178,7 @@ func (cs *ControllerServer) CreateVolumeGroupSnapshot(
 		CreationTime: timestamppb.New(time.Now()),
 	}
-	for _, r := range *resp {
+	for _, r := range resp {
 		r.Snapshot.GroupSnapshotId = vgs.VolumeGroupSnapshotID
 		response.GroupSnapshot.Snapshots = append(response.GroupSnapshot.Snapshots, r.GetSnapshot())
 	}
@@ -260,7 +260,7 @@ func (cs *ControllerServer) releaseQuiesceAndGetVolumeGroupSnapshotResponse(
 			}
 		}
 	}()
-	snapshotResponses := make([]csi.CreateSnapshotResponse, 0)
+	snapshotResponses := make([]*csi.CreateSnapshotResponse, 0)
 	for _, volID := range req.GetSourceVolumeIds() {
 		// Create the snapshot for the volumeID
 		clusterID := getClusterIDForVolumeID(fsMap, volID)
@@ -281,7 +281,7 @@ func (cs *ControllerServer) releaseQuiesceAndGetVolumeGroupSnapshotResponse(
 				"failed to create snapshot and add to volume group journal: %v",
 				err)
 		}
-		snapshotResponses = append(snapshotResponses, *resp)
+		snapshotResponses = append(snapshotResponses, resp)
 	}
 	response := &csi.CreateVolumeGroupSnapshotResponse{}
@@ -314,13 +314,13 @@ func (cs *ControllerServer) createSnapshotAddToVolumeGroupJournal(
 	vgs *store.VolumeGroupSnapshotIdentifier,
 	cr *util.Credentials,
 	fsMap map[string]core.FSQuiesceClient) (
-	*[]csi.CreateSnapshotResponse,
+	[]*csi.CreateSnapshotResponse,
 	error,
 ) {
 	var err error
 	var resp *csi.CreateSnapshotResponse
-	responses := make([]csi.CreateSnapshotResponse, 0)
+	responses := make([]*csi.CreateSnapshotResponse, 0)
 	for _, volID := range req.GetSourceVolumeIds() {
 		err = fsQuiesceWithExpireTimeout(ctx, vgo.RequestName, fsMap)
 		if err != nil {
@@ -345,7 +345,7 @@ func (cs *ControllerServer) createSnapshotAddToVolumeGroupJournal(
 			return nil, err
 		}
-		responses = append(responses, *resp)
+		responses = append(responses, resp)
 	}
 	err = releaseFSQuiesce(ctx, vgo.RequestName, fsMap)
@@ -355,7 +355,7 @@ func (cs *ControllerServer) createSnapshotAddToVolumeGroupJournal(
 		return nil, err
 	}
-	return &responses, nil
+	return responses, nil
 }
 func formatCreateSnapshotRequest(volID, groupSnapshotName,


@@ -211,7 +211,7 @@ func FindPoolAndTopology(topologyPools *[]TopologyConstrainedPool,
 	return "", "", nil, fmt.Errorf("none of the topology constrained pools matched requested "+
 		"topology constraints : pools (%+v) requested topology (%+v)",
-		*topologyPools, *accessibilityRequirements)
+		*topologyPools, accessibilityRequirements)
 }
 // matchPoolToTopology loops through passed in pools, and for each pool checks if all
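
The topology change follows the same reasoning: passing the pointer straight to `%+v` avoids copying the struct (and the mutex inside it) into the variadic arguments, and `fmt` follows the pointer when formatting, so the message content is unchanged apart from a leading `&`. A minimal sketch, again with a hypothetical `Requirement` type:

```go
package main

import (
	"fmt"
	"sync"
)

// Requirement stands in for a protobuf message that contains a mutex.
type Requirement struct {
	mu       sync.Mutex
	Segments map[string]string
}

func main() {
	req := &Requirement{Segments: map[string]string{"zone": "zone-a"}}

	// go vet (copylocks) would flag formatting the dereferenced value:
	//   fmt.Printf("requested topology (%+v)\n", *req)

	// Passing the pointer avoids the copy; fmt dereferences it when
	// formatting, so the struct's fields are still printed (prefixed with &).
	fmt.Printf("requested topology (%+v)\n", req)
}
```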