diff --git a/.github/workflows/static.yaml b/.github/workflows/static.yaml index 60e9ac0300..e39ff8b0e2 100644 --- a/.github/workflows/static.yaml +++ b/.github/workflows/static.yaml @@ -15,5 +15,5 @@ jobs: - name: Run linter uses: golangci/golangci-lint-action@v3 with: - version: v1.54 - args: -E=gofmt,unused,ineffassign,revive,misspell,exportloopref,asciicheck,bodyclose,depguard,dogsled,durationcheck,errname,forbidigo -D=staticcheck --timeout=30m0s + version: v1.60 + args: --timeout 10m \ No newline at end of file diff --git a/.golangci.yml b/.golangci.yml index 666ec5ffa3..ee50f259b2 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -4,9 +4,8 @@ run: linters: disable: - typecheck - - structcheck enable: - - golint + - revive linters-settings: depguard: diff --git a/pkg/azuredisk/azure_common_linux_test.go b/pkg/azuredisk/azure_common_linux_test.go index d1ff7a30c2..cdeb791ba8 100644 --- a/pkg/azuredisk/azure_common_linux_test.go +++ b/pkg/azuredisk/azure_common_linux_test.go @@ -24,7 +24,7 @@ import ( ) func TestRescanAllVolumes(t *testing.T) { - if runtime.GOOS == "darwin" { + if runtime.GOOS == "darwin" { // nolint: staticcheck t.Skipf("skip test on GOOS=%s", runtime.GOOS) } err := rescanAllVolumes(azureutils.NewOSIOHandler()) diff --git a/pkg/azuredisk/azuredisk.go b/pkg/azuredisk/azuredisk.go index 443ea7b0a5..26cfd6515b 100644 --- a/pkg/azuredisk/azuredisk.go +++ b/pkg/azuredisk/azuredisk.go @@ -24,7 +24,7 @@ import ( "strings" "time" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" // nolint: staticcheck "github.com/container-storage-interface/spec/lib/go/csi" "google.golang.org/grpc/codes" @@ -177,7 +177,7 @@ func newDriverV1(options *DriverOptions) *Driver { topologyKey = fmt.Sprintf("topology.%s/zone", driver.Name) - getter := func(key string) (interface{}, error) { return nil, nil } + getter := func(_ string) (interface{}, error) { return nil, nil } var err error if driver.getDiskThrottlingCache, err = azcache.NewTimedCache(5*time.Minute, getter, false); err != nil { klog.Fatalf("%v", err) @@ -458,7 +458,7 @@ func (d *DriverCore) waitForSnapshotReady(ctx context.Context, subsID, resourceG return nil } - timeTick := time.Tick(intervel) + timeTick := time.Tick(intervel) // nolint: staticcheck timeAfter := time.After(timeout) for { select { diff --git a/pkg/azuredisk/azuredisk_test.go b/pkg/azuredisk/azuredisk_test.go index 20e08a57cd..b2fc44132c 100644 --- a/pkg/azuredisk/azuredisk_test.go +++ b/pkg/azuredisk/azuredisk_test.go @@ -25,7 +25,7 @@ import ( "testing" "time" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" // nolint: staticcheck "github.com/Azure/go-autorest/autorest/date" "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" diff --git a/pkg/azuredisk/azuredisk_v1.go b/pkg/azuredisk/azuredisk_v1.go index 7ec157798b..4b55e4bd00 100644 --- a/pkg/azuredisk/azuredisk_v1.go +++ b/pkg/azuredisk/azuredisk_v1.go @@ -21,6 +21,6 @@ package azuredisk // NewDriver Creates a NewCSIDriver object. Assumes vendor version is equal to driver version & // does not support optional driver plugin info manifest field. Refer to CSI spec for more details. 
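Context for the `// nolint: staticcheck` comments introduced above (the deprecated `services/compute` SDK import, the `runtime.GOOS` check, `time.Tick`): bumping golangci-lint to v1.60 and moving linter selection out of the workflow args into `.golangci.yml` surfaces staticcheck findings that this PR suppresses rather than rewrites. As one illustration, staticcheck's SA1015 flags `time.Tick` because the ticker it creates is never stopped; below is a minimal sketch of the unsuppressed alternative (function and variable names are illustrative, not from the driver):

```go
package main

import (
	"fmt"
	"time"
)

// Sketch only (not part of the PR): the rewrite SA1015 points toward.
// time.Tick never stops its underlying Ticker, so a caller that can return
// early leaks it; time.NewTicker with a deferred Stop does not.
func waitUntilReady(interval, timeout time.Duration, ready func() bool) error {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	deadline := time.After(timeout)
	for {
		select {
		case <-ticker.C:
			if ready() {
				return nil
			}
		case <-deadline:
			return fmt.Errorf("timed out after %v", timeout)
		}
	}
}

func main() {
	err := waitUntilReady(10*time.Millisecond, 50*time.Millisecond, func() bool { return true })
	fmt.Println(err) // <nil>
}
```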
-func NewDriver(options *DriverOptions) CSIDriver { +func NewDriver(options *DriverOptions) CSIDriver { // nolint: staticcheck return newDriverV1(options) } diff --git a/pkg/azuredisk/azuredisk_v1_test.go b/pkg/azuredisk/azuredisk_v1_test.go index 093e840bce..01f58318c9 100644 --- a/pkg/azuredisk/azuredisk_v1_test.go +++ b/pkg/azuredisk/azuredisk_v1_test.go @@ -23,7 +23,7 @@ import ( "context" "testing" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" // nolint: staticcheck "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" consts "sigs.k8s.io/azuredisk-csi-driver/pkg/azureconstants" diff --git a/pkg/azuredisk/controllerserver.go b/pkg/azuredisk/controllerserver.go index df44d97b62..b2cbbc7c41 100644 --- a/pkg/azuredisk/controllerserver.go +++ b/pkg/azuredisk/controllerserver.go @@ -25,7 +25,7 @@ import ( "strings" "time" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" // nolint: staticcheck "github.com/container-storage-interface/spec/lib/go/csi" "google.golang.org/grpc/codes" @@ -297,7 +297,7 @@ func (d *Driver) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) if strings.Contains(err.Error(), consts.NotFound) { return nil, status.Error(codes.NotFound, err.Error()) } - return nil, status.Errorf(codes.Internal, err.Error()) + return nil, status.Errorf(codes.Internal, "%v", err) } isOperationSucceeded = true @@ -389,7 +389,7 @@ func (d *Driver) ControllerPublishVolume(ctx context.Context, req *csi.Controlle nodeName := types.NodeName(nodeID) diskName, err := azureutils.GetDiskName(diskURI) if err != nil { - return nil, status.Errorf(codes.Internal, err.Error()) + return nil, status.Errorf(codes.Internal, "%v", err) } mc := metrics.NewMetricContext(consts.AzureDiskCSIDriverName, "controller_publish_volume", d.cloud.ResourceGroup, d.cloud.SubscriptionID, d.Name) @@ -427,11 +427,11 @@ func (d *Driver) ControllerPublishVolume(ctx context.Context, req *csi.Controlle } else { if strings.Contains(strings.ToLower(err.Error()), strings.ToLower(consts.TooManyRequests)) || strings.Contains(strings.ToLower(err.Error()), consts.ClientThrottled) { - return nil, status.Errorf(codes.Internal, err.Error()) + return nil, status.Errorf(codes.Internal, "%v", err) } var cachingMode compute.CachingTypes if cachingMode, err = azureutils.GetCachingMode(volumeContext); err != nil { - return nil, status.Errorf(codes.Internal, err.Error()) + return nil, status.Errorf(codes.Internal, "%v", err) } klog.V(2).Infof("Trying to attach volume %s to node %s", diskURI, nodeName) @@ -464,7 +464,7 @@ func (d *Driver) ControllerPublishVolume(ctx context.Context, req *csi.Controlle if len(errMsg) > maxErrMsgLength { errMsg = errMsg[:maxErrMsgLength] } - return nil, status.Errorf(codes.Internal, errMsg) + return nil, status.Errorf(codes.Internal, "%v", errMsg) } } klog.V(2).Infof("attach volume %s to node %s successfully", diskURI, nodeName) @@ -496,7 +496,7 @@ func (d *Driver) ControllerUnpublishVolume(ctx context.Context, req *csi.Control diskName, err := azureutils.GetDiskName(diskURI) if err != nil { - return nil, status.Errorf(codes.Internal, err.Error()) + return nil, status.Errorf(codes.Internal, "%v", err) } mc := metrics.NewMetricContext(consts.AzureDiskCSIDriverName, "controller_unpublish_volume", d.cloud.ResourceGroup, d.cloud.SubscriptionID, d.Name) @@ -516,7 +516,7 @@ 
func (d *Driver) ControllerUnpublishVolume(ctx context.Context, req *csi.Control if len(errMsg) > maxErrMsgLength { errMsg = errMsg[:maxErrMsgLength] } - return nil, status.Errorf(codes.Internal, errMsg) + return nil, status.Errorf(codes.Internal, "%v", errMsg) } } klog.V(2).Infof("detach volume %s from node %s successfully", diskURI, nodeID) @@ -594,7 +594,7 @@ func (d *Driver) ListVolumes(ctx context.Context, req *csi.ListVolumesRequest) ( func (d *Driver) listVolumesInCluster(ctx context.Context, start, maxEntries int) (*csi.ListVolumesResponse, error) { pvList, err := d.cloud.KubeClient.CoreV1().PersistentVolumes().List(ctx, metav1.ListOptions{}) if err != nil { - return nil, status.Errorf(codes.Internal, "ListVolumes failed while fetching PersistentVolumes List with error: %v", err.Error()) + return nil, status.Errorf(codes.Internal, "ListVolumes failed while fetching PersistentVolumes List with error: %v", err) } // get all resource groups and put them into a sorted slice @@ -893,7 +893,7 @@ func (d *Driver) CreateSnapshot(ctx context.Context, req *csi.CreateSnapshotRequ customTagsMap, err := volumehelper.ConvertTagsToMap(customTags) if err != nil { - return nil, status.Errorf(codes.InvalidArgument, err.Error()) + return nil, status.Errorf(codes.InvalidArgument, "%v", err) } tags := make(map[string]*string) for k, v := range customTagsMap { @@ -1038,7 +1038,7 @@ func (d *Driver) DeleteSnapshot(ctx context.Context, req *csi.DeleteSnapshotRequ if azureutils.IsARMResourceID(snapshotID) { snapshotName, resourceGroup, subsID, err = d.getSnapshotInfo(snapshotID) if err != nil { - return nil, status.Errorf(codes.Internal, err.Error()) + return nil, status.Errorf(codes.Internal, "%v", err) } } @@ -1095,7 +1095,7 @@ func (d *Driver) getSnapshotByID(ctx context.Context, subsID, resourceGroup, sna if azureutils.IsARMResourceID(snapshotID) { snapshotName, resourceGroup, subsID, err = d.getSnapshotInfo(snapshotID) if err != nil { - return nil, status.Errorf(codes.Internal, err.Error()) + return nil, status.Errorf(codes.Internal, "%v", err) } } diff --git a/pkg/azuredisk/controllerserver_test.go b/pkg/azuredisk/controllerserver_test.go index 9d2642b4f5..cd07e3a8f9 100644 --- a/pkg/azuredisk/controllerserver_test.go +++ b/pkg/azuredisk/controllerserver_test.go @@ -22,7 +22,7 @@ import ( "reflect" "testing" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" // nolint: staticcheck "github.com/Azure/go-autorest/autorest/date" "github.com/container-storage-interface/spec/lib/go/csi" "github.com/golang/mock/gomock" diff --git a/pkg/azuredisk/controllerserver_v2.go b/pkg/azuredisk/controllerserver_v2.go index c99d41c20d..0ba3373e58 100644 --- a/pkg/azuredisk/controllerserver_v2.go +++ b/pkg/azuredisk/controllerserver_v2.go @@ -236,7 +236,7 @@ func (d *DriverV2) CreateVolume(ctx context.Context, req *csi.CreateVolumeReques if strings.Contains(err.Error(), consts.NotFound) { return nil, status.Error(codes.NotFound, err.Error()) } - return nil, status.Errorf(codes.Internal, err.Error()) + return nil, status.Errorf(codes.Internal, "%v", err) } isOperationSucceeded = true @@ -332,7 +332,7 @@ func (d *DriverV2) ControllerPublishVolume(ctx context.Context, req *csi.Control nodeName := types.NodeName(nodeID) diskName, err := azureutils.GetDiskName(diskURI) if err != nil { - return nil, status.Errorf(codes.Internal, err.Error()) + return nil, status.Errorf(codes.Internal, "%v", err) } mc := 
metrics.NewMetricContext(consts.AzureDiskCSIDriverName, "controller_publish_volume", d.cloud.ResourceGroup, d.cloud.SubscriptionID, d.Name) @@ -365,11 +365,11 @@ func (d *DriverV2) ControllerPublishVolume(ctx context.Context, req *csi.Control } else { if strings.Contains(strings.ToLower(err.Error()), strings.ToLower(consts.TooManyRequests)) || strings.Contains(strings.ToLower(err.Error()), consts.ClientThrottled) { - return nil, status.Errorf(codes.Internal, err.Error()) + return nil, status.Errorf(codes.Internal, "%v", err) } var cachingMode compute.CachingTypes if cachingMode, err = azureutils.GetCachingMode(volumeContext); err != nil { - return nil, status.Errorf(codes.Internal, err.Error()) + return nil, status.Errorf(codes.Internal, "%v", err) } klog.V(2).Infof("Trying to attach volume %s to node %s", diskURI, nodeName) @@ -419,7 +419,7 @@ func (d *DriverV2) ControllerUnpublishVolume(ctx context.Context, req *csi.Contr diskName, err := azureutils.GetDiskName(diskURI) if err != nil { - return nil, status.Errorf(codes.Internal, err.Error()) + return nil, status.Errorf(codes.Internal, "%v", err) } mc := metrics.NewMetricContext(consts.AzureDiskCSIDriverName, "controller_unpublish_volume", d.cloud.ResourceGroup, d.cloud.SubscriptionID, d.Name) @@ -511,7 +511,7 @@ func (d *DriverV2) ListVolumes(ctx context.Context, req *csi.ListVolumesRequest) func (d *DriverV2) listVolumesInCluster(ctx context.Context, start, maxEntries int) (*csi.ListVolumesResponse, error) { pvList, err := d.cloud.KubeClient.CoreV1().PersistentVolumes().List(ctx, metav1.ListOptions{}) if err != nil { - return nil, status.Errorf(codes.Internal, "ListVolumes failed while fetching PersistentVolumes List with error: %v", err.Error()) + return nil, status.Errorf(codes.Internal, "ListVolumes failed while fetching PersistentVolumes List with error: %v", err) } // get all resource groups and put them into a sorted slice @@ -799,7 +799,7 @@ func (d *DriverV2) CreateSnapshot(ctx context.Context, req *csi.CreateSnapshotRe customTagsMap, err := volumehelper.ConvertTagsToMap(customTags) if err != nil { - return nil, status.Errorf(codes.InvalidArgument, err.Error()) + return nil, status.Errorf(codes.InvalidArgument, "%v", err) } tags := make(map[string]*string) for k, v := range customTagsMap { @@ -872,7 +872,7 @@ func (d *DriverV2) DeleteSnapshot(ctx context.Context, req *csi.DeleteSnapshotRe if azureutils.IsARMResourceID(snapshotID) { snapshotName, resourceGroup, subsID, err = d.getSnapshotInfo(snapshotID) if err != nil { - return nil, status.Errorf(codes.Internal, err.Error()) + return nil, status.Errorf(codes.Internal, "%v", err) } } @@ -930,7 +930,7 @@ func (d *DriverV2) getSnapshotByID(ctx context.Context, subsID, resourceGroup, s if azureutils.IsARMResourceID(snapshotID) { snapshotName, resourceGroup, subsID, err = d.getSnapshotInfo(snapshotID) if err != nil { - return nil, status.Errorf(codes.Internal, err.Error()) + return nil, status.Errorf(codes.Internal, "%v", err) } } diff --git a/pkg/azuredisk/fake_azuredisk.go b/pkg/azuredisk/fake_azuredisk.go index 16be534a31..ce8495820e 100644 --- a/pkg/azuredisk/fake_azuredisk.go +++ b/pkg/azuredisk/fake_azuredisk.go @@ -21,7 +21,7 @@ import ( "testing" "time" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" // nolint: staticcheck "github.com/container-storage-interface/spec/lib/go/csi" "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" @@ -126,7 +126,7 @@ 
func newFakeDriverV1(t *testing.T) (*fakeDriverV1, error) { driver.mounter = mounter - cache, err := azcache.NewTimedCache(time.Minute, func(key string) (interface{}, error) { + cache, err := azcache.NewTimedCache(time.Minute, func(_ string) (interface{}, error) { return nil, nil }, false) if err != nil { diff --git a/pkg/azuredisk/nodeserver.go b/pkg/azuredisk/nodeserver.go index c3717bf6e3..946cd30a96 100644 --- a/pkg/azuredisk/nodeserver.go +++ b/pkg/azuredisk/nodeserver.go @@ -263,7 +263,7 @@ func (d *Driver) NodePublishVolume(_ context.Context, req *csi.NodePublishVolume } klog.V(2).Infof("NodePublishVolume [block]: found device path %s with lun %s", source, lun) if err = d.ensureBlockTargetFile(target); err != nil { - return nil, status.Errorf(codes.Internal, err.Error()) + return nil, status.Errorf(codes.Internal, "%v", err) } case *csi.VolumeCapability_Mount: mnt, err := d.ensureMountPoint(target) @@ -497,7 +497,7 @@ func (d *Driver) NodeExpandVolume(_ context.Context, req *csi.NodeExpandVolumeRe devicePath, err := getDevicePathWithMountPath(volumePath, d.mounter) if err != nil { - return nil, status.Errorf(codes.NotFound, err.Error()) + return nil, status.Errorf(codes.NotFound, "%v", err) } if d.enableDiskOnlineResize { @@ -612,7 +612,7 @@ func (d *Driver) getDevicePathWithLUN(lunStr string) (string, error) { scsiHostRescan(d.ioHandler, d.mounter) newDevicePath := "" - err = wait.PollImmediate(1*time.Second, 2*time.Minute, func() (bool, error) { + err = wait.PollUntilContextTimeout(context.Background(), 1*time.Second, 2*time.Minute, true, func(context.Context) (bool, error) { var err error if newDevicePath, err = findDiskByLun(int(lun), d.ioHandler, d.mounter); err != nil { return false, fmt.Errorf("azureDisk - findDiskByLun(%v) failed with error(%s)", lun, err) diff --git a/pkg/azuredisk/nodeserver_test.go b/pkg/azuredisk/nodeserver_test.go index 53b173488f..1edbae024b 100644 --- a/pkg/azuredisk/nodeserver_test.go +++ b/pkg/azuredisk/nodeserver_test.go @@ -29,7 +29,7 @@ import ( "syscall" "testing" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" // nolint: staticcheck "github.com/container-storage-interface/spec/lib/go/csi" "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" @@ -207,7 +207,7 @@ func TestEnsureMountPoint(t *testing.T) { if !(runtime.GOOS == "windows" && test.skipOnWindows) && !(runtime.GOOS == "darwin" && test.skipOnDarwin) { mnt, err := d.ensureMountPoint(test.target) if !testutil.AssertError(&test.expectedErr, err) { - t.Errorf("desc: %s\n actualErr: (%v), expectedErr: (%v)", test.desc, err, test.expectedErr.Error()) + t.Errorf("desc: %s\n actualErr: (%v), expectedErr: (%v)", test.desc, err, test.expectedErr) } if err == nil { assert.Equal(t, test.expectedMnt, mnt) @@ -232,8 +232,8 @@ func TestNodeGetInfo(t *testing.T) { desc string expectedErr error skipOnDarwin bool - setupFunc func(t *testing.T, d FakeDriver) - validateFunc func(t *testing.T, resp *csi.NodeGetInfoResponse) + setupFunc func(_ *testing.T, _ FakeDriver) + validateFunc func(_ *testing.T, _ *csi.NodeGetInfoResponse) }{ { desc: "[Success] Get node information for existing VM", @@ -268,7 +268,7 @@ func TestNodeGetInfo(t *testing.T) { { desc: "[Failure] Get node information for non-existing VM", expectedErr: status.Error(codes.Internal, fmt.Sprintf("getNodeInfoFromLabels on node(%s) failed with %s", "fakeNodeID", "kubeClient is nil")), - setupFunc: func(t *testing.T, d 
FakeDriver) { + setupFunc: func(_ *testing.T, d FakeDriver) { d.getCloud().VirtualMachinesClient.(*mockvmclient.MockInterface).EXPECT(). Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). Return(compute.VirtualMachine{}, notFoundErr). @@ -335,7 +335,7 @@ func TestNodeGetVolumeStats(t *testing.T) { }, { desc: "Block volume path success", - setupFunc: func(t *testing.T, d FakeDriver) { + setupFunc: func(_ *testing.T, d FakeDriver) { d.getHostUtil().(*azureutils.FakeHostUtil).SetPathIsDeviceResult(blockVolumePath, true, nil) d.setNextCommandOutputScripts(blockdevAction) }, @@ -353,7 +353,7 @@ func TestNodeGetVolumeStats(t *testing.T) { }, { desc: "failed to determine block device", - setupFunc: func(t *testing.T, d FakeDriver) { + setupFunc: func(_ *testing.T, d FakeDriver) { d.getHostUtil().(*azureutils.FakeHostUtil).SetPathIsDeviceResult(fakePath, true, fmt.Errorf("host util is not device path")) }, req: csi.NodeGetVolumeStatsRequest{VolumePath: fakePath, VolumeId: "vol_1"}, @@ -474,13 +474,13 @@ func TestNodeStageVolume(t *testing.T) { }, { desc: "Volume operation in progress", - setupFunc: func(t *testing.T, d FakeDriver) { + setupFunc: func(_ *testing.T, d FakeDriver) { d.getVolumeLocks().TryAcquire("vol_1") }, req: csi.NodeStageVolumeRequest{VolumeId: "vol_1", StagingTargetPath: sourceTest, VolumeCapability: &csi.VolumeCapability{AccessMode: &volumeCap, AccessType: stdVolCapBlock}}, expectedErr: status.Error(codes.Aborted, fmt.Sprintf(volumeOperationAlreadyExistsFmt, "vol_1")), - cleanupFunc: func(t *testing.T, d FakeDriver) { + cleanupFunc: func(_ *testing.T, d FakeDriver) { d.getVolumeLocks().Release("vol_1") }, }, @@ -505,7 +505,7 @@ func TestNodeStageVolume(t *testing.T) { desc: "Successfully staged", skipOnDarwin: true, skipOnWindows: true, - setupFunc: func(t *testing.T, d FakeDriver) { + setupFunc: func(_ *testing.T, d FakeDriver) { d.setNextCommandOutputScripts(blkidAction, fsckAction, blockSizeAction, blkidAction, blockSizeAction, blkidAction) }, req: csi.NodeStageVolumeRequest{VolumeId: "vol_1", StagingTargetPath: sourceTest, @@ -520,7 +520,7 @@ func TestNodeStageVolume(t *testing.T) { desc: "Successfully with resize", skipOnDarwin: true, skipOnWindows: true, - setupFunc: func(t *testing.T, d FakeDriver) { + setupFunc: func(_ *testing.T, d FakeDriver) { d.setNextCommandOutputScripts(blkidAction, fsckAction, blkidAction, resize2fsAction) }, req: csi.NodeStageVolumeRequest{VolumeId: "vol_1", StagingTargetPath: sourceTest, @@ -535,7 +535,7 @@ func TestNodeStageVolume(t *testing.T) { desc: "failed to get perf attributes", skipOnDarwin: true, skipOnWindows: true, - setupFunc: func(t *testing.T, d FakeDriver) { + setupFunc: func(_ *testing.T, d FakeDriver) { d.setPerfOptimizationEnabled(true) d.setNextCommandOutputScripts(blkidAction, fsckAction, blockSizeAction, blockSizeAction) }, @@ -545,7 +545,7 @@ func TestNodeStageVolume(t *testing.T) { PublishContext: publishContext, VolumeContext: volumeContextWithPerfProfileField, }, - cleanupFunc: func(t *testing.T, d FakeDriver) { + cleanupFunc: func(_ *testing.T, d FakeDriver) { d.setPerfOptimizationEnabled(false) }, expectedErr: status.Errorf(codes.Internal, "failed to get perf attributes for /dev/sdd. 
Error: %v", fmt.Errorf("Perf profile wrong is invalid")), @@ -554,7 +554,7 @@ func TestNodeStageVolume(t *testing.T) { desc: "Successfully staged with performance optimizations", skipOnDarwin: true, skipOnWindows: true, - setupFunc: func(t *testing.T, d FakeDriver) { + setupFunc: func(_ *testing.T, d FakeDriver) { d.setPerfOptimizationEnabled(true) mockoptimization := d.getDeviceHelper().(*mockoptimization.MockInterface) diskSupportsPerfOptimizationCall := mockoptimization.EXPECT(). @@ -573,7 +573,7 @@ func TestNodeStageVolume(t *testing.T) { PublishContext: publishContext, VolumeContext: volumeContext, }, - cleanupFunc: func(t *testing.T, d FakeDriver) { + cleanupFunc: func(_ *testing.T, d FakeDriver) { d.setPerfOptimizationEnabled(false) }, expectedErr: nil, @@ -582,7 +582,7 @@ func TestNodeStageVolume(t *testing.T) { desc: "failed to optimize device performance", skipOnDarwin: true, skipOnWindows: true, - setupFunc: func(t *testing.T, d FakeDriver) { + setupFunc: func(_ *testing.T, d FakeDriver) { d.setPerfOptimizationEnabled(true) mockoptimization := d.getDeviceHelper().(*mockoptimization.MockInterface) diskSupportsPerfOptimizationCall := mockoptimization.EXPECT(). @@ -601,7 +601,7 @@ func TestNodeStageVolume(t *testing.T) { PublishContext: publishContext, VolumeContext: volumeContext, }, - cleanupFunc: func(t *testing.T, d FakeDriver) { + cleanupFunc: func(_ *testing.T, d FakeDriver) { d.setPerfOptimizationEnabled(false) }, expectedErr: status.Errorf(codes.Internal, "failed to optimize device performance for target(/dev/sdd) error(%s)", fmt.Errorf("failed to optimize device performance")), @@ -610,7 +610,7 @@ func TestNodeStageVolume(t *testing.T) { desc: "Successfully staged with perf optimization is disabled", skipOnDarwin: true, skipOnWindows: true, - setupFunc: func(t *testing.T, d FakeDriver) { + setupFunc: func(_ *testing.T, d FakeDriver) { d.setPerfOptimizationEnabled(true) mockoptimization := d.getDeviceHelper().(*mockoptimization.MockInterface) mockoptimization.EXPECT(). 
@@ -625,7 +625,7 @@ func TestNodeStageVolume(t *testing.T) { PublishContext: publishContext, VolumeContext: volumeContext, }, - cleanupFunc: func(t *testing.T, d FakeDriver) { + cleanupFunc: func(_ *testing.T, d FakeDriver) { d.setPerfOptimizationEnabled(false) }, expectedErr: nil, @@ -737,7 +737,7 @@ func TestNodeUnstageVolume(t *testing.T) { !(runtime.GOOS == "darwin" && test.skipOnDarwin) { _, err := d.NodeUnstageVolume(context.Background(), &test.req) if !testutil.AssertError(&test.expectedErr, err) { - t.Errorf("desc: %s\n actualErr: (%v), expectedErr: (%v)", test.desc, err, test.expectedErr.Error()) + t.Errorf("desc: %s\n actualErr: (%v), expectedErr: (%v)", test.desc, err, test.expectedErr) } } if test.cleanup != nil { @@ -849,8 +849,8 @@ func TestNodePublishVolume(t *testing.T) { skipOnWindows: true, // permission issues skipOnDarwin: true, expectedErr: testutil.TestError{ - DefaultError: status.Errorf(codes.Internal, fmt.Sprintf("could not mount target \"%s\": "+ - "mkdir %s: not a directory", azuredisk, azuredisk)), + DefaultError: status.Errorf(codes.Internal, "could not mount target \"%s\": "+ + "mkdir %s: not a directory", azuredisk, azuredisk), }, }, { @@ -885,8 +885,8 @@ func TestNodePublishVolume(t *testing.T) { Readonly: true}, skipOnWindows: true, // permission issues expectedErr: testutil.TestError{ - DefaultError: status.Errorf(codes.Internal, fmt.Sprintf("could not mount \"%s\" at \"%s\": "+ - "fake Mount: source error", errorMountSource, targetTest)), + DefaultError: status.Errorf(codes.Internal, "could not mount \"%s\" at \"%s\": "+ + "fake Mount: source error", errorMountSource, targetTest), }, }, { @@ -926,7 +926,7 @@ func TestNodePublishVolume(t *testing.T) { var err error _, err = d.NodePublishVolume(context.Background(), &test.req) if !testutil.AssertError(&test.expectedErr, err) { - t.Errorf("desc: %s\n actualErr: (%v), expectedErr: (%v)", test.desc, err, test.expectedErr.Error()) + t.Errorf("desc: %s\n actualErr: (%v), expectedErr: (%v)", test.desc, err, test.expectedErr) } } if test.cleanup != nil { @@ -1001,7 +1001,7 @@ func TestNodeUnpublishVolume(t *testing.T) { !(test.skipOnDarwin && runtime.GOOS == "darwin") { _, err := d.NodeUnpublishVolume(context.Background(), &test.req) if !testutil.AssertError(&test.expectedErr, err) { - t.Errorf("desc: %s\n actualErr: (%v), expectedErr: (%v)", test.desc, err, test.expectedErr.Error()) + t.Errorf("desc: %s\n actualErr: (%v), expectedErr: (%v)", test.desc, err, test.expectedErr) } } if test.cleanup != nil { @@ -1206,7 +1206,7 @@ func TestNodeExpandVolume(t *testing.T) { _, err := d.NodeExpandVolume(context.Background(), &test.req) if !testutil.AssertError(&test.expectedErr, err) { - t.Errorf("desc: %s\n actualErr: (%v), expectedErr: (%v)", test.desc, err, test.expectedErr.Error()) + t.Errorf("desc: %s\n actualErr: (%v), expectedErr: (%v)", test.desc, err, test.expectedErr) } } err = os.RemoveAll(targetTest) @@ -1258,7 +1258,7 @@ func TestGetBlockSizeBytes(t *testing.T) { for _, test := range tests { _, err := getBlockSizeBytes(test.req, d.getMounter()) if !testutil.AssertError(&test.expectedErr, err) { - t.Errorf("desc: %s\n actualErr: (%v), expectedErr: (%v)", test.desc, err, test.expectedErr.Error()) + t.Errorf("desc: %s\n actualErr: (%v), expectedErr: (%v)", test.desc, err, test.expectedErr) } } //Setup @@ -1303,7 +1303,7 @@ func TestEnsureBlockTargetFile(t *testing.T) { for _, test := range tests { err := d.ensureBlockTargetFile(test.req) if !testutil.AssertError(&test.expectedErr, err) { - t.Errorf("desc: %s\n 
actualErr: (%v), expectedErr: (%v)", test.desc, err, test.expectedErr.Error()) + t.Errorf("desc: %s\n actualErr: (%v), expectedErr: (%v)", test.desc, err, test.expectedErr) } } err = os.RemoveAll(testTarget) diff --git a/pkg/azuredisk/nodeserver_v2.go b/pkg/azuredisk/nodeserver_v2.go index 718e17de7b..bd45af01f7 100644 --- a/pkg/azuredisk/nodeserver_v2.go +++ b/pkg/azuredisk/nodeserver_v2.go @@ -255,7 +255,7 @@ func (d *DriverV2) NodePublishVolume(ctx context.Context, req *csi.NodePublishVo } klog.V(2).Infof("NodePublishVolume [block]: found device path %s with lun %s", source, lun) if err = d.ensureBlockTargetFile(target); err != nil { - return nil, status.Errorf(codes.Internal, err.Error()) + return nil, status.Errorf(codes.Internal, "%v", err) } case *csi.VolumeCapability_Mount: mnt, err := d.ensureMountPoint(target) @@ -455,7 +455,7 @@ func (d *DriverV2) NodeExpandVolume(ctx context.Context, req *csi.NodeExpandVolu devicePath, err := getDevicePathWithMountPath(volumePath, d.mounter) if err != nil { - return nil, status.Errorf(codes.NotFound, err.Error()) + return nil, status.Errorf(codes.NotFound, "%v", err) } if d.enableDiskOnlineResize { diff --git a/pkg/azurediskplugin/main.go b/pkg/azurediskplugin/main.go index b86d5201dc..e182dd7634 100644 --- a/pkg/azurediskplugin/main.go +++ b/pkg/azurediskplugin/main.go @@ -120,8 +120,8 @@ func handle() { GetNodeIDFromIMDS: *getNodeIDFromIMDS, WaitForSnapshotReady: *waitForSnapshotReady, } - driver := azuredisk.NewDriver(&driverOptions) - if driver == nil { + driver := azuredisk.NewDriver(&driverOptions) // nolint: staticcheck + if driver == nil { // nolint: staticcheck klog.Fatalln("Failed to initialize azuredisk CSI Driver") } testingMock := false diff --git a/pkg/azureutils/azure_disk_utils.go b/pkg/azureutils/azure_disk_utils.go index 97da44d9e9..f2c5f370c0 100644 --- a/pkg/azureutils/azure_disk_utils.go +++ b/pkg/azureutils/azure_disk_utils.go @@ -30,7 +30,7 @@ import ( "time" "unicode" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" // nolint: staticcheck "github.com/container-storage-interface/spec/lib/go/csi" "github.com/pborman/uuid" v1 "k8s.io/api/core/v1" diff --git a/pkg/azureutils/azure_disk_utils_test.go b/pkg/azureutils/azure_disk_utils_test.go index d931592915..b468d5c040 100644 --- a/pkg/azureutils/azure_disk_utils_test.go +++ b/pkg/azureutils/azure_disk_utils_test.go @@ -28,7 +28,7 @@ import ( "testing" "time" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" // nolint: staticcheck "github.com/container-storage-interface/spec/lib/go/csi" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" diff --git a/pkg/azureutils/azure_snapshot_utils.go b/pkg/azureutils/azure_snapshot_utils.go index fba3c2fc41..ba6b876f2b 100644 --- a/pkg/azureutils/azure_snapshot_utils.go +++ b/pkg/azureutils/azure_snapshot_utils.go @@ -21,7 +21,7 @@ import ( "strconv" "strings" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" // nolint: staticcheck "github.com/container-storage-interface/spec/lib/go/csi" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" diff --git a/pkg/azureutils/azure_snapshot_utils_test.go b/pkg/azureutils/azure_snapshot_utils_test.go index 676e2f5f6e..5cb4d8e128 100644 --- 
a/pkg/azureutils/azure_snapshot_utils_test.go +++ b/pkg/azureutils/azure_snapshot_utils_test.go @@ -21,7 +21,7 @@ import ( "reflect" "testing" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" // nolint: staticcheck "github.com/Azure/go-autorest/autorest/date" "github.com/container-storage-interface/spec/lib/go/csi" "github.com/stretchr/testify/assert" diff --git a/pkg/csi-common/utils_test.go b/pkg/csi-common/utils_test.go index b152cda5e4..77e6bd2421 100644 --- a/pkg/csi-common/utils_test.go +++ b/pkg/csi-common/utils_test.go @@ -99,7 +99,7 @@ func TestLogGRPC(t *testing.T) { buf := new(bytes.Buffer) klog.SetOutput(buf) - handler := func(ctx context.Context, req interface{}) (interface{}, error) { return nil, nil } + handler := func(_ context.Context, _ interface{}) (interface{}, error) { return nil, nil } info := grpc.UnaryServerInfo{ FullMethod: "fake", } diff --git a/pkg/os/disk/types.go b/pkg/os/disk/types.go index 993a9b4656..2b691132a9 100644 --- a/pkg/os/disk/types.go +++ b/pkg/os/disk/types.go @@ -27,38 +27,38 @@ type StoragePropertyID uint32 const ( StorageDeviceProperty StoragePropertyID = 0 - StorageAdapterProperty = 1 - StorageDeviceIDProperty = 2 - StorageDeviceUniqueIDProperty = 3 - StorageDeviceWriteCacheProperty = 4 - StorageMiniportProperty = 5 - StorageAccessAlignmentProperty = 6 - StorageDeviceSeekPenaltyProperty = 7 - StorageDeviceTrimProperty = 8 - StorageDeviceWriteAggregationProperty = 9 - StorageDeviceDeviceTelemetryProperty = 10 - StorageDeviceLBProvisioningProperty = 11 - StorageDevicePowerProperty = 12 - StorageDeviceCopyOffloadProperty = 13 - StorageDeviceResiliencyProperty = 14 - StorageDeviceMediumProductType = 15 - StorageAdapterRpmbProperty = 16 - StorageAdapterCryptoProperty = 17 - StorageDeviceIoCapabilityProperty = 18 - StorageAdapterProtocolSpecificProperty = 19 - StorageDeviceProtocolSpecificProperty = 20 - StorageAdapterTemperatureProperty = 21 - StorageDeviceTemperatureProperty = 22 - StorageAdapterPhysicalTopologyProperty = 23 - StorageDevicePhysicalTopologyProperty = 24 - StorageDeviceAttributesProperty = 25 - StorageDeviceManagementStatus = 26 - StorageAdapterSerialNumberProperty = 27 - StorageDeviceLocationProperty = 28 - StorageDeviceNumaProperty = 29 - StorageDeviceZonedDeviceProperty = 30 - StorageDeviceUnsafeShutdownCount = 31 - StorageDeviceEnduranceProperty = 32 + StorageAdapterProperty StoragePropertyID = 1 + StorageDeviceIDProperty StoragePropertyID = 2 + StorageDeviceUniqueIDProperty StoragePropertyID = 3 + StorageDeviceWriteCacheProperty StoragePropertyID = 4 + StorageMiniportProperty StoragePropertyID = 5 + StorageAccessAlignmentProperty StoragePropertyID = 6 + StorageDeviceSeekPenaltyProperty StoragePropertyID = 7 + StorageDeviceTrimProperty StoragePropertyID = 8 + StorageDeviceWriteAggregationProperty StoragePropertyID = 9 + StorageDeviceDeviceTelemetryProperty StoragePropertyID = 10 + StorageDeviceLBProvisioningProperty StoragePropertyID = 11 + StorageDevicePowerProperty StoragePropertyID = 12 + StorageDeviceCopyOffloadProperty StoragePropertyID = 13 + StorageDeviceResiliencyProperty StoragePropertyID = 14 + StorageDeviceMediumProductType StoragePropertyID = 15 + StorageAdapterRpmbProperty StoragePropertyID = 16 + StorageAdapterCryptoProperty StoragePropertyID = 17 + StorageDeviceIoCapabilityProperty StoragePropertyID = 18 + StorageAdapterProtocolSpecificProperty StoragePropertyID = 19 + StorageDeviceProtocolSpecificProperty 
StoragePropertyID = 20 + StorageAdapterTemperatureProperty StoragePropertyID = 21 + StorageDeviceTemperatureProperty StoragePropertyID = 22 + StorageAdapterPhysicalTopologyProperty StoragePropertyID = 23 + StorageDevicePhysicalTopologyProperty StoragePropertyID = 24 + StorageDeviceAttributesProperty StoragePropertyID = 25 + StorageDeviceManagementStatus StoragePropertyID = 26 + StorageAdapterSerialNumberProperty StoragePropertyID = 27 + StorageDeviceLocationProperty StoragePropertyID = 28 + StorageDeviceNumaProperty StoragePropertyID = 29 + StorageDeviceZonedDeviceProperty StoragePropertyID = 30 + StorageDeviceUnsafeShutdownCount StoragePropertyID = 31 + StorageDeviceEnduranceProperty StoragePropertyID = 32 ) type StorageQueryType uint32 @@ -89,31 +89,31 @@ type StorageIdentifierCodeSet uint32 const ( StorageIDCodeSetReserved StorageIdentifierCodeSet = 0 - StorageIDCodeSetBinary = 1 - StorageIDCodeSetASCII = 2 - StorageIDCodeSetUtf8 = 3 + StorageIDCodeSetBinary StorageIdentifierCodeSet = 1 + StorageIDCodeSetASCII StorageIdentifierCodeSet = 2 + StorageIDCodeSetUtf8 StorageIdentifierCodeSet = 3 ) type StorageIdentifierType uint32 const ( StorageIDTypeVendorSpecific StorageIdentifierType = 0 - StorageIDTypeVendorID = 1 - StorageIDTypeEUI64 = 2 - StorageIDTypeFCPHName = 3 - StorageIDTypePortRelative = 4 - StorageIDTypeTargetPortGroup = 5 - StorageIDTypeLogicalUnitGroup = 6 - StorageIDTypeMD5LogicalUnitIdentifier = 7 - StorageIDTypeScsiNameString = 8 + StorageIDTypeVendorID StorageIdentifierType = 1 + StorageIDTypeEUI64 StorageIdentifierType = 2 + StorageIDTypeFCPHName StorageIdentifierType = 3 + StorageIDTypePortRelative StorageIdentifierType = 4 + StorageIDTypeTargetPortGroup StorageIdentifierType = 5 + StorageIDTypeLogicalUnitGroup StorageIdentifierType = 6 + StorageIDTypeMD5LogicalUnitIdentifier StorageIdentifierType = 7 + StorageIDTypeScsiNameString StorageIdentifierType = 8 ) type StorageAssociationType uint32 const ( StorageIDAssocDevice StorageAssociationType = 0 - StorageIDAssocPort = 1 - StorageIDAssocTarget = 2 + StorageIDAssocPort StorageAssociationType = 1 + StorageIDAssocTarget StorageAssociationType = 2 ) type StorageIdentifier struct { diff --git a/pkg/os/volume/volume.go b/pkg/os/volume/volume.go index 4095758d5f..7b47f5564b 100644 --- a/pkg/os/volume/volume.go +++ b/pkg/os/volume/volume.go @@ -71,8 +71,8 @@ func ListVolumesOnDisk(diskNumber uint32, partitionNumber uint32) (volumeIDs []s return []string{}, fmt.Errorf("error list volumes on disk. cmd: %s, output: %s, error: %v", cmd, string(out), err) } - volumeIds := strings.Split(strings.TrimSpace(string(out)), "\r\n") - return volumeIds, nil + volumeIDs = strings.Split(strings.TrimSpace(string(out)), "\r\n") + return volumeIDs, nil } // FormatVolume - Formats a volume with the NTFS format. 
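The `pkg/os/disk/types.go` changes above fix a subtle Go const-block rule: when each constant in a block carries its own `= value`, only the identifiers that repeat the type are actually of that type, and the rest default to untyped int. Repeating `StoragePropertyID` (and the other enum types) on every line makes the constants usable wherever the named type is required. A small illustration, with `iota` shown as the alternative spelling (names here are only for the example):

```go
package main

import "fmt"

type StoragePropertyID uint32

const (
	DeviceProperty  StoragePropertyID = 0
	AdapterProperty                   = 1 // untyped int constant, NOT StoragePropertyID
)

// With iota, both the type and the implicit value carry down each line.
const (
	DevicePropertyAlt StoragePropertyID = iota // 0
	AdapterPropertyAlt                         // 1, typed StoragePropertyID
)

func main() {
	fmt.Printf("%T %T\n", DeviceProperty, AdapterPropertyAlt) // main.StoragePropertyID main.StoragePropertyID
	fmt.Printf("%T\n", AdapterProperty)                       // int (the untyped constant defaults to int)
}
```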
diff --git a/pkg/tool/gen-disk-skus-map.go b/pkg/tool/gen-disk-skus-map.go index 7969186ed1..5364b04787 100644 --- a/pkg/tool/gen-disk-skus-map.go +++ b/pkg/tool/gen-disk-skus-map.go @@ -26,7 +26,7 @@ import ( "strconv" "strings" - compute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute" // nolint: staticcheck "k8s.io/klog/v2" "sigs.k8s.io/azuredisk-csi-driver/pkg/optimization" diff --git a/pkg/util/util.go b/pkg/util/util.go index e86d1e0b07..8d1c6d1ac0 100644 --- a/pkg/util/util.go +++ b/pkg/util/util.go @@ -124,13 +124,13 @@ func MakeFile(pathname string) error { } type VolumeLocks struct { - locks sets.String + locks sets.Set[string] mux sync.Mutex } func NewVolumeLocks() *VolumeLocks { return &VolumeLocks{ - locks: sets.NewString(), + locks: sets.New[string](), } } diff --git a/test/e2e/pre_provisioning_test.go b/test/e2e/pre_provisioning_test.go index 5499efe2d6..f345a84720 100644 --- a/test/e2e/pre_provisioning_test.go +++ b/test/e2e/pre_provisioning_test.go @@ -24,6 +24,7 @@ import ( "github.com/container-storage-interface/spec/lib/go/csi" "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" v1 "k8s.io/api/core/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" @@ -47,7 +48,7 @@ var _ = ginkgo.Describe("Pre-Provisioned", func() { skipVolumeDeletion bool ) - ginkgo.BeforeEach(func(ctx ginkgo.SpecContext) { + ginkgo.BeforeEach(func(_ ginkgo.SpecContext) { cs = f.ClientSet ns = f.Namespace testDriver = driver.InitAzureDiskDriver() @@ -171,7 +172,7 @@ var _ = ginkgo.Describe("Pre-Provisioned", func() { req := makeCreateVolumeReq("invalid-maxShares", 256) req.Parameters = map[string]string{"maxShares": "0"} _, err := azurediskDriver.CreateVolume(ctx, req) - framework.ExpectError(err) + gomega.Expect(err).To(gomega.HaveOccurred()) }) ginkgo.It("should succeed when attaching a shared block volume to multiple pods [disk.csi.azure.com][shared disk]", func(ctx ginkgo.SpecContext) { diff --git a/test/e2e/suite_test.go b/test/e2e/suite_test.go index 29e4acd0d0..2b8ceeb066 100644 --- a/test/e2e/suite_test.go +++ b/test/e2e/suite_test.go @@ -28,7 +28,6 @@ import ( "testing" "github.com/onsi/ginkgo/v2" - "github.com/onsi/ginkgo/v2/reporters" "github.com/onsi/gomega" "github.com/pborman/uuid" "k8s.io/kubernetes/test/e2e/framework" @@ -163,7 +162,7 @@ var _ = ginkgo.BeforeSuite(func(ctx ginkgo.SpecContext) { } }) -var _ = ginkgo.AfterSuite(func(ctx ginkgo.SpecContext) { +var _ = ginkgo.AfterSuite(func(_ ginkgo.SpecContext) { // Default storage driver configuration is CSI. Freshly built // CSI driver is installed for that case. 
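`pkg/util/util.go` above moves `VolumeLocks` from the string-specific `sets.String` helper to the generic `sets.Set[string]` from `k8s.io/apimachinery/pkg/util/sets`; the method set (`Insert`, `Has`, `Delete`) is the same, so only the field type and constructor change. A sketch of how the lock type reads after the change, with the `TryAcquire`/`Release` bodies reconstructed here for illustration (the PR itself only touches the struct and constructor):

```go
package util

import (
	"sync"

	"k8s.io/apimachinery/pkg/util/sets"
)

type VolumeLocks struct {
	locks sets.Set[string]
	mux   sync.Mutex
}

func NewVolumeLocks() *VolumeLocks {
	return &VolumeLocks{locks: sets.New[string]()}
}

// TryAcquire returns false if the volume is already locked (sketch).
func (vl *VolumeLocks) TryAcquire(volumeID string) bool {
	vl.mux.Lock()
	defer vl.mux.Unlock()
	if vl.locks.Has(volumeID) {
		return false
	}
	vl.locks.Insert(volumeID)
	return true
}

func (vl *VolumeLocks) Release(volumeID string) {
	vl.mux.Lock()
	defer vl.mux.Unlock()
	vl.locks.Delete(volumeID)
}
```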
if isTestingMigration || isUsingInTreeVolumePlugin { @@ -266,8 +265,9 @@ func TestE2E(t *testing.T) { if reportDir == "" { reportDir = defaultReportDir } - r := []ginkgo.Reporter{reporters.NewJUnitReporter(path.Join(reportDir, "junit_01.xml"))} - ginkgo.RunSpecsWithDefaultAndCustomReporters(t, "AzureDisk CSI Driver End-to-End Tests", r) + _, reporterConfig := ginkgo.GinkgoConfiguration() + reporterConfig.JUnitReport = path.Join(reportDir, "junit_01.xml") + ginkgo.RunSpecs(t, "AzureDisk CSI Driver End-to-End Tests", reporterConfig) } func execTestCmd(cmds []testCmd) { diff --git a/test/e2e/testsuites/dynamically_provisioned_azuredisk_detach_tester.go b/test/e2e/testsuites/dynamically_provisioned_azuredisk_detach_tester.go index a26b642865..f7d82ecd6e 100644 --- a/test/e2e/testsuites/dynamically_provisioned_azuredisk_detach_tester.go +++ b/test/e2e/testsuites/dynamically_provisioned_azuredisk_detach_tester.go @@ -21,6 +21,7 @@ import ( "fmt" "time" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" compute "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" @@ -86,17 +87,17 @@ func (t *DynamicallyProvisionedAzureDiskDetach) Run(ctx context.Context, client framework.ExpectNoError(err, fmt.Sprintf("Error getting client for azuredisk %v", err)) disktest, err := disksClient.Get(ctx, resourceGroup, diskName) framework.ExpectNoError(err, fmt.Sprintf("Error getting disk for azuredisk %v", err)) - framework.ExpectEqual(compute.DiskStateAttached, *disktest.Properties.DiskState) + gomega.Expect(compute.DiskStateAttached).To(gomega.Equal(*disktest.Properties.DiskState)) ginkgo.By("begin to delete the pod") tpod.Cleanup(ctx) - err = wait.Poll(15*time.Second, 10*time.Minute, func() (bool, error) { + err = wait.PollUntilContextTimeout(ctx, 15*time.Second, 10*time.Minute, false, func(context.Context) (bool, error) { disktest, err := disksClient.Get(ctx, resourceGroup, diskName) if err != nil { return false, fmt.Errorf("Error getting disk for azuredisk %v", err) } - if *disktest.Properties.DiskState == compute.DiskStateUnattached { + if *disktest.Properties.DiskState == armcompute.DiskStateUnattached { return true, nil } ginkgo.By(fmt.Sprintf("current disk state(%v) is not in unattached state, wait and recheck", *disktest.Properties.DiskState)) diff --git a/test/e2e/testsuites/dynamically_provisioned_azuredisk_tag_tester.go b/test/e2e/testsuites/dynamically_provisioned_azuredisk_tag_tester.go index b0e1c618b2..c1b8f2b04f 100644 --- a/test/e2e/testsuites/dynamically_provisioned_azuredisk_tag_tester.go +++ b/test/e2e/testsuites/dynamically_provisioned_azuredisk_tag_tester.go @@ -90,28 +90,28 @@ func (t *DynamicallyProvisionedAzureDiskWithTag) Run(ctx context.Context, client for k, v := range test { _, ok := disktest.Tags[k] - framework.ExpectEqual(ok, true) + gomega.Expect(ok).To(gomega.Equal(true)) if ok { - framework.ExpectEqual(*disktest.Tags[k], v) + gomega.Expect(*disktest.Tags[k]).To(gomega.Equal(v)) } } tag, ok := disktest.Tags["kubernetes.io-created-for-pv-name"] - framework.ExpectEqual(ok, true) - framework.ExpectEqual(tag != nil, true) + gomega.Expect(ok).To(gomega.Equal(true)) + gomega.Expect(tag != nil).To(gomega.Equal(true)) if tag != nil { ginkgo.By(fmt.Sprintf("kubernetes.io-created-for-pv-name: %s", *tag)) } tag, ok = disktest.Tags["kubernetes.io-created-for-pvc-name"] - framework.ExpectEqual(ok, true) - framework.ExpectEqual(tag != nil, true) + gomega.Expect(ok).To(gomega.Equal(true)) + 
gomega.Expect(tag != nil).To(gomega.Equal(true)) if tag != nil { ginkgo.By(fmt.Sprintf("kubernetes.io-created-for-pvc-name: %s", *tag)) } tag, ok = disktest.Tags["kubernetes.io-created-for-pvc-namespace"] - framework.ExpectEqual(ok, true) - framework.ExpectEqual(tag != nil, true) + gomega.Expect(ok).To(gomega.Equal(true)) + gomega.Expect(tag != nil).To(gomega.Equal(true)) if tag != nil { ginkgo.By(fmt.Sprintf("kubernetes.io-created-for-pvc-namespace: %s", *tag)) } diff --git a/test/e2e/testsuites/dynamically_provisioned_resize_volume_tester.go b/test/e2e/testsuites/dynamically_provisioned_resize_volume_tester.go index a5be82c4bf..c94119d8d1 100644 --- a/test/e2e/testsuites/dynamically_provisioned_resize_volume_tester.go +++ b/test/e2e/testsuites/dynamically_provisioned_resize_volume_tester.go @@ -120,7 +120,7 @@ func (t *DynamicallyProvisionedResizeVolumeTest) Run(ctx context.Context, client var newPv *v1.PersistentVolume var newPvSize resource.Quantity - err = wait.PollImmediate(30*time.Second, 10*time.Minute, func() (bool, error) { + err = wait.PollUntilContextTimeout(ctx, 30*time.Second, 10*time.Minute, true, func(context.Context) (bool, error) { //takes 3-6 minutes on average for dynamic resize ginkgo.By("checking the resizing PV result") newPv, _ = client.CoreV1().PersistentVolumes().Get(ctx, newPvc.Spec.VolumeName, metav1.GetOptions{}) diff --git a/test/e2e/testsuites/testsuites.go b/test/e2e/testsuites/testsuites.go index fda258af4f..5a56ebfb4e 100644 --- a/test/e2e/testsuites/testsuites.go +++ b/test/e2e/testsuites/testsuites.go @@ -166,7 +166,7 @@ func (t *TestVolumeSnapshotClass) CreateSnapshot(ctx context.Context, pvc *v1.Pe func (t *TestVolumeSnapshotClass) ReadyToUse(ctx context.Context, snapshot *snapshotv1.VolumeSnapshot) { ginkgo.By("waiting for VolumeSnapshot to be ready to use - " + snapshot.Name) - err := wait.Poll(15*time.Second, 30*time.Minute, func() (bool, error) { + err := wait.PollUntilContextTimeout(ctx, 15*time.Second, 30*time.Minute, false, func(context.Context) (bool, error) { vs, err := snapshotclientset.New(t.client).SnapshotV1().VolumeSnapshots(t.namespace.Name).Get(ctx, snapshot.Name, metav1.GetOptions{}) if err != nil { return false, fmt.Errorf("did not see ReadyToUse: %v", err) @@ -753,7 +753,7 @@ func (t *TestStatefulset) Logs(ctx context.Context) ([]byte, error) { return podLogs(ctx, t.client, t.podName, t.namespace.Name) } func waitForStatefulSetComplete(ctx context.Context, cs clientset.Interface, ns *v1.Namespace, ss *apps.StatefulSet) error { - err := wait.PollImmediate(poll, pollTimeout, func() (bool, error) { + err := wait.PollUntilContextTimeout(ctx, poll, pollTimeout, true, func(context.Context) (bool, error) { var err error statefulSet, err := cs.AppsV1().StatefulSets(ns.Name).Get(ctx, ss.Name, metav1.GetOptions{}) if err != nil { @@ -1056,7 +1056,7 @@ func getWinImageTag(winServerVer string) string { func pollForStringWorker(namespace string, pod string, command []string, expectedString string, ch chan<- error) { args := append([]string{"exec", pod, "--"}, command...) - err := wait.PollImmediate(poll, pollForStringTimeout, func() (bool, error) { + err := wait.PollUntilContextTimeout(context.Background(), poll, pollForStringTimeout, true, func(context.Context) (bool, error) { stdout, err := e2ekubectl.RunKubectl(namespace, args...) if err != nil { framework.Logf("Error waiting for output %q in pod %q: %v.", expectedString, pod, err)
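The remaining test changes in this hunk, like the earlier ones in `nodeserver.go` and `testsuites.go`, migrate off the deprecated `wait.Poll`/`wait.PollImmediate` helpers onto `wait.PollUntilContextTimeout`, which threads a context through to the condition function and makes the "poll immediately" behaviour an explicit argument; the removed `framework.ExpectEqual`/`framework.ExpectError` helpers are likewise replaced with plain gomega assertions. A minimal sketch of the polling migration (the condition body is a stand-in, not driver code):

```go
package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	ctx := context.Background()

	// Before (deprecated):
	//   err := wait.PollImmediate(time.Second, 10*time.Second, func() (bool, error) { ... })
	// After: the context comes first, and the fourth argument ("immediate")
	// controls whether the condition runs once before the first tick.
	attempts := 0
	err := wait.PollUntilContextTimeout(ctx, time.Second, 10*time.Second, true,
		func(context.Context) (bool, error) {
			attempts++
			return attempts >= 3, nil // stand-in condition: succeed on the third try
		})
	fmt.Println(err, attempts)
}
```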