Bumped golangci-lint and fixed go-lint

abhishekdwivedi3060 committed Aug 10, 2024
1 parent 4646608 commit 7c0e385
Showing 19 changed files with 160 additions and 35 deletions.
.github/workflows/golangci-lint.yaml (2 changes: 1 addition & 1 deletion)

@@ -25,5 +25,5 @@ jobs:
- name: Run golangci-lint
uses: golangci/golangci-lint-action@v3
with:
- version: v1.57
+ version: v1.59.1
args: --timeout=5m
.golangci.yml (12 changes: 10 additions & 2 deletions)

@@ -1,5 +1,12 @@

linters-settings:
+  revive:
+    rules:
+      - name: dot-imports
+        arguments:
+          - allowedPackages: # Allow dot imports for testing packages
+              - "github.com/onsi/ginkgo/v2"
+              - "github.com/onsi/gomega"
goconst:
min-len: 2
min-occurrences: 3
@@ -11,9 +18,10 @@ linters-settings:
- performance
- style
govet:
-  check-shadowing: true
   enable:
     - fieldalignment
+    - shadow
+
nolintlint:
require-explanation: true
require-specific: true
@@ -56,7 +64,7 @@ linters:

run:
issues-exit-code: 1
- go: '1.21'
+ go: '1.22'
# skip-dirs:
# - sample
# skip-files:
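For context: golangci-lint has deprecated govet's check-shadowing option in favor of explicitly enabling the shadow analyzer, which appears to be what the govet hunk above tracks. A minimal, hypothetical sketch (not from this repo) of the kind of re-declaration the analyzer reports:

package main

import (
	"fmt"
	"strconv"
)

func parse(s string) (int, error) {
	n, err := strconv.Atoi(s)
	if err != nil {
		return 0, err
	}

	if n > 100 {
		// ":=" declares a second err that shadows the outer one; govet's
		// shadow analyzer flags this, since later checks of the outer err
		// would silently miss failures from this call.
		n, err := strconv.Atoi(s[:1])
		if err != nil {
			return 0, err
		}

		return n, nil
	}

	return n, nil
}

func main() {
	fmt.Println(parse("42"))
}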
Makefile (4 changes: 2 additions & 2 deletions)

@@ -130,7 +130,7 @@ vet: ## Run go vet against code.
go vet ./...

.PHONY: go-lint
- go-lint: golanci-lint ## Run golangci-lint against code.
+ go-lint: golangci-lint ## Run golangci-lint against code.
$(GOLANGCI_LINT) run

.PHONY: go-lint-fix
@@ -245,7 +245,7 @@ GOLANGCI_LINT = $(LOCALBIN)/golangci-lint-$(GOLANGCI_LINT_VERSION)
KUSTOMIZE_VERSION ?= v5.3.0
CONTROLLER_TOOLS_VERSION ?= v0.14.0
ENVTEST_VERSION ?= release-0.17
- GOLANGCI_LINT_VERSION ?= v1.57.2
+ GOLANGCI_LINT_VERSION ?= v1.59.1
# Set the Operator SDK version to use. By default, what is installed on the system is used.
# This is useful for CI or a project to utilize a specific version of the operator-sdk toolkit.
OPERATOR_SDK_VERSION ?= v1.36.0
internal/controller/cluster/aerospikecluster_controller.go (4 changes: 2 additions & 2 deletions)

@@ -42,10 +42,10 @@ func (r *AerospikeClusterReconciler) SetupWithManager(mgr ctrl.Manager) error {
Owns(
&appsv1.StatefulSet{}, builder.WithPredicates(
predicate.Funcs{
- CreateFunc: func(e event.CreateEvent) bool {
+ CreateFunc: func(_ event.CreateEvent) bool {
return false
},
- UpdateFunc: func(e event.UpdateEvent) bool {
+ UpdateFunc: func(_ event.UpdateEvent) bool {
return false
},
},
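The parameter renames above look like fixes for an unused-parameter rule (revive ships one): arguments the function body never reads become the blank identifier, while the signature that predicate.Funcs requires stays intact. A minimal sketch, assuming only the standard controller-runtime packages:

package main

import (
	"fmt"

	"sigs.k8s.io/controller-runtime/pkg/event"
	"sigs.k8s.io/controller-runtime/pkg/predicate"
)

func main() {
	// The event arguments are never inspected, so they are named "_",
	// which satisfies unused-parameter lints without nolint directives.
	p := predicate.Funcs{
		CreateFunc: func(_ event.CreateEvent) bool { return false },
		UpdateFunc: func(_ event.UpdateEvent) bool { return false },
	}

	fmt.Println(p.Create(event.CreateEvent{}), p.Update(event.UpdateEvent{}))
}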
internal/controller/cluster/client_policy.go (30 changes: 21 additions & 9 deletions)

@@ -231,15 +231,19 @@ func (r *SingleClusterReconciler) appendCACertFromFileOrPath(
if err != nil {
return err
}
+
if !d.IsDir() {
var caData []byte
+
if caData, err = os.ReadFile(path); err != nil {
return err
}
+
serverPool.AppendCertsFromPEM(caData)
r.Log.Info("Loaded CA certs from file.", "ca-path", caPath,
"file", path)
}
+
return nil
},
)
@@ -357,28 +361,36 @@ func (r *SingleClusterReconciler) loadCertAndKeyFromSecret(
return nil, err
}

- if crtData, crtExists := found.Data[secretSource.ClientCertFilename]; !crtExists {
+ crtData, crtExists := found.Data[secretSource.ClientCertFilename]
+ if !crtExists {
return nil, fmt.Errorf(
"can't find certificate `%s` in secret %+v",
secretSource.ClientCertFilename, secretName,
)
- } else if keyData, keyExists := found.Data[secretSource.ClientKeyFilename]; !keyExists {
+ }
+
+ keyData, keyExists := found.Data[secretSource.ClientKeyFilename]
+ if !keyExists {
return nil, fmt.Errorf(
"can't find client key `%s` in secret %+v",
secretSource.ClientKeyFilename, secretName,
)
- } else if cert, err := tls.X509KeyPair(crtData, keyData); err != nil {
+ }
+
+ cert, err := tls.X509KeyPair(crtData, keyData)
+ if err != nil {
return nil, fmt.Errorf(
"failed to load X509 key pair for cluster from secret %+v: %w",
secretName, err,
)
- } else {
- r.Log.Info(
- "Loading Aerospike Cluster client cert from secret", "secret",
- secretName,
- )
- return &cert, nil
}
+
+ r.Log.Info(
+ "Loading Aerospike Cluster client cert from secret", "secret",
+ secretName,
+ )
+
+ return &cert, nil
}

func namespacedSecret(
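The rewrite above flattens an if/else-if chain into guard clauses: each precondition is checked and returns early, so the happy path stays at the top indentation level. A standalone sketch of the same shape (hypothetical file names, not the operator's actual loader):

package main

import (
	"crypto/tls"
	"fmt"
	"os"
)

// loadKeyPair mirrors the refactored control flow: every failure case
// returns immediately instead of chaining else-if branches.
func loadKeyPair(crtPath, keyPath string) (*tls.Certificate, error) {
	crtData, err := os.ReadFile(crtPath)
	if err != nil {
		return nil, fmt.Errorf("can't read certificate %q: %w", crtPath, err)
	}

	keyData, err := os.ReadFile(keyPath)
	if err != nil {
		return nil, fmt.Errorf("can't read key %q: %w", keyPath, err)
	}

	cert, err := tls.X509KeyPair(crtData, keyData)
	if err != nil {
		return nil, fmt.Errorf("failed to load X509 key pair: %w", err)
	}

	return &cert, nil
}

func main() {
	cert, err := loadKeyPair("client.crt", "client.key")
	if err != nil {
		fmt.Println(err)
		return
	}

	fmt.Println(len(cert.Certificate), "certificate(s) loaded")
}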
internal/controller/cluster/configmap.go (3 changes: 3 additions & 0 deletions)

@@ -66,6 +66,7 @@ func init() {
if err != nil {
return err
}
+
if !d.IsDir() {
content, err := fs.ReadFile(scripts, path)
if err != nil {
@@ -76,9 +77,11 @@
if err != nil {
return err
}
+
key := filepath.Base(path)
scriptTemplates[key] = evaluated
}
+
return nil
},
)
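The three additions in this file are blank lines, presumably to satisfy a whitespace linter such as wsl, which wants statement groups separated. For orientation, the surrounding init() walks an embedded filesystem and registers each script by base name; a rough standalone sketch of that pattern (the embedded scripts directory is an assumption):

package main

import (
	"embed"
	"fmt"
	"io/fs"
	"path/filepath"
)

//go:embed scripts
var scripts embed.FS // assumes a scripts/ directory exists at build time

func main() {
	templates := map[string][]byte{}

	err := fs.WalkDir(scripts, "scripts",
		func(path string, d fs.DirEntry, err error) error {
			if err != nil {
				return err
			}

			if !d.IsDir() {
				content, err := fs.ReadFile(scripts, path)
				if err != nil {
					return err
				}

				// Register the file content under its base name.
				templates[filepath.Base(path)] = content
			}

			return nil
		})
	if err != nil {
		panic(err)
	}

	fmt.Println(len(templates), "scripts loaded")
}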
internal/controller/cluster/pod.go (4 changes: 3 additions & 1 deletion)

@@ -872,6 +872,7 @@ func (r *SingleClusterReconciler) getIgnorablePods(racksToDelete []asdbv1.Rack,
}

ignorablePodNames.Insert(failedPod[podIdx])
+
failedAllowed--
}
}
@@ -1474,7 +1475,7 @@ func (r *SingleClusterReconciler) patchPodStatus(ctx context.Context, patches []

constantPatch := client.RawPatch(types.JSONPatchType, jsonPatchJSON)

- return retry.OnError(retry.DefaultBackoff, func(err error) bool {
+ return retry.OnError(retry.DefaultBackoff, func(_ error) bool {
// Customize the error check for retrying, return true to retry, false to stop retrying
return true
}, func() error {
@@ -1488,6 +1489,7 @@ }
}

r.Log.Info("Pod status patched successfully")
+
return nil
})
}
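For reference, retry.OnError (from k8s.io/client-go/util/retry) retries the supplied function as long as the predicate returns true for the error it produced, backing off between attempts; with the predicate ignoring its argument, every error is retried until the backoff is exhausted. A minimal sketch:

package main

import (
	"errors"
	"fmt"

	"k8s.io/client-go/util/retry"
)

func main() {
	attempts := 0

	// The predicate never reads the error (hence the blank parameter)
	// and always retries; retry.DefaultBackoff bounds the attempts.
	err := retry.OnError(retry.DefaultBackoff, func(_ error) bool {
		return true
	}, func() error {
		attempts++
		if attempts < 3 {
			return errors.New("transient failure")
		}

		return nil
	})

	fmt.Println("attempts:", attempts, "err:", err)
}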
pkg/jsonpatch/jsonpatch.go (4 changes: 2 additions & 2 deletions)

@@ -229,7 +229,7 @@ func handleValues(
// array replaced by non-array
patch = append(patch, NewPatch("replace", p, bv))
} else {
- minLen := min(len(at), len(bt))
+ minLen := minimum(len(at), len(bt))
for i := 0; i < minLen; i++ {
// Patch matching indices.
patch, err = handleValues(at[i], bt[i], makePath(p, i), patch)
@@ -299,7 +299,7 @@ func compareArray(av, bv []interface{}, p string) []PatchOperation { //nolint:un
return retVal
}

- func min(x, y int) int {
+ func minimum(x, y int) int {
if x < y {
return x
}
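The rename is almost certainly about the min built-in that Go added in 1.21 (the lint config above now targets Go 1.22): a package-level func min shadows the predeclared identifier, which linters commonly flag. A sketch of the distinction (requires go 1.21 or later in go.mod):

package main

import "fmt"

// minimum is the renamed helper; a function called min would shadow the
// predeclared generic built-in that exists since Go 1.21.
func minimum(x, y int) int {
	if x < y {
		return x
	}

	return y
}

func main() {
	fmt.Println(min(3, 7))     // predeclared built-in (Go 1.21+)
	fmt.Println(minimum(3, 7)) // package-local helper
}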
test/backup/test_utils.go (9 changes: 5 additions & 4 deletions)

@@ -213,6 +213,7 @@ func waitForBackup(cl client.Client, backup *asdbv1beta1.AerospikeBackup,
pkgLog.Info("Backup status not updated yet")
return false, nil
}
+
return true, nil
})
}
@@ -280,7 +281,7 @@ func validateTriggeredBackup(k8sClient client.Client, backup *asdbv1beta1.Aerosp
// Wait for Service LB IP to be populated
if err := wait.PollUntilContextTimeout(testCtx, interval, timeout, true,
func(ctx context.Context) (bool, error) {
- if err := k8sClient.Get(testCtx,
+ if err := k8sClient.Get(ctx,
types.NamespacedName{
Name: backup.Spec.BackupService.Name,
Namespace: backup.Spec.BackupService.Namespace,
@@ -305,7 +306,7 @@

// Wait for Backup service to be ready
if err := wait.PollUntilContextTimeout(testCtx, interval, timeout, true,
- func(ctx context.Context) (bool, error) {
+ func(_ context.Context) (bool, error) {
config, err := serviceClient.GetBackupServiceConfig()
if err != nil {
pkgLog.Error(err, "Failed to get backup service config")
@@ -331,7 +332,7 @@ func GetBackupDataPaths(k8sClient client.Client, backup *asdbv1beta1.AerospikeBa
// Wait for Service LB IP to be populated
if err := wait.PollUntilContextTimeout(testCtx, interval, timeout, true,
func(ctx context.Context) (bool, error) {
- if err := k8sClient.Get(testCtx,
+ if err := k8sClient.Get(ctx,
types.NamespacedName{
Name: backup.Spec.BackupService.Name,
Namespace: backup.Spec.BackupService.Namespace,
@@ -365,7 +366,7 @@ func GetBackupDataPaths(k8sClient client.Client, backup *asdbv1beta1.AerospikeBa
}

if err := wait.PollUntilContextTimeout(testCtx, interval, timeout, true,
- func(ctx context.Context) (bool, error) {
+ func(_ context.Context) (bool, error) {
for routineName := range config.BackupRoutines {
backups, err := serviceClient.GetFullBackupsForRoutine(routineName)
if err != nil {
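Two kinds of fixes show up in this file: condition funcs that never read their context now take _, and the Get calls use the ctx handed to the callback by wait.PollUntilContextTimeout rather than the captured outer testCtx, so the request observes the poll's own deadline. A minimal sketch of the corrected pattern, assuming k8s.io/apimachinery:

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	// The condition func should use the ctx it receives: it derives from
	// the parent and is cancelled when the 5s timeout elapses, so calls
	// made with it are abandoned promptly instead of outliving the poll.
	err := wait.PollUntilContextTimeout(context.Background(),
		100*time.Millisecond, 5*time.Second, true,
		func(ctx context.Context) (bool, error) {
			if err := ctx.Err(); err != nil {
				return false, err
			}

			return true, nil // condition met on the first poll
		})

	fmt.Println("poll result:", err)
}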
test/cluster/batch_restart_pods_test.go (21 changes: 21 additions & 0 deletions)

@@ -246,15 +246,19 @@ func BatchRollingRestart(ctx goctx.Context, clusterNamespacedName types.Namespac
// Restart 1 node at a time
It("Should restart one pod at a time", func() {
By("Using default RollingUpdateBatchSize PCT/RollingUpdateBatchSize Count")
+
aeroCluster, err := getCluster(k8sClient, ctx, clusterNamespacedName)
Expect(err).ToNot(HaveOccurred())
+
aeroCluster.Spec.PodSpec.AerospikeContainerSpec.Resources = schedulableResource("1Gi")
err = updateCluster(k8sClient, ctx, aeroCluster)
Expect(err).ToNot(HaveOccurred())

By("Using RollingUpdateBatchSize PCT which is not enough eg. 1%")
+
aeroCluster, err = getCluster(k8sClient, ctx, clusterNamespacedName)
Expect(err).ToNot(HaveOccurred())
+
aeroCluster.Spec.RackConfig.RollingUpdateBatchSize = percent("1%")
aeroCluster.Spec.PodSpec.AerospikeContainerSpec.Resources = nil
err = updateCluster(k8sClient, ctx, aeroCluster)
@@ -291,13 +295,15 @@ func BatchRollingRestart(ctx goctx.Context, clusterNamespacedName types.Namespac
// Restart batch of nodes
It("Should do BatchRollingRestart", func() {
By("Use RollingUpdateBatchSize PCT")
+
err := batchRollingRestartTest(k8sClient, ctx, clusterNamespacedName, percent("90%"))
Expect(err).ToNot(HaveOccurred())

err = rollingRestartTest(k8sClient, ctx, clusterNamespacedName, percent("90%"), "1Gi")
Expect(err).ToNot(HaveOccurred())

By("Update RollingUpdateBatchSize Count")
+
err = batchRollingRestartTest(k8sClient, ctx, clusterNamespacedName, count(3))
Expect(err).ToNot(HaveOccurred())

@@ -308,8 +314,10 @@ func BatchRollingRestart(ctx goctx.Context, clusterNamespacedName types.Namespac
// User should be able to change RollingUpdateBatchSize PCT/RollingUpdateBatchSize Count when restart is going on
It("Should allow multiple changes in RollingUpdateBatchSize PCT/RollingUpdateBatchSize Count", func() {
By("Update RollingUpdateBatchSize Count")
+
aeroCluster, err := getCluster(k8sClient, ctx, clusterNamespacedName)
Expect(err).ToNot(HaveOccurred())
+
aeroCluster.Spec.RackConfig.RollingUpdateBatchSize = count(3)
aeroCluster.Spec.PodSpec.AerospikeContainerSpec.Resources = schedulableResource("1Gi")
err = k8sClient.Update(ctx, aeroCluster)
@@ -318,8 +326,10 @@ func BatchRollingRestart(ctx goctx.Context, clusterNamespacedName types.Namespac
time.Sleep(time.Second * 1)

By("Again Update RollingUpdateBatchSize Count")
+
aeroCluster, err = getCluster(k8sClient, ctx, clusterNamespacedName)
Expect(err).ToNot(HaveOccurred())
+
aeroCluster.Spec.RackConfig.RollingUpdateBatchSize = count(1)
aeroCluster.Spec.PodSpec.AerospikeContainerSpec.Resources = nil
err = k8sClient.Update(ctx, aeroCluster)
@@ -328,6 +338,7 @@ func BatchRollingRestart(ctx goctx.Context, clusterNamespacedName types.Namespac
time.Sleep(time.Second * 1)

By("Again Update RollingUpdateBatchSize Count")
+
err = rollingRestartTest(k8sClient, ctx, clusterNamespacedName, count(3), "1Gi")
Expect(err).ToNot(HaveOccurred())
})
@@ -357,13 +368,16 @@ func BatchUpgrade(ctx goctx.Context, clusterNamespacedName types.NamespacedName)
// Restart 1 node at a time
It("Should upgrade one pod at a time", func() {
By("Using default RollingUpdateBatchSize PCT/RollingUpdateBatchSize Count")
+
aeroCluster, err := getCluster(k8sClient, ctx, clusterNamespacedName)
Expect(err).ToNot(HaveOccurred())
+
aeroCluster.Spec.Image = availableImage1
err = updateCluster(k8sClient, ctx, aeroCluster)
Expect(err).ToNot(HaveOccurred())

By("Using RollingUpdateBatchSize PCT which is not enough eg. 1%")
+
err = upgradeTest(k8sClient, ctx, clusterNamespacedName, percent("1%"), availableImage1)
Expect(err).ToNot(HaveOccurred())
})
@@ -398,13 +412,15 @@ func BatchUpgrade(ctx goctx.Context, clusterNamespacedName types.NamespacedName)
// Restart batch of nodes
It("Should do BatchUpgrade", func() {
By("Use RollingUpdateBatchSize PCT")
+
err := batchUpgradeTest(k8sClient, ctx, clusterNamespacedName, percent("90%"))
Expect(err).ToNot(HaveOccurred())

err = upgradeTest(k8sClient, ctx, clusterNamespacedName, percent("90%"), availableImage1)
Expect(err).ToNot(HaveOccurred())

By("Update RollingUpdateBatchSize Count")
+
err = batchUpgradeTest(k8sClient, ctx, clusterNamespacedName, count(3))
Expect(err).ToNot(HaveOccurred())

@@ -415,8 +431,10 @@ func BatchUpgrade(ctx goctx.Context, clusterNamespacedName types.NamespacedName)
// User should be able to change RollingUpdateBatchSize PCT/RollingUpdateBatchSize Count when restart is going on
It("Should allow multiple changes in RollingUpdateBatchSize PCT/RollingUpdateBatchSize Count", func() {
By("Update RollingUpdateBatchSize Count")
+
aeroCluster, err := getCluster(k8sClient, ctx, clusterNamespacedName)
Expect(err).ToNot(HaveOccurred())
+
aeroCluster.Spec.RackConfig.RollingUpdateBatchSize = count(3)
aeroCluster.Spec.Image = availableImage1
err = k8sClient.Update(ctx, aeroCluster)
@@ -425,8 +443,10 @@ func BatchUpgrade(ctx goctx.Context, clusterNamespacedName types.NamespacedName)
time.Sleep(time.Second * 1)

By("Again Update RollingUpdateBatchSize Count")
+
aeroCluster, err = getCluster(k8sClient, ctx, clusterNamespacedName)
Expect(err).ToNot(HaveOccurred())
+
aeroCluster.Spec.RackConfig.RollingUpdateBatchSize = count(1)
aeroCluster.Spec.Image = latestImage
err = k8sClient.Update(ctx, aeroCluster)
@@ -435,6 +455,7 @@ func BatchUpgrade(ctx goctx.Context, clusterNamespacedName types.NamespacedName)
time.Sleep(time.Second * 1)

By("Again Update RollingUpdateBatchSize Count")
+
err = upgradeTest(k8sClient, ctx, clusterNamespacedName, count(3), availableImage1)
Expect(err).ToNot(HaveOccurred())
})
test/cluster/cluster_helper.go (2 changes: 2 additions & 0 deletions)

@@ -578,7 +578,9 @@ func validateMigrateFillDelay(
if err != nil {
return false, err
}
+
svcConfs := confs["service"].(lib.Stats)
+
current, exists := svcConfs["migrate-fill-delay"]
if !exists {
return false, fmt.Errorf("migrate-fill-delay missing from the Aerospike Service config")