Bug fix for 3.4.0 #317

Merged: 6 commits merged on Oct 16, 2024.
Changes from 4 commits.
23 changes: 23 additions & 0 deletions api/v1beta1/aerospikebackupservice_webhook.go
@@ -19,6 +19,7 @@ package v1beta1
import (
"fmt"

set "github.com/deckarep/golang-set/v2"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
logf "sigs.k8s.io/controller-runtime/pkg/log"
@@ -60,6 +61,10 @@ func (r *AerospikeBackupService) ValidateCreate() (admission.Warnings, error) {
return nil, err
}

if err := r.validateBackupServiceSecrets(); err != nil {
return nil, err
}

return nil, nil
}

@@ -73,6 +78,10 @@ func (r *AerospikeBackupService) ValidateUpdate(_ runtime.Object) (admission.Warnings, error) {
return nil, err
}

if err := r.validateBackupServiceSecrets(); err != nil {
return nil, err
}

return nil, nil
}

@@ -116,3 +125,17 @@ func (r *AerospikeBackupService) validateBackupServiceConfig() error {

return config.Validate()
}

func (r *AerospikeBackupService) validateBackupServiceSecrets() error {
volumeNameSet := set.NewSet[string]()

for _, secret := range r.Spec.SecretMounts {
if volumeNameSet.Contains(secret.VolumeMount.Name) {
return fmt.Errorf("duplicate volume name %s found in secrets field", secret.VolumeMount.Name)
}

volumeNameSet.Add(secret.VolumeMount.Name)
}

return nil
}
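
The webhook change above can be exercised in isolation. Below is a minimal sketch, assuming only the golang-set API already imported in the diff (NewSet, Contains, Add); validateVolumeNames and the volume names are illustrative stand-ins for the SecretMounts volume-mount names, not operator code:

```go
package main

import (
	"fmt"

	set "github.com/deckarep/golang-set/v2"
)

// validateVolumeNames mirrors the webhook check: it rejects the input when
// two secret mounts reuse the same volume name.
func validateVolumeNames(volumeNames []string) error {
	seen := set.NewSet[string]()

	for _, name := range volumeNames {
		if seen.Contains(name) {
			return fmt.Errorf("duplicate volume name %s found in secrets field", name)
		}

		seen.Add(name)
	}

	return nil
}

func main() {
	// The duplicated "backup-secret" entry is expected to be rejected.
	fmt.Println(validateVolumeNames([]string{"backup-secret", "tls-secret", "backup-secret"}))
}
```

Because both ValidateCreate and ValidateUpdate call the new validateBackupServiceSecrets, the duplicate is caught on creation as well as on later edits.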
1 change: 1 addition & 0 deletions config/samples/aerospikebackupservice.yaml
@@ -24,6 +24,7 @@ spec:
type: aws-s3
path: "s3://aerospike-kubernetes-operator-test"
s3-region: us-east-1
s3-endpoint-override: ""
s3-profile: default

secrets:
4 changes: 2 additions & 2 deletions go.mod
@@ -3,7 +3,7 @@ module github.com/aerospike/aerospike-kubernetes-operator
go 1.22

require (
github.com/aerospike/aerospike-management-lib v1.4.0
github.com/aerospike/aerospike-management-lib v1.4.1-0.20240905074352-532f7cf2e66b
github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d
github.com/evanphx/json-patch v4.12.0+incompatible
github.com/go-logr/logr v1.4.2
@@ -23,7 +23,7 @@ require (
github.com/aerospike/aerospike-backup-service v0.0.0-20240822110128-dc2b4811b9d3
github.com/aerospike/aerospike-client-go/v7 v7.6.1
github.com/deckarep/golang-set/v2 v2.3.1
github.com/sirupsen/logrus v1.9.0
github.com/sirupsen/logrus v1.9.1
golang.org/x/crypto v0.24.0
golang.org/x/net v0.26.0
gomodules.xyz/jsonpatch/v2 v2.4.0
8 changes: 4 additions & 4 deletions go.sum
@@ -2,8 +2,8 @@ github.com/aerospike/aerospike-backup-service v0.0.0-20240822110128-dc2b4811b9d3
github.com/aerospike/aerospike-backup-service v0.0.0-20240822110128-dc2b4811b9d3/go.mod h1:PFWhqxcMsEEyoOZtQ70b+X8xWbbemDYuitT24EPBizk=
github.com/aerospike/aerospike-client-go/v7 v7.6.1 h1:VZK6S9YKq2w6ptTk3kXXjTxG2U9M9Y7Oi3YQ+3T7wQQ=
github.com/aerospike/aerospike-client-go/v7 v7.6.1/go.mod h1:uCbSYMpjlRcH/9f26VSF/luzDDXrcDaV8c6/WIcKtT4=
github.com/aerospike/aerospike-management-lib v1.4.0 h1:wT0l3kwzXv5DV5Cd+hD0BQq3hjSIyaPX1HaUb1304TI=
github.com/aerospike/aerospike-management-lib v1.4.0/go.mod h1:3JKrmC/mLSV8SygbrPQPNV8T7bFaTMjB8wfnX25gB+4=
github.com/aerospike/aerospike-management-lib v1.4.1-0.20240905074352-532f7cf2e66b h1:x7zrG9VzAdGU4MUOVBhB8OZ8VWUgko1qqfA5ikyocWw=
github.com/aerospike/aerospike-management-lib v1.4.1-0.20240905074352-532f7cf2e66b/go.mod h1:3JKrmC/mLSV8SygbrPQPNV8T7bFaTMjB8wfnX25gB+4=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d h1:Byv0BzEl3/e6D5CLfI0j/7hiIEtvGVFPCZ7Ei2oq8iQ=
@@ -112,8 +112,8 @@ github.com/reugn/go-quartz v0.12.0 h1:RsrklW++R5Swc7mCPYseXM06PTWN4N7/f1rsYkhHiw
github.com/reugn/go-quartz v0.12.0/go.mod h1:no4ktgYbAAuY0E1SchR8cTx1LF4jYIzdgaQhzRPSkpk=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/sirupsen/logrus v1.9.1 h1:Ou41VVR3nMWWmTiEUnj0OlsgOSCUFgsPAOl6jRIcVtQ=
github.com/sirupsen/logrus v1.9.1/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
58 changes: 58 additions & 0 deletions internal/controller/cluster/reconciler.go
@@ -231,6 +231,17 @@ func (r *SingleClusterReconciler) Reconcile() (result ctrl.Result, recErr error)
return reconcile.Result{}, recErr
}

// Do recluster before setting up the roster to get the latest observed node list from the server.
if r.IsReclusterNeeded() {
if err = deployment.InfoRecluster(
r.Log,
policy, allHostConns,
); err != nil {
r.Log.Error(err, "Failed to do recluster")
return reconcile.Result{}, err
}
}

if asdbv1.IsClusterSCEnabled(r.aeroCluster) {
if !r.IsStatusEmpty() {
if res := r.waitForClusterStability(policy, allHostConns); !res.IsSuccess {
@@ -1023,3 +1034,50 @@ func (r *SingleClusterReconciler) AddAPIVersionLabel(ctx context.Context) error

return r.Client.Update(ctx, aeroCluster, common.UpdateOption)
}

func (r *SingleClusterReconciler) IsReclusterNeeded() bool {
// Return false if dynamic configuration updates are disabled
if !asdbv1.GetBool(r.aeroCluster.Spec.EnableDynamicConfigUpdate) {
return false
}

// Check for any active-rack addition/update across all the namespaces.
// If there is any active-rack change, recluster is required.
for specIdx := range r.aeroCluster.Spec.RackConfig.Racks {
for statusIdx := range r.aeroCluster.Status.RackConfig.Racks {
if r.aeroCluster.Spec.RackConfig.Racks[specIdx].ID == r.aeroCluster.Status.RackConfig.Racks[statusIdx].ID &&
r.IsReclusterNeededForRack(&r.aeroCluster.Spec.RackConfig.Racks[specIdx],
&r.aeroCluster.Status.RackConfig.Racks[statusIdx]) {
return true
}
}
}

return false
}

func (r *SingleClusterReconciler) IsReclusterNeededForRack(specRack, statusRack *asdbv1.Rack) bool {
specNamespaces, ok := specRack.AerospikeConfig.Value["namespaces"].([]interface{})
if !ok {
return false
}

statusNamespaces, ok := statusRack.AerospikeConfig.Value["namespaces"].([]interface{})
if !ok {
return false
}

for _, specNamespace := range specNamespaces {
for _, statusNamespace := range statusNamespaces {
if specNamespace.(map[string]interface{})["name"] != statusNamespace.(map[string]interface{})["name"] {
continue
}

if specNamespace.(map[string]interface{})["active-rack"] != statusNamespace.(map[string]interface{})["active-rack"] {
return true
}
}
}

return false
}
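
The per-rack check above reduces to matching namespaces by name and comparing their active-rack values between spec and status. A standalone sketch of that comparison on plain maps; activeRackChanged and the sample values are illustrative, not operator APIs:

```go
package main

import "fmt"

// activeRackChanged reports true when a namespace present in both spec and
// status carries a different "active-rack" value, which is the condition
// that makes IsReclusterNeededForRack request a recluster.
func activeRackChanged(specNamespaces, statusNamespaces []map[string]interface{}) bool {
	for _, specNS := range specNamespaces {
		for _, statusNS := range statusNamespaces {
			if specNS["name"] != statusNS["name"] {
				continue
			}

			if specNS["active-rack"] != statusNS["active-rack"] {
				return true
			}
		}
	}

	return false
}

func main() {
	spec := []map[string]interface{}{{"name": "test", "active-rack": 1}}
	status := []map[string]interface{}{{"name": "test", "active-rack": 0}}

	// Prints true: the spec moved the active rack, so a recluster is needed.
	fmt.Println(activeRackChanged(spec, status))
}
```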
21 changes: 18 additions & 3 deletions internal/controller/cluster/statefulset.go
@@ -92,6 +92,7 @@ func (r *SingleClusterReconciler) createSTS(
ports := getSTSContainerPort(
r.aeroCluster.Spec.PodSpec.MultiPodPerHost,
r.aeroCluster.Spec.AerospikeConfig,
&r.aeroCluster.Spec.AerospikeNetworkPolicy,
)

operatorDefinedLabels := utils.LabelsForAerospikeClusterRack(
@@ -608,6 +609,7 @@ func (r *SingleClusterReconciler) updateSTSPorts(
ports := getSTSContainerPort(
r.aeroCluster.Spec.PodSpec.MultiPodPerHost,
r.aeroCluster.Spec.AerospikeConfig,
&r.aeroCluster.Spec.AerospikeNetworkPolicy,
)

st.Spec.Template.Spec.Containers[0].Ports = ports
@@ -1539,12 +1541,24 @@ func addVolumeDeviceInContainer(
}

func getSTSContainerPort(
multiPodPerHost *bool, aeroConf *asdbv1.AerospikeConfigSpec,
multiPodPerHost *bool, aeroConf *asdbv1.AerospikeConfigSpec, aeroNetworkPolicy *asdbv1.AerospikeNetworkPolicy,
) []corev1.ContainerPort {
ports := make([]corev1.ContainerPort, 0, len(defaultContainerPorts))
portNames := make([]string, 0, len(defaultContainerPorts))
podOnlyNetwork := true

// Sorting defaultContainerPorts to fetch map in ordered manner.
// Check for podOnlyNetwork for all TLS and nonTLS fields.
if svcPort := asdbv1.GetServicePort(aeroConf); svcPort != nil {
podOnlyNetwork = aeroNetworkPolicy.AccessType == asdbv1.AerospikeNetworkTypePod &&
aeroNetworkPolicy.AlternateAccessType == asdbv1.AerospikeNetworkTypePod
}

if _, tlsSvcPort := asdbv1.GetServiceTLSNameAndPort(aeroConf); tlsSvcPort != nil {
podOnlyNetwork = podOnlyNetwork && aeroNetworkPolicy.TLSAccessType == asdbv1.AerospikeNetworkTypePod &&
aeroNetworkPolicy.TLSAlternateAccessType == asdbv1.AerospikeNetworkTypePod
}

// Sorting defaultContainerPorts to fetch map in an ordered manner.
// Helps reduce unnecessary sts object updates.
for portName := range defaultContainerPorts {
portNames = append(portNames, portName)
@@ -1567,11 +1581,12 @@
ContainerPort: int32(*configPort),
}
// Single pod per host: enable the hostPort setting only
// when a pod-only network is not defined.
// The hostPort setting applies to the Kubernetes containers.
// The container port will be exposed to the external network at <hostIP>:<hostPort>,
// where hostIP is the IP address of the Kubernetes node where
// the container is running and hostPort is the port requested by the user.
if !asdbv1.GetBool(multiPodPerHost) && portInfo.exposedOnHost {
if !asdbv1.GetBool(multiPodPerHost) && portInfo.exposedOnHost && !podOnlyNetwork {
containerPort.HostPort = containerPort.ContainerPort
}
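
The new hostPort decision comes down to one boolean per configured access type. Here is a small sketch with plain booleans standing in for the AerospikeNetworkPolicy fields and for the GetServicePort/GetServiceTLSNameAndPort lookups; podOnlyNetwork and needsHostPort are hypothetical helpers, not operator code:

```go
package main

import "fmt"

// podOnlyNetwork mirrors the check added in getSTSContainerPort: the ports
// stay pod-only when every configured access type (TLS and non-TLS) is of
// type "pod".
func podOnlyNetwork(hasServicePort, accessIsPod, altAccessIsPod,
	hasTLSPort, tlsAccessIsPod, tlsAltAccessIsPod bool) bool {
	podOnly := true

	if hasServicePort {
		podOnly = accessIsPod && altAccessIsPod
	}

	if hasTLSPort {
		podOnly = podOnly && tlsAccessIsPod && tlsAltAccessIsPod
	}

	return podOnly
}

// needsHostPort mirrors the updated condition: hostPort is set only for
// single-pod-per-host clusters whose exposed port is not on a pod-only network.
func needsHostPort(multiPodPerHost, exposedOnHost, podOnly bool) bool {
	return !multiPodPerHost && exposedOnHost && !podOnly
}

func main() {
	// Non-TLS access reaches outside the pod network, so hostPort is still set.
	podOnly := podOnlyNetwork(true, false, true, false, false, false)
	fmt.Println(needsHostPort(false, true, podOnly)) // true
}
```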

2 changes: 1 addition & 1 deletion pkg/backup-service/client.go
@@ -117,7 +117,7 @@ func (c *Client) GetBackupServiceConfig() (map[string]interface{}, error) {
func (c *Client) ApplyConfig() error {
url := c.API("/config/apply")

resp, err := http.Get(url)
resp, err := http.Post(url, "application/json", nil)
if err != nil {
return err
}
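
The client fix above replaces a GET with a POST against the backup service's /config/apply endpoint. A minimal sketch of that request using only net/http; the base URL is a placeholder and the status-code handling is an assumption about the surrounding client, not taken from it:

```go
package main

import (
	"fmt"
	"net/http"
)

// applyConfig issues the corrected POST to /config/apply with an empty body,
// matching the call in Client.ApplyConfig above.
func applyConfig(baseURL string) error {
	resp, err := http.Post(baseURL+"/config/apply", "application/json", nil)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("apply config failed with status %s", resp.Status)
	}

	return nil
}

func main() {
	// Placeholder service address; the operator builds this URL via c.API(...).
	fmt.Println(applyConfig("http://aerospike-backup-service:8080"))
}
```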
2 changes: 1 addition & 1 deletion pkg/configschema/schemas
11 changes: 11 additions & 0 deletions test/backup_service/backup_service_test.go
@@ -60,6 +60,17 @@ var _ = Describe(
Expect(err).To(HaveOccurred())
})

It("Should fail when duplicate volume names are given in secrets", func() {
backupService, err = NewBackupService()
Expect(err).ToNot(HaveOccurred())
secretCopy := backupService.Spec.SecretMounts[0]
backupService.Spec.SecretMounts = append(backupService.Spec.SecretMounts, secretCopy)

err = deployBackupServiceWithTO(k8sClient, backupService, 5*time.Second)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("duplicate volume name"))
})

It("Should fail when aerospike-clusters field is given", func() {
configMap := getBackupServiceConfMap()
configMap[common.AerospikeClustersKey] = map[string]interface{}{
9 changes: 5 additions & 4 deletions test/backup_service/test_utils.go
@@ -243,10 +243,11 @@ func getBackupServiceConfMap() map[string]interface{} {
"type": "local",
},
"s3Storage": map[string]interface{}{
"type": "aws-s3",
"path": "s3://aerospike-kubernetes-operator-test",
"s3-region": "us-east-1",
"s3-profile": "default",
"type": "aws-s3",
"path": "s3://aerospike-kubernetes-operator-test",
"s3-region": "us-east-1",
"s3-endpoint-override": "",
"s3-profile": "default",
},
},
}
113 changes: 113 additions & 0 deletions test/cluster/dynamic_config_test.go
@@ -607,6 +607,119 @@ var _ = Describe(
)
},
)

Context(
"When changing fields those need recluster", func() {
BeforeEach(
func() {
// Create a 4 node cluster
aeroCluster := createNonSCDummyAerospikeCluster(
clusterNamespacedName, 4,
)
aeroCluster.Spec.Storage.Volumes = append(
aeroCluster.Spec.Storage.Volumes,
asdbv1.VolumeSpec{
Name: "ns1",
Source: asdbv1.VolumeSource{
PersistentVolume: &asdbv1.PersistentVolumeSpec{
Size: resource.MustParse("1Gi"),
StorageClass: storageClass,
VolumeMode: v1.PersistentVolumeBlock,
},
},
Aerospike: &asdbv1.AerospikeServerVolumeAttachment{
Path: "/test/dev/xvdf1",
},
},
)
aeroCluster.Spec.EnableDynamicConfigUpdate = ptr.To(true)
aeroCluster.Spec.Image = "aerospike.jfrog.io/docker/aerospike/aerospike-server-enterprise-rc:7.2.0.0-rc2"
aeroCluster.Spec.PodSpec.ImagePullSecrets = []v1.LocalObjectReference{
{
Name: "regcred",
},
}
aeroCluster.Spec.RackConfig.Racks = append(aeroCluster.Spec.RackConfig.Racks,
asdbv1.Rack{
ID: 1,
},
asdbv1.Rack{
ID: 2,
})
aeroCluster.Spec.RackConfig.Namespaces = []string{
"test",
"test1",
}
nsList := aeroCluster.Spec.AerospikeConfig.Value["namespaces"].([]interface{})
nsList = append(nsList, getSCNamespaceConfig("test1", "/test/dev/xvdf1"))
aeroCluster.Spec.AerospikeConfig.Value["namespaces"] = nsList
err := deployCluster(k8sClient, ctx, aeroCluster)
Expect(err).ToNot(HaveOccurred())
},
)

AfterEach(
func() {
aeroCluster, err := getCluster(k8sClient, ctx, clusterNamespacedName)
Expect(err).ToNot(HaveOccurred())

_ = deleteCluster(k8sClient, ctx, aeroCluster)
},
)

It(
"Should update active-rack dynamically", func() {

By("Modify dynamic config by adding fields")

aeroCluster, err := getCluster(
k8sClient, ctx, clusterNamespacedName,
)
Expect(err).ToNot(HaveOccurred())

podPIDMap, err := getPodIDs(ctx, aeroCluster)
Expect(err).ToNot(HaveOccurred())

nsList := aeroCluster.Spec.AerospikeConfig.Value["namespaces"].([]interface{})
nsList[0].(map[string]interface{})["active-rack"] = 1
nsList[1].(map[string]interface{})["active-rack"] = 2

err = updateCluster(k8sClient, ctx, aeroCluster)
Expect(err).ToNot(HaveOccurred())

By("Fetch and verify dynamic configs")

pod := aeroCluster.Status.Pods["dynamic-config-test-1-0"]

info, err := requestInfoFromNode(logger, k8sClient, ctx, clusterNamespacedName, "namespace/test", &pod)
Expect(err).ToNot(HaveOccurred())

confs := strings.Split(info["namespace/test"], ";")
for _, conf := range confs {
if strings.Contains(conf, "effective_active_rack") {
keyValue := strings.Split(conf, "=")
Expect(keyValue[1]).To(Equal("1"))
}
}

info, err = requestInfoFromNode(logger, k8sClient, ctx, clusterNamespacedName, "namespace/test1", &pod)
Expect(err).ToNot(HaveOccurred())

confs = strings.Split(info["namespace/test1"], ";")
for _, conf := range confs {
if strings.Contains(conf, "effective_active_rack") {
keyValue := strings.Split(conf, "=")
Expect(keyValue[1]).To(Equal("2"))
}
}

By("Verify no warm/cold restarts in Pods")

validateServerRestart(ctx, aeroCluster, podPIDMap, false)
},
)
},
)
},
)
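
The assertions in the test above rely on the shape of an Aerospike info response: a semicolon-separated list of key=value pairs per namespace. A small sketch of extracting effective_active_rack from such a string; effectiveActiveRack and the sample input are illustrative only:

```go
package main

import (
	"fmt"
	"strings"
)

// effectiveActiveRack scans a "namespace/<ns>" info response and returns the
// effective_active_rack value, the same way the test splits on ";" and "=".
func effectiveActiveRack(info string) (string, bool) {
	for _, conf := range strings.Split(info, ";") {
		if strings.Contains(conf, "effective_active_rack") {
			keyValue := strings.Split(conf, "=")
			if len(keyValue) == 2 {
				return keyValue[1], true
			}
		}
	}

	return "", false
}

func main() {
	// Trimmed-down example response; the real one carries many more fields.
	fmt.Println(effectiveActiveRack("ns_cluster_size=4;effective_active_rack=1;objects=0"))
}
```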
