Syncing latest changes from upstream devel for ceph-csi #380

Merged: 6 commits, Sep 13, 2024
2 changes: 1 addition & 1 deletion build.env
@@ -44,7 +44,7 @@ HELM_SCRIPT=https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3
HELM_VERSION=v3.14.3

# minikube settings
-MINIKUBE_VERSION=v1.33.0
+MINIKUBE_VERSION=v1.34.0
VM_DRIVER=none
CHANGE_MINIKUBE_NONE_USER=true

15 changes: 14 additions & 1 deletion e2e/cephfs.go
@@ -22,7 +22,7 @@ import (
"strings"
"sync"

snapapi "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1"
snapapi "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
. "github.com/onsi/ginkgo/v2"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
@@ -2478,6 +2478,19 @@ var _ = Describe(cephfsType, func() {
}
})

By("test volumeGroupSnapshot", func() {
scName := "csi-cephfs-sc"
snapshotter, err := newCephFSVolumeGroupSnapshot(f, f.UniqueName, scName, false, deployTimeout, 3)
if err != nil {
framework.Failf("failed to create volumeGroupSnapshot Base: %v", err)
}

err = snapshotter.TestVolumeGroupSnapshot()
if err != nil {
framework.Failf("failed to test volumeGroupSnapshot: %v", err)
}
})

// FIXME: in case NFS testing is done, prevent deletion
// of the CephFS filesystem and related pool. This can
// probably be addressed in a nicer way, making sure
2 changes: 1 addition & 1 deletion e2e/cephfs_helper.go
@@ -24,7 +24,7 @@ import (
"strings"
"time"

snapapi "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1"
snapapi "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
2 changes: 2 additions & 0 deletions e2e/deployment.go
@@ -250,6 +250,8 @@ func (yrn *yamlResourceNamespaced) Do(action kubectlAction) error {
return fmt.Errorf("failed to read content from %q: %w", yrn.filename, err)
}

+data = replaceLogLevelInTemplate(data)
+
if yrn.oneReplica {
data = oneReplicaDeployYaml(data)
}
2 changes: 1 addition & 1 deletion e2e/nfs.go
@@ -23,7 +23,7 @@ import (
"sync"
"time"

snapapi "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1"
snapapi "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
. "github.com/onsi/ginkgo/v2"
v1 "k8s.io/api/core/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
13 changes: 13 additions & 0 deletions e2e/pvc.go
@@ -75,6 +75,19 @@ func createPVCAndvalidatePV(c kubernetes.Interface, pvc *v1.PersistentVolumeClai
}

if pvc.Spec.VolumeName == "" {
+var events *v1.EventList
+// Log the events for the PVC if it's not bound yet
+events, err = c.CoreV1().Events(namespace).List(ctx, metav1.ListOptions{
+FieldSelector: "involvedObject.kind=PersistentVolumeClaim,involvedObject.name=" + name,
+})
+if err == nil {
+for i := range events.Items {
+framework.Logf("PVC %s Event: %s - %s", name, events.Items[i].Reason, events.Items[i].Message)
+}
+} else {
+framework.Logf("error getting events for PVC %s: %v", name, err)
+}
+
return false, nil
}

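The event logging added above builds its field selector by plain string concatenation. As a side note, the same selector can also be built with k8s.io/apimachinery/pkg/fields, which handles escaping; below is a minimal sketch, where the logPVCEvents helper name is hypothetical and not part of this PR:

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
)

// logPVCEvents logs every event attached to the named PVC, mirroring the
// inline code added to createPVCAndvalidatePV above.
func logPVCEvents(ctx context.Context, c kubernetes.Interface, namespace, name string) {
	selector := fields.Set{
		"involvedObject.kind": "PersistentVolumeClaim",
		"involvedObject.name": name,
	}.AsSelector().String()

	events, err := c.CoreV1().Events(namespace).List(ctx, metav1.ListOptions{FieldSelector: selector})
	if err != nil {
		framework.Logf("error getting events for PVC %s: %v", name, err)
		return
	}
	for i := range events.Items {
		framework.Logf("PVC %s Event: %s - %s", name, events.Items[i].Reason, events.Items[i].Message)
	}
}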
2 changes: 1 addition & 1 deletion e2e/rbd_helper.go
@@ -27,7 +27,7 @@ import (

"github.com/ceph/ceph-csi/internal/util"

snapapi "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1"
snapapi "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
v1 "k8s.io/api/core/v1"
scv1 "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
4 changes: 2 additions & 2 deletions e2e/snapshot.go
@@ -21,8 +21,8 @@ import (
"fmt"
"time"

snapapi "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1"
snapclient "github.com/kubernetes-csi/external-snapshotter/client/v7/clientset/versioned/typed/volumesnapshot/v1"
snapapi "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
snapclient "github.com/kubernetes-csi/external-snapshotter/client/v8/clientset/versioned/typed/volumesnapshot/v1"
. "github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
45 changes: 34 additions & 11 deletions e2e/utils.go
@@ -30,7 +30,7 @@ import (
"sync"
"time"

snapapi "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1"
snapapi "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
appsv1 "k8s.io/api/apps/v1"
batch "k8s.io/api/batch/v1"
v1 "k8s.io/api/core/v1"
@@ -52,8 +52,9 @@ const (
rbdType = "rbd"
cephfsType = "cephfs"

-volumesType = "volumes"
-snapsType   = "snaps"
+volumesType    = "volumes"
+snapsType      = "snaps"
+groupSnapsType = "groupsnaps"

rookToolBoxPodLabel = "app=rook-ceph-tools"
rbdMountOptions = "mountOptions"
@@ -174,17 +175,20 @@ func validateOmapCount(f *framework.Framework, count int, driver, pool, mode str
volumeMode: volumesType,
driverType: cephfsType,
radosLsCmd: fmt.Sprintf("rados ls --pool=%s --namespace csi", pool),
radosLsCmdFilter: fmt.Sprintf("rados ls --pool=%s --namespace csi | grep -v default | grep -c ^csi.volume.",
radosLsCmdFilter: fmt.Sprintf(
"rados ls --pool=%s --namespace csi | grep -v default | grep -v csi.volume.group. | grep -c ^csi.volume.",
pool),
radosLsKeysCmd: fmt.Sprintf("rados listomapkeys csi.volumes.default --pool=%s --namespace csi", pool),
radosLsKeysCmdFilter: fmt.Sprintf("rados listomapkeys csi.volumes.default --pool=%s --namespace csi|wc -l",
radosLsKeysCmdFilter: fmt.Sprintf("rados listomapkeys csi.volumes.default --pool=%s --namespace csi | wc -l",
pool),
},
{
-volumeMode: volumesType,
-driverType: rbdType,
-radosLsCmd: "rados ls " + rbdOptions(pool),
-radosLsCmdFilter: fmt.Sprintf("rados ls %s | grep -v default | grep -c ^csi.volume.", rbdOptions(pool)),
+volumeMode: volumesType,
+driverType: rbdType,
+radosLsCmd: "rados ls " + rbdOptions(pool),
+radosLsCmdFilter: fmt.Sprintf(
+"rados ls %s | grep -v default | grep -v csi.volume.group. | grep -c ^csi.volume.",
+rbdOptions(pool)),
radosLsKeysCmd: "rados listomapkeys csi.volumes.default " + rbdOptions(pool),
radosLsKeysCmdFilter: fmt.Sprintf("rados listomapkeys csi.volumes.default %s | wc -l", rbdOptions(pool)),
},
@@ -195,7 +199,7 @@ func validateOmapCount(f *framework.Framework, count int, driver, pool, mode str
radosLsCmdFilter: fmt.Sprintf("rados ls --pool=%s --namespace csi | grep -v default | grep -c ^csi.snap.",
pool),
radosLsKeysCmd: fmt.Sprintf("rados listomapkeys csi.snaps.default --pool=%s --namespace csi", pool),
radosLsKeysCmdFilter: fmt.Sprintf("rados listomapkeys csi.snaps.default --pool=%s --namespace csi|wc -l",
radosLsKeysCmdFilter: fmt.Sprintf("rados listomapkeys csi.snaps.default --pool=%s --namespace csi | wc -l",
pool),
},
{
Expand All @@ -206,6 +210,16 @@ func validateOmapCount(f *framework.Framework, count int, driver, pool, mode str
radosLsKeysCmd: "rados listomapkeys csi.snaps.default " + rbdOptions(pool),
radosLsKeysCmdFilter: fmt.Sprintf("rados listomapkeys csi.snaps.default %s | wc -l", rbdOptions(pool)),
},
+{
+volumeMode: groupSnapsType,
+driverType: cephfsType,
+radosLsCmd: fmt.Sprintf("rados ls --pool=%s --namespace csi", pool),
+radosLsCmdFilter: fmt.Sprintf("rados ls --pool=%s --namespace csi | grep -v default | grep -c ^csi.volume.group.",
+pool),
+radosLsKeysCmd: fmt.Sprintf("rados listomapkeys csi.groups.default --pool=%s --namespace csi", pool),
+radosLsKeysCmdFilter: fmt.Sprintf("rados listomapkeys csi.groups.default --pool=%s --namespace csi | wc -l",
+pool),
+},
}

for _, cmds := range radosListCommands {
@@ -228,7 +242,7 @@ func validateOmapCount(f *framework.Framework, count int, driver, pool, mode str
if err == nil {
continue
}
-saveErr := err
+saveErr := fmt.Errorf("failed to validate omap count for %s: %w", cmd, err)
if strings.Contains(err.Error(), "expected omap object count") {
stdOut, stdErr, err = execCommandInToolBoxPod(f, filterLessCmds[i], rookNamespace)
if err == nil {
@@ -823,6 +837,15 @@ func oneReplicaDeployYaml(template string) string {
return re.ReplaceAllString(template, `$1 1`)
}

+// replaceLogLevelInTemplate replaces any --v=<level> argument in the template with --v=5.
+func replaceLogLevelInTemplate(template string) string {
+// regular expression to find --v=<number> arguments
+re := regexp.MustCompile(`--v=\d+`)
+
+// the template can contain different log levels; replace them all with --v=5
+return re.ReplaceAllString(template, "--v=5")
+}

func enableReadAffinityInTemplate(template string) string {
return strings.ReplaceAll(template, "# - \"--enable-read-affinity=true\"", "- \"--enable-read-affinity=true\"")
}
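To see what the new replaceLogLevelInTemplate helper does to a deployed template, here is a minimal, self-contained sketch; the manifest fragment is illustrative only, not taken from this repository:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// A fragment of a CSI sidecar spec; sidecars ship with varying log levels.
	template := `containers:
  - name: csi-provisioner
    args:
      - "--v=1"
  - name: csi-snapshotter
    args:
      - "--v=3"`

	// Same regex as replaceLogLevelInTemplate in e2e/utils.go.
	re := regexp.MustCompile(`--v=\d+`)
	fmt.Println(re.ReplaceAllString(template, "--v=5"))
	// Every sidecar now runs with --v=5, making e2e failures easier to debug.
}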
119 changes: 119 additions & 0 deletions e2e/volumegroupsnapshot.go
@@ -0,0 +1,119 @@
/*
Copyright 2024 The Ceph-CSI Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e

import (
"context"
"fmt"

groupsnapapi "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1alpha1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework"
)

type cephFSVolumeGroupSnapshot struct {
*volumeGroupSnapshotterBase
}

var _ VolumeGroupSnapshotter = &cephFSVolumeGroupSnapshot{}

func newCephFSVolumeGroupSnapshot(f *framework.Framework, namespace,
storageClass string,
blockPVC bool,
timeout, totalPVCCount int,
) (VolumeGroupSnapshotter, error) {
base, err := newVolumeGroupSnapshotBase(f, namespace, storageClass, blockPVC, timeout, totalPVCCount)
if err != nil {
return nil, fmt.Errorf("failed to create volumeGroupSnapshotterBase: %w", err)
}

return &cephFSVolumeGroupSnapshot{
volumeGroupSnapshotterBase: base,
}, nil
}

func (c *cephFSVolumeGroupSnapshot) TestVolumeGroupSnapshot() error {
return c.volumeGroupSnapshotterBase.testVolumeGroupSnapshot(c)
}

func (c *cephFSVolumeGroupSnapshot) GetVolumeGroupSnapshotClass() (*groupsnapapi.VolumeGroupSnapshotClass, error) {
vgscPath := fmt.Sprintf("%s/%s", cephFSExamplePath, "groupsnapshotclass.yaml")
vgsc := &groupsnapapi.VolumeGroupSnapshotClass{}
err := unmarshal(vgscPath, vgsc)
if err != nil {
return nil, fmt.Errorf("failed to unmarshal VolumeGroupSnapshotClass: %w", err)
}

vgsc.Parameters["csi.storage.k8s.io/group-snapshotter-secret-namespace"] = cephCSINamespace
vgsc.Parameters["csi.storage.k8s.io/group-snapshotter-secret-name"] = cephFSProvisionerSecretName
vgsc.Parameters["fsName"] = fileSystemName

fsID, err := getClusterID(c.framework)
if err != nil {
return nil, fmt.Errorf("failed to get clusterID: %w", err)
}
vgsc.Parameters["clusterID"] = fsID

return vgsc, nil
}

func (c *cephFSVolumeGroupSnapshot) ValidateResourcesForCreate(vgs *groupsnapapi.VolumeGroupSnapshot) error {
ctx := context.TODO()
metadataPool, err := getCephFSMetadataPoolName(c.framework, fileSystemName)
if err != nil {
return fmt.Errorf("failed getting cephFS metadata pool name: %w", err)
}

sourcePVCCount := len(vgs.Status.PVCVolumeSnapshotRefList)
// we are creating clones for each source PVC
clonePVCCount := len(vgs.Status.PVCVolumeSnapshotRefList)
totalPVCCount := sourcePVCCount + clonePVCCount
validateSubvolumeCount(c.framework, totalPVCCount, fileSystemName, subvolumegroup)

// we are creating 1 snapshot for each source PVC, validate the snapshot count
for _, pvcSnap := range vgs.Status.PVCVolumeSnapshotRefList {
pvc, err := c.framework.ClientSet.CoreV1().PersistentVolumeClaims(vgs.Namespace).Get(ctx,
pvcSnap.PersistentVolumeClaimRef.Name,
metav1.GetOptions{})
if err != nil {
return fmt.Errorf("failed to get PVC: %w", err)
}
pv := pvc.Spec.VolumeName
pvObj, err := c.framework.ClientSet.CoreV1().PersistentVolumes().Get(ctx, pv, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("failed to get PV: %w", err)
}
validateCephFSSnapshotCount(c.framework, 1, subvolumegroup, pvObj)
}
validateOmapCount(c.framework, totalPVCCount, cephfsType, metadataPool, volumesType)
validateOmapCount(c.framework, sourcePVCCount, cephfsType, metadataPool, snapsType)
validateOmapCount(c.framework, 1, cephfsType, metadataPool, groupSnapsType)

return nil
}

func (c *cephFSVolumeGroupSnapshot) ValidateResourcesForDelete() error {
metadataPool, err := getCephFSMetadataPoolName(c.framework, fileSystemName)
if err != nil {
return fmt.Errorf("failed getting cephFS metadata pool name: %w", err)
}
validateOmapCount(c.framework, 0, cephfsType, metadataPool, volumesType)
validateOmapCount(c.framework, 0, cephfsType, metadataPool, snapsType)
validateOmapCount(c.framework, 0, cephfsType, metadataPool, groupSnapsType)

return nil
}
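The VolumeGroupSnapshotter interface asserted above (var _ VolumeGroupSnapshotter = &cephFSVolumeGroupSnapshot{}) is defined elsewhere in the e2e package and is not part of this diff. Judging only from the methods implemented in this file, it presumably looks roughly like the following sketch:

// Reconstructed shape of the interface; the real definition lives elsewhere
// in the e2e package and may differ.
type VolumeGroupSnapshotter interface {
	// TestVolumeGroupSnapshot drives the create/validate/delete cycle.
	TestVolumeGroupSnapshot() error
	// GetVolumeGroupSnapshotClass returns the class used for group snapshots.
	GetVolumeGroupSnapshotClass() (*groupsnapapi.VolumeGroupSnapshotClass, error)
	// ValidateResourcesForCreate checks subvolume, snapshot and omap counts after create.
	ValidateResourcesForCreate(vgs *groupsnapapi.VolumeGroupSnapshot) error
	// ValidateResourcesForDelete checks that omap entries are cleaned up after delete.
	ValidateResourcesForDelete() error
}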