diff --git a/Makefile b/Makefile index 57024dc20f9..7a4067fbf2f 100644 --- a/Makefile +++ b/Makefile @@ -32,6 +32,11 @@ ifeq ($(HAVE_CPUSET),1) CPUSET ?= --cpuset-cpus=0-${CPUS} endif +ifneq ($(GITHUB_ACTION),) + # see https://github.com/containers/podman/issues/21012 + SECURITY_OPT ?= --security-opt seccomp=unconfined +endif + CSI_IMAGE_NAME=$(if $(ENV_CSI_IMAGE_NAME),$(ENV_CSI_IMAGE_NAME),quay.io/cephcsi/cephcsi) CSI_IMAGE_VERSION=$(shell . $(CURDIR)/build.env ; echo $${CSI_IMAGE_VERSION}) CSI_IMAGE=$(CSI_IMAGE_NAME):$(CSI_IMAGE_VERSION) @@ -229,7 +234,7 @@ ifeq ($(USE_PULLED_IMAGE),no) .test-container-id: .container-cmd build.env scripts/Dockerfile.test [ ! -f .test-container-id ] || $(CONTAINER_CMD) rmi $(CSI_IMAGE_NAME):test $(RM) .test-container-id - $(CONTAINER_CMD) build $(CPUSET) --build-arg GOARCH=$(GOARCH) -t $(CSI_IMAGE_NAME):test -f ./scripts/Dockerfile.test . + $(CONTAINER_CMD) build $(CPUSET) $(SECURITY_OPT) --build-arg GOARCH=$(GOARCH) -t $(CSI_IMAGE_NAME):test -f ./scripts/Dockerfile.test . $(CONTAINER_CMD) inspect -f '{{.Id}}' $(CSI_IMAGE_NAME):test > .test-container-id else # create the .test-container-id file based on the pulled image diff --git a/charts/ceph-csi-cephfs/README.md b/charts/ceph-csi-cephfs/README.md index 703b923ff75..1769e3f28bc 100644 --- a/charts/ceph-csi-cephfs/README.md +++ b/charts/ceph-csi-cephfs/README.md @@ -135,7 +135,6 @@ charts and their default values. | `nodeplugin.forcecephkernelclient` | Set to true to enable Ceph Kernel clients on kernel < 4.17 which support quotas | `true` | | `nodeplugin.kernelmountoptions` | Comma separated string of mount options accepted by cephfs kernel mounter quotas | `""` | | `nodeplugin.fusemountoptions` | Comma separated string of mount options accepted by ceph-fuse mounter quotas | `""` | -| `nodeplugin.podSecurityPolicy.enabled` | If true, create & use [Pod Security Policy resources](https://kubernetes.io/docs/concepts/policy/pod-security-policy/). 
| `false` | | `provisioner.name` | Specifies the name of provisioner | `provisioner` | | `provisioner.replicaCount` | Specifies the replicaCount | `3` | | `provisioner.timeout` | GRPC timeout for waiting for creation or deletion of a volume | `60s` | @@ -163,7 +162,6 @@ charts and their default values. | `provisioner.nodeSelector` | Specifies the node selector for provisioner deployment | `{}` | | `provisioner.tolerations` | Specifies the tolerations for provisioner deployment | `{}` | | `provisioner.affinity` | Specifies the affinity for provisioner deployment | `{}` | -| `provisioner.podSecurityPolicy.enabled` | Specifies whether podSecurityPolicy is enabled | `false` | | `provisioner.podSecurityContext` | Specifies pod-level security context. | `{}` | | `provisionerSocketFile` | The filename of the provisioner socket | `csi-provisioner.sock` | | `pluginSocketFile` | The filename of the plugin socket | `csi.sock` | @@ -210,5 +208,5 @@ Specify each parameter using the --set key=value argument to helm install. For Example: ```bash -helm install --set configMapName=ceph-csi-config --set provisioner.podSecurityPolicy.enabled=true +helm install --set configMapName=ceph-csi-config ``` diff --git a/charts/ceph-csi-rbd/README.md b/charts/ceph-csi-rbd/README.md index e18ee51b8a7..c516d410664 100644 --- a/charts/ceph-csi-rbd/README.md +++ b/charts/ceph-csi-rbd/README.md @@ -134,7 +134,6 @@ charts and their default values. | `nodeplugin.podSecurityContext` | Specifies pod-level security context. | `{}` | | `nodeplugin.nodeSelector` | Kubernetes `nodeSelector` to add to the Daemonset | `{}` | | `nodeplugin.tolerations` | List of Kubernetes `tolerations` to add to the Daemonset | `{}` | -| `nodeplugin.podSecurityPolicy.enabled` | If true, create & use [Pod Security Policy resources](https://kubernetes.io/docs/concepts/policy/pod-security-policy/). 
| `false` | | `provisioner.name` | Specifies the name of provisioner | `provisioner` | | `provisioner.replicaCount` | Specifies the replicaCount | `3` | | `provisioner.defaultFSType` | Specifies the default Fstype | `ext4` | @@ -176,7 +175,6 @@ charts and their default values. | `provisioner.nodeSelector` | Specifies the node selector for provisioner deployment | `{}` | | `provisioner.tolerations` | Specifies the tolerations for provisioner deployment | `{}` | | `provisioner.affinity` | Specifies the affinity for provisioner deployment | `{}` | -| `provisioner.podSecurityPolicy.enabled` | Specifies whether podSecurityPolicy is enabled | `false` | | `topology.enabled` | Specifies whether topology based provisioning support should be exposed by CSI | `false` | | `topology.domainLabels` | DomainLabels define which node labels to use as domains for CSI nodeplugins to advertise their domains | `{}` | | `readAffinity.enabled` | Enable read affinity for RBD volumes. Recommended to set to true if running kernel 5.8 or newer. | `false` | @@ -238,5 +236,5 @@ Specify each parameter using the --set key=value argument to helm install. 
For Example: ```bash -helm install --set configMapName=ceph-csi-config --set provisioner.podSecurityPolicy.enabled=true +helm install --set configMapName=ceph-csi-config ``` diff --git a/cmd/cephcsi.go b/cmd/cephcsi.go index 9ef607be463..a43ac276362 100644 --- a/cmd/cephcsi.go +++ b/cmd/cephcsi.go @@ -70,8 +70,8 @@ func init() { flag.StringVar(&conf.StagingPath, "stagingpath", defaultStagingPath, "staging path") flag.StringVar(&conf.ClusterName, "clustername", "", "name of the cluster") flag.BoolVar(&conf.SetMetadata, "setmetadata", false, "set metadata on the volume") - flag.StringVar(&conf.InstanceID, "instanceid", "", "Unique ID distinguishing this instance of Ceph CSI among other"+ - " instances, when sharing Ceph clusters across CSI instances for provisioning") + flag.StringVar(&conf.InstanceID, "instanceid", "default", "Unique ID distinguishing this instance of Ceph-CSI"+ + " among other instances, when sharing Ceph clusters across CSI instances for provisioning") flag.IntVar(&conf.PidLimit, "pidlimit", 0, "the PID limit to configure through cgroups") flag.BoolVar(&conf.IsControllerServer, "controllerserver", false, "start cephcsi controller server") flag.BoolVar(&conf.IsNodeServer, "nodeserver", false, "start cephcsi node server") @@ -249,6 +249,7 @@ func main() { DriverName: dname, Namespace: conf.DriverNamespace, ClusterName: conf.ClusterName, + InstanceID: conf.InstanceID, SetMetadata: conf.SetMetadata, } // initialize all controllers before starting. diff --git a/internal/cephfs/driver.go b/internal/cephfs/driver.go index 162b027ee2f..97509956d77 100644 --- a/internal/cephfs/driver.go +++ b/internal/cephfs/driver.go @@ -45,10 +45,6 @@ type Driver struct { cas *csiaddons.CSIAddonsServer } -// CSIInstanceID is the instance ID that is unique to an instance of CSI, used when sharing -// ceph clusters across CSI instances, to differentiate omap names per CSI instance. -var CSIInstanceID = "default" - // NewDriver returns new ceph driver. 
func NewDriver() *Driver { return &Driver{} @@ -105,11 +101,6 @@ func (fs *Driver) Run(conf *util.Config) { log.FatalLogMsg("cephfs: failed to load ceph mounters: %v", err) } - // Use passed in instance ID, if provided for omap suffix naming - if conf.InstanceID != "" { - CSIInstanceID = conf.InstanceID - } - // Use passed in radosNamespace, if provided for storing CSI specific objects and keys. if conf.RadosNamespaceCephFS != "" { fsutil.RadosNamespace = conf.RadosNamespaceCephFS @@ -127,16 +118,16 @@ func (fs *Driver) Run(conf *util.Config) { } // Create an instance of the volume journal - store.VolJournal = journal.NewCSIVolumeJournalWithNamespace(CSIInstanceID, fsutil.RadosNamespace) + store.VolJournal = journal.NewCSIVolumeJournalWithNamespace(conf.InstanceID, fsutil.RadosNamespace) - store.SnapJournal = journal.NewCSISnapshotJournalWithNamespace(CSIInstanceID, fsutil.RadosNamespace) + store.SnapJournal = journal.NewCSISnapshotJournalWithNamespace(conf.InstanceID, fsutil.RadosNamespace) store.VolumeGroupJournal = journal.NewCSIVolumeGroupJournalWithNamespace( - CSIInstanceID, + conf.InstanceID, fsutil.RadosNamespace) // Initialize default library driver - fs.cd = csicommon.NewCSIDriver(conf.DriverName, util.DriverVersion, conf.NodeID) + fs.cd = csicommon.NewCSIDriver(conf.DriverName, util.DriverVersion, conf.NodeID, conf.InstanceID) if fs.cd == nil { log.FatalLogMsg("failed to initialize CSI driver") } diff --git a/internal/cephfs/groupcontrollerserver_test.go b/internal/cephfs/groupcontrollerserver_test.go index 6aacc223bb7..a505064c7f1 100644 --- a/internal/cephfs/groupcontrollerserver_test.go +++ b/internal/cephfs/groupcontrollerserver_test.go @@ -31,7 +31,7 @@ func TestControllerServer_validateCreateVolumeGroupSnapshotRequest(t *testing.T) t.Parallel() cs := ControllerServer{ DefaultControllerServer: csicommon.NewDefaultControllerServer( - csicommon.NewCSIDriver("cephfs.csi.ceph.com", "1.0.0", "test")), + csicommon.NewCSIDriver("cephfs.csi.ceph.com", 
"1.0.0", "test", "default")), } type args struct { diff --git a/internal/controller/controller.go b/internal/controller/controller.go index c1707f14c85..37066045e91 100644 --- a/internal/controller/controller.go +++ b/internal/controller/controller.go @@ -39,6 +39,7 @@ type Config struct { DriverName string Namespace string ClusterName string + InstanceID string SetMetadata bool } diff --git a/internal/controller/persistentvolume/persistentvolume.go b/internal/controller/persistentvolume/persistentvolume.go index 433bf616b2f..07ff02d8f11 100644 --- a/internal/controller/persistentvolume/persistentvolume.go +++ b/internal/controller/persistentvolume/persistentvolume.go @@ -189,6 +189,7 @@ func (r *ReconcilePersistentVolume) reconcilePV(ctx context.Context, obj runtime requestName, pvcNamespace, r.config.ClusterName, + r.config.InstanceID, r.config.SetMetadata, cr) if err != nil { diff --git a/internal/csi-common/driver.go b/internal/csi-common/driver.go index 1100f05a3af..bfefba1015d 100644 --- a/internal/csi-common/driver.go +++ b/internal/csi-common/driver.go @@ -30,6 +30,11 @@ type CSIDriver struct { name string nodeID string version string + + // instance is the instance ID that is unique to an instance of CSI, used when sharing + // ceph clusters across CSI instances, to differentiate omap names per CSI instance. + instance string + // topology constraints that this nodeserver will advertise topology map[string]string capabilities []*csi.ControllerServiceCapability @@ -40,7 +45,7 @@ type CSIDriver struct { // NewCSIDriver Creates a NewCSIDriver object. Assumes vendor // version is equal to driver version & does not support optional // driver plugin info manifest field. Refer to CSI spec for more details. 
-func NewCSIDriver(name, v, nodeID string) *CSIDriver { +func NewCSIDriver(name, v, nodeID, instance string) *CSIDriver { if name == "" { klog.Errorf("Driver name missing") @@ -59,15 +64,27 @@ func NewCSIDriver(name, v, nodeID string) *CSIDriver { return nil } + if instance == "" { + klog.Errorf("Instance argument missing") + + return nil + } + driver := CSIDriver{ - name: name, - version: v, - nodeID: nodeID, + name: name, + version: v, + nodeID: nodeID, + instance: instance, } return &driver } +// GetInstanceID returns the instance identification of the CSI driver. +func (d *CSIDriver) GetInstanceID() string { + return d.instance +} + // ValidateControllerServiceRequest validates the controller // plugin capabilities. func (d *CSIDriver) ValidateControllerServiceRequest(c csi.ControllerServiceCapability_RPC_Type) error { diff --git a/internal/nfs/controller/controllerserver.go b/internal/nfs/controller/controllerserver.go index 15c610131a7..125330284d2 100644 --- a/internal/nfs/controller/controllerserver.go +++ b/internal/nfs/controller/controllerserver.go @@ -45,8 +45,8 @@ type Server struct { // NewControllerServer initialize a controller server for ceph CSI driver. 
func NewControllerServer(d *csicommon.CSIDriver) *Server { // global instance of the volume journal, yuck - store.VolJournal = journal.NewCSIVolumeJournalWithNamespace(cephfs.CSIInstanceID, fsutil.RadosNamespace) - store.SnapJournal = journal.NewCSISnapshotJournalWithNamespace(cephfs.CSIInstanceID, fsutil.RadosNamespace) + store.VolJournal = journal.NewCSIVolumeJournalWithNamespace(d.GetInstanceID(), fsutil.RadosNamespace) + store.SnapJournal = journal.NewCSISnapshotJournalWithNamespace(d.GetInstanceID(), fsutil.RadosNamespace) return &Server{ backendServer: cephfs.NewControllerServer(d), diff --git a/internal/nfs/driver/driver.go b/internal/nfs/driver/driver.go index ecefa55a946..51eefc568e4 100644 --- a/internal/nfs/driver/driver.go +++ b/internal/nfs/driver/driver.go @@ -39,7 +39,7 @@ func NewDriver() *Driver { // ceph CSI driver which can serve multiple parallel requests. func (fs *Driver) Run(conf *util.Config) { // Initialize default library driver - cd := csicommon.NewCSIDriver(conf.DriverName, util.DriverVersion, conf.NodeID) + cd := csicommon.NewCSIDriver(conf.DriverName, util.DriverVersion, conf.NodeID, conf.InstanceID) if cd == nil { log.FatalLogMsg("failed to initialize CSI driver") } diff --git a/internal/rbd/driver/driver.go b/internal/rbd/driver/driver.go index c1db88fa908..cebfc53cf6b 100644 --- a/internal/rbd/driver/driver.go +++ b/internal/rbd/driver/driver.go @@ -100,7 +100,7 @@ func (r *Driver) Run(conf *util.Config) { rbd.InitJournals(conf.InstanceID) // Initialize default library driver - r.cd = csicommon.NewCSIDriver(conf.DriverName, util.DriverVersion, conf.NodeID) + r.cd = csicommon.NewCSIDriver(conf.DriverName, util.DriverVersion, conf.NodeID, conf.InstanceID) if r.cd == nil { log.FatalLogMsg("Failed to initialize CSI Driver.") } @@ -217,7 +217,7 @@ func (r *Driver) setupCSIAddonsServer(conf *util.Config) error { fcs := casrbd.NewFenceControllerServer() r.cas.RegisterService(fcs) - rcs := casrbd.NewReplicationServer(rbd.CSIInstanceID, 
NewControllerServer(r.cd)) + rcs := casrbd.NewReplicationServer(conf.InstanceID, NewControllerServer(r.cd)) r.cas.RegisterService(rcs) vgcs := casrbd.NewVolumeGroupServer(conf.InstanceID) diff --git a/internal/rbd/globals.go b/internal/rbd/globals.go index b1a6ef3dced..c16a11d3e9c 100644 --- a/internal/rbd/globals.go +++ b/internal/rbd/globals.go @@ -23,10 +23,6 @@ import ( ) var ( - // CSIInstanceID is the instance ID that is unique to an instance of CSI, used when sharing - // ceph clusters across CSI instances, to differentiate omap names per CSI instance. - CSIInstanceID = "default" - // volJournal and snapJournal are used to maintain RADOS based journals for CO generated // VolumeName to backing RBD images. volJournal *journal.Config @@ -91,11 +87,6 @@ func SetGlobalBool(name string, value bool) { // NodeService where appropriate. Using global journals limits the ability to // configure these options based on the Ceph cluster or StorageClass. func InitJournals(instance string) { - // Use passed in instance ID, if provided for omap suffix naming - if instance != "" { - CSIInstanceID = instance - } - - volJournal = journal.NewCSIVolumeJournal(CSIInstanceID) - snapJournal = journal.NewCSISnapshotJournal(CSIInstanceID) + volJournal = journal.NewCSIVolumeJournal(instance) + snapJournal = journal.NewCSISnapshotJournal(instance) } diff --git a/internal/rbd/rbd_journal.go b/internal/rbd/rbd_journal.go index 78f5d5fdde6..44c951c5338 100644 --- a/internal/rbd/rbd_journal.go +++ b/internal/rbd/rbd_journal.go @@ -547,7 +547,8 @@ func RegenerateJournal( volumeID, requestName, owner, - clusterName string, + clusterName, + instanceID string, setMetadata bool, cr *util.Credentials, ) (string, error) { @@ -595,7 +596,7 @@ func RegenerateJournal( if rbdVol.JournalPool == "" { rbdVol.JournalPool = rbdVol.Pool } - volJournal = journal.NewCSIVolumeJournal(CSIInstanceID) + volJournal = journal.NewCSIVolumeJournal(instanceID) j, err := volJournal.Connect(rbdVol.Monitors, 
rbdVol.RadosNamespace, cr) if err != nil { return "", err diff --git a/scripts/Dockerfile.test b/scripts/Dockerfile.test index 9af0888a6a0..a05d7737e08 100644 --- a/scripts/Dockerfile.test +++ b/scripts/Dockerfile.test @@ -8,7 +8,7 @@ # little different. # -FROM registry.fedoraproject.org/fedora:39 +FROM registry.fedoraproject.org/fedora:latest ARG GOPATH=/go ARG GOROOT=/usr/local/go