Merge pull request #211 from red-hat-storage/sync_us--devel
Syncing latest changes from devel for ceph-csi
openshift-merge-bot[bot] authored Nov 9, 2023
2 parents 8c428f5 + c4e373c commit 9f9c3fe
Showing 28 changed files with 599 additions and 146 deletions.
18 changes: 18 additions & 0 deletions .github/workflows/tickgit.yaml
@@ -0,0 +1,18 @@
---
name: List TODO's
# yamllint disable-line rule:truthy
on:
push:
branches:
- devel

permissions:
contents: read

jobs:
tickgit:
name: tickgit
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- run: make containerized-test TARGET=tickgit
4 changes: 4 additions & 0 deletions Makefile
@@ -143,6 +143,10 @@ check-env:

codespell:
codespell --config scripts/codespell.conf

tickgit:
tickgit $(CURDIR)

#
# commitlint will do a rebase on top of GIT_SINCE when REBASE=1 is passed.
#
5 changes: 5 additions & 0 deletions PendingReleaseNotes.md
@@ -5,3 +5,8 @@
- Removed the deprecated grpc metrics flag's in [PR](https://github.com/ceph/ceph-csi/pull/4225)

## Features

RBD

- Support for configuring read affinity for individual clusters within the ceph-csi-config
ConfigMap in [PR](https://github.com/ceph/ceph-csi/pull/4165)
2 changes: 1 addition & 1 deletion api/deploy/ocp/scc.go
@@ -41,7 +41,7 @@ type SecurityContextConstraintsValues struct {
}

// SecurityContextConstraintsDefaults can be used for generating deployment
// artifacts with defails values.
// artifacts with details values.
var SecurityContextConstraintsDefaults = SecurityContextConstraintsValues{
Namespace: "ceph-csi",
Deployer: "",
5 changes: 5 additions & 0 deletions charts/ceph-csi-rbd/values.yaml
@@ -27,6 +27,11 @@ serviceAccounts:
# - "<MONValue2>"
# rbd:
# netNamespaceFilePath: "{{ .kubeletDir }}/plugins/{{ .driverName }}/net"
# readAffinity:
# enabled: true
# crushLocationLabels:
# - topology.kubernetes.io/region
# - topology.kubernetes.io/zone
csiConfig: []

# Configuration details of clusterID,PoolID and FscID mapping
13 changes: 13 additions & 0 deletions deploy/csi-config-map-sample.yaml
@@ -32,6 +32,10 @@ kind: ConfigMap
# path for the Ceph cluster identified by the <cluster-id>, This will be used
# by the RBD CSI plugin to execute the rbd map/unmap in the
# network namespace specified by the "rbd.netNamespaceFilePath".
# The "readAffinity" fields are used to enable read affinity and pass the crush
# location map for the Ceph cluster identified by the cluster <cluster-id>,
# enabling this will add
# "read_from_replica=localize,crush_location=<label:value>" to the map option.
# If a CSI plugin is using more than one Ceph cluster, repeat the section for
# each such cluster in use.
# NOTE: Changes to the configmap is automatically updated in the running pods,
@@ -66,6 +70,15 @@ data:
}
"nfs": {
"netNamespaceFilePath": "<kubeletRootPath>/plugins/nfs.csi.ceph.com/net",
},
"readAffinity": {
"enabled": "false",
"crushLocationLabels": [
"<Label1>",
"<Label2>"
...
"<Label3>"
]
}
}
]
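For readers following the new `readAffinity` block in the sample ConfigMap above, the sketch below shows how such a per-cluster entry can be modeled and decoded in Go. It mirrors the anonymous struct used in the e2e helper later in this diff; the `clusterInfo` wrapper and the sample JSON are illustrative only, not the driver's actual types.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// readAffinity mirrors the per-cluster block added to the ceph-csi-config
// ConfigMap: a flag plus the CRUSH location labels to look up on the node.
type readAffinity struct {
	Enabled             bool     `json:"enabled"`
	CrushLocationLabels []string `json:"crushLocationLabels"`
}

// clusterInfo is a trimmed-down, illustrative view of one entry in the
// "config.json" array; the real driver type carries many more fields.
type clusterInfo struct {
	ClusterID    string       `json:"clusterID"`
	ReadAffinity readAffinity `json:"readAffinity"`
}

func main() {
	raw := `[{"clusterID": "<cluster-id>",
	          "readAffinity": {"enabled": true,
	          "crushLocationLabels": ["topology.kubernetes.io/region",
	                                  "topology.kubernetes.io/zone"]}}]`

	var clusters []clusterInfo
	if err := json.Unmarshal([]byte(raw), &clusters); err != nil {
		panic(err)
	}
	for _, c := range clusters {
		fmt.Printf("cluster %s: read affinity enabled=%v labels=%v\n",
			c.ClusterID, c.ReadAffinity.Enabled, c.ReadAffinity.CrushLocationLabels)
	}
}
```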
8 changes: 7 additions & 1 deletion docs/deploy-rbd.md
@@ -47,7 +47,7 @@ make image-cephcsi
| `--maxsnapshotsonimage` | `450` | Maximum number of snapshots allowed on rbd image without flattening |
| `--setmetadata` | `false` | Set metadata on volume |
| `--enable-read-affinity` | `false` | enable read affinity |
| `--crush-location-labels`| _empty_ | Kubernetes node labels that determine the CRUSH location the node belongs to, separated by ',' |
| `--crush-location-labels`| _empty_ | Kubernetes node labels that determine the CRUSH location the node belongs to, separated by ','.<br>`Note: These labels will be replaced if crush location labels are defined in the ceph-csi-config ConfigMap for the specific cluster.` |

**Available volume parameters:**

@@ -222,6 +222,12 @@ If enabled, this option will be added to all RBD volumes mapped by Ceph CSI.
Well known labels can be found
[here](https://kubernetes.io/docs/reference/labels-annotations-taints/).

Read affinity can be configured for individual clusters within the
`ceph-csi-config` ConfigMap. This allows configuring the crush location labels
for each ceph cluster separately. The crush location labels specified in the
ConfigMap will supersede those provided via command line argument
`--crush-location-labels`.

>Note: Label values will have all its dots `"."` normalized with dashes `"-"`
in order for it to work with ceph CRUSH map.

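A minimal sketch of the precedence described in the docs change above: crush location labels configured for a specific cluster in the `ceph-csi-config` ConfigMap win over the `--crush-location-labels` command line value. The function name and inputs here are hypothetical, not ceph-csi API; the real driver wires this through its config-reading helpers.

```go
package main

import (
	"fmt"
	"strings"
)

// resolveCrushLocationLabels is an illustrative helper (not ceph-csi API):
// labels configured for a specific cluster in the ceph-csi-config ConfigMap
// supersede the comma-separated --crush-location-labels CLI value.
func resolveCrushLocationLabels(cliLabels string, clusterLabels []string) []string {
	if len(clusterLabels) > 0 {
		return clusterLabels // per-cluster ConfigMap entry wins
	}
	if cliLabels == "" {
		return nil
	}
	return strings.Split(cliLabels, ",") // fall back to the CLI flag
}

func main() {
	fmt.Println(resolveCrushLocationLabels(
		"topology.kubernetes.io/zone",
		[]string{"topology.kubernetes.io/region"},
	))
	// Output: [topology.kubernetes.io/region]
}
```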
10 changes: 10 additions & 0 deletions e2e/configmap.go
@@ -63,6 +63,16 @@ func createConfigMap(pluginPath string, c kubernetes.Interface, f *framework.Fra
}{
RadosNamespace: radosNamespace,
},
ReadAffinity: struct {
Enabled bool `json:"enabled"`
CrushLocationLabels []string `json:"crushLocationLabels"`
}{
Enabled: true,
CrushLocationLabels: []string{
crushLocationRegionLabel,
crushLocationZoneLabel,
},
},
}}
if upgradeTesting {
subvolumegroup = "csi"
2 changes: 1 addition & 1 deletion e2e/utils.go
@@ -1226,7 +1226,7 @@ func validatePVCSnapshot(
checkSumClone, chErrs[n] = calculateSHA512sum(f, &a, filePath, &opt)
framework.Logf("checksum value for the clone is %s with pod name %s", checkSumClone, name)
if chErrs[n] != nil {
framework.Logf("failed to calculte checksum for clone: %s", chErrs[n])
framework.Logf("failed to calculate checksum for clone: %s", chErrs[n])
}
if checkSumClone != checkSum {
framework.Logf(
29 changes: 15 additions & 14 deletions internal/cephfs/core/clone.go
@@ -68,11 +68,10 @@ func (s *subVolumeClient) CreateCloneFromSubvolume(
return err
}

// if cloneErr is not nil we will delete the snapshot
var cloneErr error

defer func() {
if cloneErr != nil {
// if any error occurs while cloning, resizing or deleting the snapshot
// fails then we need to delete the clone and snapshot.
if err != nil && !cerrors.IsCloneRetryError(err) {
if err = s.PurgeVolume(ctx, true); err != nil {
log.ErrorLog(ctx, "failed to delete volume %s: %v", s.VolID, err)
}
@@ -81,18 +80,19 @@
}
}
}()
cloneErr = snapClient.CloneSnapshot(ctx, s.SubVolume)
if cloneErr != nil {
log.ErrorLog(ctx, "failed to clone snapshot %s %s to %s %v", parentvolOpt.VolID, snapshotID, s.VolID, cloneErr)
err = snapClient.CloneSnapshot(ctx, s.SubVolume)
if err != nil {
log.ErrorLog(ctx, "failed to clone snapshot %s %s to %s %v", parentvolOpt.VolID, snapshotID, s.VolID, err)

return cloneErr
return err
}

cloneState, cloneErr := s.GetCloneState(ctx)
if cloneErr != nil {
log.ErrorLog(ctx, "failed to get clone state: %v", cloneErr)
var cloneState cephFSCloneState
cloneState, err = s.GetCloneState(ctx)
if err != nil {
log.ErrorLog(ctx, "failed to get clone state: %v", err)

return cloneErr
return err
}

err = cloneState.ToError()
@@ -157,8 +157,9 @@ func (s *subVolumeClient) CreateCloneFromSnapshot(
}
}
}()

cloneState, err := s.GetCloneState(ctx)
var cloneState cephFSCloneState
// avoid err variable shadowing
cloneState, err = s.GetCloneState(ctx)
if err != nil {
log.ErrorLog(ctx, "failed to get clone state: %v", err)

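The clone.go change above is mainly about error-variable shadowing: the deferred cleanup inspects `err`, so assigning a result to a new variable with `:=` would leave the deferred check looking at a different (always-nil) value and the clone would never be purged on failure. A stripped-down sketch of the corrected pattern, with placeholder operations standing in for the real cephfs calls:

```go
package main

import "fmt"

// doClone sketches the pattern used in CreateCloneFromSubvolume: the deferred
// cleanup keys off the named return value err, so later assignments must use
// "=" (with pre-declared variables) rather than ":=", which would shadow err.
func doClone() (err error) {
	defer func() {
		if err != nil {
			fmt.Println("cleanup: purging clone and snapshot") // placeholder for PurgeVolume/DeleteSnapshot
		}
	}()

	if err = cloneSnapshot(); err != nil {
		return err
	}

	var cloneState string
	cloneState, err = getCloneState() // "=" keeps err visible to the deferred func
	if err != nil {
		return err
	}
	fmt.Println("clone state:", cloneState)

	return nil
}

// Placeholder operations standing in for the real cephfs calls.
func cloneSnapshot() error { return nil }

func getCloneState() (string, error) { return "", fmt.Errorf("simulated failure") }

func main() {
	if err := doClone(); err != nil {
		fmt.Println("doClone failed:", err)
	}
}
```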
2 changes: 1 addition & 1 deletion internal/cephfs/driver.go
@@ -155,7 +155,7 @@ func (fs *Driver) Run(conf *util.Config) {
fs.cs = NewControllerServer(fs.cd)
}

// configre CSI-Addons server and components
// configure CSI-Addons server and components
err = fs.setupCSIAddonsServer(conf)
if err != nil {
log.FatalLogMsg(err.Error())
12 changes: 6 additions & 6 deletions internal/cephfs/fuserecovery.go
@@ -68,9 +68,9 @@ func (ns *NodeServer) getMountState(path string) (mountState, error) {
return msNotMounted, nil
}

func findMountinfo(mountpoint string, mis []mountutil.MountInfo) int {
for i := range mis {
if mis[i].MountPoint == mountpoint {
func findMountinfo(mountpoint string, minfo []mountutil.MountInfo) int {
for i := range minfo {
if minfo[i].MountPoint == mountpoint {
return i
}
}
@@ -80,9 +80,9 @@ func findMountinfo(mountpoint string, mis []mountutil.MountInfo) int {

// Ensures that given mountpoint is of specified fstype.
// Returns true if fstype matches, or if no such mountpoint exists.
func validateFsType(mountpoint, fsType string, mis []mountutil.MountInfo) bool {
if idx := findMountinfo(mountpoint, mis); idx > 0 {
mi := mis[idx]
func validateFsType(mountpoint, fsType string, minfo []mountutil.MountInfo) bool {
if idx := findMountinfo(mountpoint, minfo); idx > 0 {
mi := minfo[idx]

if mi.FsType != fsType {
return false
9 changes: 6 additions & 3 deletions internal/csi-addons/networkfence/fencing.go
@@ -212,9 +212,12 @@ func (ac *activeClient) fetchIP() (string, error) {
clientInfo := ac.Inst
parts := strings.Fields(clientInfo)
if len(parts) >= 2 {
ip := strings.Split(parts[1], ":")[0]

return ip, nil
lastColonIndex := strings.LastIndex(parts[1], ":")
firstPart := parts[1][:lastColonIndex]
ip := net.ParseIP(firstPart)
if ip != nil {
return ip.String(), nil
}
}

return "", fmt.Errorf("failed to extract IP address, incorrect format: %s", clientInfo)
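The fetchIP change switches from splitting on the first `:` to trimming at the last `:`, so that IPv6 client addresses (which themselves contain colons) survive the parse. A hedged, standalone sketch of that parsing step; `parseClientAddr` is an illustrative stand-in, not the driver's function:

```go
package main

import (
	"fmt"
	"net"
	"strings"
)

// parseClientAddr approximates the address handling in fetchIP: a Ceph client
// entry looks like "<ip>:<nonce>", and for IPv6 the ip part contains colons,
// so only the last ':' separates the address from the nonce.
func parseClientAddr(addr string) (string, error) {
	idx := strings.LastIndex(addr, ":")
	if idx < 0 {
		return "", fmt.Errorf("unexpected address format: %q", addr)
	}
	ip := net.ParseIP(addr[:idx])
	if ip == nil {
		return "", fmt.Errorf("failed to parse IP from %q", addr)
	}
	return ip.String(), nil
}

func main() {
	for _, addr := range []string{
		"172.21.9.34:0/422650892",
		"2001:0db8:85a3:0000:0000:8a2e:0370:7334:0/422650892",
	} {
		ip, err := parseClientAddr(addr)
		fmt.Println(ip, err)
	}
	// 172.21.9.34 <nil>
	// 2001:db8:85a3::8a2e:370:7334 <nil>
}
```

The second case matches the new IPv6 expectation added to fencing_test.go in the next file.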
5 changes: 5 additions & 0 deletions internal/csi-addons/networkfence/fencing_test.go
@@ -68,6 +68,11 @@ func TestFetchIP(t *testing.T) {
expectedIP: "172.21.9.34",
expectedErr: false,
},
{
clientInfo: "client.4305 2001:0db8:85a3:0000:0000:8a2e:0370:7334:0/422650892",
expectedIP: "2001:db8:85a3::8a2e:370:7334",
expectedErr: false,
},
{
clientInfo: "",
expectedIP: "",
27 changes: 16 additions & 11 deletions internal/rbd/driver/driver.go
@@ -26,6 +26,7 @@ import (
csicommon "github.com/ceph/ceph-csi/internal/csi-common"
"github.com/ceph/ceph-csi/internal/rbd"
"github.com/ceph/ceph-csi/internal/util"
"github.com/ceph/ceph-csi/internal/util/k8s"
"github.com/ceph/ceph-csi/internal/util/log"

"github.com/container-storage-interface/spec/lib/go/csi"
@@ -68,14 +69,14 @@ func NewControllerServer(d *csicommon.CSIDriver) *rbd.ControllerServer {
func NewNodeServer(
d *csicommon.CSIDriver,
t string,
topology map[string]string,
crushLocationMap map[string]string,
nodeLabels, topology, crushLocationMap map[string]string,
) (*rbd.NodeServer, error) {
ns := rbd.NodeServer{
DefaultNodeServer: csicommon.NewDefaultNodeServer(d, t, topology),
VolumeLocks: util.NewVolumeLocks(),
DefaultNodeServer: csicommon.NewDefaultNodeServer(d, t, topology),
VolumeLocks: util.NewVolumeLocks(),
NodeLabels: nodeLabels,
CLIReadAffinityMapOptions: util.ConstructReadAffinityMapOption(crushLocationMap),
}
ns.SetReadAffinityMapOptions(crushLocationMap)

return &ns, nil
}
@@ -87,8 +88,8 @@ func NewNodeServer(
// setupCSIAddonsServer().
func (r *Driver) Run(conf *util.Config) {
var (
err error
topology, crushLocationMap map[string]string
err error
nodeLabels, topology, crushLocationMap map[string]string
)
// update clone soft and hard limit
rbd.SetGlobalInt("rbdHardMaxCloneDepth", conf.RbdHardMaxCloneDepth)
@@ -125,13 +126,17 @@ func (r *Driver) Run(conf *util.Config) {
})
}

if conf.EnableReadAffinity {
crushLocationMap, err = util.GetCrushLocationMap(conf.CrushLocationLabels, conf.NodeID)
if k8s.RunsOnKubernetes() {
nodeLabels, err = k8s.GetNodeLabels(conf.NodeID)
if err != nil {
log.FatalLogMsg(err.Error())
}
}

if conf.EnableReadAffinity {
crushLocationMap = util.GetCrushLocationMap(conf.CrushLocationLabels, nodeLabels)
}

// Create GRPC servers
r.ids = NewIdentityServer(r.cd)

@@ -140,7 +145,7 @@
if err != nil {
log.FatalLogMsg(err.Error())
}
r.ns, err = NewNodeServer(r.cd, conf.Vtype, topology, crushLocationMap)
r.ns, err = NewNodeServer(r.cd, conf.Vtype, nodeLabels, topology, crushLocationMap)
if err != nil {
log.FatalLogMsg("failed to start node server, err %v\n", err)
}
@@ -165,7 +170,7 @@
r.cs.SetMetadata = conf.SetMetadata
}

// configre CSI-Addons server and components
// configure CSI-Addons server and components
err = r.setupCSIAddonsServer(conf)
if err != nil {
log.FatalLogMsg(err.Error())
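As a rough sketch of what the refactored call chain above does with the node labels: the configured crush location label keys are matched against the node's labels, and the values are normalized (dots become dashes, per the docs note earlier in this diff) before being turned into map options. The helper below is an illustrative approximation of `util.GetCrushLocationMap`, not its actual implementation; the key-trimming behaviour is an assumption.

```go
package main

import (
	"fmt"
	"strings"
)

// crushLocationMapFromLabels approximates util.GetCrushLocationMap: keep only
// the node labels whose keys were requested via the crush location labels,
// keyed by the part after the final "/", with dots in values replaced by
// dashes so the result is usable in a CRUSH location.
func crushLocationMapFromLabels(crushLocationLabels string, nodeLabels map[string]string) map[string]string {
	out := map[string]string{}
	for _, key := range strings.Split(crushLocationLabels, ",") {
		val, ok := nodeLabels[key]
		if !ok {
			continue
		}
		crushKey := key[strings.LastIndex(key, "/")+1:]
		out[crushKey] = strings.ReplaceAll(val, ".", "-")
	}
	return out
}

func main() {
	nodeLabels := map[string]string{
		"topology.kubernetes.io/region": "east.1",
		"topology.kubernetes.io/zone":   "east-1a",
	}
	fmt.Println(crushLocationMapFromLabels(
		"topology.kubernetes.io/region,topology.kubernetes.io/zone", nodeLabels))
	// map[region:east-1 zone:east-1a]
}
```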
36 changes: 9 additions & 27 deletions internal/rbd/nodeserver.go
@@ -45,8 +45,10 @@ type NodeServer struct {
// A map storing all volumes with ongoing operations so that additional operations
// for that same volume (as defined by VolumeID) return an Aborted error
VolumeLocks *util.VolumeLocks
// readAffinityMapOptions contains map options to enable read affinity.
readAffinityMapOptions string
// NodeLabels stores the node labels
NodeLabels map[string]string
// CLIReadAffinityMapOptions contains map options passed through command line to enable read affinity.
CLIReadAffinityMapOptions string
}

// stageTransaction struct represents the state a transaction was when it either completed
@@ -258,11 +260,10 @@ func (ns *NodeServer) populateRbdVol(
rv.Mounter = rbdNbdMounter
}

err = getMapOptions(req, rv)
err = ns.getMapOptions(req, rv)
if err != nil {
return nil, err
}
ns.appendReadAffinityMapOptions(rv)

rv.VolID = volID

@@ -280,14 +281,14 @@

// appendReadAffinityMapOptions appends readAffinityMapOptions to mapOptions
// if mounter is rbdDefaultMounter and readAffinityMapOptions is not empty.
func (ns NodeServer) appendReadAffinityMapOptions(rv *rbdVolume) {
func (rv *rbdVolume) appendReadAffinityMapOptions(readAffinityMapOptions string) {
switch {
case ns.readAffinityMapOptions == "" || rv.Mounter != rbdDefaultMounter:
case readAffinityMapOptions == "" || rv.Mounter != rbdDefaultMounter:
return
case rv.MapOptions != "":
rv.MapOptions += "," + ns.readAffinityMapOptions
rv.MapOptions += "," + readAffinityMapOptions
default:
rv.MapOptions = ns.readAffinityMapOptions
rv.MapOptions = readAffinityMapOptions
}
}

@@ -1378,22 +1379,3 @@ func getDeviceSize(ctx context.Context, devicePath string) (uint64, error) {

return size, nil
}

func (ns *NodeServer) SetReadAffinityMapOptions(crushLocationMap map[string]string) {
if len(crushLocationMap) == 0 {
return
}

var b strings.Builder
b.WriteString("read_from_replica=localize,crush_location=")
first := true
for key, val := range crushLocationMap {
if first {
b.WriteString(fmt.Sprintf("%s:%s", key, val))
first = false
} else {
b.WriteString(fmt.Sprintf("|%s:%s", key, val))
}
}
ns.readAffinityMapOptions = b.String()
}
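To make the nodeserver change above concrete, here is a small, self-contained version of the map-option handling: building the read-affinity string from a crush location map (the logic the removed SetReadAffinityMapOptions used, now supplied via `util.ConstructReadAffinityMapOption` in the driver) and appending it to any user-supplied map options. Treat it as a sketch under those assumptions, not the driver's exact code path; the sorted keys and the `"rbd"` mounter name are illustrative choices.

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

// constructReadAffinityMapOption is an illustrative equivalent of the logic
// removed from SetReadAffinityMapOptions above: it renders a crush location
// map as "read_from_replica=localize,crush_location=key1:val1|key2:val2".
// Keys are sorted here for deterministic output; the driver need not do so.
func constructReadAffinityMapOption(crushLocationMap map[string]string) string {
	if len(crushLocationMap) == 0 {
		return ""
	}
	keys := make([]string, 0, len(crushLocationMap))
	for k := range crushLocationMap {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	var b strings.Builder
	b.WriteString("read_from_replica=localize,crush_location=")
	for i, k := range keys {
		if i > 0 {
			b.WriteString("|")
		}
		fmt.Fprintf(&b, "%s:%s", k, crushLocationMap[k])
	}
	return b.String()
}

// appendReadAffinityMapOptions mimics the rbdVolume method in the diff:
// read-affinity options are appended only when the default krbd mounter is
// used and the read-affinity string is non-empty.
func appendReadAffinityMapOptions(mapOptions, readAffinity, mounter string) string {
	switch {
	case readAffinity == "" || mounter != "rbd":
		return mapOptions
	case mapOptions != "":
		return mapOptions + "," + readAffinity
	default:
		return readAffinity
	}
}

func main() {
	ra := constructReadAffinityMapOption(map[string]string{"zone": "east-1a", "region": "east-1"})
	fmt.Println(appendReadAffinityMapOptions("lock_on_read", ra, "rbd"))
	// lock_on_read,read_from_replica=localize,crush_location=region:east-1|zone:east-1a
}
```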