diff --git a/.github/workflows/build-container.yml b/.github/workflows/build-container.yml index ed9a6371..966c9f16 100644 --- a/.github/workflows/build-container.yml +++ b/.github/workflows/build-container.yml @@ -20,31 +20,23 @@ jobs: packages: write steps: - name: Check out repository - uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 - name: Install Go - uses: actions/setup-go@84cbf8094393cdc5fe1fe1671ff2647332956b1a + uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 with: - go-version: "1.18" - - - name: Pseudo version - id: pseudo-version - run: | - git clone https://github.com/edgelesssys/constellation /constellation - cd /constellation/hack/pseudo-version - echo "pseudoVersion=$(go run .)" >> $GITHUB_ENV - echo ${{ env.pseudoVersion }} + go-version: "1.22.0" - name: Set up Docker Buildx id: docker-setup - uses: docker/setup-buildx-action@f211e3e9ded2d9377c8cadc4489a4e38014bc4c9 + uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226 - name: Build container image - run: make REGISTRY=ghcr.io/edgelesssys VERSION=${{ env.pseudoVersion }} image-csi-plugin + run: make REGISTRY=ghcr.io/edgelesssys/constellation VERSION=${{ inputs.versionTag }} build-local-image-cinder-csi-plugin - name: Log in to the Container registry id: docker-login - uses: docker/login-action@dd4fa0671be5250ee6f50aedf4cb05514abda2c7 + uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d with: registry: ghcr.io username: ${{ github.actor }} @@ -52,9 +44,4 @@ jobs: - name: Push container image run: | - docker push ghcr.io/edgelesssys/cinder-csi-plugin:${{ env.pseudoVersion }} - if [ "${{ inputs.versionTag }}" != "" ] - then - docker tag ghcr.io/edgelesssys/constellation/cinder-csi-plugin:${{ env.pseudoVersion }} ghcr.io/edgelesssys/constellation/cinder-csi-plugin:${{ inputs.versionTag }} - docker push ghcr.io/edgelesssys/constellation/cinder-csi-plugin:${{ inputs.versionTag }} - fi + docker push ghcr.io/edgelesssys/constellation/cinder-csi-plugin:${{ inputs.versionTag }} diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 15f9ba23..c8c68bb4 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -8,7 +8,7 @@ We'd love to accept your patches! Before we can accept them you need to sign Clo ## Reporting an issue If you find a bug or a feature request related to cloud-provider-openstack you can create a new github issue in this repo @[kubernetes/cloud-provider-openstack](https://github.com/kubernetes/cloud-provider-openstack/issues). -## Submiting a Pull Request +## Submitting a Pull Request * Fork the cloud-provider-openstack repo, develop and test your code changes. * Submit a pull request. * The bot will automatically assigns someone to review your PR. diff --git a/Dockerfile b/Dockerfile index 2beb6a1c..6b311d69 100644 --- a/Dockerfile +++ b/Dockerfile @@ -32,7 +32,7 @@ ## BUILD ARGS ## ################################################################################ # This build arg allows the specification of a custom Golang image. -ARG GOLANG_IMAGE=golang:1.20.3 +ARG GOLANG_IMAGE=golang:1.21.5-bullseye # The distroless image on which the CPI manager image is built. # @@ -40,11 +40,11 @@ ARG GOLANG_IMAGE=golang:1.20.3 # deterministic builds. 
Follow what kubernetes uses to build # kube-controller-manager, for example for 1.27.x: # https://github.com/kubernetes/kubernetes/blob/release-1.27/build/common.sh#L99 -ARG DISTROLESS_IMAGE=registry.k8s.io/build-image/go-runner:v2.3.1-go1.20.3-bullseye.0 +ARG DISTROLESS_IMAGE=registry.k8s.io/build-image/go-runner:v2.3.1-go1.21.5-bookworm.0 # We use Alpine as the source for default CA certificates and some output # images -ARG ALPINE_IMAGE=alpine:3.17.3 +ARG ALPINE_IMAGE=alpine:3.17.5 # cinder-csi-plugin uses Debian as a base image ARG DEBIAN_IMAGE=registry.k8s.io/build-image/debian-base:bullseye-v1.4.3 @@ -125,11 +125,33 @@ CMD ["sh", "-c", "/bin/barbican-kms-plugin --socketpath ${socketpath} --cloud-co ## ## cinder-csi-plugin ## -FROM --platform=${TARGETPLATFORM} ${DEBIAN_IMAGE} as cinder-csi-plugin -# Install e4fsprogs for format -RUN clean-install btrfs-progs e2fsprogs mount udev xfsprogs libcryptsetup12 libcryptsetup-dev +# step 1: copy all necessary files from Debian distro to /dest folder +# all magic happens in tools/csi-deps.sh +FROM --platform=${TARGETPLATFORM} ${DEBIAN_IMAGE} as cinder-csi-plugin-utils +RUN clean-install bash rsync mount udev btrfs-progs e2fsprogs xfsprogs util-linux libcryptsetup12 libcryptsetup-dev libgcc-s1 +COPY tools/csi-deps.sh /tools/csi-deps.sh +RUN /tools/csi-deps.sh + +# step 2: check that all necessary files are copied and work properly +# the build has to finish without errors, but the resulting image will not be used +FROM --platform=${TARGETPLATFORM} ${DISTROLESS_IMAGE} as cinder-csi-plugin-utils-check + +COPY --from=cinder-csi-plugin-utils /dest / +COPY --from=cinder-csi-plugin-utils /bin/sh /bin/sh +COPY tools/csi-deps-check.sh /tools/csi-deps-check.sh + +SHELL ["/bin/sh"] +RUN /tools/csi-deps-check.sh + +# step 3: build tiny cinder-csi-plugin image with only necessary files +FROM --platform=${TARGETPLATFORM} ${DISTROLESS_IMAGE} as cinder-csi-plugin + +# Copying csi-deps-check.sh simply ensures that the resulting image has a dependency +# on cinder-csi-plugin-utils-check and therefore that the check has passed +COPY --from=cinder-csi-plugin-utils-check /tools/csi-deps-check.sh /bin/csi-deps-check.sh +COPY --from=cinder-csi-plugin-utils /dest / COPY --from=builder /build/cinder-csi-plugin /bin/cinder-csi-plugin COPY --from=certs /etc/ssl/certs /etc/ssl/certs diff --git a/Makefile b/Makefile index 4455c090..0ca5a2b7 100644 --- a/Makefile +++ b/Makefile @@ -36,7 +36,7 @@ VERSION ?= $(shell git describe --dirty --tags --match='v*') GOARCH := GOFLAGS := TAGS := -LDFLAGS := "-w -s -X 'k8s.io/component-base/version.gitVersion=$(VERSION)'" +LDFLAGS := "-w -s -X 'k8s.io/component-base/version.gitVersion=$(VERSION)' -X 'k8s.io/cloud-provider-openstack/pkg/version.Version=$(VERSION)'" GOX_LDFLAGS := $(shell echo "$(LDFLAGS) -extldflags \"-static\"") REGISTRY ?= registry.k8s.io/provider-os IMAGE_OS ?= linux @@ -81,7 +81,7 @@ $(BUILD_CMDS): $(SOURCES) test: unit functional check: work - go run github.com/golangci/golangci-lint/cmd/golangci-lint@v1.51.2 run ./... + go run github.com/golangci/golangci-lint/cmd/golangci-lint@v1.54.2 run ./... unit: work CGO_ENABLED=1 go test -tags=unit $(shell go list ./...
| sed -e '/sanity/ { N; d; }' | sed -e '/tests/ {N; d;}') $(TESTARGS) diff --git a/OWNERS b/OWNERS index 56802c4a..34e39c07 100644 --- a/OWNERS +++ b/OWNERS @@ -1,16 +1,15 @@ emeritus_approvers: -- lingxiankong -approvers: - chrigl +- lingxiankong - ramineni -- zetaab +approvers: +- dulek - jichenjc +- kayrus +- zetaab reviewers: -- chrigl - dulek -- Fedosin - jichenjc - kayrus - mdbooth -- ramineni - zetaab diff --git a/charts/cinder-csi-plugin/Chart.yaml b/charts/cinder-csi-plugin/Chart.yaml index d263e85d..27b471f7 100644 --- a/charts/cinder-csi-plugin/Chart.yaml +++ b/charts/cinder-csi-plugin/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v1 -appVersion: v1.0.0 +appVersion: v1.0.1 description: Cinder CSI Chart for OpenStack with on-node encryption support name: openstack-cinder-csi -version: 1.0.0 +version: 1.0.1 diff --git a/charts/cinder-csi-plugin/templates/controllerplugin-deployment.yaml b/charts/cinder-csi-plugin/templates/controllerplugin-deployment.yaml index 9e13f851..18f98326 100644 --- a/charts/cinder-csi-plugin/templates/controllerplugin-deployment.yaml +++ b/charts/cinder-csi-plugin/templates/controllerplugin-deployment.yaml @@ -5,6 +5,10 @@ metadata: namespace: {{ .Release.Namespace }} labels: {{- include "cinder-csi.controllerplugin.labels" . | nindent 4 }} + annotations: + {{- with .Values.commonAnnotations }} + {{- toYaml . | nindent 4 }} + {{- end }} spec: replicas: {{ .Values.csi.plugin.controllerPlugin.replicas }} strategy: @@ -21,10 +25,18 @@ spec: metadata: labels: {{- include "cinder-csi.controllerplugin.labels" . | nindent 8 }} + annotations: + {{- with .Values.commonAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} spec: serviceAccount: csi-cinder-controller-sa + securityContext: + {{- toYaml .Values.csi.plugin.controllerPlugin.podSecurityContext | nindent 8 }} containers: - name: csi-attacher + securityContext: + {{- toYaml .Values.csi.plugin.controllerPlugin.securityContext | nindent 12 }} image: "{{ .Values.csi.attacher.image.repository }}:{{ .Values.csi.attacher.image.tag }}" imagePullPolicy: {{ .Values.csi.attacher.image.pullPolicy }} args: @@ -46,6 +58,8 @@ spec: mountPath: /var/lib/csi/sockets/pluginproxy/ resources: {{ toYaml .Values.csi.attacher.resources | nindent 12 }} - name: csi-provisioner + securityContext: + {{- toYaml .Values.csi.plugin.controllerPlugin.securityContext | nindent 12 }} image: "{{ .Values.csi.provisioner.image.repository }}:{{ .Values.csi.provisioner.image.tag }}" imagePullPolicy: {{ .Values.csi.provisioner.image.pullPolicy }} args: @@ -69,6 +83,8 @@ spec: mountPath: /var/lib/csi/sockets/pluginproxy/ resources: {{ toYaml .Values.csi.provisioner.resources | nindent 12 }} - name: csi-snapshotter + securityContext: + {{- toYaml .Values.csi.plugin.controllerPlugin.securityContext | nindent 12 }} image: "{{ .Values.csi.snapshotter.image.repository }}:{{ .Values.csi.snapshotter.image.tag }}" imagePullPolicy: {{ .Values.csi.snapshotter.image.pullPolicy }} args: @@ -89,6 +105,8 @@ spec: name: socket-dir resources: {{ toYaml .Values.csi.snapshotter.resources | nindent 12 }} - name: csi-resizer + securityContext: + {{- toYaml .Values.csi.plugin.controllerPlugin.securityContext | nindent 12 }} image: "{{ .Values.csi.resizer.image.repository }}:{{ .Values.csi.resizer.image.tag }}" imagePullPolicy: {{ .Values.csi.resizer.image.pullPolicy }} args: @@ -110,6 +128,8 @@ spec: mountPath: /var/lib/csi/sockets/pluginproxy/ resources: {{ toYaml .Values.csi.resizer.resources | nindent 12 }} - name: liveness-probe + securityContext: + {{- toYaml 
.Values.csi.plugin.controllerPlugin.securityContext | nindent 12 }} image: "{{ .Values.csi.livenessprobe.image.repository }}:{{ .Values.csi.livenessprobe.image.tag }}" imagePullPolicy: {{ .Values.csi.livenessprobe.image.pullPolicy }} args: @@ -128,6 +148,8 @@ spec: name: socket-dir resources: {{ toYaml .Values.csi.livenessprobe.resources | nindent 12 }} - name: cinder-csi-plugin + securityContext: + {{- toYaml .Values.csi.plugin.controllerPlugin.securityContext | nindent 12 }} image: "{{ .Values.csi.plugin.image.repository }}:{{ .Values.csi.plugin.image.tag | default .Chart.AppVersion }}" imagePullPolicy: {{ .Values.csi.plugin.image.pullPolicy }} args: @@ -137,6 +159,9 @@ spec: - "--cloud-config=$(CLOUD_CONFIG)" - "--cluster=$(CLUSTER_NAME)" - "--kms-addr={{ .Values.csi.kms.keyServiceName }}.{{ .Values.csi.kms.keyServiceNamespace | default .Release.Namespace }}:{{ .Values.csi.kms.keyServicePort }}" + {{- if .Values.csi.plugin.httpEndpoint.enabled }} + - "--http-endpoint=:{{ .Values.csi.plugin.httpEndpoint.port }}" + {{- end }} {{- if .Values.csi.plugin.extraArgs }} {{- with .Values.csi.plugin.extraArgs }} {{- tpl . $ | trim | nindent 12 }} @@ -153,6 +178,11 @@ spec: - containerPort: 9808 name: healthz protocol: TCP + {{- if .Values.csi.plugin.httpEndpoint.enabled }} + - containerPort: {{ .Values.csi.plugin.httpEndpoint.port }} + name: http + protocol: TCP + {{- end }} # The probe livenessProbe: failureThreshold: {{ .Values.csi.livenessprobe.failureThreshold }} @@ -169,22 +199,33 @@ spec: mountPath: /etc/kubernetes/{{ .Values.secret.filename }} readOnly: true subPath: {{ .Values.secret.filename }} + {{- with .Values.csi.plugin.volumeMounts }} + {{- toYaml . | nindent 12 }} + {{- end }} resources: {{ toYaml .Values.csi.plugin.resources | nindent 12 }} volumes: - name: socket-dir emptyDir: - - name: cloud-config {{- if .Values.secret.enabled }} + - name: cloud-config secret: secretName: {{ .Values.secret.name }} - {{- else }} + {{- else if .Values.secret.hostMount }} + - name: cloud-config hostPath: path: /etc/kubernetes {{- end }} + {{- with .Values.csi.plugin.volumes }} + {{- toYaml . | nindent 8 }} + {{- end }} affinity: {{ toYaml .Values.csi.plugin.controllerPlugin.affinity | nindent 8 }} nodeSelector: {{ toYaml .Values.csi.plugin.controllerPlugin.nodeSelector | nindent 8 }} tolerations: {{ toYaml .Values.csi.plugin.controllerPlugin.tolerations | nindent 8 }} imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }} + {{- with .Values.csi.plugin.controllerPlugin.hostAliases }} + hostAliases: + {{- toYaml . | nindent 8 }} + {{- end }} {{- if .Values.priorityClassName }} priorityClassName: {{ .Values.priorityClassName }} {{- end }} diff --git a/charts/cinder-csi-plugin/templates/controllerplugin-podmonitor.yaml b/charts/cinder-csi-plugin/templates/controllerplugin-podmonitor.yaml new file mode 100644 index 00000000..a1b4ceb4 --- /dev/null +++ b/charts/cinder-csi-plugin/templates/controllerplugin-podmonitor.yaml @@ -0,0 +1,22 @@ +{{- if .Values.csi.plugin.podMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PodMonitor +metadata: + labels: + {{- include "cinder-csi.controllerplugin.labels" . | nindent 4 }} + name: {{ include "cinder-csi.name" . }}-controllerplugin + namespace: {{ .Release.Namespace }} + annotations: + {{- with .Values.commonAnnotations }} + {{- toYaml . 
| nindent 4 }} + {{- end }} +spec: + podMetricsEndpoints: + - interval: 30s + port: http + scheme: http + jobLabel: component + selector: + matchLabels: + {{- include "cinder-csi.controllerplugin.matchLabels" . | nindent 6 }} +{{- end }} diff --git a/charts/cinder-csi-plugin/templates/nodeplugin-daemonset.yaml b/charts/cinder-csi-plugin/templates/nodeplugin-daemonset.yaml index dd9f513a..cf9521f2 100644 --- a/charts/cinder-csi-plugin/templates/nodeplugin-daemonset.yaml +++ b/charts/cinder-csi-plugin/templates/nodeplugin-daemonset.yaml @@ -5,6 +5,10 @@ metadata: namespace: {{ .Release.Namespace }} labels: {{- include "cinder-csi.nodeplugin.labels" . | nindent 4 }} + annotations: + {{- with .Values.commonAnnotations }} + {{- toYaml . | nindent 4 }} + {{- end }} spec: selector: matchLabels: @@ -13,12 +17,18 @@ spec: metadata: labels: {{- include "cinder-csi.nodeplugin.labels" . | nindent 8 }} + annotations: + {{- with .Values.commonAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} spec: serviceAccount: csi-cinder-node-sa hostNetwork: true dnsPolicy: ClusterFirstWithHostNet containers: - name: node-driver-registrar + securityContext: + {{- toYaml .Values.csi.plugin.nodePlugin.securityContext | nindent 12 }} image: "{{ .Values.csi.nodeDriverRegistrar.image.repository }}:{{ .Values.csi.nodeDriverRegistrar.image.tag }}" imagePullPolicy: {{ .Values.csi.nodeDriverRegistrar.image.pullPolicy }} args: @@ -46,6 +56,8 @@ spec: mountPath: /registration resources: {{ toYaml .Values.csi.nodeDriverRegistrar.resources | nindent 12 }} - name: liveness-probe + securityContext: + {{- toYaml .Values.csi.plugin.nodePlugin.securityContext | nindent 12 }} image: "{{ .Values.csi.livenessprobe.image.repository }}:{{ .Values.csi.livenessprobe.image.tag }}" imagePullPolicy: {{ .Values.csi.livenessprobe.image.pullPolicy }} args: @@ -110,6 +122,14 @@ spec: mountPath: /etc/kubernetes/{{ .Values.secret.filename }} readOnly: true subPath: {{ .Values.secret.filename }} + # Edgeless specific mounts for cryptsetup + - name: sys + mountPath: /sys + - name: cryptsetup + mountPath: /run/cryptsetup + {{- with .Values.csi.plugin.volumeMounts }} + {{- toYaml . | nindent 12 }} + {{- end }} resources: {{ toYaml .Values.csi.plugin.resources | nindent 12 }} volumes: - name: socket-dir @@ -124,6 +144,14 @@ spec: hostPath: path: {{ .Values.csi.plugin.nodePlugin.kubeletDir }} type: Directory + - name: sys + hostPath: + path: /sys + type: Directory + - name: cryptsetup + hostPath: + path: /run/cryptsetup + type: Directory # - name: pods-cloud-data # hostPath: # path: /var/lib/cloud/data @@ -132,18 +160,26 @@ spec: hostPath: path: /dev type: Directory - - name: cloud-config {{- if .Values.secret.enabled }} + - name: cloud-config secret: secretName: {{ .Values.secret.name }} - {{- else }} + {{- else if .Values.secret.hostMount }} + - name: cloud-config hostPath: path: /etc/kubernetes {{- end }} + {{- with .Values.csi.plugin.volumes }} + {{- toYaml . | nindent 8 }} + {{- end }} affinity: {{ toYaml .Values.csi.plugin.nodePlugin.affinity | nindent 8 }} nodeSelector: {{ toYaml .Values.csi.plugin.nodePlugin.nodeSelector | nindent 8 }} tolerations: {{ toYaml .Values.csi.plugin.nodePlugin.tolerations | nindent 8 }} imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }} + {{- with .Values.csi.plugin.nodePlugin.hostAliases }} + hostAliases: + {{- toYaml . 
| nindent 8 }} + {{- end }} {{- if .Values.priorityClassName }} priorityClassName: {{ .Values.priorityClassName }} {{- end }} diff --git a/charts/cinder-csi-plugin/templates/secret.yaml b/charts/cinder-csi-plugin/templates/secret.yaml index b11ef856..597880c0 100644 --- a/charts/cinder-csi-plugin/templates/secret.yaml +++ b/charts/cinder-csi-plugin/templates/secret.yaml @@ -1,4 +1,4 @@ -{{- if .Values.secret.create }} +{{- if and (.Values.secret.create) (.Values.secret.enabled) }} apiVersion: v1 kind: Secret metadata: diff --git a/charts/cinder-csi-plugin/values.yaml b/charts/cinder-csi-plugin/values.yaml index 40d98610..7e34d381 100644 --- a/charts/cinder-csi-plugin/values.yaml +++ b/charts/cinder-csi-plugin/values.yaml @@ -8,7 +8,7 @@ csi: attacher: image: repository: registry.k8s.io/sig-storage/csi-attacher - tag: v4.2.0 + tag: v4.4.2@sha256:11b955fe4da278aa0e8ca9d6fd70758f2aec4b0c1e23168c665ca345260f1882 pullPolicy: IfNotPresent resources: {} extraArgs: {} @@ -16,28 +16,28 @@ csi: topology: "true" image: repository: registry.k8s.io/sig-storage/csi-provisioner - tag: v3.4.1 + tag: v3.6.2@sha256:49b94f975603d85a1820b72b1188e5b351d122011b3e5351f98c49d72719aa78 pullPolicy: IfNotPresent resources: {} extraArgs: {} snapshotter: image: repository: registry.k8s.io/sig-storage/csi-snapshotter - tag: v6.2.1 + tag: v6.3.2@sha256:4c5a1b57e685b2631909b958487f65af7746361346fcd82a8635bea3ef14509d pullPolicy: IfNotPresent resources: {} extraArgs: {} resizer: image: repository: registry.k8s.io/sig-storage/csi-resizer - tag: v1.7.0 + tag: v1.9.2@sha256:e998f22243869416f9860fc6a1fb07d4202eac8846defc1b85ebd015c1207605 pullPolicy: IfNotPresent resources: {} extraArgs: {} livenessprobe: image: repository: registry.k8s.io/sig-storage/livenessprobe - tag: v2.9.0 + tag: v2.11.0@sha256:82adbebdf5d5a1f40f246aef8ddbee7f89dea190652aefe83336008e69f9a89f pullPolicy: IfNotPresent failureThreshold: 5 initialDelaySeconds: 10 @@ -48,7 +48,7 @@ csi: nodeDriverRegistrar: image: repository: registry.k8s.io/sig-storage/csi-node-driver-registrar - tag: v2.6.2 + tag: v2.9.2@sha256:a18e989a93722e43885120e90bc1d0da0740fcbf44bc10403572b368b9800606 pullPolicy: IfNotPresent resources: {} extraArgs: {} @@ -56,17 +56,31 @@ csi: image: repository: ghcr.io/edgelesssys/constellation/cinder-csi-plugin pullPolicy: IfNotPresent - tag: # defaults to .Chart.AppVersion + # CSI driver version is independent of Constellation releases + tag: v1.0.1@sha256:65b59c9b64701f92c59d05f80d5b2bae0a2bc281e74b1f0db0fa3802081fd298 volumeMounts: - name: cloud-config mountPath: /etc/kubernetes readOnly: true nodePlugin: + dnsPolicy: ClusterFirstWithHostNet + podSecurityContext: {} + securityContext: {} + # capabilities: + # drop: + # - ALL + # seccompProfile: + # type: RuntimeDefault affinity: {} nodeSelector: {} tolerations: - operator: Exists kubeletDir: /var/lib/kubelet + # Allow for specifying internal IP addresses for multiple hostnames + # hostAliases: + # - ip: "10.0.0.1" + # hostnames: + # - "keystone.hostname.com" controllerPlugin: replicas: 1 strategy: @@ -80,10 +94,36 @@ csi: # maxSurge is the maximum number of pods that can be # created over the desired number of pods. 
maxSurge: 1 + podSecurityContext: {} + # runAsNonRoot: true + # runAsUser: 65532 + # runAsGroup: 65532 + # fsGroup: 65532 + # fsGroupChangePolicy: OnRootMismatch + securityContext: {} + # capabilities: + # drop: + # - ALL + # seccompProfile: + # type: RuntimeDefault + # readOnlyRootFilesystem: true affinity: {} nodeSelector: {} tolerations: [] + # Allow for specifying internal IP addresses for multiple hostnames + # hostAliases: + # - ip: "10.0.0.1" + # hostnames: + # - "keystone.hostname.com" resources: {} + # Enable built-in http server through the http-endpoint flag + httpEndpoint: + enabled: false + port: 8080 + # Create Prometheus Operator PodMonitor. Requires http server above. + # See https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#monitoring.coreos.com/v1.PodMonitor + podMonitor: + enabled: false extraArgs: {} kms: keyServiceName: "key-service" @@ -95,6 +135,12 @@ csi: # for description of individual verbosity levels. logVerbosityLevel: 2 +# the secret should contain the openstack credentials +# there are several options to inject the credentials: +# 1) from kubernetes secret that doesn't exist: set "enabled" and "create" to true, this will create a secret from the values written to "data" down below +# 2) from kubernetes secret that already exists: set "enabled" to true and "create" to false +# 3) from host system path /etc/cloud/cloud.conf: set "enabled" to false and "hostMount" to true +# 4) via agent-injector (e.g. hashicorp vault): set "enabled" and "hostMount" to false, you have to provide credentials on your own by injecting credentials into the pod secret: enabled: true create: false @@ -118,3 +164,6 @@ priorityClassName: "" imagePullSecrets: [] # - name: my-imagepull-secret + +# add annotations to all pods +commonAnnotations: {} diff --git a/charts/manila-csi-plugin/Chart.yaml b/charts/manila-csi-plugin/Chart.yaml index b13e112e..4345a437 100644 --- a/charts/manila-csi-plugin/Chart.yaml +++ b/charts/manila-csi-plugin/Chart.yaml @@ -1,8 +1,8 @@ apiVersion: v1 -appVersion: v1.27.0 +appVersion: v1.29.0 description: Manila CSI Chart for OpenStack name: openstack-manila-csi -version: 2.28.0-alpha.1 +version: 2.29.0 home: http://github.com/kubernetes/cloud-provider-openstack icon: https://github.com/kubernetes/kubernetes/blob/master/logo/logo.png maintainers: diff --git a/charts/manila-csi-plugin/templates/controllerplugin-statefulset.yaml b/charts/manila-csi-plugin/templates/controllerplugin-statefulset.yaml index ff8574fd..1e84be5c 100644 --- a/charts/manila-csi-plugin/templates/controllerplugin-statefulset.yaml +++ b/charts/manila-csi-plugin/templates/controllerplugin-statefulset.yaml @@ -153,3 +153,11 @@ spec: {{- if .Values.controllerplugin.tolerations }} tolerations: {{ toYaml .Values.controllerplugin.tolerations | nindent 8 }} {{- end }} + {{- with .Values.controllerplugin.hostAliases }} + hostAliases: + {{- toYaml . 
| nindent 8 }} + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml .Values.imagePullSecrets | nindent 8 }} + {{- end }} diff --git a/charts/manila-csi-plugin/templates/nodeplugin-daemonset.yaml b/charts/manila-csi-plugin/templates/nodeplugin-daemonset.yaml index 9c853f44..597b472d 100644 --- a/charts/manila-csi-plugin/templates/nodeplugin-daemonset.yaml +++ b/charts/manila-csi-plugin/templates/nodeplugin-daemonset.yaml @@ -117,6 +117,14 @@ spec: {{- if .Values.nodeplugin.tolerations }} tolerations: {{ toYaml .Values.nodeplugin.tolerations | nindent 8 }} {{- end }} + {{- with .Values.nodeplugin.hostAliases }} + hostAliases: + {{- toYaml . | nindent 8 }} + {{- end }} {{- if .Values.nodeplugin.priorityClassName }} priorityClassName: {{ .Values.nodeplugin.priorityClassName }} {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml .Values.imagePullSecrets | nindent 8 }} + {{- end }} diff --git a/charts/manila-csi-plugin/values.yaml b/charts/manila-csi-plugin/values.yaml index 4accac2a..7f86ce97 100644 --- a/charts/manila-csi-plugin/values.yaml +++ b/charts/manila-csi-plugin/values.yaml @@ -16,6 +16,10 @@ shareProtocols: # dir: /var/lib/kubelet/plugins/cephfs.csi.ceph.com # sockFile: csi.sock +# ImagePullSecret for all pods +imagePullSecrets: [] +# - name: my-imagepull-secret + extraLabels: {} # CSI Manila spec csimanila: @@ -62,6 +66,11 @@ nodeplugin: nodeSelector: {} tolerations: [] affinity: {} + # Allow for specifying internal IP addresses for multiple hostnames + # hostAliases: + # - ip: "10.0.0.1" + # hostnames: + # - "keystone.hostname.com" priorityClassName: "" # Use fullnameOverride to fully override the name of this component # fullnameOverride: some-other-name @@ -92,12 +101,17 @@ controllerplugin: resizer: image: repository: registry.k8s.io/sig-storage/csi-resizer - tag: v1.3.0 + tag: v1.8.0 pullPolicy: IfNotPresent resources: {} nodeSelector: {} tolerations: [] affinity: {} + # Allow for specifying internal IP addresses for multiple hostnames + # hostAliases: + # - ip: "10.0.0.1" + # hostnames: + # - "keystone.hostname.com" # Use fullnameOverride to fully override the name of this component # fullnameOverride: some-other-name diff --git a/charts/openstack-cloud-controller-manager/Chart.yaml b/charts/openstack-cloud-controller-manager/Chart.yaml index 1b267f3c..ff39ef84 100644 --- a/charts/openstack-cloud-controller-manager/Chart.yaml +++ b/charts/openstack-cloud-controller-manager/Chart.yaml @@ -1,10 +1,10 @@ apiVersion: v1 -appVersion: v1.27.0 +appVersion: v1.29.0 description: Openstack Cloud Controller Manager Helm Chart icon: https://object-storage-ca-ymq-1.vexxhost.net/swift/v1/6e4619c416ff4bd19e1c087f27a43eea/www-images-prod/openstack-logo/OpenStack-Logo-Vertical.png home: https://github.com/kubernetes/cloud-provider-openstack name: openstack-cloud-controller-manager -version: 2.28.0-alpha.1 +version: 2.29.0 maintainers: - name: eumel8 email: f.kloeker@telekom.de diff --git a/charts/openstack-cloud-controller-manager/README.md b/charts/openstack-cloud-controller-manager/README.md index 25fb4a91..77c224bb 100644 --- a/charts/openstack-cloud-controller-manager/README.md +++ b/charts/openstack-cloud-controller-manager/README.md @@ -13,20 +13,48 @@ You need to configure an `openstack-ccm.yaml` values file with at least: - with password: `cloudConfig.global.username` and `cloudconfig.global.password` - with application credentials: (`cloudConfig.global.application-credential-id` or 
`cloudConfig.global.application-credential-name`) and `cloudConfig.global.application-credential-secret` - Load balancing - - `cloudConfig.loadbalancer.floating-network-id` **or** - - `cloudConfig.loadbalancer.floating-subnet-id` **or** - - `cloudConfig.loadbalancer.floating-subnet` + - `cloudConfig.loadBalancer.floating-network-id` **or** + - `cloudConfig.loadBalancer.floating-subnet-id` **or** + - `cloudConfig.loadBalancer.floating-subnet` -If you want to enable health checks for your Load Balancers (optional), set `cloudConfig.loadbalancer.create-monitor: true`. +If you want to enable health checks for your Load Balancers (optional), set `cloudConfig.loadBalancer.create-monitor: true`. Then run: -``` +```sh helm repo add cpo https://kubernetes.github.io/cloud-provider-openstack helm repo update helm install openstack-ccm cpo/openstack-cloud-controller-manager --values openstack-ccm.yaml ``` +## Using an external secret + +In order to use an external secret for the OCCM: + +```yaml +secret: + enabled: true + name: cloud-config + create: false +``` + +Create the secret with: + +```sh +kubectl create secret -n kube-system generic cloud-config --from-file=./cloud.conf +``` + +## Tolerations + +To deploy OCCM to worker nodes only (e.g. when the controlplane is isolated), adjust the tolerations in the chart: + +```yaml +tolerations: + - key: node.cloudprovider.kubernetes.io/uninitialized + value: "true" + effect: NoSchedule +``` + ## Unsupported configurations - The chart does not support the mounting of custom `clouds.yaml` files. Therefore, the following config values in the `[Global]` section won’t have any effect: diff --git a/charts/openstack-cloud-controller-manager/templates/clusterrole.yaml b/charts/openstack-cloud-controller-manager/templates/clusterrole.yaml index 7eee6c4e..6786931f 100644 --- a/charts/openstack-cloud-controller-manager/templates/clusterrole.yaml +++ b/charts/openstack-cloud-controller-manager/templates/clusterrole.yaml @@ -1,7 +1,7 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: system:openstack-cloud-controller-manager + name: {{ .Values.clusterRoleName }} annotations: {{- with .Values.commonAnnotations }} {{- toYaml . | nindent 4 }} diff --git a/charts/openstack-cloud-controller-manager/templates/clusterrolebinding.yaml b/charts/openstack-cloud-controller-manager/templates/clusterrolebinding.yaml index f19f0ef9..a5727109 100644 --- a/charts/openstack-cloud-controller-manager/templates/clusterrolebinding.yaml +++ b/charts/openstack-cloud-controller-manager/templates/clusterrolebinding.yaml @@ -1,7 +1,8 @@ +--- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: system:openstack-cloud-controller-manager + name: {{ .Values.clusterRoleName }} annotations: {{- with .Values.commonAnnotations }} {{- toYaml . 
| nindent 4 }} @@ -9,8 +10,8 @@ metadata: roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: system:openstack-cloud-controller-manager + name: {{ .Values.clusterRoleName }} subjects: - kind: ServiceAccount - name: openstack-cloud-controller-manager + name: {{ .Values.serviceAccountName }} namespace: {{ .Release.Namespace | quote }} diff --git a/charts/openstack-cloud-controller-manager/templates/daemonset.yaml b/charts/openstack-cloud-controller-manager/templates/daemonset.yaml index 03d82856..1808dee9 100644 --- a/charts/openstack-cloud-controller-manager/templates/daemonset.yaml +++ b/charts/openstack-cloud-controller-manager/templates/daemonset.yaml @@ -18,14 +18,17 @@ spec: template: metadata: annotations: - checksum/config: {{ include "cloudConfig" . | sha256sum }} - labels: - {{- include "occm.controllermanager.labels" . | nindent 8 }} - annotations: + checksum/config: {{ include "cloudConfig" . | sha256sum }} {{- with .Values.commonAnnotations }} {{- toYaml . | nindent 8 }} {{- end }} + labels: + {{- include "occm.controllermanager.labels" . | nindent 8 }} spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} {{- with .Values.nodeSelector }} nodeSelector: {{- toYaml . | nindent 8 }} @@ -38,7 +41,11 @@ spec: tolerations: {{- toYaml . | nindent 8 }} {{- end }} - serviceAccountName: openstack-cloud-controller-manager + {{- with .Values.hostAliases }} + hostAliases: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ .Values.serviceAccountName }} containers: - name: openstack-cloud-controller-manager image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" @@ -67,10 +74,14 @@ spec: name: http protocol: TCP {{- end }} + {{- if or (.Values.extraVolumeMounts) (.Values.secret.enabled) }} volumeMounts: + {{- end }} + {{- if .Values.secret.enabled }} - mountPath: /etc/config name: cloud-config-volume readOnly: true + {{- end }} {{- if .Values.extraVolumeMounts }} {{- toYaml .Values.extraVolumeMounts | nindent 12 }} {{- end }} @@ -91,14 +102,25 @@ spec: value: /etc/config/cloud.conf - name: CLUSTER_NAME value: {{ .Values.cluster.name }} + {{- if .Values.extraEnv }} + {{- toYaml .Values.extraEnv | nindent 12 }} + {{- end }} {{- if .Values.extraInitContainers }} initContainers: {{ toYaml .Values.extraInitContainers | nindent 6 }} {{- end }} hostNetwork: true + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} + {{- end }} + + {{- if or (.Values.extraVolumes) (.Values.secret.enabled) }} volumes: + {{- end }} + {{- if .Values.secret.enabled }} - name: cloud-config-volume secret: secretName: {{ .Values.secret.name }} + {{- end }} {{- if .Values.extraVolumes }} {{ toYaml .Values.extraVolumes | nindent 6 }} {{- end }} diff --git a/charts/openstack-cloud-controller-manager/templates/secret.yaml b/charts/openstack-cloud-controller-manager/templates/secret.yaml index ea35e01e..66c6352e 100644 --- a/charts/openstack-cloud-controller-manager/templates/secret.yaml +++ b/charts/openstack-cloud-controller-manager/templates/secret.yaml @@ -1,4 +1,4 @@ -{{- if .Values.secret.create }} +{{- if and (.Values.secret.create) (.Values.secret.enabled) }} apiVersion: v1 kind: Secret metadata: diff --git a/charts/openstack-cloud-controller-manager/templates/serviceaccount.yaml b/charts/openstack-cloud-controller-manager/templates/serviceaccount.yaml index e24737e1..f97f1c8a 100644 --- a/charts/openstack-cloud-controller-manager/templates/serviceaccount.yaml 
+++ b/charts/openstack-cloud-controller-manager/templates/serviceaccount.yaml @@ -1,7 +1,7 @@ apiVersion: v1 kind: ServiceAccount metadata: - name: openstack-cloud-controller-manager + name: {{ .Values.serviceAccountName }} namespace: {{ .Release.Namespace }} annotations: {{- with .Values.commonAnnotations }} diff --git a/charts/openstack-cloud-controller-manager/values.yaml b/charts/openstack-cloud-controller-manager/values.yaml index bb5cdc9f..2e5a708e 100644 --- a/charts/openstack-cloud-controller-manager/values.yaml +++ b/charts/openstack-cloud-controller-manager/values.yaml @@ -9,6 +9,11 @@ commonAnnotations: {} # "helm.sh/hook-weight": "-100" # "helm.sh/hook-delete-policy": before-hook-creation +# List of secrets to use as image pull secret +imagePullSecrets: [] +# - pull-secret-1 +# - pull-secret-2 + # Image repository name and tag image: repository: registry.k8s.io/provider-os/openstack-cloud-controller-manager @@ -20,6 +25,11 @@ extraInitContainers: [] # image: busybox # command: ['sh', '-c', 'echo waiting for 10 seconds; sleep 10;'] +# Additional environment variables for the cloud-controller-manager. +extraEnv: [] +# - name: OS_CCM_REGIONAL +# value: "true" + # Set resources for Kubernetes daemonset resources: {} # resources: @@ -36,28 +46,25 @@ livenessProbe: {} # Set readinessProbe in the same way like livenessProbe readinessProbe: {} -# Set nodeSelector where the controller shut run, i.e. controlplane nodes -nodeSelector: [] -# nodeSelector: -# node-role.kubernetes.io/controlplane: "true" - -# Set tolerations for nodes where the controller should run, i.e. node should uninitialized, controlplane... -tolerations: [] -# tolerations: -# - key: node.cloudprovider.kubernetes.io/uninitialized -# value: "true" -# effect: NoSchedule -# - key: node-role.kubernetes.io/controlplane -# value: "true" -# effect: NoSchedule -# - key: node-role.kubernetes.io/etcd -# value: "true" -# effect: NoExecute +# Set nodeSelector where the controller should run, i.e. controlplane nodes +nodeSelector: + node-role.kubernetes.io/control-plane: "" + +# Set tolerations for nodes where the controller should run, i.e. node +# should be uninitialized, controlplane... +tolerations: + - key: node.cloudprovider.kubernetes.io/uninitialized + value: "true" + effect: NoSchedule + - key: node-role.kubernetes.io/control-plane + effect: NoSchedule # Set security settings for the controller pods # For all available options, see https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#podsecuritycontext-v1-core podSecurityContext: runAsUser: 1001 + # seccompProfile: + # type: RuntimeDefault # List of controllers should be enabled. # Use '*' to enable all controllers. @@ -81,7 +88,11 @@ serviceMonitor: {} # Create a secret resource cloud-config (or other name) to store credentials and settings from cloudConfig # You can also provide your own secret (not created by the Helm chart), in this case set create to false # and adjust the name of the secret as necessary +# If you don't want to use a secret (because you are using something like an agent injector to inject the cloud config file) +# you can disable the secret usage by setting enabled to false. +# If you disable the secret, you have to insert the cloud config file into the path /etc/cloud/config.
secret: + enabled: true create: true name: cloud-config @@ -98,6 +109,16 @@ cloudConfig: blockStorage: metadata: +# Allow for specifying internal IP addresses for multiple hostnames +# hostAliases: +# - ip: "10.0.0.1" +# hostnames: +# - "keystone.hostname.com" + +## Pod priority settings +## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +priorityClassName: + # The following three volumes are required to use all OCCM controllers, # but might not be needed if you just use a specific controller # Additional volumes that should be available to the pods: @@ -120,3 +141,7 @@ extraVolumeMounts: # cluster name that used for created cluster cluster: name: kubernetes + +clusterRoleName: system:cloud-controller-manager + +serviceAccountName: cloud-controller-manager diff --git a/cmd/barbican-kms-plugin/main.go b/cmd/barbican-kms-plugin/main.go index 52a74a72..42a186d3 100644 --- a/cmd/barbican-kms-plugin/main.go +++ b/cmd/barbican-kms-plugin/main.go @@ -17,61 +17,43 @@ limitations under the License. package main import ( - "flag" "os" "os/signal" "github.com/spf13/cobra" "golang.org/x/sys/unix" "k8s.io/cloud-provider-openstack/pkg/kms/server" + "k8s.io/cloud-provider-openstack/pkg/version" "k8s.io/component-base/cli" "k8s.io/klog/v2" ) var ( - socketpath string - cloudconfig string + socketPath string + cloudConfig string ) func main() { - // Glog requires this otherwise it complains. - if err := flag.CommandLine.Parse(nil); err != nil { - klog.Fatalf("Unable to parse flags: %v", err) - } - // This is a temporary hack to enable proper logging until upstream dependencies - // are migrated to fully utilize klog instead of glog. - klogFlags := flag.NewFlagSet("klog", flag.ExitOnError) - klog.InitFlags(klogFlags) - // Sync the glog and klog flags. 
- flag.CommandLine.VisitAll(func(f1 *flag.Flag) { - f2 := klogFlags.Lookup(f1.Name) - if f2 != nil { - value := f1.Value.String() - _ = f2.Value.Set(value) - } - }) - cmd := &cobra.Command{ Use: "barbican-kms-plugin", - Short: "Barbican KMS plugin for kubernetes", + Short: "Barbican KMS plugin for Kubernetes", RunE: func(cmd *cobra.Command, args []string) error { - sigchan := make(chan os.Signal, 1) - signal.Notify(sigchan, unix.SIGTERM, unix.SIGINT) - err := server.Run(cloudconfig, socketpath, sigchan) + sigChan := make(chan os.Signal, 1) + signal.Notify(sigChan, unix.SIGTERM, unix.SIGINT) + err := server.Run(cloudConfig, socketPath, sigChan) return err }, + Version: version.Version, } - cmd.Flags().AddGoFlagSet(flag.CommandLine) - - cmd.PersistentFlags().StringVar(&socketpath, "socketpath", "", "Barbican KMS Plugin unix socket endpoint") + cmd.PersistentFlags().StringVar(&socketPath, "socketpath", "", "Barbican KMS Plugin unix socket endpoint") if err := cmd.MarkPersistentFlagRequired("socketpath"); err != nil { - klog.Fatalf("Unable to mark flag socketpath to be required: %v", err) + klog.Fatalf("Unable to mark flag socketpath as required: %v", err) } - cmd.PersistentFlags().StringVar(&cloudconfig, "cloud-config", "", "Barbican KMS Plugin cloud config") + cmd.PersistentFlags().StringVar(&cloudConfig, "cloud-config", "", "Barbican KMS Plugin cloud config") if err := cmd.MarkPersistentFlagRequired("cloud-config"); err != nil { - klog.Fatalf("Unable to mark flag cloud-config to be required: %v", err) + klog.Fatalf("Unable to mark flag cloud-config as required: %v", err) } code := cli.Run(cmd) diff --git a/cmd/cinder-csi-plugin/main.go b/cmd/cinder-csi-plugin/main.go index 159cc066..b658e47f 100644 --- a/cmd/cinder-csi-plugin/main.go +++ b/cmd/cinder-csi-plugin/main.go @@ -35,8 +35,6 @@ limitations under the License. package main import ( - "flag" - "fmt" "os" "github.com/edgelesssys/constellation/v2/csi/cryptmapper" @@ -47,6 +45,7 @@ import ( "k8s.io/cloud-provider-openstack/pkg/csi/cinder/openstack" "k8s.io/cloud-provider-openstack/pkg/util/metadata" "k8s.io/cloud-provider-openstack/pkg/util/mount" + "k8s.io/cloud-provider-openstack/pkg/version" "k8s.io/component-base/cli" "k8s.io/klog/v2" ) @@ -54,44 +53,20 @@ import ( var ( endpoint string nodeID string - cloudconfig []string + cloudConfig []string cluster string httpEndpoint string kmsAddr string ) func main() { - if err := flag.CommandLine.Parse([]string{}); err != nil { - klog.Fatalf("Unable to parse flags: %v", err) - } - cmd := &cobra.Command{ Use: "Cinder", Short: "CSI based Cinder driver", - PersistentPreRunE: func(cmd *cobra.Command, args []string) error { - // Glog requires this otherwise it complains. - if err := flag.CommandLine.Parse(nil); err != nil { - return fmt.Errorf("unable to parse flags: %w", err) - } - - // This is a temporary hack to enable proper logging until upstream dependencies - // are migrated to fully utilize klog instead of glog. - klogFlags := flag.NewFlagSet("klog", flag.ExitOnError) - klog.InitFlags(klogFlags) - - // Sync the glog and klog flags. 
- cmd.Flags().VisitAll(func(f1 *pflag.Flag) { - f2 := klogFlags.Lookup(f1.Name) - if f2 != nil { - value := f1.Value.String() - _ = f2.Value.Set(value) - } - }) - return nil - }, Run: func(cmd *cobra.Command, args []string) { handle() }, + Version: version.Version, } cmd.PersistentFlags().StringVar(&nodeID, "nodeid", "", "node id") @@ -104,13 +79,13 @@ func main() { klog.Fatalf("Unable to mark flag endpoint to be required: %v", err) } - cmd.PersistentFlags().StringSliceVar(&cloudconfig, "cloud-config", nil, "CSI driver cloud config. This option can be given multiple times") + cmd.PersistentFlags().StringSliceVar(&cloudConfig, "cloud-config", nil, "CSI driver cloud config. This option can be given multiple times") if err := cmd.MarkPersistentFlagRequired("cloud-config"); err != nil { klog.Fatalf("Unable to mark flag cloud-config to be required: %v", err) } cmd.PersistentFlags().StringVar(&cluster, "cluster", "", "The identifier of the cluster that the plugin is running in.") - cmd.PersistentFlags().StringVar(&httpEndpoint, "http-endpoint", "", "The TCP network address where the HTTP server for diagnostics, including metrics and leader election health check, will listen (example: `:8080`). The default is empty string, which means the server is disabled.") + cmd.PersistentFlags().StringVar(&httpEndpoint, "http-endpoint", "", "The TCP network address where the HTTP server for providing metrics for diagnostics, including metrics and leader election health check, will listen (example: `:8080`). The default is empty string, which means the server is disabled.") cmd.PersistentFlags().StringVar(&kmsAddr, "kms-addr", "kms.kube-system:9000", "Address of Constellation's KMS. Used to request keys (default: kms.kube-system:9000)") if err := cmd.MarkPersistentFlagRequired("kms-addr"); err != nil { @@ -124,26 +99,22 @@ func main() { } func handle() { - // Initialize cloud d := cinder.NewDriver(endpoint, cluster) - openstack.InitOpenStackProvider(cloudconfig, httpEndpoint) + openstack.InitOpenStackProvider(cloudConfig, httpEndpoint) cloud, err := openstack.GetOpenStackProvider() if err != nil { klog.Warningf("Failed to GetOpenStackProvider: %v", err) return } - //Initialize mount + // Initialize mount mount := mount.GetMountProvider() - //Initialize Metadata + // Initialize Metadata metadata := metadata.GetMetadataProvider(cloud.GetMetadataOpts().SearchOrder) - //Initialize CryptMapper - cm := cryptmapper.New( - cryptKms.NewConstellationKMS(kmsAddr), - &cryptmapper.CryptDevice{}, - ) + // Initialize CryptMapper + cm := cryptmapper.New(cryptKms.NewConstellationKMS(kmsAddr)) d.SetupDriver(cloud, mount, metadata, cm) d.Run() diff --git a/cmd/client-keystone-auth/main.go b/cmd/client-keystone-auth/main.go index 7b880e78..0612507b 100644 --- a/cmd/client-keystone-auth/main.go +++ b/cmd/client-keystone-auth/main.go @@ -31,6 +31,7 @@ import ( "golang.org/x/term" "k8s.io/cloud-provider-openstack/pkg/identity/keystone" + "k8s.io/cloud-provider-openstack/pkg/version" kflag "k8s.io/component-base/cli/flag" "k8s.io/klog/v2" ) @@ -137,24 +138,6 @@ func argumentsAreSet(url, user, project, password, domain, applicationCredential } func main() { - // Glog requires this otherwise it complains. - if err := flag.CommandLine.Parse(nil); err != nil { - klog.Fatalf("Unable to parse flags: %v", err) - } - // This is a temporary hack to enable proper logging until upstream dependencies - // are migrated to fully utilize klog instead of glog. 
- klogFlags := flag.NewFlagSet("klog", flag.ExitOnError) - klog.InitFlags(klogFlags) - - // Sync the glog and klog flags. - flag.CommandLine.VisitAll(func(f1 *flag.Flag) { - f2 := klogFlags.Lookup(f1.Name) - if f2 != nil { - value := f1.Value.String() - _ = f2.Value.Set(value) - } - }) - var url string var domain string var user string @@ -168,6 +151,7 @@ func main() { var applicationCredentialID string var applicationCredentialName string var applicationCredentialSecret string + var showVersion bool pflag.StringVar(&url, "keystone-url", os.Getenv("OS_AUTH_URL"), "URL for the OpenStack Keystone API") pflag.StringVar(&domain, "domain-name", os.Getenv("OS_DOMAIN_NAME"), "Keystone domain name") @@ -180,18 +164,28 @@ func main() { pflag.StringVar(&applicationCredentialID, "application-credential-id", os.Getenv("OS_APPLICATION_CREDENTIAL_ID"), "Application Credential ID") pflag.StringVar(&applicationCredentialName, "application-credential-name", os.Getenv("OS_APPLICATION_CREDENTIAL_NAME"), "Application Credential Name") pflag.StringVar(&applicationCredentialSecret, "application-credential-secret", os.Getenv("OS_APPLICATION_CREDENTIAL_SECRET"), "Application Credential Secret") + pflag.BoolVar(&showVersion, "version", false, "Show current version and exit") logs.AddFlags(pflag.CommandLine) - logs.InitLogs() - defer logs.FlushLogs() + klogFlags := flag.NewFlagSet("klog", flag.ExitOnError) + klog.InitFlags(klogFlags) pflag.CommandLine.AddGoFlagSet(klogFlags) + kflag.InitFlags() + if showVersion { + fmt.Println(version.Version) + os.Exit(0) + } + + logs.InitLogs() + defer logs.FlushLogs() + // Generate Gophercloud Auth Options based on input data from stdin // if IsTerminal returns "true", or from env variables otherwise. if !term.IsTerminal(int(os.Stdin.Fd())) { - // If all requiered arguments are set use them + // If all required arguments are set use them if argumentsAreSet(url, user, project, password, domain, applicationCredentialID, applicationCredentialName, applicationCredentialSecret) { options.AuthOptions = gophercloud.AuthOptions{ IdentityEndpoint: url, diff --git a/cmd/k8s-keystone-auth/main.go b/cmd/k8s-keystone-auth/main.go index ddb464f5..fd6cdefe 100644 --- a/cmd/k8s-keystone-auth/main.go +++ b/cmd/k8s-keystone-auth/main.go @@ -15,47 +15,38 @@ limitations under the License. package main import ( - "flag" + "fmt" "os" "github.com/spf13/pflag" "k8s.io/klog/v2" "k8s.io/cloud-provider-openstack/pkg/identity/keystone" + "k8s.io/cloud-provider-openstack/pkg/version" kflag "k8s.io/component-base/cli/flag" "k8s.io/component-base/logs" ) func main() { - // Glog requires this otherwise it complains. - err := flag.CommandLine.Parse(nil) - if err != nil { - klog.Fatalf("Unable to parse flags: %v", err) - } - // This is a temporary hack to enable proper logging until upstream dependencies - // are migrated to fully utilize klog instead of glog. - klogFlags := flag.NewFlagSet("klog", flag.ExitOnError) - klog.InitFlags(klogFlags) + var showVersion bool + pflag.BoolVar(&showVersion, "version", false, "Show current version and exit") logs.AddFlags(pflag.CommandLine) keystone.AddExtraFlags(pflag.CommandLine) - // Sync the glog and klog flags. 
- flag.CommandLine.VisitAll(func(f1 *flag.Flag) { - f2 := klogFlags.Lookup(f1.Name) - if f2 != nil { - value := f1.Value.String() - _ = f2.Value.Set(value) - } - }) - logs.InitLogs() defer logs.FlushLogs() config := keystone.NewConfig() config.AddFlags(pflag.CommandLine) + kflag.InitFlags() + if showVersion { + fmt.Println(version.Version) + os.Exit(0) + } + if err := config.ValidateFlags(); err != nil { klog.Errorf("%v", err) os.Exit(1) diff --git a/cmd/manila-csi-plugin/main.go b/cmd/manila-csi-plugin/main.go index bc08137c..1087a332 100644 --- a/cmd/manila-csi-plugin/main.go +++ b/cmd/manila-csi-plugin/main.go @@ -17,34 +17,37 @@ limitations under the License. package main import ( - "flag" "fmt" "os" "strings" "github.com/spf13/cobra" - "github.com/spf13/pflag" "k8s.io/cloud-provider-openstack/pkg/csi/manila" "k8s.io/cloud-provider-openstack/pkg/csi/manila/csiclient" "k8s.io/cloud-provider-openstack/pkg/csi/manila/manilaclient" - "k8s.io/cloud-provider-openstack/pkg/csi/manila/options" "k8s.io/cloud-provider-openstack/pkg/csi/manila/runtimeconfig" + "k8s.io/cloud-provider-openstack/pkg/version" "k8s.io/component-base/cli" "k8s.io/klog/v2" ) var ( - endpoint string + // Driver configuration driverName string - nodeID string - nodeAZ string - runtimeConfigFile string withTopology bool protoSelector string fwdEndpoint string - userAgentData []string compatibilitySettings string - clusterID string + + // Node information + nodeID string + nodeAZ string + clusterID string + + // Runtime options + endpoint string + runtimeConfigFile string + userAgentData []string ) func validateShareProtocolSelector(v string) error { @@ -60,77 +63,15 @@ func validateShareProtocolSelector(v string) error { return fmt.Errorf("share protocol %q not supported; supported protocols are %v", v, supportedShareProtocols) } -func parseCompatOpts() (*options.CompatibilityOptions, error) { - data := make(map[string]string) - - if compatibilitySettings == "" { - return options.NewCompatibilityOptions(data) - } - - knownCompatSettings := map[string]interface{}{} - - isKnown := func(v string) bool { - _, ok := knownCompatSettings[v] - return ok - } - - settings := strings.Split(compatibilitySettings, ",") - for _, elem := range settings { - setting := strings.SplitN(elem, "=", 2) - - if len(setting) != 2 || setting[0] == "" || setting[1] == "" { - return nil, fmt.Errorf("invalid format in option %v, expected KEY=VALUE", setting) - } - - if !isKnown(setting[0]) { - return nil, fmt.Errorf("unrecognized option '%s'", setting[0]) - } - - data[setting[0]] = setting[1] - } - - return options.NewCompatibilityOptions(data) -} - func main() { - if err := flag.CommandLine.Parse([]string{}); err != nil { - klog.Fatalf("Unable to parse flags: %v", err) - } - cmd := &cobra.Command{ Use: os.Args[0], Short: "CSI Manila driver", - PersistentPreRunE: func(cmd *cobra.Command, args []string) error { - // Glog requires this otherwise it complains. - if err := flag.CommandLine.Parse(nil); err != nil { - return fmt.Errorf("unable to parse flags: %w", err) - } - - // This is a temporary hack to enable proper logging until upstream dependencies - // are migrated to fully utilize klog instead of glog. - klogFlags := flag.NewFlagSet("klog", flag.ExitOnError) - klog.InitFlags(klogFlags) - - // Sync the glog and klog flags. 
- cmd.Flags().VisitAll(func(f1 *pflag.Flag) { - f2 := klogFlags.Lookup(f1.Name) - if f2 != nil { - value := f1.Value.String() - _ = f2.Value.Set(value) - } - }) - return nil - }, Run: func(cmd *cobra.Command, args []string) { if err := validateShareProtocolSelector(protoSelector); err != nil { klog.Fatalf(err.Error()) } - compatOpts, err := parseCompatOpts() - if err != nil { - klog.Fatalf("failed to parse compatibility settings: %v", err) - } - manilaClientBuilder := &manilaclient.ClientBuilder{UserAgent: "manila-csi-plugin", ExtraUserAgentData: userAgentData} csiClientBuilder := &csiclient.ClientBuilder{} @@ -145,7 +86,6 @@ func main() { FwdCSIEndpoint: fwdEndpoint, ManilaClientBuilder: manilaClientBuilder, CSIClientBuilder: csiClientBuilder, - CompatOpts: compatOpts, ClusterID: clusterID, }, ) @@ -158,10 +98,9 @@ func main() { d.Run() }, + Version: version.Version, } - cmd.Flags().AddGoFlagSet(flag.CommandLine) - cmd.PersistentFlags().StringVar(&endpoint, "endpoint", "unix://tmp/csi.sock", "CSI endpoint") cmd.PersistentFlags().StringVar(&driverName, "drivername", "manila.csi.openstack.org", "name of the driver") diff --git a/cmd/openstack-cloud-controller-manager/main.go b/cmd/openstack-cloud-controller-manager/main.go index cca812b4..720cf51a 100644 --- a/cmd/openstack-cloud-controller-manager/main.go +++ b/cmd/openstack-cloud-controller-manager/main.go @@ -29,6 +29,7 @@ import ( cloudprovider "k8s.io/cloud-provider" "k8s.io/cloud-provider/app" "k8s.io/cloud-provider/app/config" + "k8s.io/cloud-provider/names" "k8s.io/cloud-provider/options" cliflag "k8s.io/component-base/cli/flag" "k8s.io/component-base/logs" @@ -48,7 +49,7 @@ func main() { } fss := cliflag.NamedFlagSets{} - command := app.NewCloudControllerManagerCommand(ccmOptions, cloudInitializer, app.DefaultInitFuncConstructors, fss, wait.NeverStop) + command := app.NewCloudControllerManagerCommand(ccmOptions, cloudInitializer, app.DefaultInitFuncConstructors, names.CCMControllerAliases(), fss, wait.NeverStop) openstack.AddExtraFlags(pflag.CommandLine) diff --git a/docs/barbican-kms-plugin/using-barbican-kms-plugin.md b/docs/barbican-kms-plugin/using-barbican-kms-plugin.md index a289cbcc..c3a178fc 100644 --- a/docs/barbican-kms-plugin/using-barbican-kms-plugin.md +++ b/docs/barbican-kms-plugin/using-barbican-kms-plugin.md @@ -9,10 +9,10 @@ # OpenStack Barbican KMS Plugin -Kubernetes supports to encrypt etcd data with various providers listed [here](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/#providers), one of which is *kms*. The Kubernetes *kms provider* uses envelope encryption scheme. The data is encrypted using *DEK's* by kubernetes *kms provider*, *DEK's* are encrypted by *kms plugin* (e.g. barbican) using *KEK*. *Barbican-kms-plugin* uses *key* from barbican to encrypt/decrypt the *DEK's* as requested by kubernetes api server. +Kubernetes supports encrypting etcd data with various providers listed [here](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/#providers), one of which is *kms*. The Kubernetes *kms provider* uses an envelope encryption scheme: the data is encrypted with *DEKs* by the kubernetes *kms provider*, and the *DEKs* are in turn encrypted by the *kms plugin* (e.g. barbican) using a *KEK*. *Barbican-kms-plugin* uses a *key* from barbican to encrypt/decrypt the *DEKs* as requested by the kubernetes api server. The *KMS provider* uses gRPC to communicate with a specific *KMS plugin*.
-It is recommended to read following kubernetes documents +It is recommended to read the following kubernetes documents * [Encrypting Secret Data at Rest](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/#verifying-that-data-is-encrypted) * [Using a KMS provider for data encryption](https://kubernetes.io/docs/tasks/administer-cluster/kms-provider/) @@ -23,7 +23,7 @@ It is recommended to read following kubernetes documents The following installation steps assumes that you have a Kubernetes cluster(v1.10+) running on OpenStack Cloud. -### Create 256bit(32 byte) cbc key and store in barbican +### Create 256-bit (32 bytes) CBC key and store in barbican ``` $ openstack secret order create --name k8s_key --algorithm aes --mode cbc --bit-length 256 --payload-content-type=application/octet-stream key @@ -41,7 +41,7 @@ $ openstack secret order create --name k8s_key --algorithm aes --mode cbc --bit- +----------------+----------------------------------------------------------------------+ ``` -### Get the Key ID, It is the **uuid** in *Secret href* +### Get the key ID, it is the **uuid** in *Secret href* ``` $ openstack secret order get http://hostname:9311/v1/orders/e477a578-4a46-4c3f-b071-79e220207b0e @@ -60,7 +60,7 @@ $ openstack secret order get http://hostname:9311/v1/orders/e477a578-4a46-4c3f-b ``` -### Add the Key ID in your cloud-config file +### Add the key ID in your cloud-config file ```toml [Global] @@ -79,7 +79,7 @@ key-id = "" ### Run the KMS Plugin in your cluster This will provide a socket at `/var/lib/kms/kms.sock` on each of the control -plane node +plane nodes. ``` kubectl apply -f https://raw.githubusercontent.com/kubernetes/cloud-provider-openstack/master/manifests/barbican-kms/ds.yaml ``` @@ -87,10 +87,10 @@ kubectl apply -f https://raw.githubusercontent.com/kubernetes/cloud-provider-ope example `release-1.25` for kubernetes version 1.25. -### Create encrytion configuration +### Create encryption configuration Create `/etc/kubernetes/encryption-config.yaml` on each of your control plane -nodes +nodes. ```yaml kind: EncryptionConfig apiVersion: v1 @@ -108,7 +108,7 @@ resources: ### Update the API server -On each of your control plane nodes you need to edit the kube-apiserver, the +On each of your control plane nodes, you need to edit the kube-apiserver, the configuration is usually found at `/etc/kubernetes/manifests/kube-apiserver.yaml`. You can just edit it and kubernetes will eventually restart the pod with the new configuration. @@ -142,5 +142,5 @@ spec: ### Verify -[Verify the secret data is encrypted](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/#verifying-that-data-is-encrypted +[Verify that the secret data is encrypted](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/#verifying-that-data-is-encrypted ) diff --git a/docs/cinder-csi-plugin/development.md b/docs/cinder-csi-plugin/development.md index bf90ba89..3b796b74 100644 --- a/docs/cinder-csi-plugin/development.md +++ b/docs/cinder-csi-plugin/development.md @@ -14,4 +14,4 @@ There are two versions (`specVersion` and `Version`) defined at [driver.go](../../pkg/csi/cinder/driver.go) and both of them are in `x.y.z` format. `specVersion` indicates the version of [CSI spec](https://github.com/container-storage-interface/spec) that Cinder CSI supports whereas `Version` is the version of Cinder CSI driver itself. For new each release or major functionalities update such as options/params updated, you need increase `.z` version. 
If the CSI spec version is upgraded, the Cinder CSI version need bump as well.
-For example, `specVersion` is `1.2.0` and `Version` is `1.2.1` then there's a new feature or option added but CSI spec remains same, the `specVersion` need to be kept as `1.2.0` and `Version` need to be bumped to `1.2.2`. If the CSI spec is bumpped to `1.3.0`, the `specVersion` and `Version` need to be bumped to `1.3.0` accordingly.
+For example, if `specVersion` is `1.2.0` and `Version` is `1.2.1`, and a new feature or option is added while the CSI spec remains the same, `specVersion` needs to be kept at `1.2.0` and `Version` needs to be bumped to `1.2.2`. If the CSI spec is bumped to `1.3.0`, `specVersion` and `Version` need to be bumped to `1.3.0` accordingly.
diff --git a/docs/cinder-csi-plugin/examples.md b/docs/cinder-csi-plugin/examples.md
index fa34da0f..6a7e84d5 100644
--- a/docs/cinder-csi-plugin/examples.md
+++ b/docs/cinder-csi-plugin/examples.md
@@ -15,11 +15,11 @@
 # Cinder CSI Driver Usage Examples
 
-All following examples need to be used inside instance(s) provisoned by openstack, otherwise the attach action will fail due to fail to find instance ID from given openstack cloud.
+All the following examples need to be used inside instance(s) provisioned by openstack; otherwise the attach action will fail because the instance ID cannot be found in the given openstack cloud.
 
 ## Dynamic Volume Provisioning
 
-For dynamic provisoning , create StorageClass, PersistentVolumeClaim and pod to consume it.
+For dynamic provisioning, create a StorageClass, a PersistentVolumeClaim and a pod to consume it.
 Checkout [sample app](../../examples/cinder-csi-plugin/nginx.yaml) definition fore reference.
 
 ```kubectl -f examples/cinder-csi-plugin/nginx.yaml create```
@@ -349,7 +349,7 @@ NAME  READY   STATUS    RESTARTS   AGE
 app   1/1     Running   0          5m11s
 ```
 
-Of course, A new availability zone `nova1` can be created in openstack side to satisify the requirement as well.
+Of course, a new availability zone `nova1` can be created on the openstack side to satisfy the requirement as well.
 
 ## Disaster recovery of PV and PVC
 
diff --git a/docs/cinder-csi-plugin/features.md b/docs/cinder-csi-plugin/features.md
index f044da63..b033ff79 100644
--- a/docs/cinder-csi-plugin/features.md
+++ b/docs/cinder-csi-plugin/features.md
@@ -22,7 +22,7 @@
 
 ## Dynamic Provisioning
 
-Dynamic Provisoning uses persistence volume claim (PVC) to request the Kuberenetes to create the Cinder volume on behalf of user and consumes the volume from inside container.
+Dynamic Provisioning uses a persistent volume claim (PVC) to request that Kubernetes create the Cinder volume on behalf of the user; the volume is then consumed from inside a container.
 
 For usage, refer [sample app](./examples.md#dynamic-volume-provisioning)
 
@@ -90,7 +90,7 @@ This feature allows CSI volumes to be directly embedded in the Pod specification
 
 As of Kubernetes v1.21, this is beta feature and enabled by default.
 
-The key design idea is that the parameters for a volume claim are allowed inside a volume source of the Pod. For sample app, refer [here](../../examples/cinder-csi-plugin/ephemeral/generic-ephemeral-volumes.yaml)
+The key design idea is that the parameters for a volume claim are allowed inside a volume source of the Pod.
For sample app, refer [here](./examples.md#generic-ephemeral-volumes)
 
 ## Volume Cloning
 
diff --git a/docs/cinder-csi-plugin/sidecarcompatibility.md b/docs/cinder-csi-plugin/sidecarcompatibility.md
index fdf74a56..6515a5c4 100644
--- a/docs/cinder-csi-plugin/sidecarcompatibility.md
+++ b/docs/cinder-csi-plugin/sidecarcompatibility.md
@@ -11,6 +11,6 @@
 
 ## Set file type in provisioner
 
-There is a change in [csi-provisioner 2.0](https://github.com/kubernetes-csi/external-provisioner/blob/master/CHANGELOG/CHANGELOG-2.0.md): The fstype on provisioned PVs no longer defaults to "ext4". A defaultFStype arg is added to the provisioner. Admins can also specify this fstype via storage class parameter. If fstype is set in storage class parameter, it will be used. The sidecar arg is only checked if fstype is not set in the SC param.
+There is a change in [CSI provisioner 2.0](https://github.com/kubernetes-csi/external-provisioner/blob/master/CHANGELOG/CHANGELOG-2.0.md): the fstype on provisioned PVs no longer defaults to "ext4". A `defaultFSType` argument is added to the provisioner. Admins can also specify this fstype via a storage class parameter; if fstype is set in the storage class parameter, it will be used. The sidecar argument is only checked if fstype is not set in the SC param.
 
-By default, in the manifest file a `--default-fstype=ext4` default settings are added to [manifests](../../manifests/cinder-csi-plugin/cinder-csi-controllerplugin.yaml), if you want to update it , please add a `fsType: ext4` into the storageclass definition.
+By default, a `--default-fstype=ext4` setting is added in the [manifests](../../manifests/cinder-csi-plugin/cinder-csi-controllerplugin.yaml); if you want to override it, please add a `fsType: ext4` into the storage class definition.
diff --git a/docs/cinder-csi-plugin/using-cinder-csi-plugin.md b/docs/cinder-csi-plugin/using-cinder-csi-plugin.md
index 8152ea89..0d5f47a6 100644
--- a/docs/cinder-csi-plugin/using-cinder-csi-plugin.md
+++ b/docs/cinder-csi-plugin/using-cinder-csi-plugin.md
@@ -32,7 +32,7 @@ The Cinder CSI Driver is a CSI Specification compliant driver used by Container
 
 ## CSI Compatibility
 
-This plugin is compatible with CSI versions v1.3.0, v1.2.0 , v1.1.0, and v1.0.0
+This plugin is compatible with CSI v1.8.0
 
 ## Downloads
 
@@ -95,6 +95,15 @@ In addition to the standard set of klog flags, `cinder-csi-plugin` accepts the f
 
   Address of Constellation's KMS. Used to request keys.
 
+
+
--http-endpoint <HTTP server>
+
+  This argument is optional.
+
+  The TCP network address on which the HTTP server providing metrics for diagnostics will listen (example: `:8080`).
+
+  The default is an empty string, which means the server is disabled.
+
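For illustration, a minimal sketch of how such an optional metrics listener is commonly wired in Go, assuming the Prometheus client library; the plugin's actual wiring may differ:

```go
package main

import (
	"flag"
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// Mirrors the semantics above: an empty address disables the server.
	httpEndpoint := flag.String("http-endpoint", "", `TCP address for the metrics server, e.g. ":8080"`)
	flag.Parse()

	if *httpEndpoint == "" {
		return // diagnostics server disabled by default
	}

	mux := http.NewServeMux()
	mux.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(*httpEndpoint, mux))
}
```

In this sketch the diagnostics are then available at `:8080/metrics` once the flag is set.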
## Driver Config diff --git a/docs/developers-guide.md b/docs/developers-guide.md index 8335244b..0226ca3f 100644 --- a/docs/developers-guide.md +++ b/docs/developers-guide.md @@ -42,6 +42,65 @@ Choose the one you are familiar with and easy to customize. Config the cluster w Using kubeadm, openstack-cloud-controller-manager can be deployed easily with predefined manifests, see the [deployment guide with kubeadm](openstack-cloud-controller-manager/using-openstack-cloud-controller-manager.md#deploy-a-kubernetes-cluster-with-openstack-cloud-controller-manager-using-kubeadm). +### DevStack-based testing environment +You can also use our CI scripts to setup a simple development environment based on DevStack and k3s. To do so you need a fresh VM with Ubuntu 22.04. We've tested this with 4 vCPUs and 16 GB of RAM and that's recommended, but we never tested the lower bound, so feel free to try with less resources. + +Once the VM is up make sure your SSH keys allow logging in as `ubuntu` user and from your PC and cloud-provider-openstack directory run: + +``` +ansible-playbook -v \ + --user ubuntu \ + --inventory , \ + --ssh-common-args "-o StrictHostKeyChecking=no" \ + tests/playbooks/test-occm-e2e.yaml \ + -e octavia_provider=amphora \ + -e run_e2e=false +``` + +After it finishes you should be able to access both DevStack and Kubernetes: + +``` +# SSH to the VM +$ ssh ubuntu@ + +# Apparently we install K8s in root +$ sudo su + +# Load OpenStack credentials +$ source /home/stack/devstack/openrc admin admin + +# List all pods in K8s +$ kubectl get pods -A +NAMESPACE NAME READY STATUS RESTARTS AGE +kube-system openstack-cloud-controller-manager-55h4w 1/1 Running 0 56m +kube-system local-path-provisioner-5d56847996-5fqmn 1/1 Running 0 60m +kube-system coredns-5c6b6c5476-l5dz4 1/1 Running 0 60m + +# Deploy a simple pod +$ kubectl create deploy test --image quay.io/kuryr/demo:latest +deployment.apps/test created + +# Expose it as a LoadBalancer Service +$ kubectl expose deploy test --type LoadBalancer --target-port 8080 --port 80 +service/test exposed + +# Check if LB got created +$ openstack loadbalancer list ++--------------------------------------+--------------------------------------+----------------------------------+-------------+---------------------+------------------+----------+ +| id | name | project_id | vip_address | provisioning_status | operating_status | provider | ++--------------------------------------+--------------------------------------+----------------------------------+-------------+---------------------+------------------+----------+ +| 9873d6d7-8ff1-4b5e-a8f0-6bb61f4dd58f | kube_service_kubernetes_default_test | deca4a226df049a689992105a65fbb66 | 10.1.0.36 | ACTIVE | ONLINE | amphora | ++--------------------------------------+--------------------------------------+----------------------------------+-------------+---------------------+------------------+----------+ + +# Get the external IP of the service +$ kubectl get svc test +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +test LoadBalancer 10.43.71.235 172.24.5.121 80:30427/TCP 41m + +# Call the LB +$ curl 172.24.5.121 +test-846c6ffb69-w52vq: HELLO! I AM ALIVE!!! +``` ## Contribution Now you should have a kubernetes cluster running in openstack and openstack-cloud-controller-manager is deployed in the cluster. Over time, you may find a bug or have some feature requirements, it's time for contribution! 
@@ -106,7 +165,7 @@ In cloud-provider-openstack repo directory, run: ``` REGISTRY= \ VERSION= \ -make image-openstack-cloud-controller-manager +make build-local-image-openstack-cloud-controller-manager ``` The above command builds a container image locally with the name: @@ -121,7 +180,7 @@ You may notice there is a suffix `-amd64` because cloud-provider-openstack suppo ARCH=amd64 \ REGISTRY= \ VERSION= \ -make image-openstack-cloud-controller-manager +make build-local-image-openstack-cloud-controller-manager ``` If the kubernetes cluster can't access the image locally, you need to upload the image to container registry first by running `docker push`. diff --git a/docs/keystone-auth/using-keystone-webhook-authenticator-and-authorizer.md b/docs/keystone-auth/using-keystone-webhook-authenticator-and-authorizer.md index 85c68810..23c4f97c 100644 --- a/docs/keystone-auth/using-keystone-webhook-authenticator-and-authorizer.md +++ b/docs/keystone-auth/using-keystone-webhook-authenticator-and-authorizer.md @@ -252,7 +252,7 @@ it as a service. There are several things we need to notice in the deployment manifest: - We are using image - `registry.k8s.io/provider-os/k8s-keystone-auth:v1.27.0` + `registry.k8s.io/provider-os/k8s-keystone-auth:v1.29.0` - We use `k8s-auth-policy` configmap created above. - The pod uses service account `keystone-auth` created above. - We use `keystone-auth-certs` secret created above to inject the @@ -679,7 +679,7 @@ Next you have several ways to specify additional auth parameters: 2. Specify auth parameters in the `~/.kube/config` file. For more information read - [client keystone auth configuaration doc](./using-client-keystone-auth.md) + [client keystone auth configuration doc](./using-client-keystone-auth.md) and [credential plugins documentation](https://kubernetes.io/docs/admin/authentication/#client-go-credential-plugins) 3. Use the interactive mode. If auth parameters are not specified initially, diff --git a/docs/magnum-auto-healer/using-magnum-auto-healer.md b/docs/magnum-auto-healer/using-magnum-auto-healer.md index 4e3e51dd..1748438e 100644 --- a/docs/magnum-auto-healer/using-magnum-auto-healer.md +++ b/docs/magnum-auto-healer/using-magnum-auto-healer.md @@ -73,7 +73,7 @@ user_id=ceb61464a3d341ebabdf97d1d4b97099 user_project_id=b23a5e41d1af4c20974bf58b4dff8e5a password=password region=RegionOne -image=registry.k8s.io/provider-os/magnum-auto-healer:v1.27.0 +image=registry.k8s.io/provider-os/magnum-auto-healer:v1.29.0 cat < /etc/kubernetes/octavia-ingress-controller/deployment.yaml --- diff --git a/docs/openstack-cloud-controller-manager/expose-applications-using-loadbalancer-type-service.md b/docs/openstack-cloud-controller-manager/expose-applications-using-loadbalancer-type-service.md index 6c5bfaac..219adc4e 100644 --- a/docs/openstack-cloud-controller-manager/expose-applications-using-loadbalancer-type-service.md +++ b/docs/openstack-cloud-controller-manager/expose-applications-using-loadbalancer-type-service.md @@ -21,7 +21,7 @@ This page shows how to create Services of LoadBalancer type in Kubernetes cluste A LoadBalancer type Service is a typical way to expose an application to the internet. It relies on the cloud provider to create an external load balancer with an IP address in the relevant network space. Any traffic that is then directed to this IP address is forwarded on to the application’s service. 
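The same flow can also be driven programmatically. Below is a minimal client-go sketch that creates such a Service; the name and selector are illustrative, and it assumes a kubeconfig at the default location:

```go
package main

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Load credentials from ~/.kube/config; inside a cluster you would
	// use rest.InClusterConfig() instead.
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(config)

	svc := &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: "echoserver"},
		Spec: corev1.ServiceSpec{
			Type:     corev1.ServiceTypeLoadBalancer,
			Selector: map[string]string{"run": "echoserver"},
			Ports: []corev1.ServicePort{{
				Port:       80,
				TargetPort: intstr.FromInt(8080),
			}},
		},
	}

	// The cloud provider watches Services of this type and provisions an
	// external load balancer (an Octavia loadbalancer in this setup).
	if _, err := client.CoreV1().Services("default").Create(context.Background(), svc, metav1.CreateOptions{}); err != nil {
		panic(err)
	}
}
```

Once the cloud provider finishes provisioning, the external address appears in the Service's `status.loadBalancer.ingress` field.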
-**NOTE: for test/PoC with only 1 master node environment, you need remove the label `node.kubernetes.io/exclude-from-external-load-balancers` of the master node otherwise the loadbalancer will not be created. search the label [here](https://pkg.go.dev/k8s.io/api/core/v1) for further information.**
+**NOTE: for a test/PoC environment with only 1 master node, you need to remove the label `node.kubernetes.io/exclude-from-external-load-balancers` from the master node, otherwise the loadbalancer will not be created. Refer to [here](https://kubernetes.io/docs/reference/labels-annotations-taints/#node-kubernetes-io-exclude-from-external-load-balancers) for further information.**
 
 > Note: Different cloud providers may support different Service annotations and features.
 
@@ -33,7 +33,7 @@ Create an application of Deployment as the Service backend:
 kubectl run echoserver --image=gcr.io/google-containers/echoserver:1.10 --port=8080
 ```
 
-To provide the echoserver application with an internet-facing loadbalancer we can simply run the following:
+To provide the echoserver application with an internet-facing loadbalancer, we can simply run the following:
 
 ```shell
 cat <
-
-**NOTE: Now, the openstack-cloud-controller-manager implementation is based on OpenStack Octavia, Neutron-LBaaS has been deprecated in OpenStack since Queens release and no longer maintained in openstack-cloud-controller-manager. So make sure to use Octavia if upgrade to the latest openstack-cloud-controller-manager docker image.**
+**NOTE: The openstack-cloud-controller-manager implementation is now based on OpenStack Octavia; Neutron-LBaaS has been removed from openstack-cloud-controller-manager since v1.26.0. So make sure to use Octavia if upgrading to the latest openstack-cloud-controller-manager docker image.**
 
 ## Deploy a Kubernetes cluster with openstack-cloud-controller-manager using kubeadm
 
@@ -59,7 +59,7 @@ The following guide has been tested to install Kubernetes v1.17 on Ubuntu 18.04.
 kubectl create secret -n kube-system generic cloud-config --from-file=cloud.conf
 ```
 
-- Create RBAC resources and openstack-cloud-controller-manager deamonset.
+- Create RBAC resources and the openstack-cloud-controller-manager daemonset.
 
 ```shell
 kubectl apply -f https://raw.githubusercontent.com/kubernetes/cloud-provider-openstack/master/manifests/controller-manager/cloud-controller-manager-roles.yaml
@@ -85,7 +85,6 @@ Implementation of openstack-cloud-controller-manager relies on several OpenStack
 |--------------------------------|----------------|------------|----------|
 | Identity (Keystone)            | v3             | No         | Yes      |
 | Compute (Nova)                 | v2             | No         | Yes      |
-| Load Balancing (Neutron-LBaaS) | v1, v2         | Yes        | No       |
 | Load Balancing (Octavia)       | v2             | No         | Yes      |
 | Key Manager (Barbican)         | v1             | No         | No       |
 
@@ -174,14 +173,16 @@ The options in `Global` section are used for openstack-cloud-controller-manager
   For example, this option can be useful when having multiple or dual-stack interfaces attached to a node and needing a user-controlled, deterministic way of sorting the addresses. Default: ""
 
-### Router
+### Route
 
 * `router-id`
-  Specifies the Neutron router ID to manage Kubernetes cluster routes, e.g. for load balancers or compute instances that are not part of the Kubernetes cluster.
+  Specifies the Neutron router ID to activate the [route controller](https://kubernetes.io/docs/concepts/architecture/cloud-controller/#route-controller) to manage Kubernetes cluster routes.
+
+  **NOTE: This requires openstack-cloud-controller-manager's `--cluster-cidr` flag to be set.**
 
 ### Load Balancer
 
-Although the openstack-cloud-controller-manager was initially implemented with Neutron-LBaaS support, Octavia is recommended now because Neutron-LBaaS has been deprecated since Queens OpenStack release cycle and no longer accepted new feature enhancements. As a result, lots of advanced features in openstack-cloud-controller-manager rely on Octavia, even the CI is running based on Octavia enabled OpenStack environment. Functionalities are not guaranteed if using Neutron-LBaaS.
+Although the openstack-cloud-controller-manager was initially implemented with Neutron-LBaaS support, Octavia is mandatory now because Neutron-LBaaS has been deprecated since the Queens OpenStack release cycle and no longer accepts new feature enhancements. As a result, since v1.26.0 Neutron-LBaaS is no longer supported in openstack-cloud-controller-manager and has been removed from the code repo.
 
 * `enabled`
   Whether or not to enable the LoadBalancer type of Services integration at all. Default: true
@@ -200,10 +201,17 @@ Although the openstack-cloud-controller-manager was initially implemented with N
   Optional. Tags for the external network subnet used to create floating IP for the load balancer VIP. Can be overridden by the Service annotation `loadbalancer.openstack.org/floating-subnet-tags`. If multiple subnets match the first one with still available IPs is used.
 
 * `lb-method`
-  The load balancing algorithm used to create the load balancer pool. The value can be `ROUND_ROBIN`, `LEAST_CONNECTIONS`, or `SOURCE_IP`. Default: `ROUND_ROBIN`
+  The load balancing algorithm used to create the load balancer pool.
+
+  If `lb-provider` is set to "amphora" or "octavia" the value can be one of:
+  * `ROUND_ROBIN` (default)
+  * `LEAST_CONNECTIONS`
+  * `SOURCE_IP`
+
+  If `lb-provider` is set to "ovn" the value must be set to `SOURCE_IP_PORT`.
 
 * `lb-provider`
-  Optional. Used to specify the provider of the load balancer, e.g. "amphora" or "octavia". Only "amphora" or "octavia" provider are officially tested, other provider will cause a warning log.
+  Optional. Used to specify the provider of the load balancer, e.g. "amphora" (default), "octavia" (deprecated alias for "amphora"), or "ovn". Only the "amphora", "octavia", and "ovn" providers are officially tested; other providers will cause a warning log.
 
 * `lb-version`
   Optional. If specified, only "v2" is supported.
@@ -223,12 +231,17 @@ Although the openstack-cloud-controller-manager was initially implemented with N
 
 * `create-monitor`
   Indicates whether or not to create a health monitor for the service load balancer. A health monitor required for services that declare `externalTrafficPolicy: Local`. Default: false
 
+  NOTE: Health monitors for the `ovn` provider are only supported on OpenStack Wallaby and later.
+
 * `monitor-delay`
   The time, in seconds, between sending probes to members of the load balancer. Default: 5
 
 * `monitor-max-retries`
   The number of successful checks before changing the operating status of the load balancer member to ONLINE. A valid value is from 1 to 10. Default: 1
 
+* `monitor-max-retries-down`
+  The number of unsuccessful checks before changing the operating status of the load balancer member to ERROR. A valid value is from 1 to 10. Default: 3
+
 * `monitor-timeout`
   The maximum time, in seconds, that a monitor waits to connect backend before it times out.
Default: 3
 
@@ -268,9 +281,9 @@ Although the openstack-cloud-controller-manager was initially implemented with N
   This option is currently a workaround for the issue https://github.com/kubernetes/ingress-nginx/issues/3996, should be removed or refactored after the Kubernetes [KEP-1860](https://github.com/kubernetes/enhancements/tree/master/keps/sig-network/1860-kube-proxy-IP-node-binding) is implemented.
 
 * `default-tls-container-ref`
-  Reference to a tls container. This option works with Octavia, when this option is set then the cloud provider will create an Octavia Listener of type TERMINATED_HTTPS for a TLS Terminated loadbalancer.
+  Reference to a tls container or secret. This option works with Octavia; when this option is set, the cloud provider will create an Octavia Listener of type TERMINATED_HTTPS for a TLS-terminated loadbalancer.
 
-  Format for tls container ref: `https://{keymanager_host}/v1/containers/{uuid}`
+  Accepted formats for the tls container ref are `https://{keymanager_host}/v1/containers/{uuid}` and `https://{keymanager_host}/v1/secrets/{uuid}`.
 
   Check `container-store` parameter if you want to disable validation.
 
 * `container-store`
@@ -282,9 +295,16 @@ Although the openstack-cloud-controller-manager was initially implemented with N
 
 * `max-shared-lb`
   The maximum number of Services that share a load balancer. Default: 2
 
+* `provider-requires-serial-api-calls`
+  Some Octavia providers do not support creating fully-populated loadbalancers using a single [API
+  call](https://docs.openstack.org/api-ref/load-balancer/v2/?expanded=create-a-load-balancer-detail#creating-a-fully-populated-load-balancer).
+  Setting this option to true will create loadbalancers using serial API calls which first create an unpopulated
+  loadbalancer, then populate its listeners, pools and members. This is a compatibility option at the expense of
+  increased load on the OpenStack API. Default: false
+
 NOTE:
-* When using `ovn` provider service has limited scope - `create_monitor` is not supported and only supported `lb-method` is `SOURCE_IP`.
+* The environment variable `OCCM_WAIT_LB_ACTIVE_STEPS` controls the number of backoff steps used when waiting for a loadbalancer to become ready. The current default is 23 steps; setting the environment variable overrides the default. Refer to [Backoff.Steps](https://pkg.go.dev/k8s.io/apimachinery/pkg/util/wait#Backoff) for further information.
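These options live in the `[LoadBalancer]` section of `cloud.conf`, which the provider reads with gcfg (see `gopkg.in/gcfg.v1` in `go.mod`). The sketch below shows how a small, illustrative subset of the options above could map onto a Go struct; the field set and types are simplified and are not the provider's actual config type:

```go
package main

import (
	"fmt"

	gcfg "gopkg.in/gcfg.v1"
)

// Config holds an illustrative subset of the [LoadBalancer] options
// documented above; gcfg struct tags map the INI-style keys to fields.
type Config struct {
	LoadBalancer struct {
		Enabled       bool   `gcfg:"enabled"`
		LBProvider    string `gcfg:"lb-provider"`
		LBMethod      string `gcfg:"lb-method"`
		CreateMonitor bool   `gcfg:"create-monitor"`
		MonitorDelay  int    `gcfg:"monitor-delay"`
		MaxSharedLB   int    `gcfg:"max-shared-lb"`
	}
}

func main() {
	var cfg Config
	// cloud.conf is the same INI-style file mounted into the controller manager.
	if err := gcfg.ReadFileInto(&cfg, "cloud.conf"); err != nil {
		panic(err)
	}
	fmt.Printf("provider=%s method=%s monitor=%v\n",
		cfg.LoadBalancer.LBProvider, cfg.LoadBalancer.LBMethod, cfg.LoadBalancer.CreateMonitor)
}
```

Note that the constraints described above still apply at this level: with `lb-provider = "ovn"`, `lb-method` must be `SOURCE_IP_PORT`.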
### Metadata diff --git a/examples/manila-csi-plugin/nfs/auto-topology-aware/pod.yaml b/examples/manila-csi-plugin/nfs/auto-topology-aware/pod.yaml new file mode 100644 index 00000000..2b7451e2 --- /dev/null +++ b/examples/manila-csi-plugin/nfs/auto-topology-aware/pod.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Pod +metadata: + name: new-nfs-share-pod +spec: + containers: + - name: web-server + image: nginx + imagePullPolicy: IfNotPresent + volumeMounts: + - name: mypvc + mountPath: /var/lib/www + nodeSelector: + topology.kubernetes.io/zone: zone-1 + volumes: + - name: mypvc + persistentVolumeClaim: + claimName: new-nfs-share-pvc + readOnly: false diff --git a/examples/manila-csi-plugin/nfs/auto-topology-aware/pvc.yaml b/examples/manila-csi-plugin/nfs/auto-topology-aware/pvc.yaml new file mode 100644 index 00000000..720ff2ad --- /dev/null +++ b/examples/manila-csi-plugin/nfs/auto-topology-aware/pvc.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: new-nfs-share-pvc +spec: + accessModes: + - ReadWriteMany + resources: + requests: + storage: 1Gi + storageClassName: csi-manila-nfs diff --git a/examples/manila-csi-plugin/nfs/auto-topology-aware/storageclass.yaml b/examples/manila-csi-plugin/nfs/auto-topology-aware/storageclass.yaml new file mode 100644 index 00000000..b2b847ca --- /dev/null +++ b/examples/manila-csi-plugin/nfs/auto-topology-aware/storageclass.yaml @@ -0,0 +1,29 @@ +# Topology constraints example: +# +# Let's have two Manila AZs: zone-{1..2} +# Let's have six Nova AZs: zone-{1..6} +# +# Manila zone-1 is accessible from nodes in zone-1 only +# Manila zone-2 is accessible from nodes in zone-2 only +# +# We're provisioning into zone-1 +# availability parameter and allowedTopologies are empty, therefore the dynamic +# share provisioning with automatic availability zone selection takes place. +# The "volumeBindingMode" must be set to "WaitForFirstConsumer". 
+ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: csi-manila-nfs +provisioner: nfs.manila.csi.openstack.org +volumeBindingMode: WaitForFirstConsumer +allowVolumeExpansion: true +parameters: + type: default + autoTopology: "true" + csi.storage.k8s.io/provisioner-secret-name: csi-manila-secrets + csi.storage.k8s.io/provisioner-secret-namespace: default + csi.storage.k8s.io/node-stage-secret-name: csi-manila-secrets + csi.storage.k8s.io/node-stage-secret-namespace: default + csi.storage.k8s.io/node-publish-secret-name: csi-manila-secrets + csi.storage.k8s.io/node-publish-secret-namespace: default diff --git a/examples/manila-csi-plugin/nfs/dynamic-provisioning/pvc.yaml b/examples/manila-csi-plugin/nfs/dynamic-provisioning/pvc.yaml index 5b95d348..720ff2ad 100644 --- a/examples/manila-csi-plugin/nfs/dynamic-provisioning/pvc.yaml +++ b/examples/manila-csi-plugin/nfs/dynamic-provisioning/pvc.yaml @@ -9,4 +9,3 @@ spec: requests: storage: 1Gi storageClassName: csi-manila-nfs - diff --git a/examples/manila-csi-plugin/nfs/topology-aware/storageclass.yaml b/examples/manila-csi-plugin/nfs/topology-aware/storageclass.yaml index 5c9fd898..ddd90ad8 100644 --- a/examples/manila-csi-plugin/nfs/topology-aware/storageclass.yaml +++ b/examples/manila-csi-plugin/nfs/topology-aware/storageclass.yaml @@ -24,6 +24,7 @@ parameters: csi.storage.k8s.io/node-stage-secret-namespace: default csi.storage.k8s.io/node-publish-secret-name: csi-manila-secrets csi.storage.k8s.io/node-publish-secret-namespace: default +allowVolumeExpansion: true allowedTopologies: - matchLabelExpressions: - key: topology.manila.csi.openstack.org/zone diff --git a/examples/webhook/keystone-deployment.yaml b/examples/webhook/keystone-deployment.yaml index fce66b7e..d4a485a6 100644 --- a/examples/webhook/keystone-deployment.yaml +++ b/examples/webhook/keystone-deployment.yaml @@ -18,7 +18,7 @@ spec: serviceAccountName: k8s-keystone containers: - name: k8s-keystone-auth - image: registry.k8s.io/provider-os/k8s-keystone-auth:v1.27.0 + image: registry.k8s.io/provider-os/k8s-keystone-auth:v1.29.0 args: - ./bin/k8s-keystone-auth - --tls-cert-file diff --git a/go.mod b/go.mod index 61c42220..2c84b8b9 100644 --- a/go.mod +++ b/go.mod @@ -1,55 +1,69 @@ module k8s.io/cloud-provider-openstack -go 1.20 +go 1.21 require ( github.com/container-storage-interface/spec v1.8.0 - github.com/edgelesssys/constellation/v2 v2.7.1 + github.com/edgelesssys/constellation/v2 v2.15.0 github.com/go-chi/chi/v5 v5.0.8 - github.com/gophercloud/gophercloud v1.3.0 - github.com/gophercloud/utils v0.0.0-20230418172808-6eab72e966e1 + github.com/gophercloud/gophercloud v1.6.0 + github.com/gophercloud/utils v0.0.0-20231010081019-80377eca5d56 github.com/hashicorp/go-version v1.6.0 github.com/kubernetes-csi/csi-lib-utils v0.13.0 github.com/kubernetes-csi/csi-test/v5 v5.0.0 github.com/mitchellh/go-homedir v1.1.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/onsi/ginkgo/v2 v2.9.4 - github.com/onsi/gomega v1.27.6 + github.com/onsi/ginkgo/v2 v2.13.0 + github.com/onsi/gomega v1.29.0 github.com/pborman/uuid v1.2.1 - github.com/sirupsen/logrus v1.9.0 - github.com/spf13/cobra v1.7.0 + github.com/sirupsen/logrus v1.9.3 + github.com/spf13/cobra v1.8.0 github.com/spf13/pflag v1.0.5 github.com/spf13/viper v1.15.0 - github.com/stretchr/testify v1.8.2 - golang.org/x/net v0.9.0 - golang.org/x/sys v0.8.0 - golang.org/x/term v0.8.0 - google.golang.org/grpc v1.55.0 - google.golang.org/protobuf v1.30.0 + github.com/stretchr/testify v1.8.4 + go.uber.org/goleak v1.3.0 + 
golang.org/x/net v0.19.0 + golang.org/x/sys v0.15.0 + golang.org/x/term v0.15.0 + google.golang.org/grpc v1.60.0 + google.golang.org/protobuf v1.31.0 gopkg.in/gcfg.v1 v1.2.3 gopkg.in/godo.v2 v2.0.9 gopkg.in/yaml.v2 v2.4.0 - k8s.io/api v0.27.1 - k8s.io/apimachinery v0.27.1 - k8s.io/apiserver v0.27.1 - k8s.io/client-go v1.5.2 - k8s.io/cloud-provider v0.27.1 - k8s.io/component-base v0.27.1 - k8s.io/klog/v2 v2.100.1 - k8s.io/kms v0.27.1 - k8s.io/kubernetes v1.26.3 - k8s.io/mount-utils v0.27.1 - k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 + k8s.io/api v0.29.1 + k8s.io/apimachinery v0.29.1 + k8s.io/apiserver v0.29.1 + k8s.io/client-go v0.29.1 + k8s.io/cloud-provider v0.29.1 + k8s.io/component-base v0.29.1 + k8s.io/klog/v2 v2.110.1 + k8s.io/kms v0.29.1 + k8s.io/kubernetes v1.29.1 + k8s.io/mount-utils v0.29.1 + k8s.io/utils v0.0.0-20231127182322-b307cd553661 software.sslmate.com/src/go-pkcs12 v0.2.0 ) +// the below fixes the "go list -m all" execution +replace ( + github.com/martinjungblut/go-cryptsetup => github.com/daniel-weisse/go-cryptsetup v0.0.0-20230705150314-d8c07bd1723c + k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.29.1 + k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.29.1 + k8s.io/endpointslice => k8s.io/endpointslice v0.29.1 + k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.29.1 + k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.29.1 + k8s.io/kube-proxy => k8s.io/kube-proxy v0.29.1 + k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.29.1 + k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.29.1 + k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.29.1 +) + require ( github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect github.com/MichaelTJones/walk v0.0.0-20161122175330-4748e29d5718 // indirect github.com/NYTimes/gziphandler v1.1.1 // indirect github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230321174746-8dcc6526cfb1 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect - github.com/aws/aws-sdk-go v1.44.257 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect github.com/cenkalti/backoff/v4 v4.2.1 // indirect @@ -57,33 +71,33 @@ require ( github.com/coreos/go-semver v0.3.1 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/docker/distribution v2.8.1+incompatible // indirect - github.com/emicklei/go-restful/v3 v3.10.2 // indirect + github.com/distribution/reference v0.5.0 // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/evanphx/json-patch v5.6.0+incompatible // indirect github.com/felixge/httpsnoop v1.0.3 // indirect - github.com/fsnotify/fsnotify v1.6.0 // indirect - github.com/go-logr/logr v1.2.4 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/go-logr/logr v1.3.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect github.com/go-openapi/jsonreference v0.20.2 // indirect - github.com/go-openapi/swag v0.22.3 // indirect + github.com/go-openapi/swag v0.22.4 // indirect github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect - github.com/google/cel-go v0.15.0 // indirect - github.com/google/gnostic v0.6.9 // indirect - 
github.com/google/go-cmp v0.5.9 // indirect + github.com/google/cel-go v0.17.7 // indirect + github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/pprof v0.0.0-20230502171905-255e3b9b56de // indirect - github.com/google/uuid v1.3.0 // indirect + github.com/google/uuid v1.4.0 // indirect + github.com/gorilla/websocket v1.5.0 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect github.com/hashicorp/go-uuid v1.0.3 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/imdario/mergo v0.3.15 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/magiconair/properties v1.8.7 // indirect @@ -97,92 +111,62 @@ require ( github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/selinux v1.11.0 // indirect github.com/pelletier/go-toml/v2 v2.0.7 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_golang v1.15.1 // indirect + github.com/prometheus/client_golang v1.16.0 // indirect github.com/prometheus/client_model v0.4.0 // indirect - github.com/prometheus/common v0.43.0 // indirect - github.com/prometheus/procfs v0.9.0 // indirect - github.com/spf13/afero v1.9.5 // indirect - github.com/spf13/cast v1.5.0 // indirect + github.com/prometheus/common v0.44.0 // indirect + github.com/prometheus/procfs v0.10.1 // indirect + github.com/spf13/afero v1.10.0 // indirect + github.com/spf13/cast v1.5.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/stoewer/go-strcase v1.3.0 // indirect github.com/stretchr/objx v0.5.0 // indirect github.com/subosito/gotenv v1.4.2 // indirect - go.etcd.io/etcd/api/v3 v3.5.8 // indirect - go.etcd.io/etcd/client/pkg/v3 v3.5.8 // indirect - go.etcd.io/etcd/client/v3 v3.5.8 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.41.1 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.41.1 // indirect - go.opentelemetry.io/otel v1.15.1 // indirect - go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.15.1 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.15.1 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.15.1 // indirect - go.opentelemetry.io/otel/metric v0.38.1 // indirect - go.opentelemetry.io/otel/sdk v1.15.1 // indirect - go.opentelemetry.io/otel/trace v1.15.1 // indirect - go.opentelemetry.io/proto/otlp v0.19.0 // indirect - go.uber.org/atomic v1.11.0 // indirect + go.etcd.io/etcd/api/v3 v3.5.10 // indirect + go.etcd.io/etcd/client/pkg/v3 v3.5.10 // indirect + go.etcd.io/etcd/client/v3 v3.5.10 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 // indirect + go.opentelemetry.io/otel v1.19.0 // 
indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 // indirect + go.opentelemetry.io/otel/metric v1.19.0 // indirect + go.opentelemetry.io/otel/sdk v1.19.0 // indirect + go.opentelemetry.io/otel/trace v1.19.0 // indirect + go.opentelemetry.io/proto/otlp v1.0.0 // indirect go.uber.org/multierr v1.11.0 // indirect - go.uber.org/zap v1.24.0 // indirect - golang.org/x/crypto v0.8.0 // indirect - golang.org/x/exp v0.0.0-20230425010034-47ecfdc1ba53 // indirect - golang.org/x/oauth2 v0.7.0 // indirect - golang.org/x/sync v0.2.0 // indirect - golang.org/x/text v0.9.0 // indirect + go.uber.org/zap v1.26.0 // indirect + golang.org/x/crypto v0.17.0 // indirect + golang.org/x/exp v0.0.0-20230809150735-7b3493d9a819 // indirect + golang.org/x/oauth2 v0.13.0 // indirect + golang.org/x/sync v0.5.0 // indirect + golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.3.0 // indirect - golang.org/x/tools v0.8.0 // indirect - google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect + golang.org/x/tools v0.16.1 // indirect + google.golang.org/appengine v1.6.8 // indirect + google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20231012201019-e917dd12ba7a // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiextensions-apiserver v0.27.1 // indirect - k8s.io/component-helpers v0.27.1 // indirect - k8s.io/controller-manager v0.27.1 // indirect - k8s.io/csi-translation-lib v0.27.1 // indirect - k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f // indirect - k8s.io/kubectl v0.27.1 // indirect - k8s.io/pod-security-admission v0.27.1 // indirect - sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 // indirect + k8s.io/apiextensions-apiserver v0.29.1 // indirect + k8s.io/component-helpers v0.29.1 // indirect + k8s.io/controller-manager v0.29.1 // indirect + k8s.io/csi-translation-lib v0.29.1 // indirect + k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect + k8s.io/kubectl v0.29.1 // indirect + k8s.io/kubelet v0.29.1 // indirect + k8s.io/pod-security-admission v0.29.1 // indirect + sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect - sigs.k8s.io/yaml v1.3.0 // indirect -) - -replace ( - github.com/onsi/ginkgo/v2 => github.com/onsi/ginkgo/v2 v2.9.1 - k8s.io/api => k8s.io/api v0.27.1 - k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.27.1 - k8s.io/apimachinery => k8s.io/apimachinery v0.27.1 - k8s.io/apiserver => k8s.io/apiserver v0.27.1 - k8s.io/cli-runtime => k8s.io/cli-runtime v0.27.1 - k8s.io/client-go => k8s.io/client-go v0.27.1 - k8s.io/cloud-provider => k8s.io/cloud-provider v0.27.1 - k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.27.1 - k8s.io/code-generator => k8s.io/code-generator v0.27.1 - k8s.io/component-base => k8s.io/component-base v0.27.1 - k8s.io/component-helpers => k8s.io/component-helpers v0.27.1 - k8s.io/controller-manager => k8s.io/controller-manager v0.27.1 - k8s.io/cri-api => k8s.io/cri-api v0.27.1 - 
k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.27.1 - k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.27.1 - k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.27.1 - k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.27.1 - k8s.io/kube-proxy => k8s.io/kube-proxy v0.27.1 - k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.27.1 - k8s.io/kubectl => k8s.io/kubectl v0.27.1 - k8s.io/kubelet => k8s.io/kubelet v0.27.1 - k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.27.1 - k8s.io/metrics => k8s.io/metrics v0.27.1 - k8s.io/mount-utils => k8s.io/mount-utils v0.27.1 - k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.27.1 - k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.27.1 - k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.27.1 - k8s.io/sample-controller => k8s.io/sample-controller v0.27.1 + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect + sigs.k8s.io/yaml v1.4.0 // indirect ) diff --git a/go.sum b/go.sum index adf9119b..084ec319 100644 --- a/go.sum +++ b/go.sum @@ -17,15 +17,17 @@ cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHOb cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= -cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys= +cloud.google.com/go v0.110.8 h1:tyNdfIxjzaWctIiLYOTalaLKZ17SI44SKFW26QbOhME= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v1.19.0 h1:+9zda3WGgW1ZSTlVppLCYFIr48Pa35q1uG2N1itbCEQ= +cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY= +cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= @@ -47,25 +49,20 @@ github.com/MichaelTJones/walk v0.0.0-20161122175330-4748e29d5718 h1:FSsoaa1q4jAa github.com/MichaelTJones/walk v0.0.0-20161122175330-4748e29d5718/go.mod h1:VVwKsx9Dc8rNG55BWqogoJzGubjKnRoXdUvpGbWqeCc= github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230321174746-8dcc6526cfb1 
h1:X8MJ0fnN5FPdcGF5Ij2/OW+HgiJrRg3AfHAx1PJtIzM= github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230321174746-8dcc6526cfb1/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= -github.com/aws/aws-sdk-go v1.44.257 h1:HwelXYZZ8c34uFFhgVw3ybu2gB5fkk8KLj2idTvzZb8= -github.com/aws/aws-sdk-go v1.44.257/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= -github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= -github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -77,11 +74,11 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/container-storage-interface/spec v1.6.0/go.mod h1:8K96oQNkJ7pFcC2R9Z1ynGGBB1I93kcS6PGg3SsOk8s= github.com/container-storage-interface/spec v1.8.0 h1:D0vhF3PLIZwlwZEf2eNbpujGCNwspwTYf2idJRJx4xI= github.com/container-storage-interface/spec v1.8.0/go.mod h1:ROLik+GhPslwwWRNFF1KasPzroNARibH2rfz1rkg4H0= @@ -89,37 +86,42 @@ 
github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= +github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= +github.com/daniel-weisse/go-cryptsetup v0.0.0-20230705150314-d8c07bd1723c h1:ToajP6trZoiqlZ3Z4uoG1P02/wtqSw1AcowOXOYjATk= +github.com/daniel-weisse/go-cryptsetup v0.0.0-20230705150314-d8c07bd1723c/go.mod h1:gZoZ0+POlM1ge/VUxWpMmZVNPzzMJ7l436CgkQ5+qzU= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= -github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= -github.com/edgelesssys/constellation/v2 v2.7.1 h1:1v7CxFm1Ynm4/o3L3EAx+lcRgqnOvJXLCFrMnmMkTA8= -github.com/edgelesssys/constellation/v2 v2.7.1/go.mod h1:BkAahvBkSZivWW+uhGzLB/yh0ZohTyWE3a0d03106mE= -github.com/emicklei/go-restful/v3 v3.10.2 h1:hIovbnmBTLjHXkqEBUz3HGpXZdM7ZrE9fJIZIqlJLqE= -github.com/emicklei/go-restful/v3 v3.10.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= +github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/edgelesssys/constellation/v2 v2.15.0 h1:xQoA4DjETJMD6YiQeS7PdUnU8BxXuEAkQKsUHIz+9iY= +github.com/edgelesssys/constellation/v2 v2.15.0/go.mod h1:cYR8gYqtYfn6eCmMc3D5QPX7aRspaiuiVyyxigGcI4Q= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod 
h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= +github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0= -github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= -github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= -github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= +github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-chi/chi/v5 v5.0.8 h1:lD+NLqFcAi1ovnVZpsnObHGW4xb4J8lNmoYVfECH1Y0= github.com/go-chi/chi/v5 v5.0.8/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8= @@ -128,18 +130,19 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= -github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= +github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-logr/zapr v1.2.3 h1:a9vnzlIBPQBBkeaR9IuMUfmVOrQlkoC4YfPoFkX3T7A= +github.com/go-logr/zapr v1.2.4 h1:QHVo+6stLbfJmYGkQ7uGHUCu5hnAFAj6mDe6Ea0SeOo= +github.com/go-logr/zapr v1.2.4/go.mod h1:FyHWQIzQORZ0QVE1BtVHv3cKtNLuXsbNLtpuhNapBOA= github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= github.com/go-openapi/jsonpointer v0.19.6/go.mod 
h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= -github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= +github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= @@ -147,9 +150,10 @@ github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5x github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= +github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= -github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= +github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= +github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -184,10 +188,11 @@ github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiu github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= -github.com/google/cel-go v0.15.0 h1:OLNhWn8gPWnQz78aSp8RWjCV4sMVHciQqHj53adYUGU= -github.com/google/cel-go v0.15.0/go.mod h1:YzWEoI07MC/a/wj9in8GeVatqfypkldgBlwXh9bCwqY= -github.com/google/gnostic v0.6.9 h1:ZK/5VhkoX835RikCHpSUJV9a+S3e1zLh59YnyWeBW+0= -github.com/google/gnostic v0.6.9/go.mod h1:Nm8234We1lq6iB9OmlgNv3nH91XLLVZHCDayfA3xq+E= +github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/cel-go v0.17.7 h1:6ebJFzu1xO2n7TLtN+UBqShGBhlD85bhvglh5DpcfqQ= +github.com/google/cel-go v0.17.7/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp 
v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -200,8 +205,9 @@ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -224,25 +230,28 @@ github.com/google/pprof v0.0.0-20230502171905-255e3b9b56de/go.mod h1:79YE0hCXdHa github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= +github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= -github.com/gophercloud/gophercloud v1.3.0 h1:RUKyCMiZoQR3VlVR5E3K7PK1AC3/qppsWYo6dtBiqs8= github.com/gophercloud/gophercloud v1.3.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= -github.com/gophercloud/utils v0.0.0-20230418172808-6eab72e966e1 h1:vJyXd9+MB5vAKxpOo4z/PDSiPgKmEyJwHIDOdV4Y0KY= -github.com/gophercloud/utils v0.0.0-20230418172808-6eab72e966e1/go.mod h1:VSalo4adEk+3sNkmVJLnhHoOyOYYS8sTWLG4mv5BKto= -github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= +github.com/gophercloud/gophercloud v1.6.0 h1:JwJN1bauRnWPba5ueWs9IluONHteXPWjjK+MvfM4krY= +github.com/gophercloud/gophercloud v1.6.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= +github.com/gophercloud/utils v0.0.0-20231010081019-80377eca5d56 h1:sH7xkTfYzxIEgzq1tDHIMKRh1vThOEOGNsettdEeLbE= +github.com/gophercloud/utils v0.0.0-20231010081019-80377eca5d56/go.mod h1:VSalo4adEk+3sNkmVJLnhHoOyOYYS8sTWLG4mv5BKto= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= 
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2 h1:gDLXvp5S9izjldquuoAhDzccbskOL6tDC5jMSyx3zxE= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2/go.mod h1:7pdNwVWBBHGiCxa9lAszqCJMbfTISJ7oMftp8+UGV08= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= @@ -251,17 +260,15 @@ github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM= github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= -github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= -github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= -github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ= +github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= @@ -272,9 +279,9 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 
h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -287,8 +294,6 @@ github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0V github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/martinjungblut/go-cryptsetup v0.0.0-20220520180014-fd0874fd07a6 h1:YDjLk3wsL5ZLhLC4TIwIvT2NkSCAdAV6pzzZaRfj4jk= -github.com/martinjungblut/go-cryptsetup v0.0.0-20220520180014-fd0874fd07a6/go.mod h1:gZoZ0+POlM1ge/VUxWpMmZVNPzzMJ7l436CgkQ5+qzU= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/mgutz/str v1.2.0 h1:4IzWSdIz9qPQWLfKZ0rJcV0jcUDpxvP4JVZ4GXQyvSw= @@ -310,12 +315,24 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/onsi/ginkgo/v2 v2.9.1 h1:zie5Ly042PD3bsCvsSOPvRnFwyo3rKe64TJlD6nu0mk= -github.com/onsi/ginkgo/v2 v2.9.1/go.mod h1:FEcmzVcCHl+4o9bQZVab+4dC9+j+91t2FHSzmGAPfuo= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= +github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU= +github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= +github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= github.com/onsi/gomega v1.20.0/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo= -github.com/onsi/gomega v1.27.3/go.mod h1:5vG284IBtfDAmDyrK+eGyZmUgUlmi+Wngqo557cZ6Gw= -github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE= -github.com/onsi/gomega v1.27.6/go.mod 
h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= +github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg= +github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU= @@ -329,36 +346,36 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.15.1 h1:8tXpTmJbyH5lydzFPoxSIJ0J46jdh3tylbvM1xCv0LI= -github.com/prometheus/client_golang v1.15.1/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk= +github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= +github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= -github.com/prometheus/common v0.43.0 h1:iq+BVjvYLei5f27wiuNiB1DN6DYQkp1c8Bx0Vykh5us= -github.com/prometheus/common v0.43.0/go.mod h1:NCvr5cQIh3Y/gy73/RdVtC9r8xxrxwJnB+2lB3BxrFc= -github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= -github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= +github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= +github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= +github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= +github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= +github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= -github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM= -github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= 
-github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= -github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= -github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= -github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= +github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= +github.com/spf13/afero v1.10.0 h1:EaGW2JJh15aKOejeuJ+wpFSHnbd7GE6Wvp3TsNhb6LY= +github.com/spf13/afero v1.10.0/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= +github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= +github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= +github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= +github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.15.0 h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU= github.com/spf13/viper v1.15.0/go.mod h1:fFcTBJxvhhzSJiZy8n+PeW6t8l+KeT/uTARa0jHOQLA= -github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs= github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -374,66 +391,68 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE= -github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= -github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= +github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/yuin/goldmark v1.1.25/go.mod 
h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= -go.etcd.io/etcd/api/v3 v3.5.8 h1:Zf44zJszoU7zRV0X/nStPenegNXoFDWcB/MwrJbA+L4= -go.etcd.io/etcd/api/v3 v3.5.8/go.mod h1:uyAal843mC8uUVSLWz6eHa/d971iDGnCRpmKd2Z+X8k= -go.etcd.io/etcd/client/pkg/v3 v3.5.8 h1:tPp9YRn/UBFAHdhOQUII9eUs7aOK35eulpMhX4YBd+M= -go.etcd.io/etcd/client/pkg/v3 v3.5.8/go.mod h1:y+CzeSmkMpWN2Jyu1npecjB9BBnABxGM4pN8cGuJeL4= -go.etcd.io/etcd/client/v2 v2.305.7 h1:AELPkjNR3/igjbO7CjyF1fPuVPjrblliiKj+Y6xSGOU= -go.etcd.io/etcd/client/v3 v3.5.8 h1:B6ngTKZSWWowHEoaucOKHQR/AtZKaoHLiUpWxOLG4l4= -go.etcd.io/etcd/client/v3 v3.5.8/go.mod h1:idZYIPVkttBJBiRigkB5EM0MmEyx8jcl18zCV3F5noc= -go.etcd.io/etcd/pkg/v3 v3.5.7 h1:obOzeVwerFwZ9trMWapU/VjDcYUJb5OfgC1zqEGWO/0= -go.etcd.io/etcd/raft/v3 v3.5.7 h1:aN79qxLmV3SvIq84aNTliYGmjwsW6NqJSnqmI1HLJKc= -go.etcd.io/etcd/server/v3 v3.5.7 h1:BTBD8IJUV7YFgsczZMHhMTS67XuA4KpRquL0MFOJGRk= +go.etcd.io/bbolt v1.3.8 h1:xs88BrvEv273UsB79e0hcVrlUWmS0a8upikMFhSyAtA= +go.etcd.io/bbolt v1.3.8/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= +go.etcd.io/etcd/api/v3 v3.5.10 h1:szRajuUUbLyppkhs9K6BRtjY37l66XQQmw7oZRANE4k= +go.etcd.io/etcd/api/v3 v3.5.10/go.mod h1:TidfmT4Uycad3NM/o25fG3J07odo4GBB9hoxaodFCtI= +go.etcd.io/etcd/client/pkg/v3 v3.5.10 h1:kfYIdQftBnbAq8pUWFXfpuuxFSKzlmM5cSn76JByiT0= +go.etcd.io/etcd/client/pkg/v3 v3.5.10/go.mod h1:DYivfIviIuQ8+/lCq4vcxuseg2P2XbHygkKwFo9fc8U= +go.etcd.io/etcd/client/v2 v2.305.10 h1:MrmRktzv/XF8CvtQt+P6wLUlURaNpSDJHFZhe//2QE4= +go.etcd.io/etcd/client/v2 v2.305.10/go.mod h1:m3CKZi69HzilhVqtPDcjhSGp+kA1OmbNn0qamH80xjA= +go.etcd.io/etcd/client/v3 v3.5.10 h1:W9TXNZ+oB3MCd/8UjxHTWK5J9Nquw9fQBLJd5ne5/Ao= +go.etcd.io/etcd/client/v3 v3.5.10/go.mod h1:RVeBnDz2PUEZqTpgqwAtUd8nAPf5kjyFyND7P1VkOKc= +go.etcd.io/etcd/pkg/v3 v3.5.10 h1:WPR8K0e9kWl1gAhB5A7gEa5ZBTNkT9NdNWrR8Qpo1CM= +go.etcd.io/etcd/pkg/v3 v3.5.10/go.mod h1:TKTuCKKcF1zxmfKWDkfz5qqYaE3JncKKZPFf8c1nFUs= +go.etcd.io/etcd/raft/v3 v3.5.10 h1:cgNAYe7xrsrn/5kXMSaH8kM/Ky8mAdMqGOxyYwpP0LA= +go.etcd.io/etcd/raft/v3 v3.5.10/go.mod h1:odD6kr8XQXTy9oQnyMPBOr0TVe+gT0neQhElQ6jbGRc= +go.etcd.io/etcd/server/v3 v3.5.10 h1:4NOGyOwD5sUZ22PiWYKmfxqoeh72z6EhYjNosKGLmZg= +go.etcd.io/etcd/server/v3 v3.5.10/go.mod h1:gBplPHfs6YI0L+RpGkTQO7buDbHv5HJGG/Bst0/zIPo= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.41.1 h1:Ei1FUQ5CbSNkl2o/XAiksXSyQNAeJBX3ivqJpJ254Ak= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.41.1/go.mod 
h1:f7TOPTlEcliCBlOYPuNnZTuND71MVTAoINWIt1SmP/c= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.41.1 h1:pX+lppB8PArapyhS6nBStyQmkaDUPWdQf0UmEGRCQ54= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.41.1/go.mod h1:2FmkXne0k9nkp27LD/m+uoh8dNlstsiCJ7PLc/S72aI= -go.opentelemetry.io/otel v1.15.1 h1:3Iwq3lfRByPaws0f6bU3naAqOR1n5IeDWd9390kWHa8= -go.opentelemetry.io/otel v1.15.1/go.mod h1:mHHGEHVDLal6YrKMmk9LqC4a3sF5g+fHfrttQIB1NTc= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.15.1 h1:XYDQtNzdb2T4uM1pku2m76eSMDJgqhJ+6KzkqgQBALc= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.15.1/go.mod h1:uOTV75+LOzV+ODmL8ahRLWkFA3eQcSC2aAsbxIu4duk= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.15.1 h1:tyoeaUh8REKay72DVYsSEBYV18+fGONe+YYPaOxgLoE= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.15.1/go.mod h1:HUSnrjQQ19KX9ECjpQxufsF+3ioD3zISPMlauTPZu2g= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.15.1 h1:pIfoG5IAZFzp9EUlJzdSkpUwpaUAAnD+Ru1nBLTACIQ= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.15.1/go.mod h1:poNKBqF5+nR/6ke2oGTDjHfksrsHDOHXAl2g4+9ONsY= -go.opentelemetry.io/otel/metric v0.38.1 h1:2MM7m6wPw9B8Qv8iHygoAgkbejed59uUR6ezR5T3X2s= -go.opentelemetry.io/otel/metric v0.38.1/go.mod h1:FwqNHD3I/5iX9pfrRGZIlYICrJv0rHEUl2Ln5vdIVnQ= -go.opentelemetry.io/otel/sdk v1.15.1 h1:5FKR+skgpzvhPQHIEfcwMYjCBr14LWzs3uSqKiQzETI= -go.opentelemetry.io/otel/sdk v1.15.1/go.mod h1:8rVtxQfrbmbHKfqzpQkT5EzZMcbMBwTzNAggbEAM0KA= -go.opentelemetry.io/otel/trace v1.15.1 h1:uXLo6iHJEzDfrNC0L0mNjItIp06SyaBQxu5t3xMlngY= -go.opentelemetry.io/otel/trace v1.15.1/go.mod h1:IWdQG/5N1x7f6YUlmdLeJvH9yxtuJAfc4VW5Agv9r/8= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.0 h1:ZOLJc06r4CB42laIXg/7udr0pbZyuAihN10A/XuiQRY= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.0/go.mod h1:5z+/ZWJQKXa9YT34fQNx5K8Hd1EoIhvtUygUQPqEOgQ= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 h1:KfYpVmrjI7JuToy5k8XV3nkapjWx48k4E4JOtVstzQI= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0/go.mod h1:SeQhzAEccGVZVEy7aH87Nh0km+utSpo1pTv6eMMop48= +go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs= +go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 h1:3d+S281UTjM+AbF31XSOYn1qXn3BgIdWl8HNEpx08Jk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= +go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE= +go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= +go.opentelemetry.io/otel/sdk v1.19.0 h1:6USY6zH+L8uMH8L3t1enZPR3WFEmSTADlqldyHtJi3o= +go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A= +go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg= +go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v0.19.0 
h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= -go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= -go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= -go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= +go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= +go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= -go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= +go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= +go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -444,8 +463,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20220331220935-ae2d96664a29/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.8.0 h1:pd9TJtTueMTVQXzk8E2XESSMQDj/U7OUu0PqJqPXQjQ= -golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= +golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -456,8 +475,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20230425010034-47ecfdc1ba53 h1:5llv2sWeaMSnA3w2kS57ouQQ4pudlXrR0dCgw51QK9o= -golang.org/x/exp v0.0.0-20230425010034-47ecfdc1ba53/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w= +golang.org/x/exp v0.0.0-20230809150735-7b3493d9a819 h1:EDuYyU/MkFXllv9QF9819VlI9a4tzGuCbhG0ExK9o1U= +golang.org/x/exp v0.0.0-20230809150735-7b3493d9a819/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image 
v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -482,11 +501,11 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -507,6 +526,7 @@ golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= @@ -517,17 +537,15 @@ golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod 
h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220802222814-0bcc04d9c69b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM= -golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= +golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= +golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -537,9 +555,8 @@ golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= -golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= +golang.org/x/oauth2 v0.13.0 h1:jDDenyj+WgFtmV3zYVoi8aE2BwtXFLWOA67ZfNWftiY= +golang.org/x/oauth2 v0.13.0/go.mod h1:/JMhi4ZRXAf4HG9LiNmxvk+45+96RUlVThiH8FzNBn0= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -552,10 +569,10 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= -golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= +golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -564,7 +581,10 @@ golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -585,6 +605,7 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -593,41 +614,34 @@ golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220731174439-a90be440212d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys 
v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= -golang.org/x/term v0.8.0 h1:n5xxQn2i3PC0yLAbjTpNT85q/Kgzcr2gIoX9OrJUols= -golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= +golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -678,16 +692,16 @@ golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82u golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= 
golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= -golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y= -golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= +golang.org/x/tools v0.16.1 h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA= +golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -717,8 +731,9 @@ google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -757,10 +772,12 @@ google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 
h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= -google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= +google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97 h1:SeZZZx0cP0fqUyA+oRzP9k7cSwJlvDFiROO72uwD6i0= +google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97/go.mod h1:t1VqOqqvce95G3hIDCT5FeO3YUc6Q4Oe24L/+rNMxRk= +google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97 h1:W18sezcAYs+3tDZX4F80yctqa12jcP1PUS2gQu1zTPU= +google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97/go.mod h1:iargEX0SFPm3xcfMI0d1domjg0ZF4Aa0p2awqyxhvF0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231012201019-e917dd12ba7a h1:a2MQQVoTo96JC9PMGtGBymLp7+/RzpFc2yX/9WfFg1c= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231012201019-e917dd12ba7a/go.mod h1:4cYg8o5yUbm77w8ZX00LhMVNl/YVBFJRYWDc0uYWMs0= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -779,11 +796,9 @@ google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.55.0 h1:3Oj82/tFSCeUrRTg/5E/7d/W5A1tj6Ky1ABAuZuv5ag= -google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8= +google.golang.org/grpc v1.60.0 h1:6FQAR0kM31P6MRdeluor2w2gPaS4SVNrD/DNTxrQ15k= +google.golang.org/grpc v1.60.0/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -798,14 +813,14 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= -google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 
v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gcfg.v1 v1.2.3 h1:m8OOJ4ccYHnx2f4gQwpno8nAX5OGOh7RLaaz0pj3Ogs= gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/godo.v2 v2.0.9 h1:jnbznTzXVk0JDKOxN3/LJLDPYJzIl0734y+Z0cEJb4A= @@ -816,15 +831,17 @@ gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -834,53 +851,55 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.27.1 h1:Z6zUGQ1Vd10tJ+gHcNNNgkV5emCyW+v2XTmn+CLjSd0= -k8s.io/api v0.27.1/go.mod h1:z5g/BpAiD+f6AArpqNjkY+cji8ueZDU/WV1jcj5Jk4E= -k8s.io/apiextensions-apiserver v0.27.1 h1:Hp7B3KxKHBZ/FxmVFVpaDiXI6CCSr49P1OJjxKO6o4g= -k8s.io/apiextensions-apiserver v0.27.1/go.mod h1:8jEvRDtKjVtWmdkhOqE84EcNWJt/uwF8PC4627UZghY= -k8s.io/apimachinery v0.27.1 h1:EGuZiLI95UQQcClhanryclaQE6xjg1Bts6/L3cD7zyc= -k8s.io/apimachinery v0.27.1/go.mod h1:5ikh59fK3AJ287GUvpUsryoMFtH9zj/ARfWCo3AyXTM= -k8s.io/apiserver v0.27.1 h1:phY+BtXjjzd+ta3a4kYbomC81azQSLa1K8jo9RBw7Lg= -k8s.io/apiserver v0.27.1/go.mod h1:UGrOjLY2KsieA9Fw6lLiTObxTb8Z1xEba4uqSuMY0WU= -k8s.io/client-go v0.27.1 h1:oXsfhW/qncM1wDmWBIuDzRHNS2tLhK3BZv512Nc59W8= -k8s.io/client-go v0.27.1/go.mod h1:f8LHMUkVb3b9N8bWturc+EDtVVVwZ7ueTVquFAJb2vA= -k8s.io/cloud-provider v0.27.1 h1:482W9e2Yp8LDgTUKrXAxT+nH4pHS2TiBElI/CnfGWac= -k8s.io/cloud-provider v0.27.1/go.mod 
h1:oN7Zci2Ls2dorwSNd2fMiW/6DA40+F4o2QL70p63bqo= -k8s.io/component-base v0.27.1 h1:kEB8p8lzi4gCs5f2SPU242vOumHJ6EOsOnDM3tTuDTM= -k8s.io/component-base v0.27.1/go.mod h1:UGEd8+gxE4YWoigz5/lb3af3Q24w98pDseXcXZjw+E0= -k8s.io/component-helpers v0.27.1 h1:uY63v834MAHuf3fBiKGQGPq/cToU5kY5SW/58Xv0gl4= -k8s.io/component-helpers v0.27.1/go.mod h1:oOpwSYW1AdL+pU7abHADwX1ZcJl+5c8mnIkvoFZNFWA= -k8s.io/controller-manager v0.27.1 h1:+4OGWAzg4JVLEauPSmyQFIfrYrYQoUsC4MbHmRuPaFU= -k8s.io/controller-manager v0.27.1/go.mod h1:oe9vKl0RPiedlCXmeVbhkDV2yX8r7C4K/B8OGaKdYtY= -k8s.io/csi-translation-lib v0.27.1 h1:D9Hw2iBZzFPriFH0FDyUFdfflYAW6S032P6Yps9sKq8= -k8s.io/csi-translation-lib v0.27.1/go.mod h1:MyBDHVDz24OOSc4FdmSZA2nkfNu+Ysu8BqjdOAcKoT8= +k8s.io/api v0.29.1 h1:DAjwWX/9YT7NQD4INu49ROJuZAAAP/Ijki48GUPzxqw= +k8s.io/api v0.29.1/go.mod h1:7Kl10vBRUXhnQQI8YR/R327zXC8eJ7887/+Ybta+RoQ= +k8s.io/apiextensions-apiserver v0.29.1 h1:S9xOtyk9M3Sk1tIpQMu9wXHm5O2MX6Y1kIpPMimZBZw= +k8s.io/apiextensions-apiserver v0.29.1/go.mod h1:zZECpujY5yTW58co8V2EQR4BD6A9pktVgHhvc0uLfeU= +k8s.io/apimachinery v0.29.1 h1:KY4/E6km/wLBguvCZv8cKTeOwwOBqFNjwJIdMkMbbRc= +k8s.io/apimachinery v0.29.1/go.mod h1:6HVkd1FwxIagpYrHSwJlQqZI3G9LfYWRPAkUvLnXTKU= +k8s.io/apiserver v0.29.1 h1:e2wwHUfEmMsa8+cuft8MT56+16EONIEK8A/gpBSco+g= +k8s.io/apiserver v0.29.1/go.mod h1:V0EpkTRrJymyVT3M49we8uh2RvXf7fWC5XLB0P3SwRw= +k8s.io/client-go v0.29.1 h1:19B/+2NGEwnFLzt0uB5kNJnfTsbV8w6TgQRz9l7ti7A= +k8s.io/client-go v0.29.1/go.mod h1:TDG/psL9hdet0TI9mGyHJSgRkW3H9JZk2dNEUS7bRks= +k8s.io/cloud-provider v0.29.1 h1:bDLpOSpysWrtU2PCkvyP2sUTwRBa6MGCmxt68CRRW/8= +k8s.io/cloud-provider v0.29.1/go.mod h1:u50Drm6AbuoKpsVbAstNiFHGgbSVHuJV4TWN5imdM2w= +k8s.io/component-base v0.29.1 h1:MUimqJPCRnnHsskTTjKD+IC1EHBbRCVyi37IoFBrkYw= +k8s.io/component-base v0.29.1/go.mod h1:fP9GFjxYrLERq1GcWWZAE3bqbNcDKDytn2srWuHTtKc= +k8s.io/component-helpers v0.29.1 h1:54MMEDu6xeJmMtAKztsPwu0kJKr4+jCUzaEIn2UXRoc= +k8s.io/component-helpers v0.29.1/go.mod h1:+I7xz4kfUgxWAPJIVKrqe4ml4rb9UGpazlOmhXYo+cY= +k8s.io/controller-manager v0.29.1 h1:bTnJFF/OWooRVeJ4QLA1ApuPH+fjHSmcVMMeL7qvI2E= +k8s.io/controller-manager v0.29.1/go.mod h1:fVhGGuBiB0B2yT2+OHXZaA88owVn5zkv18A+G9E9Qlw= +k8s.io/csi-translation-lib v0.29.1 h1:b2tYZnnHyrQVHG6GYel7egmVvKeIlX/xbTNm9ynBSUg= +k8s.io/csi-translation-lib v0.29.1/go.mod h1:Zglui6PgFSew8ux50djwZ3PFK6eNrWktid66D7pHDDo= k8s.io/klog/v2 v2.70.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= -k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kms v0.27.1 h1:JTSQbJb+mcobScQwF0bOmZhIwP17k8GvBsiLlA6SQqw= -k8s.io/kms v0.27.1/go.mod h1:VuTsw0uHlSycKLCkypCGxfFCjLfzf/5YMeATECd/zJA= -k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f h1:2kWPakN3i/k81b0gvD5C5FJ2kxm1WrQFanWchyKuqGg= -k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f/go.mod h1:byini6yhqGC14c3ebc/QwanvYwhuMWF6yz2F8uwW8eg= -k8s.io/kubectl v0.27.1 h1:9T5c5KdpburYiW8XKQSH0Uly1kMNE90aGSnbYUZNdcA= -k8s.io/kubectl v0.27.1/go.mod h1:QsAkSmrRsKTPlAFzF8kODGDl4p35BIwQnc9XFhkcsy8= -k8s.io/kubernetes v1.26.3 h1:LtjNGNNpCTRyrWhDJMwTWDX+4h+GLwfULS8pu0xzSdk= -k8s.io/kubernetes v1.26.3/go.mod h1:NxzR7U7mS+OGa3J/qweI86Pek//mlfHqDgt6NNGdz8g= -k8s.io/mount-utils v0.27.1 h1:RSd0wslbIuwLRaGGNAGMZ3m9FLcvukxJ3FWlOm76W2A= -k8s.io/mount-utils v0.27.1/go.mod h1:vmcjYdi2Vg1VTWY7KkhvwJVY6WDHxb/QQhiQKkR8iNs= -k8s.io/pod-security-admission v0.27.1 h1:if4d1zzcpNOZNvljvJ0nTCshFPUmnkIsy7KYJg7FP08= -k8s.io/pod-security-admission 
v0.27.1/go.mod h1:dICAHAC4DE0q+yrGuPJ8kuJ5dEsWtqNkclzCDckHj/s= -k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 h1:qY1Ad8PODbnymg2pRbkyMT/ylpTrCM8P2RJ0yroCyIk= -k8s.io/utils v0.0.0-20230406110748-d93618cff8a2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= +k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= +k8s.io/kms v0.29.1 h1:6dMOaxllwiAZ8p3Hys65b78MDG+hONpBBpk1rQsaEtk= +k8s.io/kms v0.29.1/go.mod h1:Hqkx3zEGWThUTbcSkK508DUv4c1HOJOB5qihSoLBWgU= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= +k8s.io/kubectl v0.29.1 h1:rWnW3hi/rEUvvg7jp4iYB68qW5un/urKbv7fu3Vj0/s= +k8s.io/kubectl v0.29.1/go.mod h1:SZzvLqtuOJYSvZzPZR9weSuP0wDQ+N37CENJf0FhDF4= +k8s.io/kubelet v0.29.1 h1:cso8Dk8dymkj8q+EvW/aCbIYU2aOkH27gho48tYza/8= +k8s.io/kubelet v0.29.1/go.mod h1:hTl/naFcCVG1Ku17fMgj/krbheBwBkf3gnFhaboMx7E= +k8s.io/kubernetes v1.29.1 h1:fxJFVb8uqbYZDYHpwIsAndBQs360cQGb0xa1gYFh3fo= +k8s.io/kubernetes v1.29.1/go.mod h1:xZPKU0yO0CBbLTnbd+XGyRmmtmaVuJykDb8gNCkeeUE= +k8s.io/mount-utils v0.29.1 h1:veXlIm52Y4tm3H0pG03cOdkw0KOJxYDa0fQqhJCoqvQ= +k8s.io/mount-utils v0.29.1/go.mod h1:9IWJTMe8tG0MYMLEp60xK9GYVeCdA3g4LowmnVi+t9Y= +k8s.io/pod-security-admission v0.29.1 h1:PkIm6Di3Cd4cPmxSPeZhq7BLts5dq+xXyXbwCY67PIk= +k8s.io/pod-security-admission v0.29.1/go.mod h1:ecYSuWWsZbeM6shzommS6ZNVvQyr8sOJ9dUoGRt9gHM= +k8s.io/utils v0.0.0-20231127182322-b307cd553661 h1:FepOBzJ0GXm8t0su67ln2wAZjbQ6RxQGZDnzuLcrUTI= +k8s.io/utils v0.0.0-20231127182322-b307cd553661/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 h1:trsWhjU5jZrx6UvFu4WzQDrN7Pga4a7Qg+zcfcj64PA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2/go.mod h1:+qG7ISXqCDVVcyO8hLn12AKVYYUjM7ftlqsqmrhMZE0= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 h1:TgtAeesdhpm2SGwkQasmbeqDo8th5wOBA5h/AjTKA4I= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0/go.mod h1:VHVDI/KrK4fjnV61bE2g3sA7tiETLn8sooImelsCx3Y= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= -sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= -sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= software.sslmate.com/src/go-pkcs12 v0.2.0 h1:nlFkj7bTysH6VkC4fGphtjXRbezREPgrHuJG20hBGPE= software.sslmate.com/src/go-pkcs12 v0.2.0/go.mod 
h1:23rNcYsMabIc1otwLpTkCCPwUq6kQsTyowttG/as0kQ=
diff --git a/hack/bump-release.sh b/hack/bump-release.sh
new file mode 100755
index 00000000..0c1c2a15
--- /dev/null
+++ b/hack/bump-release.sh
@@ -0,0 +1,24 @@
+#!/bin/sh
+
+# Copyright 2023 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM_MAJOR="${1:?FROM_MAJOR (1st arg) not set or empty}"
+TO_MAJOR="${2:?TO_MAJOR (2nd arg) not set or empty}"
+TO_MINOR="${3:?TO_MINOR (3rd arg) not set or empty}"
+
+# example usage: hack/bump-release.sh 28 28 1
+# should replace 1.28.x with 1.28.1 / 2.28.x with 2.28.1
+
+find charts docs manifests tests examples -type f -exec sed -i -re 's/((ersion)?: ?v?)?([1-2]\.)'${FROM_MAJOR}'\.([0-9][0-9a-zA-Z.-]*)/\1\3'${TO_MAJOR}'.'${TO_MINOR}'/g' "{}" \;
diff --git a/hack/make.sh b/hack/make.sh
index a89488ea..a3c7a954 100755
--- a/hack/make.sh
+++ b/hack/make.sh
@@ -20,4 +20,4 @@
 # GOPATH and handle all of the Go ENV stuff for you. All you need is Docker
 docker run -it -v "$PWD":/go/src/k8s.io/cloud-provider-openstack:z \
 	-w /go/src/k8s.io/cloud-provider-openstack \
-	golang:1.13 make $1
+	golang:1.21.5 make $1
diff --git a/manifests/barbican-kms/ds.yaml b/manifests/barbican-kms/ds.yaml
index 86e1c910..bf197687 100644
--- a/manifests/barbican-kms/ds.yaml
+++ b/manifests/barbican-kms/ds.yaml
@@ -30,7 +30,7 @@ spec:
       serviceAccountName: cloud-controller-manager
       containers:
         - name: barbican-kms
-          image: registry.k8s.io/provider-os/barbican-kms-plugin:v1.27.0
+          image: registry.k8s.io/provider-os/barbican-kms-plugin:v1.29.0
           args:
             - /bin/barbican-kms-plugin
            - --socketpath=$(KMS_ENDPOINT)
diff --git a/manifests/barbican-kms/pod.yaml b/manifests/barbican-kms/pod.yaml
index 1dcb78bf..2ab64627 100644
--- a/manifests/barbican-kms/pod.yaml
+++ b/manifests/barbican-kms/pod.yaml
@@ -5,7 +5,7 @@ metadata:
 spec:
   containers:
     - name: barbican-kms
-      image: registry.k8s.io/provider-os/barbican-kms-plugin:v1.27.0
+      image: registry.k8s.io/provider-os/barbican-kms-plugin:v1.29.0
       args:
         - "--socketpath=/kms/kms.sock"
         - "--cloud-config=/etc/kubernetes/cloud-config"
diff --git a/manifests/controller-manager/openstack-cloud-controller-manager-ds.yaml b/manifests/controller-manager/openstack-cloud-controller-manager-ds.yaml
index 26579853..f5ddc2e0 100644
--- a/manifests/controller-manager/openstack-cloud-controller-manager-ds.yaml
+++ b/manifests/controller-manager/openstack-cloud-controller-manager-ds.yaml
@@ -38,7 +38,7 @@ spec:
       serviceAccountName: cloud-controller-manager
       containers:
         - name: openstack-cloud-controller-manager
-          image: registry.k8s.io/provider-os/openstack-cloud-controller-manager:v1.27.0
+          image: registry.k8s.io/provider-os/openstack-cloud-controller-manager:v1.29.0
           args:
             - /bin/openstack-cloud-controller-manager
            - --v=1
diff --git a/manifests/controller-manager/openstack-cloud-controller-manager-pod.yaml b/manifests/controller-manager/openstack-cloud-controller-manager-pod.yaml
index 9c66611e..dfcc05dc 100644
--- a/manifests/controller-manager/openstack-cloud-controller-manager-pod.yaml
b/manifests/controller-manager/openstack-cloud-controller-manager-pod.yaml @@ -11,7 +11,7 @@ metadata: spec: containers: - name: openstack-cloud-controller-manager - image: registry.k8s.io/provider-os/openstack-cloud-controller-manager:v1.27.0 + image: registry.k8s.io/provider-os/openstack-cloud-controller-manager:v1.29.0 args: - /bin/openstack-cloud-controller-manager - --v=1 diff --git a/manifests/magnum-auto-healer/magnum-auto-healer.yaml b/manifests/magnum-auto-healer/magnum-auto-healer.yaml index e8df56b4..9c422721 100644 --- a/manifests/magnum-auto-healer/magnum-auto-healer.yaml +++ b/manifests/magnum-auto-healer/magnum-auto-healer.yaml @@ -88,7 +88,7 @@ spec: node-role.kubernetes.io/control-plane: "" containers: - name: magnum-auto-healer - image: registry.k8s.io/provider-os/magnum-auto-healer:v1.27.0 + image: registry.k8s.io/provider-os/magnum-auto-healer:v1.29.0 imagePullPolicy: Always args: - /bin/magnum-auto-healer diff --git a/manifests/manila-csi-plugin/csi-controllerplugin.yaml b/manifests/manila-csi-plugin/csi-controllerplugin.yaml index e1430f37..a60e5234 100644 --- a/manifests/manila-csi-plugin/csi-controllerplugin.yaml +++ b/manifests/manila-csi-plugin/csi-controllerplugin.yaml @@ -60,7 +60,7 @@ spec: - name: plugin-dir mountPath: /var/lib/kubelet/plugins/manila.csi.openstack.org - name: resizer - image: "registry.k8s.io/sig-storage/csi-resizer:v1.3.0" + image: "registry.k8s.io/sig-storage/csi-resizer:v1.8.0" args: - "--csi-address=$(ADDRESS)" - "--handle-volume-inuse-error=false" @@ -77,7 +77,7 @@ spec: capabilities: add: ["SYS_ADMIN"] allowPrivilegeEscalation: true - image: registry.k8s.io/provider-os/manila-csi-plugin:v1.27.0 + image: registry.k8s.io/provider-os/manila-csi-plugin:v1.29.0 command: ["/bin/sh", "-c", '/bin/manila-csi-plugin --nodeid=$(NODE_ID) diff --git a/manifests/manila-csi-plugin/csi-nodeplugin.yaml b/manifests/manila-csi-plugin/csi-nodeplugin.yaml index 3017f7ff..afc0d66c 100644 --- a/manifests/manila-csi-plugin/csi-nodeplugin.yaml +++ b/manifests/manila-csi-plugin/csi-nodeplugin.yaml @@ -50,7 +50,7 @@ spec: capabilities: add: ["SYS_ADMIN"] allowPrivilegeEscalation: true - image: registry.k8s.io/provider-os/manila-csi-plugin:v1.27.0 + image: registry.k8s.io/provider-os/manila-csi-plugin:v1.29.0 command: ["/bin/sh", "-c", '/bin/manila-csi-plugin --nodeid=$(NODE_ID) diff --git a/pkg/autohealing/cloudprovider/openstack/provider.go b/pkg/autohealing/cloudprovider/openstack/provider.go index d579b092..70a27c9d 100644 --- a/pkg/autohealing/cloudprovider/openstack/provider.go +++ b/pkg/autohealing/cloudprovider/openstack/provider.go @@ -35,10 +35,12 @@ import ( "github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacks" "github.com/gophercloud/gophercloud/pagination" uuid "github.com/pborman/uuid" + apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" + "k8s.io/client-go/util/retry" log "k8s.io/klog/v2" "k8s.io/cloud-provider-openstack/pkg/autohealing/config" @@ -208,12 +210,12 @@ func (provider CloudProvider) waitForServerDetachVolumes(serverID string, timeou for _, attachment := range attachments { volume, err := volumes.Get(provider.Cinder, attachment.VolumeID).Extract() if err != nil { - return false, fmt.Errorf("failed to get volume %s, error: %s", attachment.VolumeID, err) + return false, fmt.Errorf("failed to get volume %s, error: %v", attachment.VolumeID, err) } bootable, err := strconv.ParseBool(volume.Bootable) if 
err != nil { - log.Warningf("Unexpected value for bootable volume %s in volume %s, error %s", volume.Bootable, volume, err) + log.Warningf("Unexpected value for bootable volume %s in volume %v, error %v", volume.Bootable, *volume, err) } log.Infof("volume %s is bootable %t", attachment.VolumeID, bootable) @@ -222,7 +224,7 @@ func (provider CloudProvider) waitForServerDetachVolumes(serverID string, timeou log.Infof("detaching volume %s for instance %s", attachment.VolumeID, serverID) err := volumeattach.Delete(provider.Nova, serverID, attachment.ID).ExtractErr() if err != nil { - return false, fmt.Errorf("failed to detach volume %s from instance %s, error: %s", attachment.VolumeID, serverID, err) + return false, fmt.Errorf("failed to detach volume %s from instance %s, error: %v", attachment.VolumeID, serverID, err) } } else { rootVolumeID = attachment.VolumeID @@ -288,15 +290,36 @@ func (provider CloudProvider) firstTimeRepair(n healthcheck.NodeInfo, serverID s // Uncordon the node if n.IsWorker { nodeName := n.KubeNode.Name - newNode := n.KubeNode.DeepCopy() - newNode.Spec.Unschedulable = false - if _, err := provider.KubeClient.CoreV1().Nodes().Update(context.TODO(), newNode, metav1.UpdateOptions{}); err != nil { - log.Errorf("Failed to cordon node %s, error: %v", nodeName, err) - } else { - log.Infof("Node %s is cordoned", nodeName) + // timeout for wait.Poll + ctx := context.Background() + errServiceUp := wait.PollUntilContextTimeout(ctx, 3*time.Second, provider.Config.RebuildDelayAfterReboot, false, + func(ctx context.Context) (bool, error) { + repairedNode, getErr := provider.KubeClient.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{}) + if getErr != nil { + log.Errorf("Failed to get node %s, error: %v", nodeName, getErr) + return false, getErr + } + if CheckNodeCondition(repairedNode, apiv1.NodeReady, apiv1.ConditionTrue) { + retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error { + // Retrieve the latest version of Node before attempting update + // RetryOnConflict uses exponential backoff to avoid exhausting the apiserver + repairedNode.Spec.Unschedulable = false + if _, updateErr := provider.KubeClient.CoreV1().Nodes().Update(ctx, repairedNode, metav1.UpdateOptions{}); updateErr != nil { + log.Warningf("Failed to uncordon node %s, error: %v", nodeName, updateErr) + return updateErr + } else { + log.Infof("Node %s is uncordoned", nodeName) + return nil + } + }) + return true, retryErr + } + return false, nil + }) + if errServiceUp != nil { + log.Infof("Reboot doesn't repair Node %s error: %v", nodeName, errServiceUp) } } - n.RebootAt = time.Now() firstTimeRebootNodes[serverID] = n unHealthyNodes[serverID] = n @@ -347,7 +370,7 @@ func (provider CloudProvider) Repair(nodes []healthcheck.NodeInfo) error { err := provider.UpdateHealthStatus(masters, workers) if err != nil { - return fmt.Errorf("failed to update the helath status of cluster %s, error: %v", clusterName, err) + return fmt.Errorf("failed to update the health status of cluster %s, error: %v", clusterName, err) } cluster, err := clusters.Get(provider.Magnum, clusterName).Extract() @@ -604,7 +627,7 @@ func (provider CloudProvider) Enabled() bool { } if _, isPresent := cluster.Labels[ClusterAutoHealingLabel]; !isPresent { - log.Infof("Autohealing is disalbed for cluster %s", clusterName) + log.Infof("Autohealing is disabled for cluster %s", clusterName) return false } autoHealingEnabled, err := strconv.ParseBool(cluster.Labels[ClusterAutoHealingLabel]) @@ -613,7 +636,7 @@ func (provider CloudProvider) 
Enabled() bool { return false } if !autoHealingEnabled { - log.Infof("Autohealing is disalbed for cluster %s", clusterName) + log.Infof("Autohealing is disabled for cluster %s", clusterName) return false } @@ -635,3 +658,16 @@ func (provider CloudProvider) Enabled() bool { return true } + +// CheckNodeCondition checks whether a node's condition list contains the given condition type and status +func CheckNodeCondition(node *apiv1.Node, conditionType apiv1.NodeConditionType, conditionStatus apiv1.ConditionStatus) bool { + if len(node.Status.Conditions) == 0 { + return false + } + for _, cond := range node.Status.Conditions { + if cond.Type == conditionType && cond.Status == conditionStatus { + return true + } + } + return false +} diff --git a/pkg/autohealing/cmd/root.go b/pkg/autohealing/cmd/root.go index 34158401..549107bc 100644 --- a/pkg/autohealing/cmd/root.go +++ b/pkg/autohealing/cmd/root.go @@ -35,6 +35,7 @@ import ( "k8s.io/cloud-provider-openstack/pkg/autohealing/config" "k8s.io/cloud-provider-openstack/pkg/autohealing/controller" + "k8s.io/cloud-provider-openstack/pkg/version" ) var ( @@ -82,6 +83,7 @@ var rootCmd = &cobra.Command{ signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM) <-sigCh }, + Version: version.Version, } // Execute adds all child commands to the root command sets flags appropriately. @@ -122,7 +124,7 @@ func initConfig() { // If a config file is found, read it in. if err := viper.ReadInConfig(); err != nil { - log.Fatalf("Failed to read config file, error: %s", err) + log.Fatalf("Failed to read config file, error: %v", err) } log.Infof("Using config file %s", viper.ConfigFileUsed()) diff --git a/pkg/autohealing/controller/controller.go b/pkg/autohealing/controller/controller.go index 0eeb72c1..c7f79f06 100644 --- a/pkg/autohealing/controller/controller.go +++ b/pkg/autohealing/controller/controller.go @@ -34,6 +34,7 @@ import ( "k8s.io/client-go/tools/clientcmd" "k8s.io/client-go/tools/leaderelection/resourcelock" "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/retry" log "k8s.io/klog/v2" "k8s.io/cloud-provider-openstack/pkg/autohealing/cloudprovider" @@ -221,8 +222,7 @@ func (c *Controller) GetLeaderElectionLock() (resourcelock.Interface, error) { id = id + "_" + string(uuid.NewUUID()) rl, err := resourcelock.New( - //TODO(acumino): Migrate configmapsleases to leases in vesrion 1.24. - resourcelock.ConfigMapsLeasesResourceLock, + resourcelock.LeasesResourceLock, leaderElectionResourceLockNamespace, leaderElectionResourceLockName, c.leaderElectionClient.CoreV1(), @@ -327,17 +327,31 @@ func (c *Controller) repairNodes(unhealthyNodes []healthcheck.NodeInfo) { // Cordon the nodes before repair.
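// NOTE: both this hunk and the firstTimeRepair hunk above replace a single
// optimistic Nodes().Update() with a get-modify-update cycle wrapped in
// retry.RetryOnConflict, so a concurrent write to the Node object no longer
// loses the (un)cordon. A minimal sketch of the pattern, assuming a
// kubernetes.Interface named kubeClient (illustrative, not part of the patch):
//
//	retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {
//		node, err := kubeClient.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
//		if err != nil {
//			return err // not a conflict: RetryOnConflict gives up immediately
//		}
//		node.Spec.Unschedulable = true // re-apply the change to the fresh object
//		_, err = kubeClient.CoreV1().Nodes().Update(ctx, node, metav1.UpdateOptions{})
//		return err // a Conflict error triggers another backoff iteration
//	})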
for _, node := range unhealthyNodes { nodeName := node.KubeNode.Name - newNode := node.KubeNode.DeepCopy() - newNode.Spec.Unschedulable = true // Skip cordon for master node if !node.IsWorker { continue } - if _, err := c.kubeClient.CoreV1().Nodes().Update(context.TODO(), newNode, metav1.UpdateOptions{}); err != nil { - log.Errorf("Failed to cordon node %s, error: %v", nodeName, err) - } else { - log.Infof("Node %s is cordoned", nodeName) + retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error { + // Retrieve the latest version of Node before attempting update + // RetryOnConflict uses exponential backoff to avoid exhausting the apiserver + newNode, err := c.kubeClient.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) + if err != nil { + log.Errorf("Failed to get node %s, error: %v before update", nodeName, err) + return err + } + newNode.Spec.Unschedulable = true + if _, updateErr := c.kubeClient.CoreV1().Nodes().Update(context.TODO(), newNode, metav1.UpdateOptions{}); updateErr != nil { + log.Warningf("Failed in retry to cordon node %s, error: %v", nodeName, updateErr) + return updateErr + } else { + log.Infof("Node %s is cordoned", nodeName) + return nil + } + }) + if retryErr != nil { + log.Errorf("Failed to cordon node %s, error: %v", nodeName, retryErr) + } } diff --git a/pkg/client/client.go b/pkg/client/client.go index 2b6434a4..721f548f 100644 --- a/pkg/client/client.go +++ b/pkg/client/client.go @@ -156,7 +156,7 @@ func (authOpts AuthOpts) ToAuthOptions() gophercloud.AuthOptions { ao, err := clientconfig.AuthOptions(&opts) if err != nil { - klog.V(1).Infof("Error parsing auth: %s", err) + klog.V(1).Infof("Error parsing auth: %v", err) return gophercloud.AuthOptions{} } @@ -256,7 +256,7 @@ func NewOpenStackClient(cfg *AuthOpts, userAgent string, extraUserAgent ...strin // read and parse CA certificate from file caPool, err = cert.NewPool(cfg.CAFile) if err != nil { - return nil, fmt.Errorf("failed to read and parse %s certificate: %s", cfg.CAFile, err) + return nil, fmt.Errorf("failed to read and parse %s certificate: %v", cfg.CAFile, err) } } else if cfg.CAFileContents != "" { // parse CA certificate from the contents @@ -277,7 +277,7 @@ func NewOpenStackClient(cfg *AuthOpts, userAgent string, extraUserAgent ...strin if cfg.CertFile != "" && cfg.KeyFile != "" { cert, err := tls.LoadX509KeyPair(cfg.CertFile, cfg.KeyFile) if err != nil { - return nil, fmt.Errorf("error loading TLS key pair: %s", err) + return nil, fmt.Errorf("error loading TLS key pair: %v", err) } config.Certificates = []tls.Certificate{cert} } diff --git a/pkg/csi/cinder/controllerserver.go b/pkg/csi/cinder/controllerserver.go index 1695e71c..110d3e3e 100644 --- a/pkg/csi/cinder/controllerserver.go +++ b/pkg/csi/cinder/controllerserver.go @@ -17,7 +17,6 @@ limitations under the License. 
package cinder import ( - "fmt" "strconv" "github.com/container-storage-interface/spec/lib/go/csi" @@ -69,16 +68,13 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol // Volume Type volType := req.GetParameters()["type"] - var volAvailability string - // First check if volAvailability is already specified, if not get preferred from Topology // Required, incase vol AZ is different from node AZ - volAvailability = req.GetParameters()["availability"] - - if len(volAvailability) == 0 { + volAvailability := req.GetParameters()["availability"] + if volAvailability == "" { // Check from Topology if req.GetAccessibilityRequirements() != nil { - volAvailability = getAZFromTopology(req.GetAccessibilityRequirements()) + volAvailability = util.GetAZFromTopology(topologyKey, req.GetAccessibilityRequirements()) } } @@ -89,7 +85,7 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol volumes, err := cloud.GetVolumesByName(volName) if err != nil { klog.Errorf("Failed to query for existing Volume during CreateVolume: %v", err) - return nil, status.Error(codes.Internal, fmt.Sprintf("Failed to get volumes: %s", err)) + return nil, status.Errorf(codes.Internal, "Failed to get volumes: %v", err) } if len(volumes) == 1 { @@ -142,7 +138,7 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol if err != nil { klog.Errorf("Failed to CreateVolume: %v", err) - return nil, status.Error(codes.Internal, fmt.Sprintf("CreateVolume failed with error %v", err)) + return nil, status.Errorf(codes.Internal, "CreateVolume failed with error %v", err) } @@ -166,7 +162,7 @@ func (cs *controllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVol return &csi.DeleteVolumeResponse{}, nil } klog.Errorf("Failed to DeleteVolume: %v", err) - return nil, status.Error(codes.Internal, fmt.Sprintf("DeleteVolume failed with error %v", err)) + return nil, status.Errorf(codes.Internal, "DeleteVolume failed with error %v", err) } klog.V(4).Infof("DeleteVolume: Successfully deleted volume %s", volID) @@ -197,7 +193,7 @@ func (cs *controllerServer) ControllerPublishVolume(ctx context.Context, req *cs if cpoerrors.IsNotFound(err) { return nil, status.Errorf(codes.NotFound, "[ControllerPublishVolume] Volume %s not found", volumeID) } - return nil, status.Error(codes.Internal, fmt.Sprintf("[ControllerPublishVolume] get volume failed with error %v", err)) + return nil, status.Errorf(codes.Internal, "[ControllerPublishVolume] get volume failed with error %v", err) } _, err = cs.Cloud.GetInstanceByID(instanceID) @@ -205,26 +201,26 @@ func (cs *controllerServer) ControllerPublishVolume(ctx context.Context, req *cs if cpoerrors.IsNotFound(err) { return nil, status.Errorf(codes.NotFound, "[ControllerPublishVolume] Instance %s not found", instanceID) } - return nil, status.Error(codes.Internal, fmt.Sprintf("[ControllerPublishVolume] GetInstanceByID failed with error %v", err)) + return nil, status.Errorf(codes.Internal, "[ControllerPublishVolume] GetInstanceByID failed with error %v", err) } _, err = cs.Cloud.AttachVolume(instanceID, volumeID) if err != nil { klog.Errorf("Failed to AttachVolume: %v", err) - return nil, status.Error(codes.Internal, fmt.Sprintf("[ControllerPublishVolume] Attach Volume failed with error %v", err)) + return nil, status.Errorf(codes.Internal, "[ControllerPublishVolume] Attach Volume failed with error %v", err) } err = cs.Cloud.WaitDiskAttached(instanceID, volumeID) if err != nil { klog.Errorf("Failed to WaitDiskAttached: %v", 
err) - return nil, status.Error(codes.Internal, fmt.Sprintf("[ControllerPublishVolume] failed to attach volume: %v", err)) + return nil, status.Errorf(codes.Internal, "[ControllerPublishVolume] failed to attach volume: %v", err) } devicePath, err := cs.Cloud.GetAttachmentDiskPath(instanceID, volumeID) if err != nil { klog.Errorf("Failed to GetAttachmentDiskPath: %v", err) - return nil, status.Error(codes.Internal, fmt.Sprintf("[ControllerPublishVolume] failed to get device path of attached volume : %v", err)) + return nil, status.Errorf(codes.Internal, "[ControllerPublishVolume] failed to get device path of attached volume: %v", err) } klog.V(4).Infof("ControllerPublishVolume %s on %s is successful", volumeID, instanceID) @@ -254,7 +250,7 @@ func (cs *controllerServer) ControllerUnpublishVolume(ctx context.Context, req * klog.V(3).Infof("ControllerUnpublishVolume assuming volume %s is detached, because node %s does not exist", volumeID, instanceID) return &csi.ControllerUnpublishVolumeResponse{}, nil } - return nil, status.Error(codes.Internal, fmt.Sprintf("[ControllerUnpublishVolume] GetInstanceByID failed with error %v", err)) + return nil, status.Errorf(codes.Internal, "[ControllerUnpublishVolume] GetInstanceByID failed with error %v", err) } err = cs.Cloud.DetachVolume(instanceID, volumeID) @@ -264,7 +260,7 @@ func (cs *controllerServer) ControllerUnpublishVolume(ctx context.Context, req * return &csi.ControllerUnpublishVolumeResponse{}, nil } klog.Errorf("Failed to DetachVolume: %v", err) - return nil, status.Error(codes.Internal, fmt.Sprintf("ControllerUnpublishVolume Detach Volume failed with error %v", err)) + return nil, status.Errorf(codes.Internal, "ControllerUnpublishVolume Detach Volume failed with error %v", err) } err = cs.Cloud.WaitDiskDetached(instanceID, volumeID) @@ -274,7 +270,7 @@ func (cs *controllerServer) ControllerUnpublishVolume(ctx context.Context, req * klog.V(3).Infof("ControllerUnpublishVolume assuming volume %s is detached, because it was deleted in the meanwhile", volumeID) return &csi.ControllerUnpublishVolumeResponse{}, nil } - return nil, status.Error(codes.Internal, fmt.Sprintf("ControllerUnpublishVolume failed with error %v", err)) + return nil, status.Errorf(codes.Internal, "ControllerUnpublishVolume failed with error %v", err) } klog.V(4).Infof("ControllerUnpublishVolume %s on %s", volumeID, instanceID) @@ -283,10 +279,10 @@ func (cs *controllerServer) ControllerUnpublishVolume(ctx context.Context, req * } func (cs *controllerServer) ListVolumes(ctx context.Context, req *csi.ListVolumesRequest) (*csi.ListVolumesResponse, error) { + klog.V(4).Infof("ListVolumes: called with %+#v request", req) if req.MaxEntries < 0 { - return nil, status.Error(codes.InvalidArgument, fmt.Sprintf( - "[ListVolumes] Invalid max entries request %v, must not be negative ", req.MaxEntries)) + return nil, status.Errorf(codes.InvalidArgument, "[ListVolumes] Invalid max entries request %v, must not be negative ", req.MaxEntries) } maxEntries := int(req.MaxEntries) @@ -296,10 +292,10 @@ func (cs *controllerServer) ListVolumes(ctx context.Context, req *csi.ListVolume if cpoerrors.IsInvalidError(err) { return nil, status.Errorf(codes.Aborted, "[ListVolumes] Invalid request: %v", err) } - return nil, status.Error(codes.Internal, fmt.Sprintf("ListVolumes failed with error %v", err)) + return nil, status.Errorf(codes.Internal, "ListVolumes failed with error %v", err) } - var ventries []*csi.ListVolumesResponse_Entry + ventries := make([]*csi.ListVolumesResponse_Entry, 0, len(vlist)) 
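// NOTE: ventries switches from a nil slice declaration to make() with zero
// length and capacity len(vlist), so the appends in the loop below never
// reallocate the backing array. The same preallocation idiom recurs across
// this patch (cinder's driver.go, manila's driver.go). A tiny self-contained
// sketch of the idiom:
//
//	entries := make([]string, 0, len(src)) // length 0, capacity len(src)
//	for _, s := range src {
//		entries = append(entries, s) // reuses the preallocated backing array
//	}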
for _, v := range vlist { ventry := csi.ListVolumesResponse_Entry{ Volume: &csi.Volume{ @@ -309,6 +305,7 @@ func (cs *controllerServer) ListVolumes(ctx context.Context, req *csi.ListVolume } status := &csi.ListVolumesResponse_VolumeStatus{} + status.PublishedNodeIds = make([]string, 0, len(v.Attachments)) for _, attachment := range v.Attachments { status.PublishedNodeIds = append(status.PublishedNodeIds, attachment.ServerID) } @@ -316,6 +313,8 @@ func (cs *controllerServer) ListVolumes(ctx context.Context, req *csi.ListVolume ventries = append(ventries, &ventry) } + + klog.V(4).Infof("ListVolumes: completed with %d entries and %q next token", len(ventries), nextPageToken) return &csi.ListVolumesResponse{ Entries: ventries, NextToken: nextPageToken, @@ -377,7 +376,7 @@ func (cs *controllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateS snap, err = cs.Cloud.CreateSnapshot(name, volumeID, &properties) if err != nil { klog.Errorf("Failed to Create snapshot: %v", err) - return nil, status.Error(codes.Internal, fmt.Sprintf("CreateSnapshot failed with error %v", err)) + return nil, status.Errorf(codes.Internal, "CreateSnapshot failed with error %v", err) } klog.V(3).Infof("CreateSnapshot %s from volume with ID: %s", name, volumeID) @@ -391,7 +390,7 @@ func (cs *controllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateS err = cs.Cloud.WaitSnapshotReady(snap.ID) if err != nil { klog.Errorf("Failed to WaitSnapshotReady: %v", err) - return nil, status.Error(codes.Internal, fmt.Sprintf("CreateSnapshot failed with error %v", err)) + return nil, status.Errorf(codes.Internal, "CreateSnapshot failed with error %v", err) } return &csi.CreateSnapshotResponse{ @@ -422,7 +421,7 @@ func (cs *controllerServer) DeleteSnapshot(ctx context.Context, req *csi.DeleteS return &csi.DeleteSnapshotResponse{}, nil } klog.Errorf("Failed to Delete snapshot: %v", err) - return nil, status.Error(codes.Internal, fmt.Sprintf("DeleteSnapshot failed with error %v", err)) + return nil, status.Errorf(codes.Internal, "DeleteSnapshot failed with error %v", err) } return &csi.DeleteSnapshotResponse{}, nil } @@ -437,7 +436,7 @@ func (cs *controllerServer) ListSnapshots(ctx context.Context, req *csi.ListSnap klog.V(3).Infof("Snapshot %s not found", snapshotID) return &csi.ListSnapshotsResponse{}, nil } - return nil, status.Errorf(codes.Internal, "Failed to GetSnapshot %s : %v", snapshotID, err) + return nil, status.Errorf(codes.Internal, "Failed to GetSnapshot %s: %v", snapshotID, err) } ctime := timestamppb.New(snap.CreatedAt) @@ -481,7 +480,7 @@ func (cs *controllerServer) ListSnapshots(ctx context.Context, req *csi.ListSnap return nil, status.Errorf(codes.Internal, "ListSnapshots failed with error %v", err) } - var sentries []*csi.ListSnapshotsResponse_Entry + sentries := make([]*csi.ListSnapshotsResponse_Entry, 0, len(slist)) for _, v := range slist { ctime := timestamppb.New(v.CreatedAt) if err := ctime.CheckValid(); err != nil { @@ -531,14 +530,14 @@ func (cs *controllerServer) ValidateVolumeCapabilities(ctx context.Context, req _, err := cs.Cloud.GetVolume(volumeID) if err != nil { if cpoerrors.IsNotFound(err) { - return nil, status.Error(codes.NotFound, fmt.Sprintf("ValidateVolumeCapabiltites Volume %s not found", volumeID)) + return nil, status.Errorf(codes.NotFound, "ValidateVolumeCapabilities Volume %s not found", volumeID) } - return nil, status.Error(codes.Internal, fmt.Sprintf("ValidateVolumeCapabiltites %v", err)) + return nil, status.Errorf(codes.Internal, "ValidateVolumeCapabilities %v", err) } 
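// NOTE: the recurring rewrite in this file folds
// status.Error(code, fmt.Sprintf(format, args...)) into the equivalent
// status.Errorf(code, format, args...) from google.golang.org/grpc/status,
// which is why the fmt import was dropped at the top of the file.
// Illustrative before/after:
//
//	return nil, status.Error(codes.Internal, fmt.Sprintf("GetVolume failed with error %v", err))
//	return nil, status.Errorf(codes.Internal, "GetVolume failed with error %v", err)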
for _, cap := range reqVolCap { if cap.GetAccessMode().GetMode() != cs.Driver.vcap[0].Mode { - return &csi.ValidateVolumeCapabilitiesResponse{Message: "Requested Volume Capabilty not supported"}, nil + return &csi.ValidateVolumeCapabilitiesResponse{Message: "Requested Volume Capability not supported"}, nil } } @@ -574,7 +573,7 @@ func (cs *controllerServer) ControllerGetVolume(ctx context.Context, req *csi.Co if cpoerrors.IsNotFound(err) { return nil, status.Errorf(codes.NotFound, "Volume %s not found", volumeID) } - return nil, status.Error(codes.Internal, fmt.Sprintf("ControllerGetVolume failed with error %v", err)) + return nil, status.Errorf(codes.Internal, "ControllerGetVolume failed with error %v", err) } ventry := csi.ControllerGetVolumeResponse{ @@ -585,6 +584,7 @@ func (cs *controllerServer) ControllerGetVolume(ctx context.Context, req *csi.Co } status := &csi.ControllerGetVolumeResponse_VolumeStatus{} + status.PublishedNodeIds = make([]string, 0, len(volume.Attachments)) for _, attachment := range volume.Attachments { status.PublishedNodeIds = append(status.PublishedNodeIds, attachment.ServerID) } @@ -618,7 +618,7 @@ func (cs *controllerServer) ControllerExpandVolume(ctx context.Context, req *csi if cpoerrors.IsNotFound(err) { return nil, status.Error(codes.NotFound, "Volume not found") } - return nil, status.Error(codes.Internal, fmt.Sprintf("GetVolume failed with error %v", err)) + return nil, status.Errorf(codes.Internal, "GetVolume failed with error %v", err) } if volume.Size >= volSizeGB { @@ -632,7 +632,7 @@ func (cs *controllerServer) ControllerExpandVolume(ctx context.Context, req *csi err = cs.Cloud.ExpandVolume(volumeID, volume.Status, volSizeGB) if err != nil { - return nil, status.Errorf(codes.Internal, fmt.Sprintf("Could not resize volume %q to size %v: %v", volumeID, volSizeGB, err)) + return nil, status.Errorf(codes.Internal, "Could not resize volume %q to size %v: %v", volumeID, volSizeGB, err) } // we need wait for the volume to be available or InUse, it might be error_extending in some scenario @@ -640,7 +640,7 @@ func (cs *controllerServer) ControllerExpandVolume(ctx context.Context, req *csi err = cs.Cloud.WaitVolumeTargetStatus(volumeID, targetStatus) if err != nil { klog.Errorf("Failed to WaitVolumeTargetStatus of volume %s: %v", volumeID, err) - return nil, status.Error(codes.Internal, fmt.Sprintf("[ControllerExpandVolume] Volume %s not in target state after resize operation : %v", volumeID, err)) + return nil, status.Errorf(codes.Internal, "[ControllerExpandVolume] Volume %s not in target state after resize operation: %v", volumeID, err) } klog.V(4).Infof("ControllerExpandVolume resized volume %v to size %v", volumeID, volSizeGB) @@ -651,23 +651,6 @@ func (cs *controllerServer) ControllerExpandVolume(ctx context.Context, req *csi }, nil } -func getAZFromTopology(requirement *csi.TopologyRequirement) string { - for _, topology := range requirement.GetPreferred() { - zone, exists := topology.GetSegments()[topologyKey] - if exists { - return zone - } - } - - for _, topology := range requirement.GetRequisite() { - zone, exists := topology.GetSegments()[topologyKey] - if exists { - return zone - } - } - return "" -} - func getCreateVolumeResponse(vol *volumes.Volume, ignoreVolumeAZ bool, accessibleTopologyReq *csi.TopologyRequirement) *csi.CreateVolumeResponse { var volsrc *csi.VolumeContentSource diff --git a/pkg/csi/cinder/controllerserver_test.go b/pkg/csi/cinder/controllerserver_test.go index 7032d9aa..afa0b02c 100644 --- 
a/pkg/csi/cinder/controllerserver_test.go +++ b/pkg/csi/cinder/controllerserver_test.go @@ -441,7 +441,9 @@ func TestListVolumes(t *testing.T) { VolumeId: FakeVol3.ID, CapacityBytes: int64(FakeVol3.Size * 1024 * 1024 * 1024), }, - Status: &csi.ListVolumesResponse_VolumeStatus{}, + Status: &csi.ListVolumesResponse_VolumeStatus{ + PublishedNodeIds: []string{}, + }, }, }, NextToken: "", @@ -653,18 +655,18 @@ func TestValidateVolumeCapabilities(t *testing.T) { }, } - expectedRes2 := &csi.ValidateVolumeCapabilitiesResponse{Message: "Requested Volume Capabilty not supported"} + expectedRes2 := &csi.ValidateVolumeCapabilitiesResponse{Message: "Requested Volume Capability not supported"} - // Invoke ValidateVolumeCapabilties + // Invoke ValidateVolumeCapabilities actualRes, err := fakeCs.ValidateVolumeCapabilities(FakeCtx, fakereq) if err != nil { - t.Errorf("failed to ValidateVolumeCapabilties: %v", err) + t.Errorf("failed to ValidateVolumeCapabilities: %v", err) } actualRes2, err := fakeCs.ValidateVolumeCapabilities(FakeCtx, fakereq2) if err != nil { - t.Errorf("failed to ValidateVolumeCapabilties: %v", err) + t.Errorf("failed to ValidateVolumeCapabilities: %v", err) } // assert diff --git a/pkg/csi/cinder/driver.go b/pkg/csi/cinder/driver.go index 10d37ccf..9b5d0640 100644 --- a/pkg/csi/cinder/driver.go +++ b/pkg/csi/cinder/driver.go @@ -56,7 +56,7 @@ const ( var ( // CSI spec version - specVersion = "1.3.0" + specVersion = "1.8.0" // Driver version // Version history: @@ -123,7 +123,10 @@ func NewDriver(endpoint, cluster string) *Driver { csi.ControllerServiceCapability_RPC_LIST_VOLUMES_PUBLISHED_NODES, csi.ControllerServiceCapability_RPC_GET_VOLUME, }) - d.AddVolumeCapabilityAccessModes([]csi.VolumeCapability_AccessMode_Mode{csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER}) + d.AddVolumeCapabilityAccessModes( + []csi.VolumeCapability_AccessMode_Mode{ + csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }) // ignoring error, because AddNodeServiceCapabilities is public // and so potentially used somewhere else. 
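// NOTE: AddVolumeCapabilityAccessModes (restructured in the hunks below) both
// stores the registered modes on d.vcap and returns them; NewDriver registers
// only SINGLE_NODE_WRITER, and ValidateVolumeCapabilities compares requests
// against d.vcap[0].Mode. A hedged sketch of that check (illustrative):
//
//	supported := true
//	for _, c := range req.GetVolumeCapabilities() {
//		if c.GetAccessMode().GetMode() != d.vcap[0].Mode {
//			supported = false // only the registered access mode is accepted
//		}
//	}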
@@ -138,7 +141,7 @@ func NewDriver(endpoint, cluster string) *Driver { } func (d *Driver) AddControllerServiceCapabilities(cl []csi.ControllerServiceCapability_RPC_Type) { - var csc []*csi.ControllerServiceCapability + csc := make([]*csi.ControllerServiceCapability, 0, len(cl)) for _, c := range cl { klog.Infof("Enabling controller service capability: %v", c.String()) @@ -149,22 +152,28 @@ func (d *Driver) AddControllerServiceCapabilities(cl []csi.ControllerServiceCapa } func (d *Driver) AddVolumeCapabilityAccessModes(vc []csi.VolumeCapability_AccessMode_Mode) []*csi.VolumeCapability_AccessMode { - var vca []*csi.VolumeCapability_AccessMode + vca := make([]*csi.VolumeCapability_AccessMode, 0, len(vc)) + for _, c := range vc { klog.Infof("Enabling volume access mode: %v", c.String()) vca = append(vca, NewVolumeCapabilityAccessMode(c)) } + d.vcap = vca + return vca } func (d *Driver) AddNodeServiceCapabilities(nl []csi.NodeServiceCapability_RPC_Type) error { - var nsc []*csi.NodeServiceCapability + nsc := make([]*csi.NodeServiceCapability, 0, len(nl)) + for _, n := range nl { klog.Infof("Enabling node service capability: %v", n.String()) nsc = append(nsc, NewNodeServiceCapability(n)) } + d.nscap = nsc + return nil } @@ -178,6 +187,7 @@ func (d *Driver) ValidateControllerServiceRequest(c csi.ControllerServiceCapabil return nil } } + return status.Error(codes.InvalidArgument, c.String()) } diff --git a/pkg/csi/cinder/nodeserver.go b/pkg/csi/cinder/nodeserver.go index 3fc2721c..8b45eda5 100644 --- a/pkg/csi/cinder/nodeserver.go +++ b/pkg/csi/cinder/nodeserver.go @@ -86,6 +86,7 @@ func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis ephemeralVolume := req.GetVolumeContext()["csi.storage.k8s.io/ephemeral"] == "true" if ephemeralVolume { + // See https://github.com/kubernetes/cloud-provider-openstack/issues/1493 klog.Warningf("CSI inline ephemeral volumes support is deprecated in 1.24 release.") return nodePublishEphemeral(ctx, req, ns) } @@ -99,7 +100,7 @@ func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis if cpoerrors.IsNotFound(err) { return nil, status.Error(codes.NotFound, "Volume not found") } - return nil, status.Error(codes.Internal, fmt.Sprintf("GetVolume failed with error %v", err)) + return nil, status.Errorf(codes.Internal, "GetVolume failed with error %v", err) } mountOptions := []string{"bind"} @@ -151,7 +152,7 @@ func nodePublishEphemeral(ctx context.Context, req *csi.NodePublishVolumeRequest volAvailability, err := ns.Metadata.GetAvailabilityZone() if err != nil { - return nil, status.Error(codes.Internal, fmt.Sprintf("retrieving availability zone from MetaData service failed with error %v", err)) + return nil, status.Errorf(codes.Internal, "retrieving availability zone from MetaData service failed with error %v", err) } size = 1 // default size is 1GB @@ -159,7 +160,7 @@ func nodePublishEphemeral(ctx context.Context, req *csi.NodePublishVolumeRequest size, err = strconv.Atoi(strings.TrimSuffix(capacity, "Gi")) if err != nil { klog.V(3).Infof("Unable to parse capacity: %v", err) - return nil, status.Error(codes.Internal, fmt.Sprintf("Unable to parse capacity %v", err)) + return nil, status.Errorf(codes.Internal, "Unable to parse capacity %v", err) } } @@ -173,7 +174,7 @@ func nodePublishEphemeral(ctx context.Context, req *csi.NodePublishVolumeRequest if err != nil { klog.V(3).Infof("Failed to Create Ephemeral Volume: %v", err) - return nil, status.Error(codes.Internal, fmt.Sprintf("Failed to create Ephemeral Volume %v", 
err)) + return nil, status.Errorf(codes.Internal, "Failed to create Ephemeral Volume %v", err) } // Wait for volume status to be Available, before attaching @@ -181,7 +182,7 @@ func nodePublishEphemeral(ctx context.Context, req *csi.NodePublishVolumeRequest targetStatus := []string{openstack.VolumeAvailableStatus} err := ns.Cloud.WaitVolumeTargetStatus(evol.ID, targetStatus) if err != nil { - return nil, status.Errorf(codes.Internal, err.Error()) + return nil, status.Error(codes.Internal, err.Error()) } } @@ -198,7 +199,7 @@ func nodePublishEphemeral(ctx context.Context, req *csi.NodePublishVolumeRequest _, err = ns.Cloud.AttachVolume(nodeID, evol.ID) if err != nil { - msg := "nodePublishEphemeral: attach volume %s failed with error : %v" + msg := "nodePublishEphemeral: attach volume %s failed with error: %v" klog.V(3).Infof(msg, evol.ID, err) return nil, status.Errorf(codes.Internal, msg, evol.ID, err) } @@ -212,7 +213,7 @@ func nodePublishEphemeral(ctx context.Context, req *csi.NodePublishVolumeRequest devicePath, err := getDevicePath(evol.ID, m) if err != nil { - return nil, status.Error(codes.Internal, fmt.Sprintf("Unable to find Device path for volume: %v", err)) + return nil, status.Errorf(codes.Internal, "Unable to find Device path for volume: %v", err) } targetPath := req.GetTargetPath() @@ -308,7 +309,7 @@ func (ns *nodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpu if err != nil { if !cpoerrors.IsNotFound(err) { - return nil, status.Error(codes.Internal, fmt.Sprintf("GetVolume failed with error %v", err)) + return nil, status.Errorf(codes.Internal, "GetVolume failed with error %v", err) } // if not found by id, try to search by name @@ -318,13 +319,13 @@ func (ns *nodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpu //if volume not found then GetVolumesByName returns empty list if err != nil { - return nil, status.Error(codes.Internal, fmt.Sprintf("GetVolume failed with error %v", err)) + return nil, status.Errorf(codes.Internal, "GetVolume failed with error %v", err) } if len(vols) > 0 { vol = &vols[0] ephemeralVolume = true } else { - return nil, status.Error(codes.NotFound, fmt.Sprintf("Volume not found %s", volName)) + return nil, status.Errorf(codes.NotFound, "Volume not found %s", volName) } } @@ -348,7 +349,7 @@ func nodeUnpublishEphemeral(req *csi.NodeUnpublishVolumeRequest, ns *nodeServer, if len(vol.Attachments) > 0 { instanceID = vol.Attachments[0].ServerID } else { - return nil, status.Error(codes.FailedPrecondition, "Volume attachement not found in request") + return nil, status.Error(codes.FailedPrecondition, "Volume attachment not found in request") } // [Edgeless] Unmap the crypt device so we can properly remove the device from the node @@ -400,14 +401,14 @@ func (ns *nodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVol if cpoerrors.IsNotFound(err) { return nil, status.Error(codes.NotFound, "Volume not found") } - return nil, status.Error(codes.Internal, fmt.Sprintf("GetVolume failed with error %v", err)) + return nil, status.Errorf(codes.Internal, "GetVolume failed with error %v", err) } m := ns.Mount // Do not trust the path provided by cinder, get the real path on node devicePath, err := getDevicePath(volumeID, m) if err != nil { - return nil, status.Error(codes.Internal, fmt.Sprintf("Unable to find Device path for volume: %v", err)) + return nil, status.Errorf(codes.Internal, "Unable to find Device path for volume: %v", err) } // Verify whether mounted @@ -466,7 +467,7 @@ func (ns *nodeServer) 
NodeStageVolume(ctx context.Context, req *csi.NodeStageVol if needResize { klog.V(4).Infof("NodeStageVolume: Resizing volume %q created from a snapshot/volume", volumeID) if _, err := r.Resize(devicePath, stagingTarget); err != nil { - return nil, status.Errorf(codes.Internal, "Could not resize volume %q: %v", volumeID, err) + return nil, status.Errorf(codes.Internal, "Could not resize volume %q: %v", volumeID, err) } } } @@ -493,7 +494,7 @@ func (ns *nodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstag klog.V(4).Infof("NodeUnstageVolume: Unable to find volume: %v", err) return nil, status.Error(codes.NotFound, "Volume not found") } - return nil, status.Error(codes.Internal, fmt.Sprintf("GetVolume failed with error %v", err)) + return nil, status.Errorf(codes.Internal, "GetVolume failed with error %v", err) } err = ns.Mount.UnmountPath(stagingTargetPath) @@ -513,12 +514,12 @@ func (ns *nodeServer) NodeGetInfo(ctx context.Context, req *csi.NodeGetInfoReque nodeID, err := ns.Metadata.GetInstanceID() if err != nil { - return nil, status.Error(codes.Internal, fmt.Sprintf("[NodeGetInfo] unable to retrieve instance id of node %v", err)) + return nil, status.Errorf(codes.Internal, "[NodeGetInfo] unable to retrieve instance id of node %v", err) } zone, err := ns.Metadata.GetAvailabilityZone() if err != nil { - return nil, status.Error(codes.Internal, fmt.Sprintf("[NodeGetInfo] Unable to retrieve availability zone of node %v", err)) + return nil, status.Errorf(codes.Internal, "[NodeGetInfo] Unable to retrieve availability zone of node %v", err) } topology := &csi.Topology{Segments: map[string]string{topologyKey: zone}} @@ -554,14 +555,14 @@ func (ns *nodeServer) NodeGetVolumeStats(_ context.Context, req *csi.NodeGetVolu exists, err := utilpath.Exists(utilpath.CheckFollowSymlink, req.VolumePath) if err != nil { - return nil, status.Errorf(codes.Internal, "failed to check whether volumePath exists: %s", err) + return nil, status.Errorf(codes.Internal, "failed to check whether volumePath exists: %v", err) } if !exists { return nil, status.Errorf(codes.NotFound, "target: %s not found", volumePath) } stats, err := ns.Mount.GetDeviceStats(volumePath) if err != nil { - return nil, status.Errorf(codes.Internal, "failed to get stats by path: %s", err) + return nil, status.Errorf(codes.Internal, "failed to get stats by path: %v", err) } if stats.Block { @@ -598,9 +599,9 @@ func (ns *nodeServer) NodeExpandVolume(ctx context.Context, req *csi.NodeExpandV _, err := ns.Cloud.GetVolume(volumeID) if err != nil { if cpoerrors.IsNotFound(err) { - return nil, status.Error(codes.NotFound, fmt.Sprintf("Volume with ID %s not found", volumeID)) + return nil, status.Errorf(codes.NotFound, "Volume with ID %s not found", volumeID) } - return nil, status.Error(codes.Internal, fmt.Sprintf("NodeExpandVolume failed with error %v", err)) + return nil, status.Errorf(codes.Internal, "NodeExpandVolume failed with error %v", err) } // [Edgeless] Resize LUKS partition @@ -620,7 +621,7 @@ func (ns *nodeServer) NodeExpandVolume(ctx context.Context, req *csi.NodeExpandV r := mountutil.NewResizeFs(ns.Mount.Mounter().Exec) if _, err := r.Resize(devicePath, volumePath); err != nil { - return nil, status.Errorf(codes.Internal, "Could not resize volume %q: %v", volumeID, err) + return nil, status.Errorf(codes.Internal, "Could not resize volume %q: %v", volumeID, err) } return &csi.NodeExpandVolumeResponse{}, nil } diff --git a/pkg/csi/cinder/openstack/openstack_snapshots.go b/pkg/csi/cinder/openstack/openstack_snapshots.go 
index ca4fdc5b..6337e531 100644 --- a/pkg/csi/cinder/openstack/openstack_snapshots.go +++ b/pkg/csi/cinder/openstack/openstack_snapshots.go @@ -119,11 +119,11 @@ func (os *OpenStack) ListSnapshots(filters map[string]string) ([]snapshots.Snaps } if nextPageURL != "" { - queryParams, err := url.ParseQuery(nextPageURL) + pageURL, err := url.Parse(nextPageURL) if err != nil { return false, err } - nextPageToken = queryParams.Get("marker") + nextPageToken = pageURL.Query().Get("marker") } return false, nil @@ -173,7 +173,7 @@ func (os *OpenStack) WaitSnapshotReady(snapshotID string) error { }) if wait.Interrupted(err) { - err = fmt.Errorf("Timeout, Snapshot %s is still not Ready %v", snapshotID, err) + err = fmt.Errorf("timeout, Snapshot %s is still not Ready %v", snapshotID, err) } return err diff --git a/pkg/csi/cinder/openstack/openstack_volumes.go b/pkg/csi/cinder/openstack/openstack_volumes.go index 79372196..9ef45b45 100644 --- a/pkg/csi/cinder/openstack/openstack_volumes.go +++ b/pkg/csi/cinder/openstack/openstack_volumes.go @@ -96,11 +96,11 @@ func (os *OpenStack) ListVolumes(limit int, startingToken string) ([]volumes.Vol } if nextPageURL != "" { - queryParams, err := url.ParseQuery(nextPageURL) + pageURL, err := url.Parse(nextPageURL) if err != nil { return false, err } - nextPageToken = queryParams.Get("marker") + nextPageToken = pageURL.Query().Get("marker") } return false, nil @@ -206,7 +206,7 @@ func (os *OpenStack) AttachVolume(instanceID, volumeID string) (string, error) { return volume.ID, nil } -// WaitDiskAttached waits for attched +// WaitDiskAttached waits for attached func (os *OpenStack) WaitDiskAttached(instanceID string, volumeID string) error { backoff := wait.Backoff{ Duration: diskAttachInitDelay, @@ -348,7 +348,7 @@ func (os *OpenStack) ExpandVolume(volumeID string, status string, newSize int) e switch status { case VolumeInUseStatus: - // If the user has disabled the use of microversion to be compatibale with + // If the user has disabled the use of microversion to be compatible with // older clouds, we should fail early if os.bsOpts.IgnoreVolumeMicroversion { return fmt.Errorf("volume online resize is not available with ignore-volume-microversion, requires microversion 3.42 or newer") diff --git a/pkg/csi/cinder/server.go b/pkg/csi/cinder/server.go index 244b96b6..55933f14 100644 --- a/pkg/csi/cinder/server.go +++ b/pkg/csi/cinder/server.go @@ -69,6 +69,7 @@ func (s *nonBlockingGRPCServer) ForceStop() { } func (s *nonBlockingGRPCServer) serve(endpoint string, ids csi.IdentityServer, cs csi.ControllerServer, ns csi.NodeServer) { + defer s.wg.Done() proto, addr, err := ParseEndpoint(endpoint) if err != nil { @@ -78,7 +79,7 @@ func (s *nonBlockingGRPCServer) serve(endpoint string, ids csi.IdentityServer, c if proto == "unix" { addr = "/" + addr if err := os.Remove(addr); err != nil && !os.IsNotExist(err) { - klog.Fatalf("Failed to remove %s, error: %s", addr, err.Error()) + klog.Fatalf("Failed to remove %s, error: %v", addr, err) } } diff --git a/pkg/csi/cinder/server_test.go b/pkg/csi/cinder/server_test.go new file mode 100644 index 00000000..c14b7117 --- /dev/null +++ b/pkg/csi/cinder/server_test.go @@ -0,0 +1,55 @@ +package cinder + +import ( + "net" + "testing" + "time" + + "github.com/stretchr/testify/require" + "go.uber.org/goleak" +) + +func TestServerReleaseWaitAfterStopCalling(t *testing.T) { + defer goleak.VerifyNone(t) + + var ( + server = NewNonBlockingGRPCServer() + ch = make(chan struct{}) + ) + server.Start(FakeEndpoint, nil, nil, nil) + + go func() { 
+ server.Wait() + }() + + _, address, err := ParseEndpoint(FakeEndpoint) + require.NoError(t, err) + + // this loop is needed to wait for the server to start up + timer := time.NewTimer(2 * time.Second) + defer timer.Stop() + for { + select { + case <-timer.C: + require.Fail(t, "server did not start") + default: + } + + conn, err := net.DialTimeout("tcp", address, 200*time.Millisecond) + if err != nil { + continue + } + if conn == nil { + continue + } + _ = conn.Close() + break + } + + go func() { + server.Stop() + close(ch) + }() + + <-ch +} diff --git a/pkg/csi/manila/capabilities/manilacapabilities.go b/pkg/csi/manila/capabilities/manilacapabilities.go deleted file mode 100644 index a665e624..00000000 --- a/pkg/csi/manila/capabilities/manilacapabilities.go +++ /dev/null @@ -1,70 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package capabilities - -import ( - "fmt" - "strconv" - - "k8s.io/cloud-provider-openstack/pkg/csi/manila/manilaclient" -) - -type ( - ManilaCapability int - ManilaCapabilities map[ManilaCapability]bool -) - -const ( - ManilaCapabilityNone ManilaCapability = iota - ManilaCapabilitySnapshot - ManilaCapabilityShareFromSnapshot - - extraSpecSnapshotSupport = "snapshot_support" - extraSpecCreateShareFromSnapshotSupport = "create_share_from_snapshot_support" -) - -func GetManilaCapabilities(shareType string, manilaClient manilaclient.Interface) (ManilaCapabilities, error) { - shareTypes, err := manilaClient.GetShareTypes() - if err != nil { - return nil, err - } - - for _, t := range shareTypes { - if t.Name == shareType || t.ID == shareType { - return readManilaCaps(t.ExtraSpecs), nil - } - } - - return nil, fmt.Errorf("unknown share type %s", shareType) -} - -func readManilaCaps(extraSpecs map[string]interface{}) ManilaCapabilities { - strToBool := func(ss interface{}) bool { - var b bool - if ss != nil { - if str, ok := ss.(string); ok { - b, _ = strconv.ParseBool(str) - } - } - return b - } - - return ManilaCapabilities{ - ManilaCapabilitySnapshot: strToBool(extraSpecs[extraSpecSnapshotSupport]), - ManilaCapabilityShareFromSnapshot: strToBool(extraSpecs[extraSpecCreateShareFromSnapshotSupport]), - } -} diff --git a/pkg/csi/manila/compatibility/compatibility.go b/pkg/csi/manila/compatibility/compatibility.go deleted file mode 100644 index f588ae46..00000000 --- a/pkg/csi/manila/compatibility/compatibility.go +++ /dev/null @@ -1,44 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License.
-*/ - -package compatibility - -import ( - "github.com/container-storage-interface/spec/lib/go/csi" - "github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/shares" - "k8s.io/cloud-provider-openstack/pkg/csi/manila/capabilities" - "k8s.io/cloud-provider-openstack/pkg/csi/manila/csiclient" - "k8s.io/cloud-provider-openstack/pkg/csi/manila/manilaclient" - "k8s.io/cloud-provider-openstack/pkg/csi/manila/options" -) - -type Layer interface { - SupplementCapability(compatOpts *options.CompatibilityOptions, dstShare *shares.Share, dstShareAccessRight *shares.AccessRight, req *csi.CreateVolumeRequest, fwdEndpoint string, manilaClient manilaclient.Interface, csiClientBuilder csiclient.Builder) error -} - -// Certain share protocols may not support certain Manila capabilities -// in a given share type. This map forms a compatibility layer which -// fills in the feature gap with in-driver functionality. -var compatCaps = map[string]map[capabilities.ManilaCapability]Layer{} - -func FindCompatibilityLayer(shareProto string, wantsCap capabilities.ManilaCapability, shareTypeCaps capabilities.ManilaCapabilities) Layer { - if layers, ok := compatCaps[shareProto]; ok { - if hasCapability := shareTypeCaps[wantsCap]; !hasCapability { - if compatCapability, ok := layers[wantsCap]; ok { - return compatCapability - } - } - } - - return nil -} diff --git a/pkg/csi/manila/controllerserver.go b/pkg/csi/manila/controllerserver.go index 05f73202..328db62a 100644 --- a/pkg/csi/manila/controllerserver.go +++ b/pkg/csi/manila/controllerserver.go @@ -27,9 +27,9 @@ import ( "google.golang.org/grpc/status" "google.golang.org/protobuf/types/known/timestamppb" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/cloud-provider-openstack/pkg/csi/manila/capabilities" "k8s.io/cloud-provider-openstack/pkg/csi/manila/options" "k8s.io/cloud-provider-openstack/pkg/csi/manila/shareadapters" + "k8s.io/cloud-provider-openstack/pkg/util" clouderrors "k8s.io/cloud-provider-openstack/pkg/util/errors" "k8s.io/klog/v2" ) @@ -54,7 +54,7 @@ var ( } ) -func getVolumeCreator(source *csi.VolumeContentSource, shareOpts *options.ControllerVolumeContext, compatOpts *options.CompatibilityOptions) (volumeCreator, error) { +func getVolumeCreator(source *csi.VolumeContentSource) (volumeCreator, error) { if source == nil { return &blankVolume{}, nil } @@ -122,11 +122,6 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol return nil, status.Errorf(codes.Unauthenticated, "failed to create Manila v2 client: %v", err) } - shareTypeCaps, err := capabilities.GetManilaCapabilities(shareOpts.Type, manilaClient) - if err != nil { - return nil, status.Errorf(codes.Internal, "failed to get Manila capabilities for share type %s: %v", shareOpts.Type, err) - } - requestedSize := req.GetCapacityRange().GetRequiredBytes() if requestedSize == 0 { // At least 1GiB @@ -135,19 +130,36 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol sizeInGiB := bytesToGiB(requestedSize) + var accessibleTopology []*csi.Topology + accessibleTopologyReq := req.GetAccessibilityRequirements() + if cs.d.withTopology && accessibleTopologyReq != nil { + // All requisite/preferred topologies are considered valid. Nodes from those zones are required to be able to reach the storage. + // The operator is responsible for making sure that provided topology keys are valid and present on the nodes of the cluster. 
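// NOTE: util.GetAZFromTopology, called in the added code below, is the shared
// replacement for the getAZFromTopology helper deleted from cinder's
// controllerserver.go earlier in this patch. Reconstructed from that deleted
// code, it presumably scans preferred and then requisite topology segments for
// the given key (hedged sketch, not the authoritative implementation):
//
//	func GetAZFromTopology(topologyKey string, requirement *csi.TopologyRequirement) string {
//		for _, topology := range requirement.GetPreferred() {
//			if zone, exists := topology.GetSegments()[topologyKey]; exists {
//				return zone
//			}
//		}
//		for _, topology := range requirement.GetRequisite() {
//			if zone, exists := topology.GetSegments()[topologyKey]; exists {
//				return zone
//			}
//		}
//		return ""
//	}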
+ accessibleTopology = accessibleTopologyReq.GetPreferred() + + // When "autoTopology" is enabled and "availability" is empty, obtain the AZ from the target node. + if shareOpts.AvailabilityZone == "" && strings.EqualFold(shareOpts.AutoTopology, "true") { + shareOpts.AvailabilityZone = util.GetAZFromTopology(topologyKey, accessibleTopologyReq) + accessibleTopology = []*csi.Topology{{ + Segments: map[string]string{topologyKey: shareOpts.AvailabilityZone}, + }} + } + } + // Retrieve an existing share or create a new one - volCreator, err := getVolumeCreator(req.GetVolumeContentSource(), shareOpts, cs.d.compatOpts) + volCreator, err := getVolumeCreator(req.GetVolumeContentSource()) if err != nil { return nil, err } - share, err := volCreator.create(req, req.GetName(), sizeInGiB, manilaClient, shareOpts, shareMetadata) + share, err := volCreator.create(manilaClient, req, req.GetName(), sizeInGiB, shareOpts, shareMetadata) if err != nil { return nil, err } - if err = verifyVolumeCompatibility(sizeInGiB, req, share, shareOpts, cs.d.compatOpts, shareTypeCaps); err != nil { + err = verifyVolumeCompatibility(sizeInGiB, req, share, shareOpts) + if err != nil { return nil, status.Errorf(codes.AlreadyExists, "volume %s already exists, but is incompatible with the request: %v", req.GetName(), err) } @@ -164,13 +176,6 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol return nil, status.Errorf(codes.Internal, "failed to grant access to volume %s: %v", share.Name, err) } - var accessibleTopology []*csi.Topology - if cs.d.withTopology { - // All requisite/preferred topologies are considered valid. Nodes from those zones are required to be able to reach the storage. - // The operator is responsible for making sure that provided topology keys are valid and present on the nodes of the cluster. 
- accessibleTopology = req.GetAccessibilityRequirements().GetPreferred() - } - volCtx := filterParametersForVolumeContext(params, options.NodeVolumeContextFields()) volCtx["shareID"] = share.ID volCtx["shareAccessID"] = accessRight.ID @@ -201,7 +206,7 @@ func (cs *controllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVol return nil, status.Errorf(codes.Unauthenticated, "failed to create Manila v2 client: %v", err) } - if err := deleteShare(req.GetVolumeId(), manilaClient); err != nil { + if err := deleteShare(manilaClient, req.GetVolumeId()); err != nil { return nil, status.Errorf(codes.Internal, "failed to delete volume %s: %v", req.GetVolumeId(), err) } @@ -260,7 +265,7 @@ func (cs *controllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateS // Retrieve an existing snapshot or create a new one - snapshot, err := getOrCreateSnapshot(req.GetName(), sourceShare.ID, manilaClient) + snapshot, err := getOrCreateSnapshot(manilaClient, req.GetName(), sourceShare.ID) if err != nil { if wait.Interrupted(err) { return nil, status.Errorf(codes.DeadlineExceeded, "deadline exceeded while waiting for snapshot %s of volume %s to become available", snapshot.ID, req.GetSourceVolumeId()) @@ -288,11 +293,11 @@ func (cs *controllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateS readyToUse = true case snapshotError: // An error occurred, try to roll-back the snapshot - tryDeleteSnapshot(snapshot, manilaClient) + tryDeleteSnapshot(manilaClient, snapshot) - manilaErrMsg, err := lastResourceError(snapshot.ID, manilaClient) + manilaErrMsg, err := lastResourceError(manilaClient, snapshot.ID) if err != nil { - return nil, status.Errorf(codes.Internal, "snapshot %s of volume %s is in error state, error description could not be retrieved: %v", snapshot.ID, req.GetSourceVolumeId(), err.Error()) + return nil, status.Errorf(codes.Internal, "snapshot %s of volume %s is in error state, error description could not be retrieved: %v", snapshot.ID, req.GetSourceVolumeId(), err) } return nil, status.Errorf(manilaErrMsg.errCode.toRPCErrorCode(), "snapshot %s of volume %s is in error state: %s", snapshot.ID, req.GetSourceVolumeId(), manilaErrMsg.message) @@ -333,7 +338,7 @@ func (cs *controllerServer) DeleteSnapshot(ctx context.Context, req *csi.DeleteS return nil, status.Errorf(codes.Unauthenticated, "failed to create Manila v2 client: %v", err) } - if err := deleteSnapshot(req.GetSnapshotId(), manilaClient); err != nil { + if err := deleteSnapshot(manilaClient, req.GetSnapshotId()); err != nil { return nil, status.Errorf(codes.Internal, "failed to delete snapshot %s: %v", req.GetSnapshotId(), err) } @@ -471,7 +476,7 @@ func (cs *controllerServer) ControllerExpandVolume(ctx context.Context, req *csi }, nil } - share, err = extendShare(share.ID, desiredSizeInGiB, manilaClient) + share, err = extendShare(manilaClient, share.ID, desiredSizeInGiB) if err != nil { return nil, err } diff --git a/pkg/csi/manila/driver.go b/pkg/csi/manila/driver.go index 8ec6d56d..5783a23b 100644 --- a/pkg/csi/manila/driver.go +++ b/pkg/csi/manila/driver.go @@ -31,7 +31,6 @@ import ( "google.golang.org/grpc" "k8s.io/cloud-provider-openstack/pkg/csi/manila/csiclient" "k8s.io/cloud-provider-openstack/pkg/csi/manila/manilaclient" - "k8s.io/cloud-provider-openstack/pkg/csi/manila/options" "k8s.io/cloud-provider-openstack/pkg/version" "k8s.io/klog/v2" ) @@ -49,8 +48,6 @@ type DriverOpts struct { ManilaClientBuilder manilaclient.Builder CSIClientBuilder csiclient.Builder - - CompatOpts *options.CompatibilityOptions } type 
Driver struct { @@ -65,8 +62,6 @@ type Driver struct { serverEndpoint string fwdEndpoint string - compatOpts *options.CompatibilityOptions - ids *identityServer cs *controllerServer ns *nodeServer @@ -85,7 +80,7 @@ type nonBlockingGRPCServer struct { } const ( - specVersion = "1.2.0" + specVersion = "1.8.0" driverVersion = "0.9.0" topologyKey = "topology.manila.csi.openstack.org/zone" ) @@ -103,7 +98,14 @@ func argNotEmpty(val, name string) error { } func NewDriver(o *DriverOpts) (*Driver, error) { - for k, v := range map[string]string{"node ID": o.NodeID, "driver name": o.DriverName, "driver endpoint": o.ServerCSIEndpoint, "FWD endpoint": o.FwdCSIEndpoint, "share protocol selector": o.ShareProto} { + m := map[string]string{ + "node ID": o.NodeID, + "driver name": o.DriverName, + "driver endpoint": o.ServerCSIEndpoint, + "FWD endpoint": o.FwdCSIEndpoint, + "share protocol selector": o.ShareProto, + } + for k, v := range m { if err := argNotEmpty(v, k); err != nil { return nil, err } @@ -118,7 +120,6 @@ func NewDriver(o *DriverOpts) (*Driver, error) { serverEndpoint: o.ServerCSIEndpoint, fwdEndpoint: o.FwdCSIEndpoint, shareProto: strings.ToUpper(o.ShareProto), - compatOpts: o.CompatOpts, manilaClientBuilder: o.ManilaClientBuilder, csiClientBuilder: o.CSIClientBuilder, clusterID: o.ClusterID, @@ -170,7 +171,7 @@ func NewDriver(o *DriverOpts) (*Driver, error) { if err != nil { return nil, fmt.Errorf("failed to initialize proxied CSI driver: %v", err) } - var nscaps []csi.NodeServiceCapability_RPC_Type + nscaps := make([]csi.NodeServiceCapability_RPC_Type, 0, len(nodeCapsMap)) for c := range nodeCapsMap { nscaps = append(nscaps, c) @@ -195,7 +196,7 @@ func (d *Driver) Run() { } func (d *Driver) addControllerServiceCapabilities(cs []csi.ControllerServiceCapability_RPC_Type) { - var caps []*csi.ControllerServiceCapability + caps := make([]*csi.ControllerServiceCapability, 0, len(cs)) for _, c := range cs { klog.Infof("Enabling controller service capability: %v", c.String()) @@ -214,7 +215,7 @@ func (d *Driver) addControllerServiceCapabilities(cs []csi.ControllerServiceCapa } func (d *Driver) addVolumeCapabilityAccessModes(vs []csi.VolumeCapability_AccessMode_Mode) { - var caps []*csi.VolumeCapability_AccessMode + caps := make([]*csi.VolumeCapability_AccessMode, 0, len(vs)) for _, c := range vs { klog.Infof("Enabling volume access mode: %v", c.String()) @@ -225,7 +226,7 @@ func (d *Driver) addVolumeCapabilityAccessModes(vs []csi.VolumeCapability_Access } func (d *Driver) addNodeServiceCapabilities(ns []csi.NodeServiceCapability_RPC_Type) { - var caps []*csi.NodeServiceCapability + caps := make([]*csi.NodeServiceCapability, 0, len(ns)) for _, c := range ns { klog.Infof("Enabling node service capability: %v", c.String()) @@ -284,6 +285,8 @@ func (s *nonBlockingGRPCServer) wait() { } func (s *nonBlockingGRPCServer) serve(endpoint string, ids *identityServer, cs *controllerServer, ns *nodeServer) { + defer s.wg.Done() + proto, addr, err := parseGRPCEndpoint(endpoint) if err != nil { klog.Fatalf("couldn't parse GRPC server endpoint address %s: %v", endpoint, err) diff --git a/pkg/csi/manila/identityserver.go b/pkg/csi/manila/identityserver.go index df2b00a8..dae37ca9 100644 --- a/pkg/csi/manila/identityserver.go +++ b/pkg/csi/manila/identityserver.go @@ -18,6 +18,7 @@ package manila import ( "context" + "github.com/container-storage-interface/spec/lib/go/csi" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" diff --git a/pkg/csi/manila/manilaclient/builder.go 
b/pkg/csi/manila/manilaclient/builder.go index ef4f16df..2d3bd888 100644 --- a/pkg/csi/manila/manilaclient/builder.go +++ b/pkg/csi/manila/manilaclient/builder.go @@ -28,6 +28,14 @@ import ( "k8s.io/cloud-provider-openstack/pkg/client" ) +const ( + minimumManilaVersion = "2.37" +) + +var ( + manilaMicroversionRegexp = regexp.MustCompile(`^(\d+)\.(\d+)$`) +) + type ClientBuilder struct { UserAgent string ExtraUserAgentData []string @@ -37,13 +45,30 @@ func (cb *ClientBuilder) New(o *client.AuthOpts) (Interface, error) { return New(o, cb.UserAgent, cb.ExtraUserAgentData) } -const ( - minimumManilaVersion = "2.37" -) +func New(o *client.AuthOpts, userAgent string, extraUserAgentData []string) (*Client, error) { + // Authenticate and create Manila v2 client + provider, err := client.NewOpenStackClient(o, userAgent, extraUserAgentData...) + if err != nil { + return nil, fmt.Errorf("failed to authenticate: %v", err) + } -var ( - manilaMicroversionRegexp = regexp.MustCompile(`^(\d+)\.(\d+)$`) -) + client, err := openstack.NewSharedFileSystemV2(provider, gophercloud.EndpointOpts{ + Region: o.Region, + Availability: o.EndpointType, + }) + if err != nil { + return nil, fmt.Errorf("failed to create Manila v2 client: %v", err) + } + + // Check client's and server's versions for compatibility + + client.Microversion = minimumManilaVersion + if err = validateManilaClient(client); err != nil { + return nil, fmt.Errorf("Manila v2 client validation failed: %v", err) + } + + return &Client{c: client}, nil +} func splitManilaMicroversion(microversion string) (major, minor int) { if err := validateManilaMicroversion(microversion); err != nil { @@ -96,32 +121,3 @@ func validateManilaClient(c *gophercloud.ServiceClient) error { return nil } - -func New(o *client.AuthOpts, userAgent string, extraUserAgentData []string) (*Client, error) { - // Authenticate and create Manila v2 client - provider, err := client.NewOpenStackClient(o, userAgent, extraUserAgentData...) 
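Note on the relocated New(): it pins client.Microversion to minimumManilaVersion before calling validateManilaClient, so every request negotiates at least 2.37. A self-contained sketch of the MAJOR.MINOR comparison this relies on (helper names here are illustrative, not part of the package):

package main

import (
	"fmt"
	"regexp"
	"strconv"
)

// Mirrors manilaMicroversionRegexp: a microversion is "MAJOR.MINOR".
var microversionRe = regexp.MustCompile(`^(\d+)\.(\d+)$`)

// parseMicroversion splits "MAJOR.MINOR" into integers, rejecting malformed input.
func parseMicroversion(v string) (major, minor int, err error) {
	m := microversionRe.FindStringSubmatch(v)
	if m == nil {
		return 0, 0, fmt.Errorf("invalid microversion %q", v)
	}
	major, _ = strconv.Atoi(m[1])
	minor, _ = strconv.Atoi(m[2])
	return major, minor, nil
}

// atLeast reports whether the server-reported microversion v satisfies min.
func atLeast(v, min string) (bool, error) {
	vMaj, vMin, err := parseMicroversion(v)
	if err != nil {
		return false, err
	}
	minMaj, minMin, err := parseMicroversion(min)
	if err != nil {
		return false, err
	}
	return vMaj > minMaj || (vMaj == minMaj && vMin >= minMin), nil
}

func main() {
	ok, _ := atLeast("2.42", "2.37") // "2.37" is this package's minimumManilaVersion
	fmt.Println(ok)                  // true
}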
- if err != nil { - return nil, fmt.Errorf("failed to authenticate: %v", err) - } - - client, err := openstack.NewSharedFileSystemV2(provider, gophercloud.EndpointOpts{ - Region: o.Region, - Availability: o.EndpointType, - }) - if err != nil { - return nil, fmt.Errorf("failed to create Manila v2 client: %v", err) - } - - // Check client's and server's versions for compatibility - - client.Microversion = minimumManilaVersion - if err = validateManilaClient(client); err != nil { - return nil, fmt.Errorf("Manila v2 client validation failed: %v", err) - } - - return &Client{c: client}, nil -} - -func NewFromServiceClient(c *gophercloud.ServiceClient) *Client { - return &Client{c: c} -} diff --git a/pkg/csi/manila/nodeserver.go b/pkg/csi/manila/nodeserver.go index 10e417a9..1d8dfda2 100644 --- a/pkg/csi/manila/nodeserver.go +++ b/pkg/csi/manila/nodeserver.go @@ -128,8 +128,11 @@ func (ns *nodeServer) buildVolumeContext(volID volumeID, shareOpts *options.Node // Build volume context for fwd plugin sa := getShareAdapter(ns.d.shareProto) - - volumeContext, err = sa.BuildVolumeContext(&shareadapters.VolumeContextArgs{Locations: availableExportLocations, Options: shareOpts}) + opts := &shareadapters.VolumeContextArgs{ + Locations: availableExportLocations, + Options: shareOpts, + } + volumeContext, err = sa.BuildVolumeContext(opts) if err != nil { return nil, nil, status.Errorf(codes.InvalidArgument, "failed to build volume context for volume %s: %v", volID, err) } @@ -138,7 +141,10 @@ func (ns *nodeServer) buildVolumeContext(volID volumeID, shareOpts *options.Node } func buildNodePublishSecret(accessRight *shares.AccessRight, sa shareadapters.ShareAdapter, volID volumeID) (map[string]string, error) { - secret, err := sa.BuildNodePublishSecret(&shareadapters.SecretArgs{AccessRight: accessRight}) + opts := &shareadapters.SecretArgs{ + AccessRight: accessRight, + } + secret, err := sa.BuildNodePublishSecret(opts) if err != nil { return nil, status.Errorf(codes.InvalidArgument, "failed to build publish secret for volume %s: %v", volID, err) } @@ -147,7 +153,10 @@ func buildNodePublishSecret(accessRight *shares.AccessRight, sa shareadapters.Sh } func buildNodeStageSecret(accessRight *shares.AccessRight, sa shareadapters.ShareAdapter, volID volumeID) (map[string]string, error) { - secret, err := sa.BuildNodeStageSecret(&shareadapters.SecretArgs{AccessRight: accessRight}) + opts := &shareadapters.SecretArgs{ + AccessRight: accessRight, + } + secret, err := sa.BuildNodeStageSecret(opts) if err != nil { return nil, status.Errorf(codes.InvalidArgument, "failed to build stage secret for volume %s: %v", volID, err) } @@ -201,7 +210,6 @@ func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis secret, err = buildNodePublishSecret(accessRight, getShareAdapter(ns.d.shareProto), volID) } } - if err != nil { return nil, err } @@ -279,7 +287,6 @@ func (ns *nodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVol } } ns.nodeStageCacheMtx.Unlock() - if err != nil { return nil, err } diff --git a/pkg/csi/manila/options/compatibilityoptions.go b/pkg/csi/manila/options/compatibilityoptions.go deleted file mode 100644 index 0ed4d6cf..00000000 --- a/pkg/csi/manila/options/compatibilityoptions.go +++ /dev/null @@ -1,31 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package options - -import "k8s.io/cloud-provider-openstack/pkg/csi/manila/validator" - -type CompatibilityOptions struct { - CreateShareFromSnapshotEnabled string `name:"CreateShareFromSnapshotEnabled" value:"default:false" matches:"^true|false$"` - CreateShareFromSnapshotRetries string `name:"CreateShareFromSnapshotRetries" value:"default:10" matches:"^[0-9]+$"` - CreateShareFromSnapshotBackoffInterval string `name:"CreateShareFromSnapshotBackoffInterval" value:"default:5" matches:"^[0-9]+$"` -} - -var ( - compatOptionsValidator = validator.New(&CompatibilityOptions{}) -) - -func NewCompatibilityOptions(data map[string]string) (*CompatibilityOptions, error) { - opts := &CompatibilityOptions{} - return opts, compatOptionsValidator.Populate(data, opts) -} diff --git a/pkg/csi/manila/options/shareoptions.go b/pkg/csi/manila/options/shareoptions.go index a3b539a7..f3dd6c0d 100644 --- a/pkg/csi/manila/options/shareoptions.go +++ b/pkg/csi/manila/options/shareoptions.go @@ -24,6 +24,7 @@ type ControllerVolumeContext struct { Protocol string `name:"protocol" matches:"^(?i)CEPHFS|NFS$"` Type string `name:"type" value:"default:default"` ShareNetworkID string `name:"shareNetworkID" value:"optional"` + AutoTopology string `name:"autoTopology" value:"default:false" matches:"(?i)^true|false$"` AvailabilityZone string `name:"availability" value:"optional"` AppendShareMetadata string `name:"appendShareMetadata" value:"optional"` diff --git a/pkg/csi/manila/share.go b/pkg/csi/manila/share.go index 11b013c5..33badf09 100644 --- a/pkg/csi/manila/share.go +++ b/pkg/csi/manila/share.go @@ -60,7 +60,7 @@ func isShareInErrorState(s string) bool { // getOrCreateShare first retrieves an existing share with name=shareName, or creates a new one if it doesn't exist yet. // Once the share is created, an exponential back-off is used to wait till the status of the share is "available". 
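The "exponential back-off" mentioned in the comment above is the standard apimachinery wait loop. A minimal, runnable sketch of the pattern behind waitForShareStatus, with getShareStatus standing in for the Manila client call (both helper names and the backoff values are illustrative):

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func getShareStatus(shareID string) (string, error) {
	return "available", nil // stand-in; the real code queries Manila
}

func waitForStatus(shareID, desired string) error {
	backoff := wait.Backoff{
		Duration: 5 * time.Second, // initial delay
		Factor:   1.2,             // exponential growth per step
		Steps:    10,              // give up after this many polls
	}
	return wait.ExponentialBackoff(backoff, func() (bool, error) {
		status, err := getShareStatus(shareID)
		if err != nil {
			return false, err // hard error: stop waiting
		}
		return status == desired, nil // false: retry with backoff
	})
}

func main() {
	err := waitForStatus("share-id", "available")
	if wait.Interrupted(err) { // the timeout case callers map to codes.DeadlineExceeded
		fmt.Println("deadline exceeded")
	} else if err != nil {
		fmt.Println(err)
	}
}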
-func getOrCreateShare(shareName string, createOpts *shares.CreateOpts, manilaClient manilaclient.Interface) (*shares.Share, manilaError, error) { +func getOrCreateShare(manilaClient manilaclient.Interface, shareName string, createOpts *shares.CreateOpts) (*shares.Share, manilaError, error) { var ( share *shares.Share err error @@ -90,10 +90,10 @@ func getOrCreateShare(shareName string, createOpts *shares.CreateOpts, manilaCli return share, 0, nil } - return waitForShareStatus(share.ID, []string{shareCreating, shareCreatingFromSnapshot}, shareAvailable, false, manilaClient) + return waitForShareStatus(manilaClient, share.ID, []string{shareCreating, shareCreatingFromSnapshot}, shareAvailable, false) } -func deleteShare(shareID string, manilaClient manilaclient.Interface) error { +func deleteShare(manilaClient manilaclient.Interface, shareID string) error { if err := manilaClient.DeleteShare(shareID); err != nil { if clouderrors.IsNotFound(err) { klog.V(4).Infof("volume with share ID %s not found, assuming it to be already deleted", shareID) @@ -105,7 +105,7 @@ func deleteShare(shareID string, manilaClient manilaclient.Interface) error { return nil } -func tryDeleteShare(share *shares.Share, manilaClient manilaclient.Interface) { +func tryDeleteShare(manilaClient manilaclient.Interface, share *shares.Share) { if share == nil { return } @@ -116,13 +116,13 @@ func tryDeleteShare(share *shares.Share, manilaClient manilaclient.Interface) { return } - _, _, err := waitForShareStatus(share.ID, []string{shareDeleting}, "", true, manilaClient) + _, _, err := waitForShareStatus(manilaClient, share.ID, []string{shareDeleting}, "", true) if err != nil && !wait.Interrupted(err) { klog.Errorf("couldn't retrieve volume %s in a roll-back procedure: %v", share.Name, err) } } -func extendShare(shareID string, newSizeInGiB int, manilaClient manilaclient.Interface) (*shares.Share, error) { +func extendShare(manilaClient manilaclient.Interface, shareID string, newSizeInGiB int) (*shares.Share, error) { opts := shares.ExtendOpts{ NewSize: newSizeInGiB, } @@ -131,7 +131,7 @@ func extendShare(shareID string, newSizeInGiB int, manilaClient manilaclient.Int return nil, err } - share, manilaErrCode, err := waitForShareStatus(shareID, []string{shareExtending}, shareAvailable, false, manilaClient) + share, manilaErrCode, err := waitForShareStatus(manilaClient, shareID, []string{shareExtending}, shareAvailable, false) if err != nil { if wait.Interrupted(err) { return nil, status.Errorf(codes.DeadlineExceeded, "deadline exceeded while waiting for volume ID %s to become available", share.Name) @@ -143,7 +143,7 @@ func extendShare(shareID string, newSizeInGiB int, manilaClient manilaclient.Int return share, nil } -func waitForShareStatus(shareID string, validTransientStates []string, desiredStatus string, successOnNotFound bool, manilaClient manilaclient.Interface) (*shares.Share, manilaError, error) { +func waitForShareStatus(manilaClient manilaclient.Interface, shareID string, validTransientStates []string, desiredStatus string, successOnNotFound bool) (*shares.Share, manilaError, error) { var ( backoff = wait.Backoff{ Duration: time.Second * waitForAvailableShareTimeout, @@ -185,7 +185,7 @@ func waitForShareStatus(shareID string, validTransientStates []string, desiredSt } if isShareInErrorState(share.Status) { - manilaErrMsg, err := lastResourceError(shareID, manilaClient) + manilaErrMsg, err := lastResourceError(manilaClient, shareID) if err != nil { return false, fmt.Errorf("share %s is in error state, error 
description could not be retrieved: %v", shareID, err) } diff --git a/pkg/csi/manila/snapshot.go b/pkg/csi/manila/snapshot.go index afd58c14..5fe20454 100644 --- a/pkg/csi/manila/snapshot.go +++ b/pkg/csi/manila/snapshot.go @@ -38,7 +38,7 @@ const ( // getOrCreateSnapshot retrieves an existing snapshot with name=snapName, or creates a new one if it doesn't exist yet. // Instead of waiting for the snapshot to become available (as getOrCreateShare does), CSI's ready_to_use flag is used to signal readiness -func getOrCreateSnapshot(snapName, sourceShareID string, manilaClient manilaclient.Interface) (*snapshots.Snapshot, error) { +func getOrCreateSnapshot(manilaClient manilaclient.Interface, snapName, sourceShareID string) (*snapshots.Snapshot, error) { var ( snapshot *snapshots.Snapshot err error @@ -72,7 +72,7 @@ func getOrCreateSnapshot(snapName, sourceShareID string, manilaClient manilaclie return snapshot, nil } -func deleteSnapshot(snapID string, manilaClient manilaclient.Interface) error { +func deleteSnapshot(manilaClient manilaclient.Interface, snapID string) error { if err := manilaClient.DeleteSnapshot(snapID); err != nil { if clouderrors.IsNotFound(err) { klog.V(4).Infof("snapshot %s not found, assuming it to be already deleted", snapID) @@ -84,24 +84,24 @@ func deleteSnapshot(snapID string, manilaClient manilaclient.Interface) error { return nil } -func tryDeleteSnapshot(snapshot *snapshots.Snapshot, manilaClient manilaclient.Interface) { +func tryDeleteSnapshot(manilaClient manilaclient.Interface, snapshot *snapshots.Snapshot) { if snapshot == nil { return } - if err := deleteSnapshot(snapshot.ID, manilaClient); err != nil { + if err := deleteSnapshot(manilaClient, snapshot.ID); err != nil { // TODO failure to delete a snapshot in an error state needs proper monitoring support klog.Errorf("couldn't delete snapshot %s in a roll-back procedure: %v", snapshot.ID, err) return } - _, _, err := waitForSnapshotStatus(snapshot.ID, snapshotDeleting, "", true, manilaClient) + _, _, err := waitForSnapshotStatus(manilaClient, snapshot.ID, snapshotDeleting, "", true) if err != nil && !wait.Interrupted(err) { klog.Errorf("couldn't retrieve snapshot %s in a roll-back procedure: %v", snapshot.ID, err) } } -func waitForSnapshotStatus(snapshotID, currentStatus, desiredStatus string, successOnNotFound bool, manilaClient manilaclient.Interface) (*snapshots.Snapshot, manilaError, error) { +func waitForSnapshotStatus(manilaClient manilaclient.Interface, snapshotID, currentStatus, desiredStatus string, successOnNotFound bool) (*snapshots.Snapshot, manilaError, error) { var ( backoff = wait.Backoff{ Duration: time.Second * waitForAvailableShareTimeout, @@ -133,7 +133,7 @@ func waitForSnapshotStatus(snapshotID, currentStatus, desiredStatus string, succ case desiredStatus: isAvailable = true case shareError: - manilaErrMsg, err := lastResourceError(snapshotID, manilaClient) + manilaErrMsg, err := lastResourceError(manilaClient, snapshotID) if err != nil { return false, fmt.Errorf("snapshot %s is in error state, error description could not be retrieved: %v", snapshotID, err) } diff --git a/pkg/csi/manila/util.go b/pkg/csi/manila/util.go index 281bf043..090a6c80 100644 --- a/pkg/csi/manila/util.go +++ b/pkg/csi/manila/util.go @@ -26,7 +26,6 @@ import ( "github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/shares" "github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/snapshots" "google.golang.org/grpc/codes" - "k8s.io/cloud-provider-openstack/pkg/csi/manila/capabilities" 
"k8s.io/cloud-provider-openstack/pkg/csi/manila/manilaclient" "k8s.io/cloud-provider-openstack/pkg/csi/manila/options" "k8s.io/klog/v2" @@ -68,6 +67,7 @@ const ( ) var ( + // TODO: add 001 and add a refernce to these codes manilaErrorCodesMap = map[string]manilaError{ "002": manilaErrNoValidHost, "003": manilaErrUnexpectedNetwork, @@ -129,7 +129,7 @@ func bytesToGiB(sizeInBytes int64) int { return sizeInGiB } -func lastResourceError(resourceID string, manilaClient manilaclient.Interface) (manilaErrorMessage, error) { +func lastResourceError(manilaClient manilaclient.Interface, resourceID string) (manilaErrorMessage, error) { msgs, err := manilaClient.GetUserMessages(&messages.ListOpts{ ResourceID: resourceID, MessageLevel: "ERROR", @@ -232,7 +232,7 @@ func coalesceValue(v string) string { return v } -func verifyVolumeCompatibility(sizeInGiB int, req *csi.CreateVolumeRequest, share *shares.Share, shareOpts *options.ControllerVolumeContext, compatOpts *options.CompatibilityOptions, shareTypeCaps capabilities.ManilaCapabilities) error { +func verifyVolumeCompatibility(sizeInGiB int, req *csi.CreateVolumeRequest, share *shares.Share, shareOpts *options.ControllerVolumeContext) error { if share.Size != sizeInGiB { return fmt.Errorf("size mismatch: wanted %d, got %d", share.Size, sizeInGiB) } diff --git a/pkg/csi/manila/volumesource.go b/pkg/csi/manila/volumesource.go index c2e7db0c..634040be 100644 --- a/pkg/csi/manila/volumesource.go +++ b/pkg/csi/manila/volumesource.go @@ -28,12 +28,12 @@ import ( ) type volumeCreator interface { - create(req *csi.CreateVolumeRequest, shareName string, sizeInGiB int, manilaClient manilaclient.Interface, shareOpts *options.ControllerVolumeContext, shareMetadata map[string]string) (*shares.Share, error) + create(manilaClient manilaclient.Interface, req *csi.CreateVolumeRequest, shareName string, sizeInGiB int, shareOpts *options.ControllerVolumeContext, shareMetadata map[string]string) (*shares.Share, error) } type blankVolume struct{} -func (blankVolume) create(req *csi.CreateVolumeRequest, shareName string, sizeInGiB int, manilaClient manilaclient.Interface, shareOpts *options.ControllerVolumeContext, shareMetadata map[string]string) (*shares.Share, error) { +func (blankVolume) create(manilaClient manilaclient.Interface, req *csi.CreateVolumeRequest, shareName string, sizeInGiB int, shareOpts *options.ControllerVolumeContext, shareMetadata map[string]string) (*shares.Share, error) { createOpts := &shares.CreateOpts{ AvailabilityZone: shareOpts.AvailabilityZone, ShareProto: shareOpts.Protocol, @@ -45,7 +45,7 @@ func (blankVolume) create(req *csi.CreateVolumeRequest, shareName string, sizeIn Metadata: shareMetadata, } - share, manilaErrCode, err := getOrCreateShare(shareName, createOpts, manilaClient) + share, manilaErrCode, err := getOrCreateShare(manilaClient, shareName, createOpts) if err != nil { if wait.Interrupted(err) { return nil, status.Errorf(codes.DeadlineExceeded, "deadline exceeded while waiting for volume %s to become available", shareName) @@ -53,7 +53,7 @@ func (blankVolume) create(req *csi.CreateVolumeRequest, shareName string, sizeIn if manilaErrCode != 0 { // An error has occurred, try to roll-back the share - tryDeleteShare(share, manilaClient) + tryDeleteShare(manilaClient, share) } return nil, status.Errorf(manilaErrCode.toRPCErrorCode(), "failed to create volume %s: %v", shareName, err) @@ -64,7 +64,7 @@ func (blankVolume) create(req *csi.CreateVolumeRequest, shareName string, sizeIn type volumeFromSnapshot struct{} -func 
(volumeFromSnapshot) create(req *csi.CreateVolumeRequest, shareName string, sizeInGiB int, manilaClient manilaclient.Interface, shareOpts *options.ControllerVolumeContext, shareMetadata map[string]string) (*shares.Share, error) { +func (volumeFromSnapshot) create(manilaClient manilaclient.Interface, req *csi.CreateVolumeRequest, shareName string, sizeInGiB int, shareOpts *options.ControllerVolumeContext, shareMetadata map[string]string) (*shares.Share, error) { snapshotSource := req.GetVolumeContentSource().GetSnapshot() if snapshotSource.GetSnapshotId() == "" { @@ -100,7 +100,7 @@ func (volumeFromSnapshot) create(req *csi.CreateVolumeRequest, shareName string, Metadata: shareMetadata, } - share, manilaErrCode, err := getOrCreateShare(shareName, createOpts, manilaClient) + share, manilaErrCode, err := getOrCreateShare(manilaClient, shareName, createOpts) if err != nil { if wait.Interrupted(err) { return nil, status.Errorf(codes.DeadlineExceeded, "deadline exceeded while waiting for volume %s to become available", share.Name) @@ -108,7 +108,7 @@ func (volumeFromSnapshot) create(req *csi.CreateVolumeRequest, shareName string, if manilaErrCode != 0 { // An error has occurred, try to roll-back the share - tryDeleteShare(share, manilaClient) + tryDeleteShare(manilaClient, share) } return nil, status.Errorf(manilaErrCode.toRPCErrorCode(), "failed to restore snapshot %s into volume %s: %v", snapshotSource.GetSnapshotId(), shareName, err) diff --git a/pkg/identity/keystone/authenticator.go b/pkg/identity/keystone/authenticator.go index 9f45c56c..017d1d9f 100644 --- a/pkg/identity/keystone/authenticator.go +++ b/pkg/identity/keystone/authenticator.go @@ -71,7 +71,7 @@ func (k *Keystoner) GetTokenInfo(token string) (*tokenInfo, error) { return nil, fmt.Errorf("failed to extract roles information from Keystone response: %v", err) } - var userRoles []string + userRoles := make([]string, 0, len(roles)) for _, role := range roles { userRoles = append(userRoles, role.Name) } @@ -90,19 +90,18 @@ func (k *Keystoner) GetTokenInfo(token string) (*tokenInfo, error) { // revive:enable:unexported-return func (k *Keystoner) GetGroups(token string, userID string) ([]string, error) { - var userGroups []string - k.client.ProviderClient.SetToken(token) allGroupPages, err := users.ListGroups(k.client, userID).AllPages() if err != nil { - return userGroups, fmt.Errorf("failed to get user groups from Keystone: %v", err) + return nil, fmt.Errorf("failed to get user groups from Keystone: %v", err) } allGroups, err := groups.ExtractGroups(allGroupPages) if err != nil { - return userGroups, fmt.Errorf("failed to extract user groups from Keystone response: %v", err) + return nil, fmt.Errorf("failed to extract user groups from Keystone response: %v", err) } + userGroups := make([]string, 0, len(allGroups)) for _, g := range allGroups { userGroups = append(userGroups, g.Name) } diff --git a/pkg/identity/keystone/authorizer.go b/pkg/identity/keystone/authorizer.go index 08ff1f79..147f5058 100644 --- a/pkg/identity/keystone/authorizer.go +++ b/pkg/identity/keystone/authorizer.go @@ -324,7 +324,7 @@ func (a *Authorizer) Authorize(attributes authorizer.Attributes) (authorized aut } } - // When the user.Extra does not exist, it means that the keytone user authentication has failed, and the authorization verification should not pass. + // When the user.Extra does not exist, it means that the keystone user authentication has failed, and the authorization verification should not pass. 
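A side note on the recurring `var xs []T` to `xs := make([]T, 0, len(src))` changes in this diff (userRoles, userGroups, nscaps, caps): sizing the slice up front means the appends never reallocate the backing array. A minimal illustration:

package main

import "fmt"

type role struct{ Name string }

func main() {
	roles := []role{{"admin"}, {"member"}, {"reader"}}

	// Before: `var userRoles []string` starts nil and may reallocate as it
	// grows. After: capacity is known up front, so the appends below never
	// copy the backing array.
	userRoles := make([]string, 0, len(roles))
	for _, r := range roles {
		userRoles = append(userRoles, r.Name)
	}
	fmt.Println(userRoles, len(userRoles), cap(userRoles)) // [admin member reader] 3 3
}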
if user.GetExtra() == nil { return authorizer.DecisionDeny, "No auth info found.", nil } diff --git a/pkg/identity/keystone/config.go b/pkg/identity/keystone/config.go index 2d4a87c1..78814d03 100644 --- a/pkg/identity/keystone/config.go +++ b/pkg/identity/keystone/config.go @@ -88,7 +88,7 @@ func (c *Config) AddFlags(fs *pflag.FlagSet) { fs.StringVar(&c.KeystoneCA, "keystone-ca-file", c.KeystoneCA, "File containing the certificate authority for Keystone Service.") fs.StringVar(&c.PolicyFile, "keystone-policy-file", c.PolicyFile, "File containing the policy, if provided, it takes precedence over the policy configmap.") fs.StringVar(&c.PolicyConfigMapName, "policy-configmap-name", c.PolicyConfigMapName, "ConfigMap in kube-system namespace containing the policy configuration, the ConfigMap data must contain the key 'policies'") - fs.StringVar(&c.SyncConfigFile, "sync-config-file", c.SyncConfigFile, "File containing config values for data synchronization beetween Keystone and Kubernetes.") - fs.StringVar(&c.SyncConfigMapName, "sync-configmap-name", "", "ConfigMap in kube-system namespace containing config values for data synchronization beetween Keystone and Kubernetes.") + fs.StringVar(&c.SyncConfigFile, "sync-config-file", c.SyncConfigFile, "File containing config values for data synchronization between Keystone and Kubernetes.") + fs.StringVar(&c.SyncConfigMapName, "sync-configmap-name", "", "ConfigMap in kube-system namespace containing config values for data synchronization between Keystone and Kubernetes.") fs.StringVar(&c.Kubeconfig, "kubeconfig", c.Kubeconfig, "Kubeconfig file used to connect to Kubernetes API to get policy configmap. If the service is running inside the pod, this option is not necessary, will use in-cluster config instead.") } diff --git a/pkg/identity/keystone/keystone.go b/pkg/identity/keystone/keystone.go index dd5ed8ef..2a8e7933 100644 --- a/pkg/identity/keystone/keystone.go +++ b/pkg/identity/keystone/keystone.go @@ -312,7 +312,7 @@ func (k *Auth) authenticateToken(w http.ResponseWriter, r *http.Request, token s var response status response.Authenticated = true - // Modify user info accoding to the sync configuration. + // Modify user info according to the sync configuration. response.User = *k.syncer.syncRoles(&info) data["status"] = response @@ -337,6 +337,7 @@ func (k *Auth) authorizeToken(w http.ResponseWriter, r *http.Request, data map[s attrs := authorizer.AttributesRecord{User: usr} groups := spec["group"].([]interface{}) + usr.Groups = make([]string, 0, len(groups)) for _, v := range groups { usr.Groups = append(usr.Groups, v.(string)) } @@ -420,7 +421,7 @@ func NewKeystoneAuth(c *Config) (*Auth, error) { // Get policy definition either from a policy file or the policy configmap. Policy file takes precedence // over the configmap, but the policy definition will be refreshed based on the configmap change on-the-fly. It - // is possible that both are not provided, in this case, the keytone webhook authorization will always return deny. + // is possible that both are not provided, in this case, the keystone webhook authorization will always return deny. var policy policyList if c.PolicyConfigMapName != "" { cm, err := k8sClient.CoreV1().ConfigMaps(cmNamespace).Get(context.TODO(), c.PolicyConfigMapName, metav1.GetOptions{}) @@ -450,7 +451,7 @@ func NewKeystoneAuth(c *Config) (*Auth, error) { // Get sync config either from a sync config file or the sync configmap. 
Sync config file takes precedence // over the configmap, but the sync config definition will be refreshed based on the configmap change on-the-fly. It - // is possible that both are not provided, in this case, the keytone webhook authenticator will not synchronize data. + // is possible that both are not provided, in this case, the keystone webhook authenticator will not synchronize data. var sc *syncConfig if c.SyncConfigMapName != "" { cm, err := k8sClient.CoreV1().ConfigMaps(cmNamespace).Get(context.TODO(), c.SyncConfigMapName, metav1.GetOptions{}) @@ -543,7 +544,7 @@ func createIdentityV3Provider(options gophercloud.AuthOptions, transport http.Ro } chosen, _, err := utils.ChooseVersion(client, versions) if err != nil { - return nil, fmt.Errorf("Unable to find identity API v3 version : %v", err) + return nil, fmt.Errorf("unable to find identity API v3 version : %v", err) } switch chosen.ID { @@ -551,7 +552,7 @@ func createIdentityV3Provider(options gophercloud.AuthOptions, transport http.Ro return client, nil default: // The switch statement must be out of date from the versions list. - return nil, fmt.Errorf("Unsupported identity API version: %s", chosen.ID) + return nil, fmt.Errorf("unsupported identity API version: %s", chosen.ID) } } diff --git a/pkg/ingress/cmd/root.go b/pkg/ingress/cmd/root.go index 8718af02..f89542a4 100644 --- a/pkg/ingress/cmd/root.go +++ b/pkg/ingress/cmd/root.go @@ -29,6 +29,7 @@ import ( "k8s.io/cloud-provider-openstack/pkg/ingress/config" "k8s.io/cloud-provider-openstack/pkg/ingress/controller" + "k8s.io/cloud-provider-openstack/pkg/version" "k8s.io/component-base/cli" "k8s.io/klog/v2" ) @@ -54,6 +55,7 @@ var rootCmd = &cobra.Command{ signal.Notify(sigterm, syscall.SIGINT) <-sigterm }, + Version: version.Version, } // Execute adds all child commands to the root command sets flags appropriately. diff --git a/pkg/ingress/config/config.go b/pkg/ingress/config/config.go index 6c9b7c98..8658fae9 100644 --- a/pkg/ingress/config/config.go +++ b/pkg/ingress/config/config.go @@ -57,4 +57,9 @@ type octaviaConfig struct { // (Optional) Flavor ID to create the load balancer. // If empty, the default flavor will be used. FlavorID string `mapstructure:"flavor-id"` + + // (Optional) If the ingress controller should use serial API calls when creating and updating + // the load balancer instead of the bulk update API call. + // Default is false. + ProviderRequiresSerialAPICalls bool `mapstructure:"provider-requires-serial-api-calls"` } diff --git a/pkg/ingress/controller/controller.go b/pkg/ingress/controller/controller.go index f212a9a3..c81cf28c 100644 --- a/pkg/ingress/controller/controller.go +++ b/pkg/ingress/controller/controller.go @@ -90,7 +90,7 @@ const ( // https://github.com/kubernetes/cloud-provider/blob/25867882d509131a6fdeaf812ceebfd0f19015dd/controllers/service/controller.go#L673 LabelNodeExcludeLB = "node.kubernetes.io/exclude-from-external-load-balancers" - // DepcreatedLabelNodeRoleMaster specifies that a node is a master + // DeprecatedLabelNodeRoleMaster specifies that a node is a master // It's copied over to kubeadm until it's merged in core: https://github.com/kubernetes/kubernetes/pull/39112 // Deprecated in favor of LabelNodeExcludeLB DeprecatedLabelNodeRoleMaster = "node-role.kubernetes.io/master" @@ -108,6 +108,26 @@ const ( // IngressControllerTag is added to the related resources. IngressControllerTag = "octavia.ingress.kubernetes.io" + // IngressAnnotationTimeoutClientData is the timeout for frontend client inactivity. 
+ // If not set, this value defaults to the Octavia configuration key `timeout_client_data`. + // Refer to https://docs.openstack.org/octavia/latest/configuration/configref.html#haproxy_amphora.timeout_client_data + IngressAnnotationTimeoutClientData = "octavia.ingress.kubernetes.io/timeout-client-data" + + // IngressAnnotationTimeoutMemberData is the timeout for backend member inactivity. + // If not set, this value defaults to the Octavia configuration key `timeout_member_data`. + // Refer to https://docs.openstack.org/octavia/latest/configuration/configref.html#haproxy_amphora.timeout_member_data + IngressAnnotationTimeoutMemberData = "octavia.ingress.kubernetes.io/timeout-member-data" + + // IngressAnnotationTimeoutMemberConnect is the timeout for backend member connection. + // If not set, this value defaults to the Octavia configuration key `timeout_member_connect`. + // Refer to https://docs.openstack.org/octavia/latest/configuration/configref.html#haproxy_amphora.timeout_member_connect + IngressAnnotationTimeoutMemberConnect = "octavia.ingress.kubernetes.io/timeout-member-connect" + + // IngressAnnotationTimeoutTCPInspect is the time to wait for TCP packets for content inspection. + // If not set, this value defaults to the Octavia configuration key `timeout_tcp_inspect`. + // Refer to https://docs.openstack.org/octavia/latest/configuration/configref.html#haproxy_amphora.timeout_tcp_inspect + IngressAnnotationTimeoutTCPInspect = "octavia.ingress.kubernetes.io/timeout-tcp-inspect" + // IngressSecretCertName is certificate key name defined in the secret data. IngressSecretCertName = "tls.crt" // IngressSecretKeyName is private key name defined in the secret data. @@ -558,16 +578,6 @@ func (c *Controller) deleteIngress(ing *nwv1.Ingress) error { lbName := utils.GetResourceName(ing.Namespace, ing.Name, c.config.ClusterName) logger := log.WithFields(log.Fields{"ingress": key}) - // Delete Barbican secrets - if c.osClient.Barbican != nil && ing.Spec.TLS != nil { - nameFilter := fmt.Sprintf("kube_ingress_%s_%s_%s", c.config.ClusterName, ing.Namespace, ing.Name) - if err := openstackutil.DeleteSecrets(c.osClient.Barbican, nameFilter); err != nil { - return fmt.Errorf("failed to remove Barbican secrets: %v", err) - } - - logger.Info("Barbican secrets deleted") - } - // If load balancer doesn't exist, assume it's already deleted. 
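Taken together, the four annotations above let a single Ingress override Octavia's listener timeouts. A hypothetical manifest, expressed in Go for brevity (the name, namespace and values are made up; the values are milliseconds, matching Octavia's timeout_* settings):

package main

import (
	"fmt"

	nwv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	ing := nwv1.Ingress{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "demo",
			Namespace: "default",
			Annotations: map[string]string{
				"octavia.ingress.kubernetes.io/timeout-client-data":    "60000",
				"octavia.ingress.kubernetes.io/timeout-member-data":    "60000",
				"octavia.ingress.kubernetes.io/timeout-member-connect": "5000",
				"octavia.ingress.kubernetes.io/timeout-tcp-inspect":    "0",
			},
		},
	}
	fmt.Println(ing.Annotations)
}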
loadbalancer, err := openstackutil.GetLoadbalancerByName(c.osClient.Octavia, lbName) if err != nil { @@ -618,11 +628,21 @@ func (c *Controller) deleteIngress(ing *nwv1.Ingress) error { err = openstackutil.DeleteLoadbalancer(c.osClient.Octavia, loadbalancer.ID, true) if err != nil { - logger.WithFields(log.Fields{"lbID": loadbalancer.ID}).Infof("loadbalancer delete failed: %s", err) + logger.WithFields(log.Fields{"lbID": loadbalancer.ID}).Infof("loadbalancer delete failed: %v", err) } else { logger.WithFields(log.Fields{"lbID": loadbalancer.ID}).Info("loadbalancer deleted") } + // Delete Barbican secrets + if c.osClient.Barbican != nil && ing.Spec.TLS != nil { + nameFilter := fmt.Sprintf("kube_ingress_%s_%s_%s", c.config.ClusterName, ing.Namespace, ing.Name) + if err := openstackutil.DeleteSecrets(c.osClient.Barbican, nameFilter); err != nil { + return fmt.Errorf("failed to remove Barbican secrets: %v", err) + } + + logger.Info("Barbican secrets deleted") + } + return err } @@ -728,8 +748,13 @@ func (c *Controller) ensureIngress(ing *nwv1.Ingress) error { // Create listener sourceRanges := getStringFromIngressAnnotation(ing, IngressAnnotationSourceRangesKey, "0.0.0.0/0") + timeoutClientData := maybeGetIntFromIngressAnnotation(ing, IngressAnnotationTimeoutClientData) + timeoutMemberConnect := maybeGetIntFromIngressAnnotation(ing, IngressAnnotationTimeoutMemberConnect) + timeoutMemberData := maybeGetIntFromIngressAnnotation(ing, IngressAnnotationTimeoutMemberData) + timeoutTCPInspect := maybeGetIntFromIngressAnnotation(ing, IngressAnnotationTimeoutTCPInspect) + listenerAllowedCIDRs := strings.Split(sourceRanges, ",") - listener, err := c.osClient.EnsureListener(resName, lb.ID, secretRefs, listenerAllowedCIDRs) + listener, err := c.osClient.EnsureListener(resName, lb.ID, secretRefs, listenerAllowedCIDRs, timeoutClientData, timeoutMemberData, timeoutTCPInspect, timeoutMemberConnect) if err != nil { return err } @@ -877,7 +902,7 @@ func (c *Controller) ensureIngress(ing *nwv1.Ingress) error { } } - // Reconsile octavia resources. + // Reconcile octavia resources. rt := openstack.NewResourceTracker(ingfullName, c.osClient.Octavia, lb.ID, listener.ID, newPools, newPolicies, existingPools, oldPolicies) if err := rt.CreateResources(); err != nil { return err @@ -1017,6 +1042,23 @@ func getStringFromIngressAnnotation(ingress *nwv1.Ingress, annotationKey string, return defaultValue } +// maybeGetIntFromIngressAnnotation searches a given Ingress for a specific annotationKey and either returns the +// annotation's value as an *int, or nil when the annotation is absent or not a valid integer +func maybeGetIntFromIngressAnnotation(ingress *nwv1.Ingress, annotationKey string) *int { + klog.V(4).Infof("maybeGetIntFromIngressAnnotation(%s/%s, %v)", ingress.Namespace, ingress.Name, annotationKey) + if annotationValue, ok := ingress.Annotations[annotationKey]; ok { + klog.V(4).Infof("Found an Ingress annotation for key: %v", annotationKey) + returnValue, err := strconv.Atoi(annotationValue) + if err != nil { + klog.V(4).Infof("Invalid integer found in Ingress annotation: %v = %v", annotationKey, annotationValue) + return nil + } + return &returnValue + } + klog.V(4).Infof("Could not find an Ingress annotation; falling back to the default setting for annotation %v", annotationKey) + return nil +} + // privateKeyFromPEM converts a PEM block into a crypto.PrivateKey. 
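For reference, the contract of maybeGetIntFromIngressAnnotation reduced to a plain map (a sketch, not the controller code): nil means "absent or unparsable", which EnsureListener then treats as "fall back to the Octavia default".

package main

import (
	"fmt"
	"strconv"
)

// maybeInt mirrors maybeGetIntFromIngressAnnotation, so callers can pass the
// pointer straight through to the Octavia listener timeout fields.
func maybeInt(annotations map[string]string, key string) *int {
	v, ok := annotations[key]
	if !ok {
		return nil
	}
	n, err := strconv.Atoi(v)
	if err != nil {
		return nil
	}
	return &n
}

func main() {
	ann := map[string]string{"octavia.ingress.kubernetes.io/timeout-client-data": "60000"}
	if v := maybeInt(ann, "octavia.ingress.kubernetes.io/timeout-client-data"); v != nil {
		fmt.Println(*v) // 60000
	}
	// Absent annotation: nil, so Octavia's own default applies.
	fmt.Println(maybeInt(ann, "octavia.ingress.kubernetes.io/timeout-tcp-inspect") == nil) // true
}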
func privateKeyFromPEM(pemData []byte) (crypto.PrivateKey, error) { var result *pem.Block diff --git a/pkg/ingress/controller/openstack/client.go b/pkg/ingress/controller/openstack/client.go index 24a3100a..518e3226 100644 --- a/pkg/ingress/controller/openstack/client.go +++ b/pkg/ingress/controller/openstack/client.go @@ -73,7 +73,7 @@ func NewOpenStack(cfg config.Config) (*OpenStack, error) { var barbican *gophercloud.ServiceClient barbican, err = openstack.NewKeyManagerV1(provider, epOpts) if err != nil { - log.Warn("Barbican not suppported.") + log.Warn("Barbican not supported.") barbican = nil } diff --git a/pkg/ingress/controller/openstack/octavia.go b/pkg/ingress/controller/openstack/octavia.go index 47242d5c..1705cc36 100644 --- a/pkg/ingress/controller/openstack/octavia.go +++ b/pkg/ingress/controller/openstack/octavia.go @@ -122,7 +122,7 @@ func NewResourceTracker(ingressName string, client *gophercloud.ServiceClient, l logger := log.WithFields(log.Fields{"ingress": ingressName, "lbID": lbID}) - var oldPoolIDs []string + oldPoolIDs := make([]string, 0, len(oldPoolMapping)) for _, poolID := range oldPoolMapping { oldPoolIDs = append(oldPoolIDs, poolID) } @@ -181,7 +181,7 @@ func (rt *ResourceTracker) CreateResources() error { rt.logger.WithFields(log.Fields{"poolName": pool.Name, "poolID": poolID}).Info("pool members updated ") } - var curPoolIDs []string + curPoolIDs := make([]string, 0, len(poolMapping)) for _, id := range poolMapping { curPoolIDs = append(curPoolIDs, id) } @@ -330,7 +330,7 @@ func (os *OpenStack) UpdateLoadBalancerDescription(lbID string, newDescription s } // EnsureListener creates a loadbalancer listener in octavia if it does not exist, wait for the loadbalancer to be ACTIVE. -func (os *OpenStack) EnsureListener(name string, lbID string, secretRefs []string, listenerAllowedCIDRs []string) (*listeners.Listener, error) { +func (os *OpenStack) EnsureListener(name string, lbID string, secretRefs []string, listenerAllowedCIDRs []string, timeoutClientData, timeoutMemberData, timeoutTCPInspect, timeoutMemberConnect *int) (*listeners.Listener, error) { listener, err := openstackutil.GetListenerByName(os.Octavia, name, lbID) if err != nil { if err != cpoerrors.ErrNotFound { @@ -340,10 +340,14 @@ func (os *OpenStack) EnsureListener(name string, lbID string, secretRefs []strin log.WithFields(log.Fields{"lbID": lbID, "listenerName": name}).Info("creating listener") opts := listeners.CreateOpts{ - Name: name, - Protocol: "HTTP", - ProtocolPort: 80, // Ingress Controller only supports http/https for now - LoadbalancerID: lbID, + Name: name, + Protocol: "HTTP", + ProtocolPort: 80, // Ingress Controller only supports http/https for now + LoadbalancerID: lbID, + TimeoutClientData: timeoutClientData, + TimeoutMemberData: timeoutMemberData, + TimeoutMemberConnect: timeoutMemberConnect, + TimeoutTCPInspect: timeoutTCPInspect, } if len(secretRefs) > 0 { opts.DefaultTlsContainerRef = secretRefs[0] @@ -363,7 +367,11 @@ func (os *OpenStack) EnsureListener(name string, lbID string, secretRefs []strin } else { if len(listenerAllowedCIDRs) > 0 && !reflect.DeepEqual(listener.AllowedCIDRs, listenerAllowedCIDRs) { _, err := listeners.Update(os.Octavia, listener.ID, listeners.UpdateOpts{ - AllowedCIDRs: &listenerAllowedCIDRs, + AllowedCIDRs: &listenerAllowedCIDRs, + TimeoutClientData: timeoutClientData, + TimeoutMemberData: timeoutMemberData, + TimeoutMemberConnect: timeoutMemberConnect, + TimeoutTCPInspect: timeoutTCPInspect, }).Extract() if err != nil { return nil, fmt.Errorf("failed to 
update listener allowed CIDRs: %v", err) @@ -449,6 +457,22 @@ func (os *OpenStack) EnsurePoolMembers(deleted bool, poolName string, lbID strin return nil, fmt.Errorf("error waiting for loadbalancer %s to be active: %v", lbID, err) } + if os.config.Octavia.ProviderRequiresSerialAPICalls { + logger.Info("updating pool members using serial API calls") + // Serially update pool members + err = openstackutil.SeriallyReconcilePoolMembers(os.Octavia, pool, *nodePort, lbID, nodes) + if err != nil { + return nil, fmt.Errorf("error reconciling pool members for pool %s: %v", pool.ID, err) + } + _, err = os.waitLoadbalancerActiveProvisioningStatus(lbID) + if err != nil { + return nil, fmt.Errorf("error waiting for loadbalancer %s to be active: %v", lbID, err) + } + logger.Info("pool members updated") + + return &pool.ID, nil + } + // Batch update pool members var members []pools.BatchUpdateMemberOpts for _, node := range nodes { diff --git a/pkg/kms/client/client.go b/pkg/kms/client/client.go index e469d137..e04c8897 100644 --- a/pkg/kms/client/client.go +++ b/pkg/kms/client/client.go @@ -1,54 +1,53 @@ package main import ( - "fmt" "log" - "os" "golang.org/x/net/context" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" - pb "k8s.io/kms/apis/v1beta1" + pb "k8s.io/kms/apis/v2" ) //This client is for test purpose only, Kubernetes api server will call to kms plugin grpc server func main() { - connection, err := grpc.Dial("unix:///var/lib/kms/kms.sock", grpc.WithTransportCredentials(insecure.NewCredentials())) defer func() { _ = connection.Close() }() if err != nil { - fmt.Printf("\nConnection to KMS plugin failed, error: %v", err) + log.Fatalf("Connection to KMS plugin failed, error: %v", err) } kmsClient := pb.NewKeyManagementServiceClient(connection) - request := &pb.VersionRequest{Version: "v1beta1"} - _, err = kmsClient.Version(context.TODO(), request) - + request := &pb.StatusRequest{} + status, err := kmsClient.Status(context.TODO(), request) if err != nil { - fmt.Printf("\nError in getting version from KMS Plugin: %v", err) + log.Fatalf("Error in getting version from KMS Plugin: %v", err) + } + + if status.Version != "v2" { + log.Fatalf("Unsupported KMS Plugin version: %s", status.Version) } + log.Printf("KMS plugin version: %s", status.Version) + secretBytes := []byte("mypassword") //Encryption Request to KMS Plugin encRequest := &pb.EncryptRequest{ - Version: "v1beta1", - Plain: secretBytes} + Plaintext: secretBytes, + } encResponse, err := kmsClient.Encrypt(context.TODO(), encRequest) - if err != nil { - fmt.Printf("\nEncrypt Request Failed: %v", err) - os.Exit(1) + log.Fatalf("Encrypt Request Failed: %v", err) } - cipher := string(encResponse.Cipher) - fmt.Println("cipher:", cipher) + cipher := string(encResponse.Ciphertext) + log.Printf("cipher: %s", cipher) //Decryption Request to KMS plugin decRequest := &pb.DecryptRequest{ - Version: "v1beta1", - Cipher: encResponse.Cipher, + Ciphertext: encResponse.Ciphertext, } decResponse, err := kmsClient.Decrypt(context.TODO(), decRequest) @@ -56,5 +55,5 @@ func main() { log.Fatalf("Unable to decrypt response: %v", err) } - fmt.Printf("\n\ndecryption response %v", decResponse) + log.Printf("Decryption response: %v", decResponse) } diff --git a/pkg/kms/encryption/aescbc/aescbc_test.go b/pkg/kms/encryption/aescbc/aescbc_test.go index a64b7c35..a9014f3a 100644 --- a/pkg/kms/encryption/aescbc/aescbc_test.go +++ b/pkg/kms/encryption/aescbc/aescbc_test.go @@ -9,7 +9,7 @@ import ( var key []byte func init() { - // genereate key for 
encrypt decrypt operation + // generate key for encrypt decrypt operation genKey() } diff --git a/pkg/kms/server/server.go b/pkg/kms/server/server.go index 7d524ed3..f67ef0b4 100644 --- a/pkg/kms/server/server.go +++ b/pkg/kms/server/server.go @@ -12,14 +12,13 @@ import ( "k8s.io/cloud-provider-openstack/pkg/kms/barbican" "k8s.io/cloud-provider-openstack/pkg/kms/encryption/aescbc" "k8s.io/klog/v2" - pb "k8s.io/kms/apis/v1beta1" + pb "k8s.io/kms/apis/v2" ) const ( netProtocol = "unix" - version = "v1beta1" - runtimename = "barbican" - runtimeversion = "0.0.1" + version = "v2" + runtimeversion = "0.0.2" ) type BarbicanService interface { @@ -100,13 +99,13 @@ func Run(configFilePath string, socketpath string, sigchan <-chan os.Signal) (er } -// Version returns KMS service version +// Status returns the version, health status and current key ID of the KMS service -func (s *KMSserver) Version(ctx context.Context, req *pb.VersionRequest) (*pb.VersionResponse, error) { +func (s *KMSserver) Status(ctx context.Context, req *pb.StatusRequest) (*pb.StatusResponse, error) { klog.V(4).Infof("Version Information Requested by Kubernetes api server") - res := &pb.VersionResponse{ - Version: version, - RuntimeName: runtimename, - RuntimeVersion: runtimeversion, + res := &pb.StatusResponse{ + Version: version, + Healthz: "ok", + KeyId: s.cfg.KeyManager.KeyID, } return res, nil @@ -116,19 +115,20 @@ func (s *KMSserver) Version(ctx context.Context, req *pb.VersionRequest) (*pb.Ve func (s *KMSserver) Decrypt(ctx context.Context, req *pb.DecryptRequest) (*pb.DecryptResponse, error) { klog.V(4).Infof("Decrypt Request by Kubernetes api server") + // TODO: consider using req.KeyId key, err := s.barbican.GetSecret(s.cfg.KeyManager.KeyID) if err != nil { klog.V(4).Infof("Failed to get key %v: ", err) return nil, err } - plain, err := aescbc.Decrypt(req.Cipher, key) + plain, err := aescbc.Decrypt(req.Ciphertext, key) if err != nil { klog.V(4).Infof("Failed to decrypt data %v: ", err) return nil, err } - return &pb.DecryptResponse{Plain: plain}, nil + return &pb.DecryptResponse{Plaintext: plain}, nil } // Encrypt encrypts DEK @@ -142,11 +142,11 @@ func (s *KMSserver) Encrypt(ctx context.Context, req *pb.EncryptRequest) (*pb.En return nil, err } - cipher, err := aescbc.Encrypt(req.Plain, key) + cipher, err := aescbc.Encrypt(req.Plaintext, key) if err != nil { klog.V(4).Infof("Failed to encrypt data %v: ", err) return nil, err } - return &pb.EncryptResponse{Cipher: cipher}, nil + return &pb.EncryptResponse{Ciphertext: cipher}, nil } diff --git a/pkg/kms/server/server_test.go b/pkg/kms/server/server_test.go index 4b73959b..f1f2fdff 100644 --- a/pkg/kms/server/server_test.go +++ b/pkg/kms/server/server_test.go @@ -6,7 +6,7 @@ import ( "golang.org/x/net/context" "k8s.io/cloud-provider-openstack/pkg/kms/barbican" - pb "k8s.io/kms/apis/v1beta1" + pb "k8s.io/kms/apis/v2" ) var s = new(KMSserver) @@ -14,9 +14,9 @@ var s = new(KMSserver) func TestInitConfig(t *testing.T) { } -func TestVersion(t *testing.T) { - req := &pb.VersionRequest{Version: "v1beta1"} - _, err := s.Version(context.TODO(), req) +func TestStatus(t *testing.T) { + req := &pb.StatusRequest{} + _, err := s.Status(context.TODO(), req) if err != nil { t.FailNow() } @@ -25,15 +25,15 @@ func TestVersion(t *testing.T) { func TestEncryptDecrypt(t *testing.T) { s.barbican = &barbican.FakeBarbican{} fakeData := []byte("fakedata") - encreq := &pb.EncryptRequest{Version: "v1beta1", Plain: fakeData} + encreq := &pb.EncryptRequest{Plaintext: fakeData} encresp, err := s.Encrypt(context.TODO(), encreq) if err != nil { t.Log(err) t.FailNow() } - decreq := 
&pb.DecryptRequest{Version: "v1beta1", Cipher: encresp.Cipher} + decreq := &pb.DecryptRequest{Ciphertext: encresp.Ciphertext} decresp, err := s.Decrypt(context.TODO(), decreq) - if err != nil || !bytes.Equal(decresp.Plain, fakeData) { + if err != nil || !bytes.Equal(decresp.Plaintext, fakeData) { t.Log(err) t.FailNow() } diff --git a/pkg/openstack/events.go b/pkg/openstack/events.go new file mode 100644 index 00000000..200b613d --- /dev/null +++ b/pkg/openstack/events.go @@ -0,0 +1,24 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package openstack + +const ( + eventLBForceInternal = "LoadBalancerForcedInternal" + eventLBExternalNetworkSearchFailed = "LoadBalancerExternalNetworkSearchFailed" + eventLBSourceRangesIgnored = "LoadBalancerSourceRangesIgnored" + eventLBAZIgnored = "LoadBalancerAvailabilityZonesIgnored" +) diff --git a/pkg/openstack/instances.go b/pkg/openstack/instances.go index fbc1c700..60c77454 100644 --- a/pkg/openstack/instances.go +++ b/pkg/openstack/instances.go @@ -27,9 +27,10 @@ import ( "strings" "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/attachinterfaces" "github.com/gophercloud/gophercloud/openstack/compute/v2/flavors" "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" + "github.com/gophercloud/gophercloud/openstack/networking/v2/networks" + neutronports "github.com/gophercloud/gophercloud/openstack/networking/v2/ports" "github.com/gophercloud/gophercloud/pagination" "github.com/mitchellh/mapstructure" v1 "k8s.io/api/core/v1" @@ -48,6 +49,7 @@ import ( // Instances encapsulates an implementation of Instances for OpenStack. 
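The same v2 round trip that TestEncryptDecrypt exercises above, as a stand-alone sketch against any KMS v2 client (roundTrip is a hypothetical helper; connection setup is elided, see pkg/kms/client/client.go above):

package kmscheck

import (
	"bytes"
	"context"
	"fmt"

	pb "k8s.io/kms/apis/v2"
)

// roundTrip encrypts plaintext and checks that decryption restores it.
func roundTrip(ctx context.Context, c pb.KeyManagementServiceClient, plaintext []byte) error {
	enc, err := c.Encrypt(ctx, &pb.EncryptRequest{Plaintext: plaintext})
	if err != nil {
		return fmt.Errorf("encrypt: %v", err)
	}
	// v2 hands the key ID back alongside the ciphertext; honoring it on
	// Decrypt is what the "consider using req.KeyId" TODO in server.go is about.
	dec, err := c.Decrypt(ctx, &pb.DecryptRequest{Ciphertext: enc.Ciphertext, KeyId: enc.KeyId})
	if err != nil {
		return fmt.Errorf("decrypt: %v", err)
	}
	if !bytes.Equal(dec.Plaintext, plaintext) {
		return fmt.Errorf("round trip mismatch")
	}
	return nil
}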
type Instances struct { compute *gophercloud.ServiceClient + network *gophercloud.ServiceClient region string regionProviderID bool opts metadata.Opts @@ -148,6 +150,12 @@ func (os *OpenStack) instances() (*Instances, bool) { return nil, false } + network, err := client.NewNetworkV2(os.provider, os.epOpts) + if err != nil { + klog.Errorf("unable to access network v2 API : %v", err) + return nil, false + } + regionalProviderID := false if isRegionalProviderID := sysos.Getenv(RegionalProviderIDEnv); isRegionalProviderID == "true" { regionalProviderID = true @@ -155,6 +163,7 @@ func (os *OpenStack) instances() (*Instances, bool) { return &Instances{ compute: compute, + network: network, region: os.epOpts.Region, regionProviderID: regionalProviderID, opts: os.metadataOpts, @@ -226,12 +235,12 @@ func (i *Instances) NodeAddressesByProviderID(ctx context.Context, providerID st return []v1.NodeAddress{}, err } - interfaces, err := getAttachedInterfacesByID(i.compute, server.ID) + ports, err := getAttachedPorts(i.network, server.ID) if err != nil { return []v1.NodeAddress{}, err } - addresses, err := nodeAddresses(server, interfaces, i.networkingOpts) + addresses, err := nodeAddresses(server, ports, i.network, i.networkingOpts) if err != nil { return []v1.NodeAddress{}, err } @@ -332,11 +341,11 @@ func (i *Instances) InstanceMetadata(ctx context.Context, node *v1.Node) (*cloud return nil, err } - interfaces, err := getAttachedInterfacesByID(i.compute, srv.ID) + ports, err := getAttachedPorts(i.network, srv.ID) if err != nil { return nil, err } - addresses, err := nodeAddresses(srv, interfaces, i.networkingOpts) + addresses, err := nodeAddresses(srv, ports, i.network, i.networkingOpts) if err != nil { return nil, err } @@ -564,13 +573,13 @@ func getServerByName(client *gophercloud.ServiceClient, name types.NodeName) (*S // * access IPs // * metadata hostname // * server object Addresses (floating type) -func nodeAddresses(srv *servers.Server, interfaces []attachinterfaces.Interface, networkingOpts NetworkingOpts) ([]v1.NodeAddress, error) { +func nodeAddresses(srv *servers.Server, ports []PortWithTrunkDetails, client *gophercloud.ServiceClient, networkingOpts NetworkingOpts) ([]v1.NodeAddress, error) { addrs := []v1.NodeAddress{} // parse private IP addresses first in an ordered manner - for _, iface := range interfaces { - for _, fixedIP := range iface.FixedIPs { - if iface.PortState == "ACTIVE" { + for _, port := range ports { + for _, fixedIP := range port.FixedIPs { + if port.Status == "ACTIVE" { isIPv6 := net.ParseIP(fixedIP.IPAddress).To4() == nil if !(isIPv6 && networkingOpts.IPv6SupportDisabled) { AddToNodeAddresses(&addrs, @@ -624,7 +633,40 @@ func nodeAddresses(srv *servers.Server, interfaces []attachinterfaces.Interface, return nil, err } - var networks []string + // Add the addresses assigned on subports via trunk + // This exposes the vlan networks to which subports are attached + for _, port := range ports { + for _, subport := range port.TrunkDetails.SubPorts { + p, err := neutronports.Get(client, subport.PortID).Extract() + if err != nil { + klog.Errorf("Failed to get subport %s details: %v", subport.PortID, err) + continue + } + n, err := networks.Get(client, p.NetworkID).Extract() + if err != nil { + klog.Errorf("Failed to get subport %s network details: %v", subport.PortID, err) + continue + } + for _, fixedIP := range p.FixedIPs { + klog.V(5).Infof("Node '%s': found subport '%s' with address '%s/%s'", srv.Name, p.Name, n.Name, fixedIP.IPAddress) + isIPv6 := net.ParseIP(fixedIP.IPAddress).To4() == nil + if !(isIPv6 && networkingOpts.IPv6SupportDisabled) { + addr := Address{IPType: "fixed", Addr: fixedIP.IPAddress} + subportAddresses := map[string][]Address{n.Name: {addr}} + srvAddresses, ok := addresses[n.Name] + if !ok { + addresses[n.Name] = subportAddresses[n.Name] + } else { + // handle the corner case where the same network + // is attached to the node both directly and via trunk + addresses[n.Name] = append(srvAddresses, subportAddresses[n.Name]...) + } + } + } + } + } + + networks := make([]string, 0, len(addresses)) for k := range addresses { networks = append(networks, k) } @@ -674,6 +716,7 @@ func nodeAddresses(srv *servers.Server, interfaces []attachinterfaces.Interface, sortNodeAddresses(addrs, networkingOpts.AddressSortOrder) } + klog.V(5).Infof("Node '%s' returns addresses '%v'", srv.Name, addrs) return addrs, nil } @@ -683,31 +726,30 @@ func getAddressesByName(client *gophercloud.ServiceClient, name types.NodeName, return nil, err } - interfaces, err := getAttachedInterfacesByID(client, srv.ID) + ports, err := getAttachedPorts(client, srv.ID) if err != nil { return nil, err } - return nodeAddresses(&srv.Server, interfaces, networkingOpts) + return nodeAddresses(&srv.Server, ports, client, networkingOpts) } -// getAttachedInterfacesByID returns the node interfaces of the specified instance. -func getAttachedInterfacesByID(client *gophercloud.ServiceClient, serviceID string) ([]attachinterfaces.Interface, error) { - var interfaces []attachinterfaces.Interface +// getAttachedPorts returns a list of ports attached to a server. +func getAttachedPorts(client *gophercloud.ServiceClient, serverID string) ([]PortWithTrunkDetails, error) { + listOpts := neutronports.ListOpts{ + DeviceID: serverID, + } - mc := metrics.NewMetricContext("server_os_interface", "list") - pager := attachinterfaces.List(client, serviceID) - err := pager.EachPage(func(page pagination.Page) (bool, error) { - s, err := attachinterfaces.ExtractInterfaces(page) - if err != nil { - return false, err - } - interfaces = append(interfaces, s...) 
- return true, nil - }) - if mc.ObserveRequest(err) != nil { - return interfaces, err + var ports []PortWithTrunkDetails + + allPages, err := neutronports.List(client, listOpts).AllPages() + if err != nil { + return ports, err + } + err = neutronports.ExtractPortsInto(allPages, &ports) + if err != nil { + return ports, err } - return interfaces, nil + return ports, nil } diff --git a/pkg/openstack/instances_test.go b/pkg/openstack/instances_test.go index eb3955c8..0d4b2e1c 100644 --- a/pkg/openstack/instances_test.go +++ b/pkg/openstack/instances_test.go @@ -22,6 +22,7 @@ import ( "reflect" "testing" + "github.com/stretchr/testify/assert" v1 "k8s.io/api/core/v1" ) @@ -189,3 +190,74 @@ func TestSortNodeAddressesWithMultipleCIDRs(t *testing.T) { executeSortNodeAddressesTest(t, addressSortOrder, want) } + +func Test_instanceIDFromProviderID(t *testing.T) { + type args struct { + providerID string + } + tests := []struct { + name string + args args + wantInstanceID string + wantRegion string + wantErr bool + }{ + { + name: "it parses region & instanceID correctly from providerID", + args: args{ + providerID: "openstack://us-east-1/testInstanceID", + }, + wantInstanceID: "testInstanceID", + wantRegion: "us-east-1", + wantErr: false, + }, + { + name: "it parses instanceID if providerID has empty protocol & no region", + args: args{ + providerID: "/testInstanceID", + }, + wantInstanceID: "testInstanceID", + wantRegion: "", + wantErr: false, + }, + { + name: "it returns error in case of invalid providerID format with no region", + args: args{ + providerID: "openstack://us-east-1-testInstanceID", + }, + wantInstanceID: "", + wantRegion: "", + wantErr: true, + }, + { + name: "it parses correct instanceID in case the region name is the empty string", + args: args{ + providerID: "openstack:///testInstanceID", + }, + wantInstanceID: "testInstanceID", + wantRegion: "", + wantErr: false, + }, + { + name: "it appends openstack:// in case of missing protocol in providerID", + args: args{ + providerID: "us-east-1/testInstanceID", + }, + wantInstanceID: "testInstanceID", + wantRegion: "us-east-1", + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotInstanceID, gotRegion, err := instanceIDFromProviderID(tt.args.providerID) + assert.Equal(t, tt.wantInstanceID, gotInstanceID) + assert.Equal(t, tt.wantRegion, gotRegion) + if tt.wantErr == true { + assert.ErrorContains(t, err, "didn't match expected format") + } else { + assert.NoError(t, err) + } + }) + } +} diff --git a/pkg/openstack/instancesv2.go b/pkg/openstack/instancesv2.go index e1f2c279..3e6f770f 100644 --- a/pkg/openstack/instancesv2.go +++ b/pkg/openstack/instancesv2.go @@ -34,6 +34,7 @@ import ( // InstancesV2 encapsulates an implementation of InstancesV2 for OpenStack. 
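getAttachedPorts above relies on PortWithTrunkDetails, whose definition falls outside this excerpt; a plausible shape (an assumption, inferred from the port.TrunkDetails.SubPorts accesses in nodeAddresses) is a neutron port with gophercloud's trunk_details extension mixed in:

package sketch

import (
	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/trunk_details"
	neutronports "github.com/gophercloud/gophercloud/openstack/networking/v2/ports"
)

// PortWithTrunkDetails: embedding the extension lets ExtractPortsInto fill
// both the core port fields and the trunk_details attribute in one pass.
type PortWithTrunkDetails struct {
	neutronports.Port
	trunk_details.TrunkDetailsExt
}

// listServerPorts mirrors getAttachedPorts: a single ports List call
// filtered by DeviceID, decoded into the extended struct.
func listServerPorts(client *gophercloud.ServiceClient, serverID string) ([]PortWithTrunkDetails, error) {
	var ports []PortWithTrunkDetails
	allPages, err := neutronports.List(client, neutronports.ListOpts{DeviceID: serverID}).AllPages()
	if err != nil {
		return nil, err
	}
	if err := neutronports.ExtractPortsInto(allPages, &ports); err != nil {
		return nil, err
	}
	return ports, nil
}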
type InstancesV2 struct { compute *gophercloud.ServiceClient + network *gophercloud.ServiceClient region string regionProviderID bool networkingOpts NetworkingOpts @@ -56,6 +57,12 @@ func (os *OpenStack) instancesv2() (*InstancesV2, bool) { return nil, false } + network, err := client.NewNetworkV2(os.provider, os.epOpts) + if err != nil { + klog.Errorf("unable to access network v2 API : %v", err) + return nil, false + } + regionalProviderID := false if isRegionalProviderID := sysos.Getenv(RegionalProviderIDEnv); isRegionalProviderID == "true" { regionalProviderID = true @@ -63,6 +70,7 @@ func (os *OpenStack) instancesv2() (*InstancesV2, bool) { return &InstancesV2{ compute: compute, + network: network, region: os.epOpts.Region, regionProviderID: regionalProviderID, networkingOpts: os.networkingOpts, @@ -115,12 +123,12 @@ func (i *InstancesV2) InstanceMetadata(ctx context.Context, node *v1.Node) (*clo return nil, err } - interfaces, err := getAttachedInterfacesByID(i.compute, server.ID) + ports, err := getAttachedPorts(i.network, server.ID) if err != nil { return nil, err } - addresses, err := nodeAddresses(&server.Server, interfaces, i.networkingOpts) + addresses, err := nodeAddresses(&server.Server, ports, i.network, i.networkingOpts) if err != nil { return nil, err } diff --git a/pkg/openstack/loadbalancer.go b/pkg/openstack/loadbalancer.go index 97218212..ca411fca 100644 --- a/pkg/openstack/loadbalancer.go +++ b/pkg/openstack/loadbalancer.go @@ -20,31 +20,21 @@ import ( "context" "encoding/json" "fmt" - "reflect" "regexp" "strconv" "strings" "github.com/gophercloud/gophercloud" "github.com/gophercloud/gophercloud/openstack/keymanager/v1/containers" + "github.com/gophercloud/gophercloud/openstack/keymanager/v1/secrets" "github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/listeners" "github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/loadbalancers" v2monitors "github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/monitors" v2pools "github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/pools" - neutrontags "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/attributestags" "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules" - neutronports "github.com/gophercloud/gophercloud/openstack/networking/v2/ports" "github.com/gophercloud/gophercloud/openstack/networking/v2/subnets" - "github.com/gophercloud/gophercloud/pagination" - secgroups "github.com/gophercloud/utils/openstack/networking/v2/extensions/security/groups" - "gopkg.in/godo.v2/glob" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" - utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/client-go/kubernetes" cloudprovider "k8s.io/cloud-provider" "k8s.io/klog/v2" netutils "k8s.io/utils/net" @@ -88,18 +78,25 @@ const ( ServiceAnnotationLoadBalancerAvailabilityZone = "loadbalancer.openstack.org/availability-zone" // ServiceAnnotationLoadBalancerEnableHealthMonitor defines whether to create health monitor for the load balancer // pool, if not specified, use 'create-monitor' config. The health monitor can be created or deleted dynamically. 
- ServiceAnnotationLoadBalancerEnableHealthMonitor = "loadbalancer.openstack.org/enable-health-monitor" - ServiceAnnotationLoadBalancerHealthMonitorDelay = "loadbalancer.openstack.org/health-monitor-delay" - ServiceAnnotationLoadBalancerHealthMonitorTimeout = "loadbalancer.openstack.org/health-monitor-timeout" - ServiceAnnotationLoadBalancerHealthMonitorMaxRetries = "loadbalancer.openstack.org/health-monitor-max-retries" - ServiceAnnotationLoadBalancerLoadbalancerHostname = "loadbalancer.openstack.org/hostname" - ServiceAnnotationLoadBalancerAddress = "loadbalancer.openstack.org/load-balancer-address" + ServiceAnnotationLoadBalancerEnableHealthMonitor = "loadbalancer.openstack.org/enable-health-monitor" + ServiceAnnotationLoadBalancerHealthMonitorDelay = "loadbalancer.openstack.org/health-monitor-delay" + ServiceAnnotationLoadBalancerHealthMonitorTimeout = "loadbalancer.openstack.org/health-monitor-timeout" + ServiceAnnotationLoadBalancerHealthMonitorMaxRetries = "loadbalancer.openstack.org/health-monitor-max-retries" + ServiceAnnotationLoadBalancerHealthMonitorMaxRetriesDown = "loadbalancer.openstack.org/health-monitor-max-retries-down" + ServiceAnnotationLoadBalancerLoadbalancerHostname = "loadbalancer.openstack.org/hostname" + ServiceAnnotationLoadBalancerAddress = "loadbalancer.openstack.org/load-balancer-address" // revive:disable:var-naming ServiceAnnotationTlsContainerRef = "loadbalancer.openstack.org/default-tls-container-ref" // revive:enable:var-naming // See https://nip.io defaultProxyHostnameSuffix = "nip.io" ServiceAnnotationLoadBalancerID = "loadbalancer.openstack.org/load-balancer-id" + + // Octavia resources name formats + lbFormat = "%s%s_%s_%s" + listenerFormat = "listener_%d_%s" + poolFormat = "pool_%d_%s" + monitorFormat = "monitor_%d_%s" ) // LbaasV2 is a LoadBalancer implementation based on Octavia @@ -107,251 +104,38 @@ type LbaasV2 struct { LoadBalancer } -// floatingSubnetSpec contains the specification of the public subnet to use for -// a public network. If given it may either describe the subnet id or -// a subnet name pattern for the subnet to use. If a pattern is given -// the first subnet matching the name pattern with an allocatable floating ip -// will be selected. -type floatingSubnetSpec struct { - subnetID string - subnet string - subnetTags string -} - -// TweakSubNetListOpsFunction is used to modify List Options for subnets -type TweakSubNetListOpsFunction func(*subnets.ListOpts) - -// matcher matches a subnet -type matcher func(subnet *subnets.Subnet) bool - -type servicePatcher struct { - kclient kubernetes.Interface - base *corev1.Service - updated *corev1.Service -} - var _ cloudprovider.LoadBalancer = &LbaasV2{} -// negate returns a negated matches for a given one -func negate(f matcher) matcher { return func(s *subnets.Subnet) bool { return !f(s) } } - -func andMatcher(a, b matcher) matcher { - if a == nil { - return b - } - if b == nil { - return a - } - return func(s *subnets.Subnet) bool { - return a(s) && b(s) - } -} - -// reexpNameMatcher creates a subnet matcher matching a subnet by name for a given regexp. 
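The new *Format constants above centralize Octavia resource naming, and the call sites later in this diff pair them with cpoutil.Sprintf255, which replaces the removed cutString helper. A sketch of the contract assumed for that helper (the real one lives in the repo's util package and may differ):

```go
package example

import "fmt"

// sprintf255 sketches what cpoutil.Sprintf255 is assumed to do: format the
// resource name, then cap it at 255 bytes, the usual OpenStack name-length
// limit that cutString used to enforce.
func sprintf255(format string, args ...interface{}) string {
	name := fmt.Sprintf(format, args...)
	if len(name) > 255 {
		name = name[:255]
	}
	return name
}

// Example: sprintf255("listener_%d_%s", 0, lbName) yields "listener_0_<lbName>",
// truncated only when long cluster/namespace/service names would overflow.
```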
-func regexpNameMatcher(r *regexp.Regexp) matcher { - return func(s *subnets.Subnet) bool { return r.FindString(s.Name) == s.Name } -} - -// subnetNameMatcher creates a subnet matcher matching a subnet by name for a given glob -// or regexp -func subnetNameMatcher(pat string) (matcher, error) { - // try to create floating IP in matching subnets - var match matcher - not := false - if strings.HasPrefix(pat, "!") { - not = true - pat = pat[1:] - } - if strings.HasPrefix(pat, "~") { - rexp, err := regexp.Compile(pat[1:]) - if err != nil { - return nil, fmt.Errorf("invalid subnet regexp pattern %q: %s", pat[1:], err) - } - match = regexpNameMatcher(rexp) - } else { - match = regexpNameMatcher(glob.Globexp(pat)) - } - if not { - match = negate(match) - } - return match, nil -} - -// subnetTagMatcher matches a subnet by a given tag spec -func subnetTagMatcher(tags string) matcher { - // try to create floating IP in matching subnets - var match matcher - - list, not, all := tagList(tags) - - match = func(s *subnets.Subnet) bool { - for _, tag := range list { - found := false - for _, t := range s.Tags { - if t == tag { - found = true - break - } - } - if found { - if !all { - return !not - } - } else { - if all { - return not - } - } - } - return not != all - } - return match -} - -func (s *floatingSubnetSpec) Configured() bool { - if s != nil && (s.subnetID != "" || s.MatcherConfigured()) { - return true - } - return false -} - -func (s *floatingSubnetSpec) ListSubnetsForNetwork(lbaas *LbaasV2, networkID string) ([]subnets.Subnet, error) { - matcher, err := s.Matcher(false) - if err != nil { - return nil, err - } - list, err := lbaas.listSubnetsForNetwork(networkID, s.tweakListOpts) - if err != nil { - return nil, err - } - if matcher == nil { - return list, nil - } - - // filter subnets according to spec - var foundSubnets []subnets.Subnet - for _, subnet := range list { - if matcher(&subnet) { - foundSubnets = append(foundSubnets, subnet) - } - } - return foundSubnets, nil -} - -// tweakListOpts can be used to optimize a subnet list query for the -// actually described subnet filter -func (s *floatingSubnetSpec) tweakListOpts(opts *subnets.ListOpts) { - if s.subnetTags != "" { - list, not, all := tagList(s.subnetTags) - tags := strings.Join(list, ",") - if all { - if not { - opts.NotTagsAny = tags // at least one tag must be missing - } else { - opts.Tags = tags // all tags must be present - } - } else { - if not { - opts.NotTags = tags // none of the tags are present - } else { - opts.TagsAny = tags // at least one tag is present - } - } - } -} - -func (s *floatingSubnetSpec) MatcherConfigured() bool { - if s != nil && s.subnetID == "" && (s.subnet != "" || s.subnetTags != "") { - return true - } - return false -} - -func addField(s, name, value string) string { - if value == "" { - return s - } - if s == "" { - s += ", " - } - return fmt.Sprintf("%s%s: %q", s, name, value) -} - -func (s *floatingSubnetSpec) String() string { - if s == nil || (s.subnetID == "" && s.subnet == "" && s.subnetTags == "") { - return "" - } - pat := addField("", "subnetID", s.subnetID) - pat = addField(pat, "pattern", s.subnet) - return addField(pat, "tags", s.subnetTags) -} - -func (s *floatingSubnetSpec) Matcher(tag bool) (matcher, error) { - if !s.MatcherConfigured() { - return nil, nil - } - var match matcher - var err error - if s.subnet != "" { - match, err = subnetNameMatcher(s.subnet) - if err != nil { - return nil, err - } - } - if tag && s.subnetTags != "" { - match = andMatcher(match, 
subnetTagMatcher(s.subnetTags)) - } - if match == nil { - match = func(s *subnets.Subnet) bool { return true } - } - return match, nil -} - -func tagList(tags string) ([]string, bool, bool) { - not := strings.HasPrefix(tags, "!") - if not { - tags = tags[1:] - } - all := strings.HasPrefix(tags, "&") - if all { - tags = tags[1:] - } - list := strings.Split(tags, ",") - for i := range list { - list[i] = strings.TrimSpace(list[i]) - } - return list, not, all -} - // serviceConfig contains configurations for creating a Service. type serviceConfig struct { - internal bool - connLimit int - configClassName string - lbNetworkID string - lbSubnetID string - lbMemberSubnetID string - lbPublicNetworkID string - lbPublicSubnetSpec *floatingSubnetSpec - keepClientIP bool - enableProxyProtocol bool - timeoutClientData int - timeoutMemberConnect int - timeoutMemberData int - timeoutTCPInspect int - allowedCIDR []string - enableMonitor bool - flavorID string - availabilityZone string - tlsContainerRef string - lbID string - lbName string - supportLBTags bool - healthCheckNodePort int - healthMonitorDelay int - healthMonitorTimeout int - healthMonitorMaxRetries int - preferredIPFamily corev1.IPFamily // preferred (the first) IP family indicated in service's `spec.ipFamilies` + internal bool + connLimit int + configClassName string + lbNetworkID string + lbSubnetID string + lbMemberSubnetID string + lbPublicNetworkID string + lbPublicSubnetSpec *floatingSubnetSpec + keepClientIP bool + enableProxyProtocol bool + timeoutClientData int + timeoutMemberConnect int + timeoutMemberData int + timeoutTCPInspect int + allowedCIDR []string + enableMonitor bool + flavorID string + availabilityZone string + tlsContainerRef string + lbID string + lbName string + supportLBTags bool + healthCheckNodePort int + healthMonitorDelay int + healthMonitorTimeout int + healthMonitorMaxRetries int + healthMonitorMaxRetriesDown int + preferredIPFamily corev1.IPFamily // preferred (the first) IP family indicated in service's `spec.ipFamilies` } type listenerKey struct { @@ -413,38 +197,6 @@ func popListener(existingListeners []listeners.Listener, id string) []listeners. return newListeners } -func getSecurityGroupName(service *corev1.Service) string { - securityGroupName := fmt.Sprintf("lb-sg-%s-%s-%s", service.UID, service.Namespace, service.Name) - //OpenStack requires that the name of a security group is shorter than 255 bytes. - if len(securityGroupName) > 255 { - securityGroupName = securityGroupName[:255] - } - - return securityGroupName -} - -func getSecurityGroupRules(client *gophercloud.ServiceClient, opts rules.ListOpts) ([]rules.SecGroupRule, error) { - var securityRules []rules.SecGroupRule - - mc := metrics.NewMetricContext("security_group_rule", "list") - pager := rules.List(client, opts) - - err := pager.EachPage(func(page pagination.Page) (bool, error) { - ruleList, err := rules.ExtractRules(page) - if err != nil { - return false, err - } - securityRules = append(securityRules, ruleList...) 
- return true, nil - }) - - if mc.ObserveRequest(err) != nil { - return nil, err - } - - return securityRules, nil -} - func getListenerProtocol(protocol corev1.Protocol, svcConf *serviceConfig) listeners.Protocol { // Make neutron-lbaas code work if svcConf != nil { @@ -465,7 +217,7 @@ func getListenerProtocol(protocol corev1.Protocol, svcConf *serviceConfig) liste } } -func (lbaas *LbaasV2) createFullyPopulatedOctaviaLoadBalancer(name, clusterName string, service *corev1.Service, nodes []*corev1.Node, svcConf *serviceConfig) (*loadbalancers.LoadBalancer, error) { +func (lbaas *LbaasV2) createOctaviaLoadBalancer(name, clusterName string, service *corev1.Service, nodes []*corev1.Node, svcConf *serviceConfig) (*loadbalancers.LoadBalancer, error) { createOpts := loadbalancers.CreateOpts{ Name: name, Description: fmt.Sprintf("Kubernetes external service %s/%s from cluster %s", service.Namespace, service.Name, clusterName), @@ -511,28 +263,27 @@ func (lbaas *LbaasV2) createFullyPopulatedOctaviaLoadBalancer(name, clusterName createOpts.VipAddress = loadBalancerIP } - for portIndex, port := range service.Spec.Ports { - listenerCreateOpt := lbaas.buildListenerCreateOpt(port, svcConf) - listenerCreateOpt.Name = cutString(fmt.Sprintf("listener_%d_%s", portIndex, name)) - members, newMembers, err := lbaas.buildBatchUpdateMemberOpts(port, nodes, svcConf) - if err != nil { - return nil, err - } - poolCreateOpt := lbaas.buildPoolCreateOpt(string(listenerCreateOpt.Protocol), service, svcConf) - poolCreateOpt.Members = members - // Pool name must be provided to create fully populated loadbalancer - poolCreateOpt.Name = cutString(fmt.Sprintf("pool_%d_%s", portIndex, name)) - var withHealthMonitor string - if svcConf.enableMonitor { - opts := lbaas.buildMonitorCreateOpts(svcConf, port) - opts.Name = cutString(fmt.Sprintf("monitor_%d_%s", port.Port, name)) - poolCreateOpt.Monitor = &opts - withHealthMonitor = " with healthmonitor" - } + if !lbaas.opts.ProviderRequiresSerialAPICalls { + for portIndex, port := range service.Spec.Ports { + listenerCreateOpt := lbaas.buildListenerCreateOpt(port, svcConf, cpoutil.Sprintf255(listenerFormat, portIndex, name)) + members, newMembers, err := lbaas.buildBatchUpdateMemberOpts(port, nodes, svcConf) + if err != nil { + return nil, err + } + poolCreateOpt := lbaas.buildPoolCreateOpt(string(listenerCreateOpt.Protocol), service, svcConf, cpoutil.Sprintf255(poolFormat, portIndex, name)) + poolCreateOpt.Members = members + // Pool name must be provided to create fully populated loadbalancer + var withHealthMonitor string + if svcConf.enableMonitor { + opts := lbaas.buildMonitorCreateOpts(svcConf, port, cpoutil.Sprintf255(monitorFormat, portIndex, name)) + poolCreateOpt.Monitor = &opts + withHealthMonitor = " with healthmonitor" + } - listenerCreateOpt.DefaultPool = &poolCreateOpt - createOpts.Listeners = append(createOpts.Listeners, listenerCreateOpt) - klog.V(2).Infof("Loadbalancer %s: adding pool%s using protocol %s with %d members", name, withHealthMonitor, poolCreateOpt.Protocol, len(newMembers)) + listenerCreateOpt.DefaultPool = &poolCreateOpt + createOpts.Listeners = append(createOpts.Listeners, listenerCreateOpt) + klog.V(2).Infof("Loadbalancer %s: adding pool%s using protocol %s with %d members", name, withHealthMonitor, poolCreateOpt.Protocol, len(newMembers)) + } } mc := metrics.NewMetricContext("loadbalancer", "create") @@ -551,6 +302,14 @@ func (lbaas *LbaasV2) createFullyPopulatedOctaviaLoadBalancer(name, clusterName } if loadbalancer, err = 
openstackutil.WaitActiveAndGetLoadBalancer(lbaas.lb, loadbalancer.ID); err != nil { + if loadbalancer != nil && loadbalancer.ProvisioningStatus == errorStatus { + // If LB landed in ERROR state we should delete it and retry the creation later. + if err = lbaas.deleteLoadBalancer(loadbalancer, service, svcConf, true); err != nil { + return nil, fmt.Errorf("loadbalancer %s is in ERROR state and there was an error when removing it: %v", loadbalancer.ID, err) + } + return nil, fmt.Errorf("loadbalancer %s has gone into ERROR state, please check Octavia for details. Load balancer was "+ + "deleted and its creation will be retried", loadbalancer.ID) + } return nil, err } @@ -596,8 +355,7 @@ func (lbaas *LbaasV2) GetLoadBalancer(ctx context.Context, clusterName string, s // GetLoadBalancerName returns the constructed load balancer name. func (lbaas *LbaasV2) GetLoadBalancerName(_ context.Context, clusterName string, service *corev1.Service) string { - name := fmt.Sprintf("%s%s_%s_%s", servicePrefix, clusterName, service.Namespace, service.Name) - return cutString(name) + return cpoutil.Sprintf255(lbFormat, servicePrefix, clusterName, service.Namespace, service.Name) } // getLoadBalancerLegacyName returns the legacy load balancer name for backward compatibility. @@ -605,15 +363,6 @@ func (lbaas *LbaasV2) getLoadBalancerLegacyName(_ context.Context, _ string, ser return cloudprovider.DefaultLoadBalancerName(service) } -// cutString makes sure the string length doesn't exceed 255, which is usually the maximum string length in OpenStack. -func cutString(original string) string { - ret := original - if len(original) > 255 { - ret = original[:255] - } - return ret -} - // The LB needs to be configured with instance addresses on the same // subnet as the LB (aka opts.SubnetID). Currently, we're just // guessing that the node's InternalIP is the right address. @@ -686,6 +435,7 @@ func getIntFromServiceAnnotation(service *corev1.Service, annotationKey string, } // getBoolFromServiceAnnotation searches a given v1.Service for a specific annotationKey and either returns the annotation's boolean value or a specified defaultSetting +// If the annotation is not found or is not a valid boolean ("true" or "false"), it falls back to the defaultSetting and logs a message accordingly. 
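A short usage sketch for the annotation fallback described by the comment above and implemented in the function below (assumes it runs inside this package; the annotation value is made up):

```go
package openstack // sketch: assumes this file's package and identifiers

import corev1 "k8s.io/api/core/v1"

// "yes" is not a valid boolean, so the configured default (true here) is
// returned and the offending value is logged instead of silently winning.
func exampleEnableMonitor() bool {
	svc := &corev1.Service{}
	svc.Annotations = map[string]string{
		ServiceAnnotationLoadBalancerEnableHealthMonitor: "yes",
	}
	return getBoolFromServiceAnnotation(svc, ServiceAnnotationLoadBalancerEnableHealthMonitor, true) // -> true
}
```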
func getBoolFromServiceAnnotation(service *corev1.Service, annotationKey string, defaultSetting bool) bool {
 	klog.V(4).Infof("getBoolFromServiceAnnotation(%s/%s, %v, %v)", service.Namespace, service.Name, annotationKey, defaultSetting)
 	if annotationValue, ok := service.Annotations[annotationKey]; ok {
@@ -696,7 +446,8 @@ func getBoolFromServiceAnnotation(service *corev1.Service, annotationKey string,
 		case "false":
 			returnValue = false
 		default:
-			returnValue = defaultSetting
+			klog.Infof("Found a non-boolean Service Annotation: %v = %v (falling back to default setting: %v)", annotationKey, annotationValue, defaultSetting)
+			return defaultSetting
 		}
 
 		klog.V(4).Infof("Found a Service Annotation: %v = %v", annotationKey, returnValue)
@@ -707,24 +458,24 @@ func getBoolFromServiceAnnotation(service *corev1.Service, annotationKey string,
 }
 
 // getSubnetIDForLB returns subnet-id for a specific node
-func getSubnetIDForLB(compute *gophercloud.ServiceClient, node corev1.Node, preferredIPFamily corev1.IPFamily) (string, error) {
+func getSubnetIDForLB(network *gophercloud.ServiceClient, node corev1.Node, preferredIPFamily corev1.IPFamily) (string, error) {
 	ipAddress, err := nodeAddressForLB(&node, preferredIPFamily)
 	if err != nil {
 		return "", err
 	}
 
-	instanceID := node.Spec.ProviderID
-	if ind := strings.LastIndex(instanceID, "/"); ind >= 0 {
-		instanceID = instanceID[(ind + 1):]
+	instanceID, _, err := instanceIDFromProviderID(node.Spec.ProviderID)
+	if err != nil {
+		return "", fmt.Errorf("can't determine instance ID from ProviderID when autodetecting LB subnet: %w", err)
 	}
 
-	interfaces, err := getAttachedInterfacesByID(compute, instanceID)
+	ports, err := getAttachedPorts(network, instanceID)
 	if err != nil {
 		return "", err
 	}
 
-	for _, intf := range interfaces {
-		for _, fixedIP := range intf.FixedIPs {
+	for _, port := range ports {
+		for _, fixedIP := range port.FixedIPs {
 			if fixedIP.IPAddress == ipAddress {
 				return fixedIP.SubnetID, nil
 			}
@@ -734,103 +485,13 @@ func getSubnetIDForLB(compute *gophercloud.ServiceClient, node corev1.Node, pref
 	return "", cpoerrors.ErrNotFound
 }
 
-// applyNodeSecurityGroupIDForLB associates the security group with all the ports on the nodes.
-func applyNodeSecurityGroupIDForLB(compute *gophercloud.ServiceClient, network *gophercloud.ServiceClient, nodes []*corev1.Node, sg string) error {
-	for _, node := range nodes {
-		nodeName := types.NodeName(node.Name)
-		srv, err := getServerByName(compute, nodeName)
-		if err != nil {
-			return err
-		}
-
-		listOpts := neutronports.ListOpts{DeviceID: srv.ID}
-		allPorts, err := openstackutil.GetPorts(network, listOpts)
-		if err != nil {
-			return err
-		}
-
-		for _, port := range allPorts {
-			// If the Security Group is already present on the port, skip it.
-			// As soon as this only supports Go 1.18, this can be replaces by
-			// slices.Contains.
-			if func() bool {
-				for _, currentSG := range port.SecurityGroups {
-					if currentSG == sg {
-						return true
-					}
-				}
-				return false
-			}() {
-				continue
-			}
-
-			newSGs := append(port.SecurityGroups, sg)
-			updateOpts := neutronports.UpdateOpts{SecurityGroups: &newSGs}
-			mc := metrics.NewMetricContext("port", "update")
-			res := neutronports.Update(network, port.ID, updateOpts)
-			if mc.ObserveRequest(res.Err) != nil {
-				return fmt.Errorf("failed to update security group for port %s: %v", port.ID, res.Err)
-			}
-			// Add the security group ID as a tag to the port in order to find all these ports when removing the security group.
- mc = metrics.NewMetricContext("port_tag", "add") - err := neutrontags.Add(network, "ports", port.ID, sg).ExtractErr() - if mc.ObserveRequest(err) != nil { - return fmt.Errorf("failed to add tag %s to port %s: %v", sg, port.ID, err) - } - } - } - - return nil -} - -// disassociateSecurityGroupForLB removes the given security group from the ports -func disassociateSecurityGroupForLB(network *gophercloud.ServiceClient, sg string) error { - // Find all the ports that have the security group associated. - listOpts := neutronports.ListOpts{TagsAny: sg} - allPorts, err := openstackutil.GetPorts(network, listOpts) - if err != nil { - return err - } - - // Disassocate security group and remove the tag. - for _, port := range allPorts { - existingSGs := sets.NewString() - for _, sgID := range port.SecurityGroups { - existingSGs.Insert(sgID) - } - existingSGs.Delete(sg) - - // Update port security groups - newSGs := existingSGs.List() - updateOpts := neutronports.UpdateOpts{SecurityGroups: &newSGs} - mc := metrics.NewMetricContext("port", "update") - res := neutronports.Update(network, port.ID, updateOpts) - if mc.ObserveRequest(res.Err) != nil { - return fmt.Errorf("failed to update security group for port %s: %v", port.ID, res.Err) - } - // Remove the security group ID tag from the port. - mc = metrics.NewMetricContext("port_tag", "delete") - err := neutrontags.Delete(network, "ports", port.ID, sg).ExtractErr() - if mc.ObserveRequest(err) != nil { - return fmt.Errorf("failed to remove tag %s to port %s: %v", sg, port.ID, res.Err) +// isPortMember returns true if IP and subnetID are one of the FixedIPs on the port +func isPortMember(port PortWithPortSecurity, IP string, subnetID string) bool { + for _, fixedIP := range port.FixedIPs { + if (subnetID == "" || subnetID == fixedIP.SubnetID) && IP == fixedIP.IPAddress { + return true } } - - return nil -} - -// isSecurityGroupNotFound return true while 'err' is object of gophercloud.ErrResourceNotFound -func isSecurityGroupNotFound(err error) bool { - errType := reflect.TypeOf(err).String() - errTypeSlice := strings.Split(errType, ".") - errTypeValue := "" - if len(errTypeSlice) != 0 { - errTypeValue = errTypeSlice[len(errTypeSlice)-1] - } - if errTypeValue == "ErrResourceNotFound" { - return true - } - return false } @@ -903,7 +564,7 @@ func (lbaas *LbaasV2) createFloatingIP(msg string, floatIPOpts floatingips.Creat floatIP, err := floatingips.Create(lbaas.network, floatIPOpts).Extract() err = PreserveGopherError(err) if mc.ObserveRequest(err) != nil { - return floatIP, fmt.Errorf("error creating LB floatingip: %s", err) + return floatIP, fmt.Errorf("error creating LB floatingip: %v", err) } return floatIP, err } @@ -913,7 +574,7 @@ func (lbaas *LbaasV2) updateFloatingIP(floatingip *floatingips.FloatingIP, portI PortID: portID, } if portID != nil { - klog.V(4).Infof("Attaching floating ip %q to loadbalancer port %q", floatingip.FloatingIP, portID) + klog.V(4).Infof("Attaching floating ip %q to loadbalancer port %q", floatingip.FloatingIP, *portID) } else { klog.V(4).Infof("Detaching floating ip %q from port %q", floatingip.FloatingIP, floatingip.PortID) } @@ -1043,10 +704,10 @@ func (lbaas *LbaasV2) ensureFloatingIP(clusterName string, service *corev1.Servi foundSubnet = subnet break } - klog.V(2).Infof("cannot use subnet %s: %s", subnet.Name, err) + klog.V(2).Infof("cannot use subnet %s: %v", subnet.Name, err) } if err != nil { - return "", fmt.Errorf("no free subnet matching %q found for network %s (last error %s)", + return "", fmt.Errorf("no free 
subnet matching %q found for network %s (last error %v)", svcConf.lbPublicSubnetSpec, svcConf.lbPublicNetworkID, err) } klog.V(2).Infof("Successfully created floating IP %s for loadbalancer %s on subnet %s(%s)", floatIP.FloatingIP, lb.ID, foundSubnet.Name, foundSubnet.ID) @@ -1061,9 +722,10 @@ func (lbaas *LbaasV2) ensureFloatingIP(clusterName string, service *corev1.Servi } klog.V(2).Infof("Successfully created floating IP %s for loadbalancer %s", floatIP.FloatingIP, lb.ID) } - } else { - klog.Warningf("Floating network configuration not provided for Service %s, forcing to ensure an internal load balancer service", serviceName) + msg := "Floating network configuration not provided for Service %s, forcing to ensure an internal load balancer service" + lbaas.eventRecorder.Eventf(service, corev1.EventTypeWarning, eventLBForceInternal, msg, serviceName) + klog.Warningf(msg, serviceName) } } @@ -1077,51 +739,56 @@ func (lbaas *LbaasV2) ensureFloatingIP(clusterName string, service *corev1.Servi func (lbaas *LbaasV2) ensureOctaviaHealthMonitor(lbID string, name string, pool *v2pools.Pool, port corev1.ServicePort, svcConf *serviceConfig) error { monitorID := pool.MonitorID - if monitorID != "" { - monitor, err := openstackutil.GetHealthMonitor(lbaas.lb, monitorID) - if err != nil { - return err - } - //Recreate health monitor with correct protocol if externalTrafficPolicy was changed - createOpts := lbaas.buildMonitorCreateOpts(svcConf, port) - if createOpts.Type != monitor.Type { - klog.InfoS("Recreating health monitor for the pool", "pool", pool.ID, "oldMonitor", monitorID) - if err := openstackutil.DeleteHealthMonitor(lbaas.lb, monitorID, lbID); err != nil { - return err - } - monitorID = "" - } - if svcConf.healthMonitorDelay != monitor.Delay || svcConf.healthMonitorTimeout != monitor.Timeout || svcConf.healthMonitorMaxRetries != monitor.MaxRetries { - updateOpts := v2monitors.UpdateOpts{ - Delay: svcConf.healthMonitorDelay, - Timeout: svcConf.healthMonitorTimeout, - MaxRetries: svcConf.healthMonitorMaxRetries, - } - klog.Infof("Updating health monitor %s updateOpts %+v", monitorID, updateOpts) - if err := openstackutil.UpdateHealthMonitor(lbaas.lb, monitorID, updateOpts); err != nil { - return err - } + if monitorID == "" { + // do nothing + if !svcConf.enableMonitor { + return nil } - } - if monitorID == "" && svcConf.enableMonitor { + + // a new monitor must be created klog.V(2).Infof("Creating monitor for pool %s", pool.ID) + createOpts := lbaas.buildMonitorCreateOpts(svcConf, port, name) + return lbaas.createOctaviaHealthMonitor(createOpts, pool.ID, lbID) + } - createOpts := lbaas.buildMonitorCreateOpts(svcConf, port) - // Populate PoolID, attribute is omitted for consumption of the createOpts for fully populated Loadbalancer - createOpts.PoolID = pool.ID - createOpts.Name = name - monitor, err := openstackutil.CreateHealthMonitor(lbaas.lb, createOpts, lbID) - if err != nil { - return err - } - monitorID = monitor.ID - klog.Infof("Health monitor %s for pool %s created.", monitorID, pool.ID) - } else if monitorID != "" && !svcConf.enableMonitor { + // an existing monitor must be deleted + if !svcConf.enableMonitor { klog.Infof("Deleting health monitor %s for pool %s", monitorID, pool.ID) + return openstackutil.DeleteHealthMonitor(lbaas.lb, monitorID, lbID) + } + + // get an existing monitor status + monitor, err := openstackutil.GetHealthMonitor(lbaas.lb, monitorID) + if err != nil { + // return err on 404 is ok, since we get monitorID dynamically from the pool + return err + } + // 
recreate health monitor with a new type
+	createOpts := lbaas.buildMonitorCreateOpts(svcConf, port, name)
+	if createOpts.Type != monitor.Type {
+		klog.InfoS("Recreating health monitor for the pool", "pool", pool.ID, "oldMonitor", monitorID)
 		if err := openstackutil.DeleteHealthMonitor(lbaas.lb, monitorID, lbID); err != nil {
 			return err
 		}
+		return lbaas.createOctaviaHealthMonitor(createOpts, pool.ID, lbID)
+	}
+
+	// update the existing monitor's parameters
+	if name != monitor.Name ||
+		svcConf.healthMonitorDelay != monitor.Delay ||
+		svcConf.healthMonitorTimeout != monitor.Timeout ||
+		svcConf.healthMonitorMaxRetries != monitor.MaxRetries ||
+		svcConf.healthMonitorMaxRetriesDown != monitor.MaxRetriesDown {
+		updateOpts := v2monitors.UpdateOpts{
+			Name:           &name,
+			Delay:          svcConf.healthMonitorDelay,
+			Timeout:        svcConf.healthMonitorTimeout,
+			MaxRetries:     svcConf.healthMonitorMaxRetries,
+			MaxRetriesDown: svcConf.healthMonitorMaxRetriesDown,
+		}
+		klog.Infof("Updating health monitor %s updateOpts %+v", monitorID, updateOpts)
+		return openstackutil.UpdateHealthMonitor(lbaas.lb, monitorID, updateOpts, lbID)
 	}
 
 	return nil
@@ -1131,7 +798,9 @@ func (lbaas *LbaasV2) canUseHTTPMonitor(port corev1.ServicePort) bool {
 	if lbaas.opts.LBProvider == "ovn" {
 		// ovn-octavia-provider doesn't support HTTP monitors at all. We got to avoid creating it with ovn.
 		return false
-	} else if port.Protocol == corev1.ProtocolUDP {
+	}
+
+	if port.Protocol == corev1.ProtocolUDP {
 		// Older Octavia versions or OVN provider doesn't support HTTP monitors on UDP pools. We got to check if that's the case.
 		return openstackutil.IsOctaviaFeatureSupported(lbaas.lb, openstackutil.OctaviaFeatureHTTPMonitorsOnUDP, lbaas.opts.LBProvider)
 	}
@@ -1140,12 +809,14 @@ func (lbaas *LbaasV2) canUseHTTPMonitor(port corev1.ServicePort) bool {
 }
 
 // buildMonitorCreateOpts returns a v2monitors.CreateOpts without PoolID for consumption of both fully populated Loadbalancers and Monitors.
-func (lbaas *LbaasV2) buildMonitorCreateOpts(svcConf *serviceConfig, port corev1.ServicePort) v2monitors.CreateOpts {
+func (lbaas *LbaasV2) buildMonitorCreateOpts(svcConf *serviceConfig, port corev1.ServicePort, name string) v2monitors.CreateOpts {
 	opts := v2monitors.CreateOpts{
-		Type:       string(port.Protocol),
-		Delay:      svcConf.healthMonitorDelay,
-		Timeout:    svcConf.healthMonitorTimeout,
-		MaxRetries: svcConf.healthMonitorMaxRetries,
+		Name:           name,
+		Type:           string(port.Protocol),
+		Delay:          svcConf.healthMonitorDelay,
+		Timeout:        svcConf.healthMonitorTimeout,
+		MaxRetries:     svcConf.healthMonitorMaxRetries,
+		MaxRetriesDown: svcConf.healthMonitorMaxRetriesDown,
 	}
 	if port.Protocol == corev1.ProtocolUDP {
 		opts.Type = "UDP-CONNECT"
@@ -1159,6 +830,18 @@ func (lbaas *LbaasV2) buildMonitorCreateOpts(svcConf *serviceConfig, port corev1
 	return opts
 }
 
+func (lbaas *LbaasV2) createOctaviaHealthMonitor(createOpts v2monitors.CreateOpts, poolID, lbID string) error {
+	// populate PoolID, attribute is omitted for consumption of the createOpts for fully populated Loadbalancer
+	createOpts.PoolID = poolID
+	monitor, err := openstackutil.CreateHealthMonitor(lbaas.lb, createOpts, lbID)
+	if err != nil {
+		return err
+	}
+	klog.Infof("Health monitor %s for pool %s created.", monitor.ID, poolID)
+
+	return nil
+}
+
// Make sure the pool is created for the Service, nodes are added as pool members. 
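buildMonitorCreateOpts above now also carries Name and MaxRetriesDown. For illustration, the options assembled for a plain TCP service port might look like this (values mirror the annotation defaults and are made up; MaxRetriesDown is an existing field on gophercloud's v2monitors.CreateOpts):

```go
package example

import v2monitors "github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/monitors"

// Illustrative-only monitor options for a TCP listener's pool.
var exampleMonitorOpts = v2monitors.CreateOpts{
	Name:           "monitor_0_kube_service_cluster_default_svc", // from monitorFormat
	Type:           "TCP",
	Delay:          5, // health-monitor-delay
	Timeout:        3, // health-monitor-timeout
	MaxRetries:     1, // health-monitor-max-retries
	MaxRetriesDown: 3, // health-monitor-max-retries-down (new)
}
```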
func (lbaas *LbaasV2) ensureOctaviaPool(lbID string, name string, listener *listeners.Listener, service *corev1.Service, port corev1.ServicePort, nodes []*corev1.Node, svcConf *serviceConfig) (*v2pools.Pool, error) { pool, err := openstackutil.GetPoolByListener(lbaas.lb, lbID, listener.ID) @@ -1186,9 +869,8 @@ func (lbaas *LbaasV2) ensureOctaviaPool(lbID string, name string, listener *list } if pool == nil { - createOpt := lbaas.buildPoolCreateOpt(listener.Protocol, service, svcConf) + createOpt := lbaas.buildPoolCreateOpt(listener.Protocol, service, svcConf, name) createOpt.ListenerID = listener.ID - createOpt.Name = name klog.InfoS("Creating pool", "listenerID", listener.ID, "protocol", createOpt.Protocol) pool, err = openstackutil.CreatePool(lbaas.lb, createOpt, lbID) @@ -1198,6 +880,16 @@ func (lbaas *LbaasV2) ensureOctaviaPool(lbID string, name string, listener *list klog.V(2).Infof("Pool %s created for listener %s", pool.ID, listener.ID) } + if lbaas.opts.ProviderRequiresSerialAPICalls { + klog.V(2).Infof("Using serial API calls to update members for pool %s", pool.ID) + var nodePort int = int(port.NodePort) + + if err := openstackutil.SeriallyReconcilePoolMembers(lbaas.lb, pool, nodePort, lbID, nodes); err != nil { + return nil, err + } + return pool, nil + } + curMembers := sets.New[string]() poolMembers, err := openstackutil.GetMembersbyPool(lbaas.lb, pool.ID) if err != nil { @@ -1223,7 +915,7 @@ func (lbaas *LbaasV2) ensureOctaviaPool(lbID string, name string, listener *list return pool, nil } -func (lbaas *LbaasV2) buildPoolCreateOpt(listenerProtocol string, service *corev1.Service, svcConf *serviceConfig) v2pools.CreateOpts { +func (lbaas *LbaasV2) buildPoolCreateOpt(listenerProtocol string, service *corev1.Service, svcConf *serviceConfig, name string) v2pools.CreateOpts { // By default, use the protocol of the listener poolProto := v2pools.Protocol(listenerProtocol) if svcConf.enableProxyProtocol { @@ -1250,6 +942,7 @@ func (lbaas *LbaasV2) buildPoolCreateOpt(listenerProtocol string, service *corev lbmethod := v2pools.LBMethod(lbaas.opts.LBMethod) return v2pools.CreateOpts{ + Name: name, Protocol: poolProto, LBMethod: lbmethod, Persistence: persistence, @@ -1302,9 +995,8 @@ func (lbaas *LbaasV2) ensureOctaviaListener(lbID string, name string, curListene Port: int(port.Port), }] if !isPresent { - listenerCreateOpt := lbaas.buildListenerCreateOpt(port, svcConf) + listenerCreateOpt := lbaas.buildListenerCreateOpt(port, svcConf, name) listenerCreateOpt.LoadbalancerID = lbID - listenerCreateOpt.Name = name klog.V(2).Infof("Creating listener for port %d using protocol %s", int(port.Port), listenerCreateOpt.Protocol) @@ -1389,11 +1081,10 @@ func (lbaas *LbaasV2) ensureOctaviaListener(lbID string, name string, curListene } // buildListenerCreateOpt returns listeners.CreateOpts for a specific Service port and configuration -func (lbaas *LbaasV2) buildListenerCreateOpt(port corev1.ServicePort, svcConf *serviceConfig) listeners.CreateOpts { - listenerProtocol := listeners.Protocol(port.Protocol) - +func (lbaas *LbaasV2) buildListenerCreateOpt(port corev1.ServicePort, svcConf *serviceConfig, name string) listeners.CreateOpts { listenerCreateOpt := listeners.CreateOpts{ - Protocol: listenerProtocol, + Name: name, + Protocol: listeners.Protocol(port.Protocol), ProtocolPort: int(port.Port), ConnLimit: &svcConf.connLimit, } @@ -1426,14 +1117,16 @@ func (lbaas *LbaasV2) buildListenerCreateOpt(port corev1.ServicePort, svcConf *s listenerCreateOpt.Protocol = listeners.ProtocolHTTP } - if 
len(svcConf.allowedCIDR) > 0 { - listenerCreateOpt.AllowedCIDRs = svcConf.allowedCIDR + if openstackutil.IsOctaviaFeatureSupported(lbaas.lb, openstackutil.OctaviaFeatureVIPACL, lbaas.opts.LBProvider) { + if len(svcConf.allowedCIDR) > 0 { + listenerCreateOpt.AllowedCIDRs = svcConf.allowedCIDR + } } return listenerCreateOpt } // getMemberSubnetID gets the configured member-subnet-id from the different possible sources. -func (lbaas *LbaasV2) getMemberSubnetID(service *corev1.Service, svcConf *serviceConfig) (string, error) { +func (lbaas *LbaasV2) getMemberSubnetID(service *corev1.Service) (string, error) { // Get Member Subnet from Service Annotation memberSubnetIDAnnotation := getStringFromServiceAnnotation(service, ServiceAnnotationLoadBalancerMemberSubnetID, "") if memberSubnetIDAnnotation != "" { @@ -1532,7 +1225,7 @@ func (lbaas *LbaasV2) checkServiceUpdate(service *corev1.Service, nodes []*corev svcConf.supportLBTags = openstackutil.IsOctaviaFeatureSupported(lbaas.lb, openstackutil.OctaviaFeatureTags, lbaas.opts.LBProvider) // Find subnet ID for creating members - memberSubnetID, err := lbaas.getMemberSubnetID(service, svcConf) + memberSubnetID, err := lbaas.getMemberSubnetID(service) if err != nil { return fmt.Errorf("unable to get member-subnet-id, %w", err) } @@ -1554,7 +1247,7 @@ func (lbaas *LbaasV2) checkServiceUpdate(service *corev1.Service, nodes []*corev } else { svcConf.lbMemberSubnetID = getStringFromServiceAnnotation(service, ServiceAnnotationLoadBalancerSubnetID, lbaas.opts.SubnetID) if len(svcConf.lbMemberSubnetID) == 0 && len(nodes) > 0 { - subnetID, err := getSubnetIDForLB(lbaas.compute, *nodes[0], svcConf.preferredIPFamily) + subnetID, err := getSubnetIDForLB(lbaas.network, *nodes[0], svcConf.preferredIPFamily) if err != nil { return fmt.Errorf("no subnet-id found for service %s: %v", serviceName, err) } @@ -1580,6 +1273,7 @@ func (lbaas *LbaasV2) checkServiceUpdate(service *corev1.Service, nodes []*corev svcConf.healthMonitorDelay = getIntFromServiceAnnotation(service, ServiceAnnotationLoadBalancerHealthMonitorDelay, int(lbaas.opts.MonitorDelay.Duration.Seconds())) svcConf.healthMonitorTimeout = getIntFromServiceAnnotation(service, ServiceAnnotationLoadBalancerHealthMonitorTimeout, int(lbaas.opts.MonitorTimeout.Duration.Seconds())) svcConf.healthMonitorMaxRetries = getIntFromServiceAnnotation(service, ServiceAnnotationLoadBalancerHealthMonitorMaxRetries, int(lbaas.opts.MonitorMaxRetries)) + svcConf.healthMonitorMaxRetriesDown = getIntFromServiceAnnotation(service, ServiceAnnotationLoadBalancerHealthMonitorMaxRetriesDown, int(lbaas.opts.MonitorMaxRetriesDown)) return nil } @@ -1635,16 +1329,30 @@ func (lbaas *LbaasV2) checkService(service *corev1.Service, nodes []*corev1.Node "initialized and default-tls-container-ref %q is set", svcConf.tlsContainerRef) } - // check if container exists for 'barbican' container store - // tls container ref has the format: https://{keymanager_host}/v1/containers/{uuid} + // check if container or secret exists for 'barbican' container store + // tls container ref has the format: https://{keymanager_host}/v1/containers/{uuid} or https://{keymanager_host}/v1/secrets/{uuid} if lbaas.opts.ContainerStore == "barbican" { slice := strings.Split(svcConf.tlsContainerRef, "/") - containerID := slice[len(slice)-1] - container, err := containers.Get(lbaas.secret, containerID).Extract() - if err != nil { - return fmt.Errorf("failed to get tls container %q: %v", svcConf.tlsContainerRef, err) + if len(slice) < 2 { + return fmt.Errorf("invalid 
tlsContainerRef for service %s", serviceName)
+			}
+			barbicanUUID := slice[len(slice)-1]
+			barbicanType := slice[len(slice)-2]
+			if barbicanType == "containers" {
+				container, err := containers.Get(lbaas.secret, barbicanUUID).Extract()
+				if err != nil {
+					return fmt.Errorf("failed to get tls container %q: %v", svcConf.tlsContainerRef, err)
+				}
+				klog.V(4).Infof("Default TLS container %q found", container.ContainerRef)
+			} else if barbicanType == "secrets" {
+				secret, err := secrets.Get(lbaas.secret, barbicanUUID).Extract()
+				if err != nil {
+					return fmt.Errorf("failed to get tls secret %q: %v", svcConf.tlsContainerRef, err)
+				}
+				klog.V(4).Infof("Default TLS secret %q found", secret.SecretRef)
+			} else {
+				return fmt.Errorf("failed to validate tlsContainerRef for service %s: tlsContainerRef type %s unknown", serviceName, barbicanType)
 			}
-			klog.V(4).Infof("Default TLS container %q found", container.ContainerRef)
 		}
 	}
 
@@ -1668,7 +1376,7 @@ func (lbaas *LbaasV2) checkService(service *corev1.Service, nodes []*corev1.Node
 			svcConf.lbMemberSubnetID = svcConf.lbSubnetID
 		}
 		if len(svcConf.lbNetworkID) == 0 && len(svcConf.lbSubnetID) == 0 {
-			subnetID, err := getSubnetIDForLB(lbaas.compute, *nodes[0], svcConf.preferredIPFamily)
+			subnetID, err := getSubnetIDForLB(lbaas.network, *nodes[0], svcConf.preferredIPFamily)
 			if err != nil {
 				return fmt.Errorf("failed to get subnet to create load balancer for service %s: %v", serviceName, err)
 			}
@@ -1678,7 +1386,7 @@ func (lbaas *LbaasV2) checkService(service *corev1.Service, nodes []*corev1.Node
 
 	// Override the specific member-subnet-id, if explicitly configured.
 	// Otherwise use subnet-id.
-	memberSubnetID, err := lbaas.getMemberSubnetID(service, svcConf)
+	memberSubnetID, err := lbaas.getMemberSubnetID(service)
 	if err != nil {
 		return fmt.Errorf("unable to get member-subnet-id, %w", err)
 	}
@@ -1711,22 +1419,22 @@ func (lbaas *LbaasV2) checkService(service *corev1.Service, nodes []*corev1.Node
 		}
 	}
 
+	// If LB class doesn't define FIP network or subnet, get it from svc annotation or fall back to configuration
 	if floatingNetworkID == "" {
 		floatingNetworkID = getStringFromServiceAnnotation(service, ServiceAnnotationLoadBalancerFloatingNetworkID, lbaas.opts.FloatingNetworkID)
-		if floatingNetworkID == "" {
-			var err error
-			floatingNetworkID, err = openstackutil.GetFloatingNetworkID(lbaas.network)
-			if err != nil {
-				klog.Warningf("Failed to find floating-network-id for Service %s: %v", serviceName, err)
-			}
-		}
 	}
 
-	// apply defaults from CCM config
+	// If there's neither an annotation nor configuration, try to autodetect the FIP network by looking up external nets
 	if floatingNetworkID == "" {
-		floatingNetworkID = lbaas.opts.FloatingNetworkID
+		floatingNetworkID, err = openstackutil.GetFloatingNetworkID(lbaas.network)
+		if err != nil {
+			msg := "Failed to find floating-network-id for Service %s: %v"
+			lbaas.eventRecorder.Eventf(service, corev1.EventTypeWarning, eventLBExternalNetworkSearchFailed, msg, serviceName, err)
+			klog.Warningf(msg, serviceName, err)
+		}
 	}
 
+	// try to get FIP subnet from configuration
 	if !floatingSubnet.Configured() {
 		annos := floatingSubnetSpec{}
 		annos.subnetID = getStringFromServiceAnnotation(service, ServiceAnnotationLoadBalancerFloatingSubnetID, "")
@@ -1745,7 +1453,7 @@ func (lbaas *LbaasV2) checkService(service *corev1.Service, nodes []*corev1.Node
 		}
 	}
 
-	// check subnets belongs to network
+	// check configured subnet belongs to network
 	if floatingNetworkID != "" && floatingSubnet.subnetID != "" {
 		mc := metrics.NewMetricContext("subnet", "get")
subnet, err := subnets.Get(lbaas.network, floatingSubnet.subnetID).Extract()
@@ -1784,18 +1492,21 @@ func (lbaas *LbaasV2) checkService(service *corev1.Service, nodes []*corev1.Node
 		svcConf.timeoutTCPInspect = getIntFromServiceAnnotation(service, ServiceAnnotationLoadBalancerTimeoutTCPInspect, 0)
 	}
 
-	var listenerAllowedCIDRs []string
 	sourceRanges, err := GetLoadBalancerSourceRanges(service, svcConf.preferredIPFamily)
 	if err != nil {
 		return fmt.Errorf("failed to get source ranges for loadbalancer service %s: %v", serviceName, err)
 	}
 	if openstackutil.IsOctaviaFeatureSupported(lbaas.lb, openstackutil.OctaviaFeatureVIPACL, lbaas.opts.LBProvider) {
 		klog.V(4).Info("LoadBalancerSourceRanges is supported")
-		listenerAllowedCIDRs = sourceRanges.StringSlice()
+		svcConf.allowedCIDR = sourceRanges.StringSlice()
+	} else if lbaas.opts.LBProvider == "ovn" && lbaas.opts.ManageSecurityGroups {
+		klog.V(4).Info("LoadBalancerSourceRanges will be enforced on the SG created and attached to LB members")
+		svcConf.allowedCIDR = sourceRanges.StringSlice()
 	} else {
-		klog.Warning("LoadBalancerSourceRanges is ignored")
+		msg := "LoadBalancerSourceRanges are ignored for Service %s because Octavia provider does not support it"
+		lbaas.eventRecorder.Eventf(service, corev1.EventTypeWarning, eventLBSourceRangesIgnored, msg, serviceName)
+		klog.Warningf(msg, serviceName)
 	}
-	svcConf.allowedCIDR = listenerAllowedCIDRs
 
 	if openstackutil.IsOctaviaFeatureSupported(lbaas.lb, openstackutil.OctaviaFeatureFlavors, lbaas.opts.LBProvider) {
 		svcConf.flavorID = getStringFromServiceAnnotation(service, ServiceAnnotationLoadBalancerFlavorID, lbaas.opts.FlavorID)
@@ -1805,7 +1516,9 @@ func (lbaas *LbaasV2) checkService(service *corev1.Service, nodes []*corev1.Node
 	if openstackutil.IsOctaviaFeatureSupported(lbaas.lb, openstackutil.OctaviaFeatureAvailabilityZones, lbaas.opts.LBProvider) {
 		svcConf.availabilityZone = availabilityZone
 	} else if availabilityZone != "" {
-		klog.Warning("LoadBalancer Availability Zones aren't supported. Please, upgrade Octavia API to version 2.14 or later (Ussuri release) to use them")
+		msg := "LoadBalancer Availability Zones aren't supported. 
Please, upgrade Octavia API to version 2.14 or later (Ussuri release) to use them for Service %s" + lbaas.eventRecorder.Eventf(service, corev1.EventTypeWarning, eventLBAZIgnored, msg, serviceName) + klog.Warningf(msg, serviceName) } svcConf.enableMonitor = getBoolFromServiceAnnotation(service, ServiceAnnotationLoadBalancerEnableHealthMonitor, lbaas.opts.CreateMonitor) @@ -1815,6 +1528,7 @@ func (lbaas *LbaasV2) checkService(service *corev1.Service, nodes []*corev1.Node svcConf.healthMonitorDelay = getIntFromServiceAnnotation(service, ServiceAnnotationLoadBalancerHealthMonitorDelay, int(lbaas.opts.MonitorDelay.Duration.Seconds())) svcConf.healthMonitorTimeout = getIntFromServiceAnnotation(service, ServiceAnnotationLoadBalancerHealthMonitorTimeout, int(lbaas.opts.MonitorTimeout.Duration.Seconds())) svcConf.healthMonitorMaxRetries = getIntFromServiceAnnotation(service, ServiceAnnotationLoadBalancerHealthMonitorMaxRetries, int(lbaas.opts.MonitorMaxRetries)) + svcConf.healthMonitorMaxRetriesDown = getIntFromServiceAnnotation(service, ServiceAnnotationLoadBalancerHealthMonitorMaxRetriesDown, int(lbaas.opts.MonitorMaxRetriesDown)) return nil } @@ -1837,25 +1551,6 @@ func (lbaas *LbaasV2) checkListenerPorts(service *corev1.Service, curListenerMap return nil } -func newServicePatcher(kclient kubernetes.Interface, base *corev1.Service) servicePatcher { - return servicePatcher{ - kclient: kclient, - base: base.DeepCopy(), - updated: base, - } -} - -// Patch will submit a patch request for the Service unless the updated service -// reference contains the same set of annotations as the base copied during -// servicePatcher initialization. -func (sp *servicePatcher) Patch(ctx context.Context, err error) error { - if reflect.DeepEqual(sp.base.Annotations, sp.updated.Annotations) { - return err - } - perr := cpoutil.PatchService(ctx, sp.kclient, sp.base, sp.updated) - return utilerrors.NewAggregate([]error{err, perr}) -} - func (lbaas *LbaasV2) updateServiceAnnotations(service *corev1.Service, annotations map[string]string) { if service.ObjectMeta.Annotations == nil { service.ObjectMeta.Annotations = map[string]string{} @@ -1951,9 +1646,8 @@ func (lbaas *LbaasV2) ensureOctaviaLoadBalancer(ctx context.Context, clusterName if err != cpoerrors.ErrNotFound { return nil, fmt.Errorf("error getting loadbalancer for Service %s: %v", serviceName, err) } - - klog.InfoS("Creating fully populated loadbalancer", "lbName", lbName, "service", klog.KObj(service)) - loadbalancer, err = lbaas.createFullyPopulatedOctaviaLoadBalancer(lbName, clusterName, service, nodes, svcConf) + klog.InfoS("Creating loadbalancer", "lbName", lbName, "service", klog.KObj(service)) + loadbalancer, err = lbaas.createOctaviaLoadBalancer(lbName, clusterName, service, nodes, svcConf) if err != nil { return nil, fmt.Errorf("error creating loadbalancer %s: %v", lbName, err) } @@ -1974,8 +1668,9 @@ func (lbaas *LbaasV2) ensureOctaviaLoadBalancer(ctx context.Context, clusterName klog.V(4).InfoS("Load balancer ensured", "lbID", loadbalancer.ID, "isLBOwner", isLBOwner, "createNewLB", createNewLB) - // This is an existing load balancer, either created by occm for other Services or by the user outside of cluster. - if !createNewLB { + // This is an existing load balancer, either created by occm for other Services or by the user outside of cluster, or + // a newly created, unpopulated loadbalancer that needs populating. 
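ProviderRequiresSerialAPICalls, used below, switches member management from one batch update to per-member requests for providers such as OVN that reject Octavia batch-member calls. A rough sketch of the loop shape (the real logic is openstackutil.SeriallyReconcilePoolMembers, which also removes stale members and presumably waits for the LB to settle between calls; both are omitted here):

```go
package example

import (
	"github.com/gophercloud/gophercloud"
	v2pools "github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/pools"
)

// addMembersSerially illustrates the per-member alternative to a batch update.
func addMembersSerially(lb *gophercloud.ServiceClient, poolID string, addrs []string, nodePort int) error {
	for _, addr := range addrs {
		_, err := v2pools.CreateMember(lb, poolID, v2pools.CreateMemberOpts{
			Address:      addr,
			ProtocolPort: nodePort,
		}).Extract()
		if err != nil {
			return err
		}
	}
	return nil
}
```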
+ if !createNewLB || (lbaas.opts.ProviderRequiresSerialAPICalls && createNewLB) { curListeners := loadbalancer.Listeners curListenerMapping := make(map[listenerKey]*listeners.Listener) for i, l := range curListeners { @@ -1990,17 +1685,17 @@ func (lbaas *LbaasV2) ensureOctaviaLoadBalancer(ctx context.Context, clusterName } for portIndex, port := range service.Spec.Ports { - listener, err := lbaas.ensureOctaviaListener(loadbalancer.ID, cutString(fmt.Sprintf("listener_%d_%s", portIndex, lbName)), curListenerMapping, port, svcConf, service) + listener, err := lbaas.ensureOctaviaListener(loadbalancer.ID, cpoutil.Sprintf255(listenerFormat, portIndex, lbName), curListenerMapping, port, svcConf, service) if err != nil { return nil, err } - pool, err := lbaas.ensureOctaviaPool(loadbalancer.ID, cutString(fmt.Sprintf("pool_%d_%s", portIndex, lbName)), listener, service, port, nodes, svcConf) + pool, err := lbaas.ensureOctaviaPool(loadbalancer.ID, cpoutil.Sprintf255(poolFormat, portIndex, lbName), listener, service, port, nodes, svcConf) if err != nil { return nil, err } - if err := lbaas.ensureOctaviaHealthMonitor(loadbalancer.ID, cutString(fmt.Sprintf("monitor_%d_%s", portIndex, lbName)), pool, port, svcConf); err != nil { + if err := lbaas.ensureOctaviaHealthMonitor(loadbalancer.ID, cpoutil.Sprintf255(monitorFormat, portIndex, lbName), pool, port, svcConf); err != nil { return nil, err } @@ -2042,10 +1737,16 @@ func (lbaas *LbaasV2) ensureOctaviaLoadBalancer(ctx context.Context, clusterName status := lbaas.createLoadBalancerStatus(service, svcConf, addr) if lbaas.opts.ManageSecurityGroups { - err := lbaas.ensureSecurityGroup(clusterName, service, nodes, loadbalancer, svcConf.preferredIPFamily, svcConf.lbMemberSubnetID) + err := lbaas.ensureAndUpdateOctaviaSecurityGroup(clusterName, service, nodes, svcConf) if err != nil { return status, fmt.Errorf("failed when reconciling security groups for LB service %v/%v: %v", service.Namespace, service.Name, err) } + } else { + // Attempt to delete the SG if `manage-security-groups` is disabled. When CPO is reconfigured to enable it we + // will reconcile the LB and create the SG. This is to make sure it works the same in the opposite direction. + if err := lbaas.ensureSecurityGroupDeleted(clusterName, service); err != nil { + return status, err + } } return status, nil @@ -2082,59 +1783,6 @@ func (lbaas *LbaasV2) listSubnetsForNetwork(networkID string, tweak ...TweakSubN return subs, nil } -// group, if it not present. 
-func (lbaas *LbaasV2) ensureSecurityRule( - direction rules.RuleDirection, - protocol rules.RuleProtocol, - etherType rules.RuleEtherType, - remoteIPPrefix, secGroupID string, - portRangeMin, portRangeMax int, -) error { - sgListopts := rules.ListOpts{ - Direction: string(direction), - Protocol: string(protocol), - PortRangeMax: portRangeMin, - PortRangeMin: portRangeMax, - RemoteIPPrefix: remoteIPPrefix, - SecGroupID: secGroupID, - } - sgRules, err := getSecurityGroupRules(lbaas.network, sgListopts) - if err != nil && !cpoerrors.IsNotFound(err) { - return fmt.Errorf( - "failed to find security group rules in %s: %v", secGroupID, err) - } - if len(sgRules) != 0 { - return nil - } - - sgRuleCreateOpts := rules.CreateOpts{ - Direction: direction, - Protocol: protocol, - PortRangeMax: portRangeMin, - PortRangeMin: portRangeMax, - RemoteIPPrefix: remoteIPPrefix, - SecGroupID: secGroupID, - EtherType: etherType, - } - - mc := metrics.NewMetricContext("security_group_rule", "create") - _, err = rules.Create(lbaas.network, sgRuleCreateOpts).Extract() - if mc.ObserveRequest(err) != nil { - return fmt.Errorf( - "failed to create rule for security group %s: %v", - secGroupID, err) - } - return nil -} - -// ensureSecurityGroup ensures security group exist for specific loadbalancer service. -// Creating security group for specific loadbalancer service when it does not exist. -func (lbaas *LbaasV2) ensureSecurityGroup(clusterName string, apiService *corev1.Service, nodes []*corev1.Node, - loadbalancer *loadbalancers.LoadBalancer, preferredIPFamily corev1.IPFamily, memberSubnetID string) error { - - return lbaas.ensureAndUpdateOctaviaSecurityGroup(clusterName, apiService, nodes, memberSubnetID) -} - func (lbaas *LbaasV2) updateOctaviaLoadBalancer(ctx context.Context, clusterName string, service *corev1.Service, nodes []*corev1.Node) error { svcConf := new(serviceConfig) var err error @@ -2190,18 +1838,26 @@ func (lbaas *LbaasV2) updateOctaviaLoadBalancer(ctx context.Context, clusterName return fmt.Errorf("loadbalancer %s does not contain required listener for port %d and protocol %s", loadbalancer.ID, port.Port, port.Protocol) } - _, err := lbaas.ensureOctaviaPool(loadbalancer.ID, cutString(fmt.Sprintf("pool_%d_%s", portIndex, loadbalancer.Name)), &listener, service, port, nodes, svcConf) + pool, err := lbaas.ensureOctaviaPool(loadbalancer.ID, cpoutil.Sprintf255(poolFormat, portIndex, loadbalancer.Name), &listener, service, port, nodes, svcConf) + if err != nil { + return err + } + + err = lbaas.ensureOctaviaHealthMonitor(loadbalancer.ID, cpoutil.Sprintf255(monitorFormat, portIndex, loadbalancer.Name), pool, port, svcConf) if err != nil { return err } } if lbaas.opts.ManageSecurityGroups { - err := lbaas.updateSecurityGroup(clusterName, service, nodes, svcConf.lbMemberSubnetID) + err := lbaas.ensureAndUpdateOctaviaSecurityGroup(clusterName, service, nodes, svcConf) if err != nil { return fmt.Errorf("failed to update Security Group for loadbalancer service %s: %v", serviceName, err) } } + // We don't try to lookup and delete the SG here when `manage-security-group=false` as `UpdateLoadBalancer()` is + // only called on changes to the list of the Nodes. Deletion of the SG on reconfiguration will be handled by + // EnsureLoadBalancer() that is the true LB reconcile function. 
return nil } @@ -2213,101 +1869,6 @@ func (lbaas *LbaasV2) UpdateLoadBalancer(ctx context.Context, clusterName string return mc.ObserveReconcile(err) } -// ensureAndUpdateOctaviaSecurityGroup handles the creation and update of the security group and the securiry rules for the octavia load balancer -func (lbaas *LbaasV2) ensureAndUpdateOctaviaSecurityGroup(clusterName string, apiService *corev1.Service, nodes []*corev1.Node, memberSubnetID string) error { - // get service ports - ports := apiService.Spec.Ports - if len(ports) == 0 { - return fmt.Errorf("no ports provided to openstack load balancer") - } - - // ensure security group for LB - lbSecGroupName := getSecurityGroupName(apiService) - lbSecGroupID, err := secgroups.IDFromName(lbaas.network, lbSecGroupName) - if err != nil { - // If the security group of LB not exist, create it later - if isSecurityGroupNotFound(err) { - lbSecGroupID = "" - } else { - return fmt.Errorf("error occurred finding security group: %s: %v", lbSecGroupName, err) - } - } - if len(lbSecGroupID) == 0 { - // create security group - lbSecGroupCreateOpts := groups.CreateOpts{ - Name: lbSecGroupName, - Description: fmt.Sprintf("Security Group for %s/%s Service LoadBalancer in cluster %s", apiService.Namespace, apiService.Name, clusterName), - } - - mc := metrics.NewMetricContext("security_group", "create") - lbSecGroup, err := groups.Create(lbaas.network, lbSecGroupCreateOpts).Extract() - if mc.ObserveRequest(err) != nil { - return fmt.Errorf("failed to create Security Group for loadbalancer service %s/%s: %v", apiService.Namespace, apiService.Name, err) - } - lbSecGroupID = lbSecGroup.ID - } - - mc := metrics.NewMetricContext("subnet", "get") - subnet, err := subnets.Get(lbaas.network, memberSubnetID).Extract() - if mc.ObserveRequest(err) != nil { - return fmt.Errorf( - "failed to find subnet %s from openstack: %v", memberSubnetID, err) - } - - etherType := rules.EtherType4 - if netutils.IsIPv6CIDRString(subnet.CIDR) { - etherType = rules.EtherType6 - } - - if apiService.Spec.HealthCheckNodePort != 0 { - err = lbaas.ensureSecurityRule( - rules.DirIngress, - rules.ProtocolTCP, - etherType, - subnet.CIDR, - lbSecGroupID, - int(apiService.Spec.HealthCheckNodePort), - int(apiService.Spec.HealthCheckNodePort), - ) - if err != nil { - return fmt.Errorf( - "failed to apply security rule for health check node port, %w", - err) - } - } - - // ensure rules for node security group - for _, port := range ports { - if port.NodePort == 0 { // It's 0 when AllocateLoadBalancerNodePorts=False - continue - } - err = lbaas.ensureSecurityRule( - rules.DirIngress, - rules.RuleProtocol(port.Protocol), - etherType, - subnet.CIDR, - lbSecGroupID, - int(port.NodePort), - int(port.NodePort), - ) - if err != nil { - return fmt.Errorf( - "failed to apply security rule for port %d, %w", - port.NodePort, err) - } - - if err := applyNodeSecurityGroupIDForLB(lbaas.compute, lbaas.network, nodes, lbSecGroupID); err != nil { - return err - } - } - return nil -} - -// updateSecurityGroup updating security group for specific loadbalancer service. 
-func (lbaas *LbaasV2) updateSecurityGroup(clusterName string, apiService *corev1.Service, nodes []*corev1.Node, memberSubnetID string) error { - return lbaas.ensureAndUpdateOctaviaSecurityGroup(clusterName, apiService, nodes, memberSubnetID) -} - // EnsureLoadBalancerDeleted deletes the specified load balancer func (lbaas *LbaasV2) EnsureLoadBalancerDeleted(ctx context.Context, clusterName string, service *corev1.Service) error { mc := metrics.NewMetricContext("loadbalancer", "delete") @@ -2335,6 +1896,83 @@ func (lbaas *LbaasV2) deleteFIPIfCreatedByProvider(fip *floatingips.FloatingIP, return true, nil } +// deleteLoadBalancer removes the LB and its children either by using Octavia cascade deletion or manually +func (lbaas *LbaasV2) deleteLoadBalancer(loadbalancer *loadbalancers.LoadBalancer, service *corev1.Service, svcConf *serviceConfig, needDeleteLB bool) error { + if needDeleteLB && lbaas.opts.CascadeDelete { + klog.InfoS("Deleting load balancer", "lbID", loadbalancer.ID, "service", klog.KObj(service)) + if err := openstackutil.DeleteLoadbalancer(lbaas.lb, loadbalancer.ID, true); err != nil { + return err + } + klog.InfoS("Deleted load balancer", "lbID", loadbalancer.ID, "service", klog.KObj(service)) + } else { + // get all listeners associated with this loadbalancer + listenerList, err := openstackutil.GetListenersByLoadBalancerID(lbaas.lb, loadbalancer.ID) + if err != nil { + return fmt.Errorf("error getting LB %s listeners: %v", loadbalancer.ID, err) + } + + if !needDeleteLB { + var listenersToDelete []listeners.Listener + curListenerMapping := make(map[listenerKey]*listeners.Listener) + for i, l := range listenerList { + key := listenerKey{Protocol: listeners.Protocol(l.Protocol), Port: l.ProtocolPort} + curListenerMapping[key] = &listenerList[i] + } + + for _, port := range service.Spec.Ports { + proto := getListenerProtocol(port.Protocol, svcConf) + listener, isPresent := curListenerMapping[listenerKey{ + Protocol: proto, + Port: int(port.Port), + }] + if isPresent && cpoutil.Contains(listener.Tags, svcConf.lbName) { + listenersToDelete = append(listenersToDelete, *listener) + } + } + listenerList = listenersToDelete + } + + // get all pools (and health monitors) associated with this loadbalancer + var monitorIDs []string + for _, listener := range listenerList { + pool, err := openstackutil.GetPoolByListener(lbaas.lb, loadbalancer.ID, listener.ID) + if err != nil && err != cpoerrors.ErrNotFound { + return fmt.Errorf("error getting pool for listener %s: %v", listener.ID, err) + } + if pool != nil { + if pool.MonitorID != "" { + monitorIDs = append(monitorIDs, pool.MonitorID) + } + } + } + + // delete monitors + for _, monitorID := range monitorIDs { + klog.InfoS("Deleting health monitor", "monitorID", monitorID, "lbID", loadbalancer.ID) + if err := openstackutil.DeleteHealthMonitor(lbaas.lb, monitorID, loadbalancer.ID); err != nil { + return err + } + klog.InfoS("Deleted health monitor", "monitorID", monitorID, "lbID", loadbalancer.ID) + } + + // delete listeners + if err := lbaas.deleteListeners(loadbalancer.ID, listenerList); err != nil { + return err + } + + if needDeleteLB { + // delete the loadbalancer in old way, i.e. no cascading. 
+ klog.InfoS("Deleting load balancer", "lbID", loadbalancer.ID, "service", klog.KObj(service)) + if err := openstackutil.DeleteLoadbalancer(lbaas.lb, loadbalancer.ID, false); err != nil { + return err + } + klog.InfoS("Deleted load balancer", "lbID", loadbalancer.ID, "service", klog.KObj(service)) + } + } + + return nil +} + func (lbaas *LbaasV2) ensureLoadBalancerDeleted(ctx context.Context, clusterName string, service *corev1.Service) error { lbName := lbaas.GetLoadBalancerName(ctx, clusterName, service) legacyName := lbaas.getLoadBalancerLegacyName(ctx, clusterName, service) @@ -2348,6 +1986,7 @@ func (lbaas *LbaasV2) ensureLoadBalancerDeleted(ctx context.Context, clusterName if err := lbaas.checkServiceDelete(service, svcConf); err != nil { return err } + svcConf.lbName = lbName if svcConf.lbID != "" { loadbalancer, err = openstackutil.GetLoadbalancerByID(lbaas.lb, svcConf.lbID) @@ -2407,76 +2046,8 @@ func (lbaas *LbaasV2) ensureLoadBalancerDeleted(ctx context.Context, clusterName } } - if needDeleteLB && lbaas.opts.CascadeDelete { - klog.InfoS("Deleting load balancer", "lbID", loadbalancer.ID, "service", klog.KObj(service)) - if err := openstackutil.DeleteLoadbalancer(lbaas.lb, loadbalancer.ID, true); err != nil { - return err - } - klog.InfoS("Deleted load balancer", "lbID", loadbalancer.ID, "service", klog.KObj(service)) - } else { - // get all listeners associated with this loadbalancer - listenerList, err := openstackutil.GetListenersByLoadBalancerID(lbaas.lb, loadbalancer.ID) - if err != nil { - return fmt.Errorf("error getting LB %s listeners: %v", loadbalancer.ID, err) - } - - if !needDeleteLB { - var listenersToDelete []listeners.Listener - curListenerMapping := make(map[listenerKey]*listeners.Listener) - for i, l := range listenerList { - key := listenerKey{Protocol: listeners.Protocol(l.Protocol), Port: l.ProtocolPort} - curListenerMapping[key] = &listenerList[i] - } - - for _, port := range service.Spec.Ports { - proto := getListenerProtocol(port.Protocol, svcConf) - listener, isPresent := curListenerMapping[listenerKey{ - Protocol: proto, - Port: int(port.Port), - }] - if isPresent && cpoutil.Contains(listener.Tags, lbName) { - listenersToDelete = append(listenersToDelete, *listener) - } - } - listenerList = listenersToDelete - } - - // get all pools (and health monitors) associated with this loadbalancer - var monitorIDs []string - for _, listener := range listenerList { - pool, err := openstackutil.GetPoolByListener(lbaas.lb, loadbalancer.ID, listener.ID) - if err != nil && err != cpoerrors.ErrNotFound { - return fmt.Errorf("error getting pool for listener %s: %v", listener.ID, err) - } - if pool != nil { - if pool.MonitorID != "" { - monitorIDs = append(monitorIDs, pool.MonitorID) - } - } - } - - // delete monitors - for _, monitorID := range monitorIDs { - klog.InfoS("Deleting health monitor", "monitorID", monitorID, "lbID", loadbalancer.ID) - if err := openstackutil.DeleteHealthMonitor(lbaas.lb, monitorID, loadbalancer.ID); err != nil { - return err - } - klog.InfoS("Deleted health monitor", "monitorID", monitorID, "lbID", loadbalancer.ID) - } - - // delete listeners - if err := lbaas.deleteListeners(loadbalancer.ID, listenerList); err != nil { - return err - } - - if needDeleteLB { - // delete the loadbalancer in old way, i.e. no cascading. 
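Both the new deleteLoadBalancer helper and the removed inline code below distinguish cascade deletion (Octavia removes the LB together with its listeners, pools and monitors in one API call) from the manual child-by-child teardown. A sketch of what the cascade flag of openstackutil.DeleteLoadbalancer presumably maps to in gophercloud; the real helper also waits for the deletion to finish, which is omitted here:

```go
package lbsketch

import (
	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/loadbalancers"
)

// deleteLB issues a single DELETE, cascading to all children when asked to.
// A 404 is treated as success so the operation stays idempotent.
func deleteLB(client *gophercloud.ServiceClient, lbID string, cascade bool) error {
	err := loadbalancers.Delete(client, lbID, loadbalancers.DeleteOpts{Cascade: cascade}).ExtractErr()
	if _, ok := err.(gophercloud.ErrDefault404); ok {
		return nil // already gone
	}
	return err
}
```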
- klog.InfoS("Deleting load balancer", "lbID", loadbalancer.ID, "service", klog.KObj(service)) - if err := openstackutil.DeleteLoadbalancer(lbaas.lb, loadbalancer.ID, false); err != nil { - return err - } - klog.InfoS("Deleted load balancer", "lbID", loadbalancer.ID, "service", klog.KObj(service)) - } + if err = lbaas.deleteLoadBalancer(loadbalancer, service, svcConf, needDeleteLB); err != nil { + return err } // Remove the Service's tag from the load balancer. @@ -2498,86 +2069,15 @@ func (lbaas *LbaasV2) ensureLoadBalancerDeleted(ctx context.Context, clusterName klog.InfoS("Updated load balancer tags", "lbID", loadbalancer.ID) } - // Delete the Security Group - if lbaas.opts.ManageSecurityGroups { - if err := lbaas.EnsureSecurityGroupDeleted(clusterName, service); err != nil { - return err - } - } - - return nil -} - -// EnsureSecurityGroupDeleted deleting security group for specific loadbalancer service. -func (lbaas *LbaasV2) EnsureSecurityGroupDeleted(_ string, service *corev1.Service) error { - // Generate Name - lbSecGroupName := getSecurityGroupName(service) - lbSecGroupID, err := secgroups.IDFromName(lbaas.network, lbSecGroupName) - if err != nil { - if isSecurityGroupNotFound(err) { - // It is OK when the security group has been deleted by others. - return nil - } - return fmt.Errorf("error occurred finding security group: %s: %v", lbSecGroupName, err) - } - - // Disassociate the security group from the neutron ports on the nodes. - if err := disassociateSecurityGroupForLB(lbaas.network, lbSecGroupID); err != nil { - return fmt.Errorf("failed to disassociate security group %s: %v", lbSecGroupID, err) - } - - mc := metrics.NewMetricContext("security_group", "delete") - lbSecGroup := groups.Delete(lbaas.network, lbSecGroupID) - if lbSecGroup.Err != nil && !cpoerrors.IsNotFound(lbSecGroup.Err) { - return mc.ObserveRequest(lbSecGroup.Err) - } - _ = mc.ObserveRequest(nil) - - if len(lbaas.opts.NodeSecurityGroupIDs) == 0 { - // Just happen when nodes have not Security Group, or should not happen - // UpdateLoadBalancer and EnsureLoadBalancer can set lbaas.opts.NodeSecurityGroupIDs when it is empty - // And service controller call UpdateLoadBalancer to set lbaas.opts.NodeSecurityGroupIDs when controller manager service is restarted. - klog.Warningf("Can not find node-security-group from all the nodes of this cluster when delete loadbalancer service %s/%s", - service.Namespace, service.Name) - } else { - // Delete the rules in the Node Security Group - for _, nodeSecurityGroupID := range lbaas.opts.NodeSecurityGroupIDs { - opts := rules.ListOpts{ - SecGroupID: nodeSecurityGroupID, - RemoteGroupID: lbSecGroupID, - } - secGroupRules, err := getSecurityGroupRules(lbaas.network, opts) - - if err != nil && !cpoerrors.IsNotFound(err) { - msg := fmt.Sprintf("error finding rules for remote group id %s in security group id %s: %v", lbSecGroupID, nodeSecurityGroupID, err) - return fmt.Errorf(msg) - } - - for _, rule := range secGroupRules { - mc := metrics.NewMetricContext("security_group_rule", "delete") - res := rules.Delete(lbaas.network, rule.ID) - if res.Err != nil && !cpoerrors.IsNotFound(res.Err) { - _ = mc.ObserveRequest(res.Err) - return fmt.Errorf("error occurred deleting security group rule: %s: %v", rule.ID, res.Err) - } - _ = mc.ObserveRequest(nil) - } - } + // Delete the Security Group. We're doing that even if `manage-security-groups` is disabled to make sure we don't + // orphan created SGs even if CPO got reconfigured. 
+ if err := lbaas.ensureSecurityGroupDeleted(clusterName, service); err != nil { + return err } return nil } -// IsAllowAll checks whether the netsets.IPNet allows traffic from 0.0.0.0/0 -func IsAllowAll(ipnets netsets.IPNet) bool { - for _, s := range ipnets.StringSlice() { - if s == "0.0.0.0/0" { - return true - } - } - return false -} - // GetLoadBalancerSourceRanges first try to parse and verify LoadBalancerSourceRanges field from a service. // If the field is not specified, turn to parse and verify the AnnotationLoadBalancerSourceRangesKey annotation from a service, // extracting the source ranges to allow, and if not present returns a default (allow-all) value. diff --git a/pkg/openstack/loadbalancer_service_patcher.go b/pkg/openstack/loadbalancer_service_patcher.go new file mode 100644 index 00000000..5c3b6c15 --- /dev/null +++ b/pkg/openstack/loadbalancer_service_patcher.go @@ -0,0 +1,52 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package openstack + +import ( + "context" + "reflect" + + corev1 "k8s.io/api/core/v1" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/client-go/kubernetes" + cpoutil "k8s.io/cloud-provider-openstack/pkg/util" +) + +type servicePatcher struct { + kclient kubernetes.Interface + base *corev1.Service + updated *corev1.Service +} + +func newServicePatcher(kclient kubernetes.Interface, base *corev1.Service) servicePatcher { + return servicePatcher{ + kclient: kclient, + base: base.DeepCopy(), + updated: base, + } +} + +// Patch will submit a patch request for the Service unless the updated service +// reference contains the same set of annotations as the base copied during +// servicePatcher initialization. +func (sp *servicePatcher) Patch(ctx context.Context, err error) error { + if reflect.DeepEqual(sp.base.Annotations, sp.updated.Annotations) { + return err + } + perr := cpoutil.PatchService(ctx, sp.kclient, sp.base, sp.updated) + return utilerrors.NewAggregate([]error{err, perr}) +} diff --git a/pkg/openstack/loadbalancer_sg.go b/pkg/openstack/loadbalancer_sg.go new file mode 100644 index 00000000..f38a9d54 --- /dev/null +++ b/pkg/openstack/loadbalancer_sg.go @@ -0,0 +1,361 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package openstack + +import ( + "fmt" + "strings" + + "github.com/gophercloud/gophercloud" + neutrontags "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/attributestags" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules" + neutronports "github.com/gophercloud/gophercloud/openstack/networking/v2/ports" + "github.com/gophercloud/gophercloud/openstack/networking/v2/subnets" + secgroups "github.com/gophercloud/utils/openstack/networking/v2/extensions/security/groups" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/klog/v2" + netutils "k8s.io/utils/net" + "k8s.io/utils/strings/slices" + + "k8s.io/cloud-provider-openstack/pkg/metrics" + cpoerrors "k8s.io/cloud-provider-openstack/pkg/util/errors" + openstackutil "k8s.io/cloud-provider-openstack/pkg/util/openstack" +) + +func getSecurityGroupName(service *corev1.Service) string { + securityGroupName := fmt.Sprintf("lb-sg-%s-%s-%s", service.UID, service.Namespace, service.Name) + //OpenStack requires that the name of a security group is shorter than 255 bytes. + if len(securityGroupName) > 255 { + securityGroupName = securityGroupName[:255] + } + + return securityGroupName +} + +// applyNodeSecurityGroupIDForLB associates the security group with the ports being members of the LB on the nodes. +func applyNodeSecurityGroupIDForLB(network *gophercloud.ServiceClient, svcConf *serviceConfig, nodes []*corev1.Node, sg string) error { + for _, node := range nodes { + serverID, _, err := instanceIDFromProviderID(node.Spec.ProviderID) + if err != nil { + return fmt.Errorf("error getting server ID from the node: %w", err) + } + + addr, _ := nodeAddressForLB(node, svcConf.preferredIPFamily) + if addr == "" { + // If node has no viable address let's ignore it. + continue + } + + listOpts := neutronports.ListOpts{DeviceID: serverID} + allPorts, err := openstackutil.GetPorts[PortWithPortSecurity](network, listOpts) + if err != nil { + return err + } + + for _, port := range allPorts { + // You can't assign an SG to a port with port_security_enabled=false, skip them. + if !port.PortSecurityEnabled { + continue + } + + // If the Security Group is already present on the port, skip it. + if slices.Contains(port.SecurityGroups, sg) { + continue + } + + // Only add SGs to the port actually attached to the LB + if !isPortMember(port, addr, svcConf.lbMemberSubnetID) { + continue + } + + // Add the SG to the port + // TODO(dulek): This isn't an atomic operation. In order to protect from lost update issues we should use + // `revision_number` handling to make sure our update to `security_groups` field wasn't preceded + // by a different one. Same applies to a removal of the SG. + newSGs := append(port.SecurityGroups, sg) + updateOpts := neutronports.UpdateOpts{SecurityGroups: &newSGs} + mc := metrics.NewMetricContext("port", "update") + res := neutronports.Update(network, port.ID, updateOpts) + if mc.ObserveRequest(res.Err) != nil { + return fmt.Errorf("failed to update security group for port %s: %v", port.ID, res.Err) + } + } + } + + return nil +} + +// disassociateSecurityGroupForLB removes the given security group from the ports +func disassociateSecurityGroupForLB(network *gophercloud.ServiceClient, sg string) error { + // Find all the ports that have the security group associated. 
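A note on the port updates in applyNodeSecurityGroupIDForLB above and disassociateSecurityGroupForLB below: Neutron replaces a port's security_groups list wholesale on update, so both functions build the complete new list before the PUT, and that is exactly the lost-update window the revision_number TODOs warn about. A dependency-free sketch of the two list operations:

```go
package main

import "fmt"

// withSG returns the list with sg appended, unless it is already present.
func withSG(sgs []string, sg string) []string {
	for _, s := range sgs {
		if s == sg {
			return sgs // already associated, nothing to do
		}
	}
	return append(sgs, sg)
}

// withoutSG returns the list with every occurrence of sg removed.
func withoutSG(sgs []string, sg string) []string {
	out := make([]string, 0, len(sgs))
	for _, s := range sgs {
		if s != sg {
			out = append(out, s)
		}
	}
	return out
}

func main() {
	port := []string{"default", "lb-sg-123"}
	fmt.Println(withSG(port, "lb-sg-123"))    // unchanged
	fmt.Println(withoutSG(port, "lb-sg-123")) // [default]
}
```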
+	listOpts := neutronports.ListOpts{SecurityGroups: []string{sg}}
+	allPorts, err := openstackutil.GetPorts[neutronports.Port](network, listOpts)
+	if err != nil {
+		return err
+	}
+
+	// Disassociate the security group and remove the tag.
+	for _, port := range allPorts {
+		existingSGs := sets.NewString()
+		for _, sgID := range port.SecurityGroups {
+			existingSGs.Insert(sgID)
+		}
+		existingSGs.Delete(sg)
+
+		// Update port security groups
+		newSGs := existingSGs.List()
+		// TODO(dulek): This should be done using Neutron's revision_number to make sure
+		//              we don't trigger a lost update issue.
+		updateOpts := neutronports.UpdateOpts{SecurityGroups: &newSGs}
+		mc := metrics.NewMetricContext("port", "update")
+		res := neutronports.Update(network, port.ID, updateOpts)
+		if mc.ObserveRequest(res.Err) != nil {
+			return fmt.Errorf("failed to update security group for port %s: %v", port.ID, res.Err)
+		}
+
+		// Remove the security group ID tag from the port. Please note we don't tag ports with SG IDs anymore,
+		// so this stays for backward compatibility. It's reasonable to delete it in the future. 404s are ignored.
+		if slices.Contains(port.Tags, sg) {
+			mc = metrics.NewMetricContext("port_tag", "delete")
+			err := neutrontags.Delete(network, "ports", port.ID, sg).ExtractErr()
+			if mc.ObserveRequest(err) != nil {
+				return fmt.Errorf("failed to remove tag %s from port %s: %v", sg, port.ID, err)
+			}
+		}
+	}
+
+	return nil
+}
+
+// ensureSecurityRule creates a security group rule in the security
+// group, if it is not present.
+func (lbaas *LbaasV2) ensureSecurityRule(sgRuleCreateOpts rules.CreateOpts) error {
+	mc := metrics.NewMetricContext("security_group_rule", "create")
+	_, err := rules.Create(lbaas.network, sgRuleCreateOpts).Extract()
+	if err != nil && cpoerrors.IsConflictError(err) {
+		// Conflict means the SG rule already exists, so ignore that error.
+		klog.Warningf("Security group rule already found when trying to create it. This indicates concurrent "+
+			"updates to the SG %s and is unexpected", sgRuleCreateOpts.SecGroupID)
+		return mc.ObserveRequest(nil)
+	} else if mc.ObserveRequest(err) != nil {
+		return fmt.Errorf("failed to create rule for security group %s: %v", sgRuleCreateOpts.SecGroupID, err)
+	}
+	return nil
+}
+
+func compareSecurityGroupRuleAndCreateOpts(rule rules.SecGroupRule, opts rules.CreateOpts) bool {
+	return rule.Direction == string(opts.Direction) &&
+		strings.EqualFold(rule.Protocol, string(opts.Protocol)) &&
+		rule.EtherType == string(opts.EtherType) &&
+		rule.RemoteIPPrefix == opts.RemoteIPPrefix &&
+		rule.PortRangeMin == opts.PortRangeMin &&
+		rule.PortRangeMax == opts.PortRangeMax
+}
+
+func getRulesToCreateAndDelete(wantedRules []rules.CreateOpts, existingRules []rules.SecGroupRule) ([]rules.CreateOpts, []rules.SecGroupRule) {
+	toCreate := make([]rules.CreateOpts, 0, len(wantedRules))     // Max is all rules need creation
+	toDelete := make([]rules.SecGroupRule, 0, len(existingRules)) // Max will be all the existing rules to be deleted
+	// Surely this can be done in a more efficient way. Is it worth optimizing if most of
+	// the time we'll deal with just 1 or 2 elements in each array? I doubt it.
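getRulesToCreateAndDelete is a plain two-way set difference under the fuzzy equality of compareSecurityGroupRuleAndCreateOpts. A generic, dependency-free restatement of the same idea (not the patch's code) that may make the two loops below easier to follow:

```go
package main

import "fmt"

// diff returns wanted entries with no matching existing entry (to create)
// and existing entries with no matching wanted entry (to delete). O(n*m),
// which is fine for the handful of rules a single Service produces.
func diff[W, E any](wanted []W, existing []E, match func(W, E) bool) (toCreate []W, toDelete []E) {
	for _, e := range existing {
		found := false
		for _, w := range wanted {
			if match(w, e) {
				found = true
				break
			}
		}
		if !found {
			toDelete = append(toDelete, e)
		}
	}
	for _, w := range wanted {
		found := false
		for _, e := range existing {
			if match(w, e) {
				found = true
				break
			}
		}
		if !found {
			toCreate = append(toCreate, w)
		}
	}
	return toCreate, toDelete
}

func main() {
	c, d := diff([]int{1, 2}, []int{2, 3}, func(w, e int) bool { return w == e })
	fmt.Println(c, d) // [1] [3]
}
```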
+	for _, existingRule := range existingRules {
+		found := false
+		for _, wantedRule := range wantedRules {
+			if compareSecurityGroupRuleAndCreateOpts(existingRule, wantedRule) {
+				found = true
+				break
+			}
+		}
+		if !found {
+			// in existingRules but not in wantedRules, delete
+			toDelete = append(toDelete, existingRule)
+		}
+	}
+	for _, wantedRule := range wantedRules {
+		found := false
+		for _, existingRule := range existingRules {
+			if compareSecurityGroupRuleAndCreateOpts(existingRule, wantedRule) {
+				found = true
+				break
+			}
+		}
+		if !found {
+			// in wantedRules but not in existingRules, create
+			toCreate = append(toCreate, wantedRule)
+		}
+	}
+
+	return toCreate, toDelete
+}
+
+// ensureAndUpdateOctaviaSecurityGroup handles the creation and update of the security group and the security rules for the octavia load balancer
+func (lbaas *LbaasV2) ensureAndUpdateOctaviaSecurityGroup(clusterName string, apiService *corev1.Service, nodes []*corev1.Node, svcConf *serviceConfig) error {
+	// get service ports
+	ports := apiService.Spec.Ports
+	if len(ports) == 0 {
+		return fmt.Errorf("no ports provided to openstack load balancer")
+	}
+
+	// ensure security group for LB
+	lbSecGroupName := getSecurityGroupName(apiService)
+	lbSecGroupID, err := secgroups.IDFromName(lbaas.network, lbSecGroupName)
+	if err != nil {
+		// If the security group for the LB does not exist, create it later
+		if cpoerrors.IsNotFound(err) {
+			lbSecGroupID = ""
+		} else {
+			return fmt.Errorf("error occurred finding security group: %s: %v", lbSecGroupName, err)
+		}
+	}
+	if len(lbSecGroupID) == 0 {
+		// create security group
+		lbSecGroupCreateOpts := groups.CreateOpts{
+			Name:        lbSecGroupName,
+			Description: fmt.Sprintf("Security Group for %s/%s Service LoadBalancer in cluster %s", apiService.Namespace, apiService.Name, clusterName),
+		}
+
+		mc := metrics.NewMetricContext("security_group", "create")
+		lbSecGroup, err := groups.Create(lbaas.network, lbSecGroupCreateOpts).Extract()
+		if mc.ObserveRequest(err) != nil {
+			return fmt.Errorf("failed to create Security Group for loadbalancer service %s/%s: %v", apiService.Namespace, apiService.Name, err)
+		}
+		lbSecGroupID = lbSecGroup.ID
+	}
+
+	mc := metrics.NewMetricContext("subnet", "get")
+	subnet, err := subnets.Get(lbaas.network, svcConf.lbMemberSubnetID).Extract()
+	if mc.ObserveRequest(err) != nil {
+		return fmt.Errorf(
+			"failed to find subnet %s from openstack: %v", svcConf.lbMemberSubnetID, err)
+	}
+
+	etherType := rules.EtherType4
+	if netutils.IsIPv6CIDRString(subnet.CIDR) {
+		etherType = rules.EtherType6
+	}
+	cidrs := []string{subnet.CIDR}
+	if lbaas.opts.LBProvider == "ovn" {
+		// OVN keeps the source IP of the incoming traffic. This means that we cannot just open the LB range, but we
+		// need to open it to the whole world. This can be restricted by using the service.spec.loadBalancerSourceRanges.
+		// svcConf.allowedCIDR will give us the ranges calculated by GetLoadBalancerSourceRanges() earlier.
+		cidrs = svcConf.allowedCIDR
+	}
+
+	existingRules, err := openstackutil.GetSecurityGroupRules(lbaas.network, rules.ListOpts{SecGroupID: lbSecGroupID})
+	if err != nil {
+		return fmt.Errorf(
+			"failed to find security group rules in %s: %v", lbSecGroupID, err)
+	}
+
+	// List of the security group rules wanted in the SG.
+	// Number of Ports plus the potential HealthCheckNodePort.
+	wantedRules := make([]rules.CreateOpts, 0, len(ports)+1)
+
+	if apiService.Spec.HealthCheckNodePort != 0 {
+		// TODO(dulek): How should this work with OVN…? Do we need to allow all?
+		//              Probably the traffic goes from the compute node?
+		wantedRules = append(wantedRules,
+			rules.CreateOpts{
+				Direction:      rules.DirIngress,
+				Protocol:       rules.ProtocolTCP,
+				EtherType:      etherType,
+				RemoteIPPrefix: subnet.CIDR,
+				SecGroupID:     lbSecGroupID,
+				PortRangeMin:   int(apiService.Spec.HealthCheckNodePort),
+				PortRangeMax:   int(apiService.Spec.HealthCheckNodePort),
+			},
+		)
+	}
+
+	for _, port := range ports {
+		if port.NodePort == 0 { // It's 0 when AllocateLoadBalancerNodePorts=False
+			continue
+		}
+		for _, cidr := range cidrs {
+			protocol := strings.ToLower(string(port.Protocol)) // K8s uses TCP, Neutron uses tcp, etc.
+			wantedRules = append(wantedRules,
+				rules.CreateOpts{
+					Direction:      rules.DirIngress,
+					Protocol:       rules.RuleProtocol(protocol),
+					EtherType:      etherType,
+					RemoteIPPrefix: cidr,
+					SecGroupID:     lbSecGroupID,
+					PortRangeMin:   int(port.NodePort),
+					PortRangeMax:   int(port.NodePort),
+				},
+			)
+		}
+	}
+
+	toCreate, toDelete := getRulesToCreateAndDelete(wantedRules, existingRules)
+
+	// create new rules
+	for _, opts := range toCreate {
+		err := lbaas.ensureSecurityRule(opts)
+		if err != nil {
+			return fmt.Errorf("failed to apply security rule (%v), %w", opts, err)
+		}
+	}
+
+	// delete unneeded rules
+	for _, existingRule := range toDelete {
+		klog.Infof("Deleting rule %s from security group %s (%s)", existingRule.ID, existingRule.SecGroupID, lbSecGroupName)
+		mc := metrics.NewMetricContext("security_group_rule", "delete")
+		err := rules.Delete(lbaas.network, existingRule.ID).ExtractErr()
+		if err != nil && cpoerrors.IsNotFound(err) {
+			// The rule is already gone, e.g. due to a concurrent update to the SG;
+			// log it and continue with the remaining rules instead of bailing out early.
+			klog.Warningf("Security group rule %s was not found when trying to delete it. This indicates concurrent "+
+				"updates to the SG %s and is unexpected", existingRule.ID, existingRule.SecGroupID)
+			_ = mc.ObserveRequest(nil)
+			continue
+		}
+		if mc.ObserveRequest(err) != nil {
+			return fmt.Errorf("failed to delete security group rule %s: %w", existingRule.ID, err)
+		}
+	}
+
+	if err := applyNodeSecurityGroupIDForLB(lbaas.network, svcConf, nodes, lbSecGroupID); err != nil {
+		return err
+	}
+	return nil
+}
+
+// ensureSecurityGroupDeleted deletes the security group for a specific load balancer service.
+func (lbaas *LbaasV2) ensureSecurityGroupDeleted(_ string, service *corev1.Service) error {
+	// Generate Name
+	lbSecGroupName := getSecurityGroupName(service)
+	lbSecGroupID, err := secgroups.IDFromName(lbaas.network, lbSecGroupName)
+	if err != nil {
+		if cpoerrors.IsNotFound(err) {
+			// It is OK when the security group has been deleted by others.
+			return nil
+		}
+		return fmt.Errorf("error occurred finding security group: %s: %v", lbSecGroupName, err)
+	}
+
+	// Disassociate the security group from the neutron ports on the nodes.
+	if err := disassociateSecurityGroupForLB(lbaas.network, lbSecGroupID); err != nil {
+		return fmt.Errorf("failed to disassociate security group %s: %v", lbSecGroupID, err)
+	}
+
+	mc := metrics.NewMetricContext("security_group", "delete")
+	lbSecGroup := groups.Delete(lbaas.network, lbSecGroupID)
+	if lbSecGroup.Err != nil && !cpoerrors.IsNotFound(lbSecGroup.Err) {
+		return mc.ObserveRequest(lbSecGroup.Err)
+	}
+	_ = mc.ObserveRequest(nil)
+
+	return nil
+}
diff --git a/pkg/openstack/loadbalancer_subnet_match.go b/pkg/openstack/loadbalancer_subnet_match.go
new file mode 100644
index 00000000..f2041bd5
--- /dev/null
+++ b/pkg/openstack/loadbalancer_subnet_match.go
@@ -0,0 +1,234 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package openstack
+
+import (
+	"fmt"
+	"regexp"
+	"strings"
+
+	"github.com/gophercloud/gophercloud/openstack/networking/v2/subnets"
+	"gopkg.in/godo.v2/glob"
+)
+
+// floatingSubnetSpec contains the specification of the public subnet to use for
+// a public network. If given it may either describe the subnet id or
+// a subnet name pattern for the subnet to use. If a pattern is given
+// the first subnet matching the name pattern with an allocatable floating ip
+// will be selected.
+type floatingSubnetSpec struct {
+	subnetID   string
+	subnet     string
+	subnetTags string
+}
+
+// TweakSubNetListOpsFunction is used to modify List Options for subnets
+type TweakSubNetListOpsFunction func(*subnets.ListOpts)
+
+// matcher matches a subnet
+type matcher func(subnet *subnets.Subnet) bool
+
+// negate returns a negated matcher for a given one
+func negate(f matcher) matcher { return func(s *subnets.Subnet) bool { return !f(s) } }
+
+func andMatcher(a, b matcher) matcher {
+	if a == nil {
+		return b
+	}
+	if b == nil {
+		return a
+	}
+	return func(s *subnets.Subnet) bool {
+		return a(s) && b(s)
+	}
+}
+
+// regexpNameMatcher creates a subnet matcher matching a subnet by name for a given regexp.
+func regexpNameMatcher(r *regexp.Regexp) matcher {
+	return func(s *subnets.Subnet) bool { return r.FindString(s.Name) == s.Name }
+}
+
+// subnetNameMatcher creates a subnet matcher matching a subnet by name for a given glob
+// or regexp
+func subnetNameMatcher(pat string) (matcher, error) {
+	// try to create floating IP in matching subnets
+	var match matcher
+	not := false
+	if strings.HasPrefix(pat, "!") {
+		not = true
+		pat = pat[1:]
+	}
+	if strings.HasPrefix(pat, "~") {
+		rexp, err := regexp.Compile(pat[1:])
+		if err != nil {
+			return nil, fmt.Errorf("invalid subnet regexp pattern %q: %v", pat[1:], err)
+		}
+		match = regexpNameMatcher(rexp)
+	} else {
+		match = regexpNameMatcher(glob.Globexp(pat))
+	}
+	if not {
+		match = negate(match)
+	}
+	return match, nil
+}
+
+// subnetTagMatcher matches a subnet by a given tag spec
+func subnetTagMatcher(tags string) matcher {
+	// try to create floating IP in matching subnets
+	var match matcher
+
+	list, not, all := tagList(tags)
+
+	match = func(s *subnets.Subnet) bool {
+		for _, tag := range list {
+			found := false
+			for _, t := range s.Tags {
+				if t == tag {
+					found = true
+					break
+				}
+			}
+			if found {
+				if !all {
+					return !not
+				}
+			} else {
+				if all {
+					return not
+				}
+			}
+		}
+		return not != all
+	}
+	return match
+}
+
+func (s *floatingSubnetSpec) Configured() bool {
+	if s != nil && (s.subnetID != "" || s.MatcherConfigured()) {
+		return true
+	}
+	return false
+}
+
+func (s *floatingSubnetSpec) ListSubnetsForNetwork(lbaas *LbaasV2, networkID string) ([]subnets.Subnet, error) {
+	matcher, err := s.Matcher(false)
+	if err != nil {
+		return nil, err
+	}
+	list, err := lbaas.listSubnetsForNetwork(networkID, s.tweakListOpts)
+	if err != nil {
+		return nil, err
+	}
+	if matcher == nil {
+		return list, nil
+	}
+
+	// filter subnets according to spec
+	var foundSubnets []subnets.Subnet
+	for _, subnet := range list {
+		if matcher(&subnet) {
+			foundSubnets = append(foundSubnets, subnet)
+		}
+	}
+	return foundSubnets, nil
+}
+
+// tweakListOpts can be used to optimize a subnet list query for the
+// actually described subnet filter
+func (s *floatingSubnetSpec) tweakListOpts(opts *subnets.ListOpts) {
+	if s.subnetTags != "" {
+		list, not, all := tagList(s.subnetTags)
+		tags := strings.Join(list, ",")
+		if all {
+			if not {
+				opts.NotTagsAny = tags // at least one tag must be missing
+			} else {
+				opts.Tags = tags // all tags must be present
+			}
+		} else {
+			if not {
+				opts.NotTags = tags // none of the tags are present
+			} else {
+				opts.TagsAny = tags // at least one tag is present
+			}
+		}
+	}
+}
+
+func (s *floatingSubnetSpec) MatcherConfigured() bool {
+	if s != nil && s.subnetID == "" && (s.subnet != "" || s.subnetTags != "") {
+		return true
+	}
+	return false
+}
+
+func addField(s, name, value string) string {
+	if value == "" {
+		return s
+	}
+	if s != "" {
+		s += ", "
+	}
+	return fmt.Sprintf("%s%s: %q", s, name, value)
+}
+
+func (s *floatingSubnetSpec) String() string {
+	if s == nil || (s.subnetID == "" && s.subnet == "" && s.subnetTags == "") {
+		return ""
+	}
+	pat := addField("", "subnetID", s.subnetID)
+	pat = addField(pat, "pattern", s.subnet)
+	return addField(pat, "tags", s.subnetTags)
+}
+
+func (s *floatingSubnetSpec) Matcher(tag bool) (matcher, error) {
+	if !s.MatcherConfigured() {
+		return nil, nil
+	}
+	var match matcher
+	var err error
+	if s.subnet != "" {
+		match, err = subnetNameMatcher(s.subnet)
+		if err != nil {
+			return nil, err
+		}
+	}
+	if tag && s.subnetTags != "" {
+		match = andMatcher(match, subnetTagMatcher(s.subnetTags))
+	}
+	if match == nil {
+		match = func(s *subnets.Subnet) bool { return true }
+	}
+	return match, nil
+}
+
+func tagList(tags string) ([]string, bool, bool) {
+	not := strings.HasPrefix(tags, "!")
+	if not {
+		tags = tags[1:]
+	}
+	all := strings.HasPrefix(tags, "&")
+	if all {
+		tags = tags[1:]
+	}
+	list := strings.Split(tags, ",")
+	for i := range list {
+		list[i] = strings.TrimSpace(list[i])
+	}
+	return list, not, all
+}
diff --git a/pkg/openstack/openstack_loadbalancer_subnet_match_test.go b/pkg/openstack/loadbalancer_subnet_match_test.go
similarity index 100%
rename from pkg/openstack/openstack_loadbalancer_subnet_match_test.go
rename to pkg/openstack/loadbalancer_subnet_match_test.go
diff --git a/pkg/openstack/loadbalancer_test.go b/pkg/openstack/loadbalancer_test.go
index 67801fb9..115ea2f7 100644
--- a/pkg/openstack/loadbalancer_test.go
+++ b/pkg/openstack/loadbalancer_test.go
@@ -1,12 +1,21 @@
 package openstack
 
 import (
+	"context"
+	"fmt"
+	"reflect"
 	"sort"
 	"testing"
 
-	"github.com/stretchr/testify/assert"
-
+	"github.com/gophercloud/gophercloud"
 	"github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/listeners"
+	v2monitors "github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/monitors"
+	"github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/pools"
+	"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules"
+	"github.com/stretchr/testify/assert"
+	corev1 "k8s.io/api/core/v1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	cpoerrors "k8s.io/cloud-provider-openstack/pkg/util/errors"
 )
 
 type testPopListener struct {
@@ -133,3 +142,2288 @@ func TestPopListener(t *testing.T) {
 		assert.Equal(t, ids, item.result, item.name)
 	}
 }
+
+type testGetRulesToCreateAndDelete struct {
+	testName      string
+	wantedRules
[]rules.CreateOpts + existingRules []rules.SecGroupRule + toCreate []rules.CreateOpts + toDelete []rules.SecGroupRule +} + +func TestGetRulesToCreateAndDelete(t *testing.T) { + tests := []testGetRulesToCreateAndDelete{ + { + testName: "Empty elements", + wantedRules: []rules.CreateOpts{}, + existingRules: []rules.SecGroupRule{}, + toCreate: []rules.CreateOpts{}, + toDelete: []rules.SecGroupRule{}, + }, + { + testName: "Removal of default egress SG rules", + wantedRules: []rules.CreateOpts{ + { + Direction: "ingress", + EtherType: "IPv4", + SecGroupID: "foo", + PortRangeMax: 123, + PortRangeMin: 123, + Protocol: "TCP", + RemoteIPPrefix: "10.0.0.0/8", + }, + }, + existingRules: []rules.SecGroupRule{ + { + ID: "bar", + Direction: "egress", + EtherType: "IPv4", + SecGroupID: "foo", + RemoteIPPrefix: "0.0.0.0/0", + }, { + ID: "baz", + Direction: "egress", + EtherType: "IPv6", + SecGroupID: "foo", + RemoteIPPrefix: "::/0", + }, + }, + toCreate: []rules.CreateOpts{ + { + Direction: "ingress", + EtherType: "IPv4", + SecGroupID: "foo", + PortRangeMax: 123, + PortRangeMin: 123, + Protocol: "TCP", + RemoteIPPrefix: "10.0.0.0/8", + }, + }, + toDelete: []rules.SecGroupRule{ + { + ID: "bar", + Direction: "egress", + EtherType: "IPv4", + SecGroupID: "foo", + RemoteIPPrefix: "0.0.0.0/0", + }, { + ID: "baz", + Direction: "egress", + EtherType: "IPv6", + SecGroupID: "foo", + RemoteIPPrefix: "::/0", + }, + }, + }, + { + testName: "Protocol case mismatch", + wantedRules: []rules.CreateOpts{ + { + Direction: "ingress", + EtherType: "IPv4", + SecGroupID: "foo", + PortRangeMax: 123, + PortRangeMin: 123, + Protocol: "TCP", + RemoteIPPrefix: "10.0.0.0/8", + }, + }, + existingRules: []rules.SecGroupRule{ + { + ID: "bar", + Direction: "ingress", + EtherType: "IPv4", + SecGroupID: "foo", + PortRangeMax: 123, + PortRangeMin: 123, + Protocol: "tcp", + RemoteIPPrefix: "10.0.0.0/8", + }, + }, + toCreate: []rules.CreateOpts{}, + toDelete: []rules.SecGroupRule{}, + }, + { + testName: "changing a port number", + wantedRules: []rules.CreateOpts{ + { + Direction: "ingress", + EtherType: "IPv4", + SecGroupID: "foo", + PortRangeMax: 124, + PortRangeMin: 124, + Protocol: "TCP", + RemoteIPPrefix: "10.0.0.0/8", + }, + }, + existingRules: []rules.SecGroupRule{ + { + ID: "bar", + Direction: "ingress", + EtherType: "IPv4", + SecGroupID: "foo", + RemoteIPPrefix: "10.0.0.0/8", + PortRangeMax: 123, + PortRangeMin: 123, + }, + }, + toCreate: []rules.CreateOpts{ + { + Direction: "ingress", + EtherType: "IPv4", + SecGroupID: "foo", + PortRangeMax: 124, + PortRangeMin: 124, + Protocol: "TCP", + RemoteIPPrefix: "10.0.0.0/8", + }, + }, + toDelete: []rules.SecGroupRule{ + { + ID: "bar", + Direction: "ingress", + EtherType: "IPv4", + SecGroupID: "foo", + RemoteIPPrefix: "10.0.0.0/8", + PortRangeMax: 123, + PortRangeMin: 123, + }, + }, + }, + { + testName: "changing the CIDR", + wantedRules: []rules.CreateOpts{ + { + Direction: "ingress", + EtherType: "IPv4", + SecGroupID: "foo", + PortRangeMax: 123, + PortRangeMin: 123, + Protocol: "TCP", + RemoteIPPrefix: "10.0.0.0/24", + }, + }, + existingRules: []rules.SecGroupRule{ + { + ID: "bar", + Direction: "ingress", + EtherType: "IPv4", + SecGroupID: "foo", + RemoteIPPrefix: "10.0.0.0/8", + PortRangeMax: 123, + PortRangeMin: 123, + }, + }, + toCreate: []rules.CreateOpts{ + { + Direction: "ingress", + EtherType: "IPv4", + SecGroupID: "foo", + PortRangeMax: 123, + PortRangeMin: 123, + Protocol: "TCP", + RemoteIPPrefix: "10.0.0.0/24", + }, + }, + toDelete: []rules.SecGroupRule{ + { + ID: "bar", + 
Direction: "ingress", + EtherType: "IPv4", + SecGroupID: "foo", + RemoteIPPrefix: "10.0.0.0/8", + PortRangeMax: 123, + PortRangeMin: 123, + }, + }, + }, + { + testName: "wiping all rules", + wantedRules: []rules.CreateOpts{}, + existingRules: []rules.SecGroupRule{ + { + ID: "bar", + Direction: "ingress", + EtherType: "IPv4", + SecGroupID: "foo", + RemoteIPPrefix: "10.0.0.0/8", + PortRangeMax: 123, + PortRangeMin: 123, + }, + { + ID: "bar", + Direction: "ingress", + EtherType: "IPv4", + SecGroupID: "foo", + RemoteIPPrefix: "10.0.0.0/8", + PortRangeMax: 124, + PortRangeMin: 124, + }, + { + ID: "bar", + Direction: "ingress", + EtherType: "IPv4", + SecGroupID: "foo", + RemoteIPPrefix: "10.0.0.0/8", + PortRangeMax: 125, + PortRangeMin: 125, + }, + }, + toCreate: []rules.CreateOpts{}, + toDelete: []rules.SecGroupRule{ + { + ID: "bar", + Direction: "ingress", + EtherType: "IPv4", + SecGroupID: "foo", + RemoteIPPrefix: "10.0.0.0/8", + PortRangeMax: 123, + PortRangeMin: 123, + }, + { + ID: "bar", + Direction: "ingress", + EtherType: "IPv4", + SecGroupID: "foo", + RemoteIPPrefix: "10.0.0.0/8", + PortRangeMax: 124, + PortRangeMin: 124, + }, + { + ID: "bar", + Direction: "ingress", + EtherType: "IPv4", + SecGroupID: "foo", + RemoteIPPrefix: "10.0.0.0/8", + PortRangeMax: 125, + PortRangeMin: 125, + }, + }, + }, + { + testName: "several rules for an empty SG", + wantedRules: []rules.CreateOpts{ + { + Direction: "ingress", + EtherType: "IPv4", + SecGroupID: "foo", + PortRangeMax: 123, + PortRangeMin: 123, + Protocol: "TCP", + RemoteIPPrefix: "10.0.0.0/8", + }, { + Direction: "ingress", + EtherType: "IPv4", + SecGroupID: "foo", + PortRangeMax: 124, + PortRangeMin: 124, + Protocol: "TCP", + RemoteIPPrefix: "10.0.10.0/24", + }, { + Direction: "ingress", + EtherType: "IPv4", + SecGroupID: "foo", + PortRangeMax: 124, + PortRangeMin: 124, + Protocol: "UDP", + RemoteIPPrefix: "10.0.12.0/24", + }, + }, + existingRules: []rules.SecGroupRule{}, + toCreate: []rules.CreateOpts{ + { + Direction: "ingress", + EtherType: "IPv4", + SecGroupID: "foo", + PortRangeMax: 123, + PortRangeMin: 123, + Protocol: "TCP", + RemoteIPPrefix: "10.0.0.0/8", + }, { + Direction: "ingress", + EtherType: "IPv4", + SecGroupID: "foo", + PortRangeMax: 124, + PortRangeMin: 124, + Protocol: "TCP", + RemoteIPPrefix: "10.0.10.0/24", + }, { + Direction: "ingress", + EtherType: "IPv4", + SecGroupID: "foo", + PortRangeMax: 124, + PortRangeMin: 124, + Protocol: "UDP", + RemoteIPPrefix: "10.0.12.0/24", + }, + }, + toDelete: []rules.SecGroupRule{}, + }, + } + for _, tt := range tests { + t.Run(tt.testName, func(t *testing.T) { + toCreate, toDelete := getRulesToCreateAndDelete(tt.wantedRules, tt.existingRules) + assert.ElementsMatch(t, tt.toCreate, toCreate) + assert.ElementsMatch(t, tt.toDelete, toDelete) + }) + } +} + +func Test_getListenerProtocol(t *testing.T) { + type testArg struct { + protocol corev1.Protocol + svcConf *serviceConfig + } + + tests := []struct { + name string + testArg testArg + expected listeners.Protocol + }{ + { + name: "not nil svcConf and tlsContainerRef is not empty", + testArg: testArg{ + svcConf: &serviceConfig{ + tlsContainerRef: "tls-container-ref", + }, + }, + expected: listeners.ProtocolTerminatedHTTPS, + }, + { + name: "not nil svcConf and keepClientIP is true", + testArg: testArg{ + svcConf: &serviceConfig{ + keepClientIP: true, + }, + }, + expected: listeners.ProtocolHTTP, + }, + { + name: "nil svcConf with TCP protocol", + testArg: testArg{ + svcConf: nil, + protocol: corev1.ProtocolTCP, + }, + expected: 
listeners.ProtocolTCP, + }, + { + name: "nil svcConf with UDP protocol", + testArg: testArg{ + svcConf: nil, + protocol: corev1.ProtocolUDP, + }, + expected: listeners.ProtocolUDP, + }, + { + name: "test for no specification on svc and a random protocol to test it return value", + testArg: testArg{ + svcConf: nil, + protocol: corev1.ProtocolSCTP, + }, + expected: listeners.ProtocolSCTP, + }, + { + name: "passing a svcConf tls container ref with a keep client IP", + testArg: testArg{ + svcConf: &serviceConfig{ + tlsContainerRef: "tls-container-ref", + keepClientIP: true, + }, + }, + expected: listeners.ProtocolTerminatedHTTPS, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := getListenerProtocol(tt.testArg.protocol, tt.testArg.svcConf); !reflect.DeepEqual(got, tt.expected) { + t.Errorf("getListenerProtocol() = %v, expected %v", got, tt.expected) + } + }) + } +} + +func TestLbaasV2_checkListenerPorts(t *testing.T) { + type args struct { + service *corev1.Service + curListenerMapping map[listenerKey]*listeners.Listener + isLBOwner bool + lbName string + } + tests := []struct { + name string + args args + wantErr bool + }{ + { + name: "error is not thrown if loadbalancer matches & if port is already in use by a lb", + args: args{ + service: &corev1.Service{ + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{ + { + Name: "service", + Protocol: "https", + Port: 9090, + }, + }, + }, + }, + curListenerMapping: map[listenerKey]*listeners.Listener{ + { + Protocol: "https", + Port: 9090, + }: { + ID: "listenerid", + Tags: []string{"test-lb"}, + }, + }, + isLBOwner: false, + lbName: "test-lb", + }, + wantErr: false, + }, + { + name: "error is thrown if loadbalancer doesn't matches & if port is already in use by a service", + args: args{ + service: &corev1.Service{ + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{ + { + Name: "service", + Protocol: "https", + Port: 9090, + }, + }, + }, + }, + curListenerMapping: map[listenerKey]*listeners.Listener{ + { + Protocol: "https", + Port: 9090, + }: { + ID: "listenerid", + Tags: []string{"test-lb", "test-lb1"}, + }, + }, + isLBOwner: false, + lbName: "test-lb2", + }, + wantErr: true, + }, + { + name: "error is not thrown if lbOwner is present & no tags on service", + args: args{ + service: &corev1.Service{ + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{ + { + Name: "service", + Protocol: "https", + Port: 9090, + }, + }, + }, + }, + curListenerMapping: map[listenerKey]*listeners.Listener{ + { + Protocol: "https", + Port: 9090, + }: { + ID: "listenerid", + }, + }, + isLBOwner: true, + lbName: "test-lb", + }, + wantErr: false, + }, + { + name: "error is not thrown if lbOwner is true & there are tags on service", + args: args{ + service: &corev1.Service{ + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{ + { + Name: "service", + Protocol: "http", + Port: 9091, + }, + }, + }, + }, + curListenerMapping: map[listenerKey]*listeners.Listener{ + { + Protocol: "https", + Port: 9090, + }: { + ID: "listenerid", + Tags: []string{"test-lb"}, + }, + }, + isLBOwner: true, + lbName: "test-lb", + }, + wantErr: false, + }, + { + name: "error is not thrown if listener key doesn't match port & protocol", + args: args{ + service: &corev1.Service{ + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{ + { + Name: "service", + Protocol: "http", + Port: 9091, + }, + }, + }, + }, + curListenerMapping: map[listenerKey]*listeners.Listener{ + { + Protocol: "https", + Port: 9090, + }: { + ID: "listenerid", + Tags: 
[]string{"test-lb"}, + }, + }, + isLBOwner: false, + lbName: "test-lb", + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + lbaas := &LbaasV2{ + LoadBalancer: LoadBalancer{}, + } + err := lbaas.checkListenerPorts(tt.args.service, tt.args.curListenerMapping, tt.args.isLBOwner, tt.args.lbName) + if tt.wantErr == true { + assert.ErrorContains(t, err, "already exists") + } else { + assert.NoError(t, err) + } + }) + } +} +func TestLbaasV2_createLoadBalancerStatus(t *testing.T) { + type fields struct { + LoadBalancer LoadBalancer + } + type result struct { + HostName string + IPAddress string + } + type args struct { + service *corev1.Service + svcConf *serviceConfig + addr string + } + tests := []struct { + name string + fields fields + args args + want result + }{ + { + name: "it should return hostname from service annotation", + fields: fields{ + LoadBalancer: LoadBalancer{ + opts: LoadBalancerOpts{ + EnableIngressHostname: false, + IngressHostnameSuffix: "test", + }, + }, + }, + args: args{ + service: &corev1.Service{ + ObjectMeta: v1.ObjectMeta{ + Annotations: map[string]string{"loadbalancer.openstack.org/hostname": "testHostName"}, + }, + }, + svcConf: &serviceConfig{ + enableProxyProtocol: false, + }, + addr: "10.10.0.6", + }, + want: result{ + HostName: "testHostName", + }, + }, + { + name: "it should return fakehostname if proxyProtocol & IngressHostName is enabled without svc annotation", + fields: fields{ + LoadBalancer: LoadBalancer{ + opts: LoadBalancerOpts{ + EnableIngressHostname: true, + IngressHostnameSuffix: "ingress-suffix", + }, + }, + }, + args: args{ + service: &corev1.Service{ + ObjectMeta: v1.ObjectMeta{ + Annotations: map[string]string{"test": "key"}, + }, + }, + svcConf: &serviceConfig{ + enableProxyProtocol: true, + }, + addr: "10.10.0.6", + }, + want: result{ + HostName: "10.10.0.6.ingress-suffix", + }, + }, + { + name: "it should default to ip address if not hostname can be found from svc or proxyProtocol", + fields: fields{ + LoadBalancer: LoadBalancer{ + opts: LoadBalancerOpts{ + EnableIngressHostname: false, + IngressHostnameSuffix: "ingress-suffix", + }, + }, + }, + args: args{ + service: &corev1.Service{ + ObjectMeta: v1.ObjectMeta{ + Annotations: map[string]string{"test": "key"}, + }, + }, + svcConf: &serviceConfig{ + enableProxyProtocol: false, + }, + addr: "10.10.0.6", + }, + want: result{ + IPAddress: "10.10.0.6", + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + lbaas := &LbaasV2{ + LoadBalancer: tt.fields.LoadBalancer, + } + + result := lbaas.createLoadBalancerStatus(tt.args.service, tt.args.svcConf, tt.args.addr) + assert.Equal(t, tt.want.HostName, result.Ingress[0].Hostname) + assert.Equal(t, tt.want.IPAddress, result.Ingress[0].IP) + }) + } +} + +func Test_getIntFromServiceAnnotation(t *testing.T) { + type args struct { + service *corev1.Service + annotationKey string + defaultSetting int + } + tests := []struct { + name string + args args + want int + }{ + { + name: "return default setting if no service annotation", + args: args{ + defaultSetting: 1, + annotationKey: "bar", + service: &corev1.Service{ + ObjectMeta: v1.ObjectMeta{ + Annotations: map[string]string{"foo": "2"}, + }, + }, + }, + want: 1, + }, + { + name: "return annotation key if it exists in service annotation", + args: args{ + defaultSetting: 1, + annotationKey: "foo", + service: &corev1.Service{ + ObjectMeta: v1.ObjectMeta{ + Annotations: map[string]string{"foo": "2"}, + }, + }, + }, + want: 2, + }, + { + 
name: "return default setting if key isn't valid integer", + args: args{ + defaultSetting: 1, + annotationKey: "foo", + service: &corev1.Service{ + ObjectMeta: v1.ObjectMeta{ + Annotations: map[string]string{"foo": "bar"}, + }, + }, + }, + want: 1, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equal(t, tt.want, getIntFromServiceAnnotation(tt.args.service, tt.args.annotationKey, tt.args.defaultSetting)) + }) + } +} + +func TestLbaasV2_GetLoadBalancerName(t *testing.T) { + lbaas := &LbaasV2{} + + type testArgs struct { + ctx context.Context + clusterName string + service *corev1.Service + } + tests := []struct { + name string + testArgs testArgs + expected string + }{ + { + name: "valid input with short name", + testArgs: testArgs{ + ctx: context.Background(), + clusterName: "my-valid-cluster", + service: &corev1.Service{ + ObjectMeta: v1.ObjectMeta{ + Namespace: "valid-cluster-namespace", + Name: "valid-name", + }, + }, + }, + expected: "kube_service_my-valid-cluster_valid-cluster-namespace_valid-name", + }, + { + name: "input that surpass value maximum length", + testArgs: testArgs{ + ctx: context.Background(), + clusterName: "a-longer-valid-cluster", + service: &corev1.Service{ + ObjectMeta: v1.ObjectMeta{ + Namespace: "a-longer-valid-cluster-namespace", + Name: "a-longer-valid-name-for-the-load-balance-name-to-test-if-the-length-of-value-is-longer-than-required-maximum-length-random-addition-hardcode-number-to-make-it-above-length-255-at-the-end-yeah-so-the-rest-is-additional-input", + }, + }, + }, + expected: "kube_service_a-longer-valid-cluster_a-longer-valid-cluster-namespace_a-longer-valid-name-for-the-load-balance-name-to-test-if-the-length-of-value-is-longer-than-required-maximum-length-random-addition-hardcode-number-to-make-it-above-length-255-at-the-end", + }, + { + name: "empty input", + testArgs: testArgs{ + ctx: context.Background(), + clusterName: "", + service: &corev1.Service{}, + }, + expected: "kube_service___", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := lbaas.GetLoadBalancerName(tt.testArgs.ctx, tt.testArgs.clusterName, tt.testArgs.service) + assert.Equal(t, tt.expected, got) + }) + } +} + +func Test_buildPoolCreateOpt(t *testing.T) { + type args struct { + protocol string + svcConf *serviceConfig + service *corev1.Service + lbaasV2 *LbaasV2 + } + tests := []struct { + name string + args args + want pools.CreateOpts + }{ + { + name: "test for proxy protocol enabled", + args: args{ + protocol: "TCP", + svcConf: &serviceConfig{ + keepClientIP: true, + tlsContainerRef: "tls-container-ref", + enableProxyProtocol: true, + }, + lbaasV2: &LbaasV2{ + LoadBalancer{ + opts: LoadBalancerOpts{ + LBProvider: "ovn", + LBMethod: "SOURCE_IP_PORT", + }, + }, + }, + service: &corev1.Service{ + Spec: corev1.ServiceSpec{ + SessionAffinity: corev1.ServiceAffinityClientIP, + }, + }, + }, + want: pools.CreateOpts{ + Name: "test for proxy protocol enabled", + Protocol: pools.ProtocolPROXY, + LBMethod: "SOURCE_IP_PORT", + Persistence: &pools.SessionPersistence{Type: "SOURCE_IP"}, + }, + }, + { + name: "test for pool protocol http with proxy protocol disabled", + args: args{ + protocol: "HTTP", + svcConf: &serviceConfig{ + keepClientIP: true, + tlsContainerRef: "tls-container-ref", + enableProxyProtocol: false, + }, + lbaasV2: &LbaasV2{ + LoadBalancer{ + opts: LoadBalancerOpts{ + LBProvider: "ovn", + LBMethod: "SOURCE_IP_PORT", + }, + }, + }, + service: &corev1.Service{ + Spec: corev1.ServiceSpec{ + SessionAffinity: 
corev1.ServiceAffinityClientIP, + }, + }, + }, + want: pools.CreateOpts{ + Name: "test for pool protocol http with proxy protocol disabled", + Protocol: pools.ProtocolHTTP, + LBMethod: "SOURCE_IP_PORT", + Persistence: &pools.SessionPersistence{Type: "SOURCE_IP"}, + }, + }, + { + name: "test for pool protocol UDP with proxy protocol disabled", + args: args{ + protocol: "UDP", + svcConf: &serviceConfig{ + keepClientIP: true, + tlsContainerRef: "tls-container-ref", + enableProxyProtocol: false, + }, + lbaasV2: &LbaasV2{ + LoadBalancer{ + opts: LoadBalancerOpts{ + LBProvider: "ovn", + LBMethod: "SOURCE_IP_PORT", + }, + }, + }, + service: &corev1.Service{ + Spec: corev1.ServiceSpec{ + SessionAffinity: corev1.ServiceAffinityClientIP, + }, + }, + }, + want: pools.CreateOpts{ + Name: "test for pool protocol UDP with proxy protocol disabled", + Protocol: pools.ProtocolHTTP, + LBMethod: "SOURCE_IP_PORT", + Persistence: &pools.SessionPersistence{Type: "SOURCE_IP"}, + }, + }, + { + name: "test for session affinity none", + args: args{ + protocol: "TCP", + svcConf: &serviceConfig{ + keepClientIP: true, + tlsContainerRef: "tls-container-ref", + }, + lbaasV2: &LbaasV2{ + LoadBalancer{ + opts: LoadBalancerOpts{ + LBProvider: "ovn", + LBMethod: "SOURCE_IP_PORT", + }, + }, + }, + service: &corev1.Service{ + Spec: corev1.ServiceSpec{ + SessionAffinity: corev1.ServiceAffinityNone, + }, + }, + }, + want: pools.CreateOpts{ + Name: "test for session affinity none", + Protocol: pools.ProtocolHTTP, + LBMethod: "SOURCE_IP_PORT", + Persistence: nil, + }, + }, + { + name: "test for session affinity client ip", + args: args{ + protocol: "TCP", + svcConf: &serviceConfig{ + keepClientIP: true, + tlsContainerRef: "tls-container-ref", + }, + lbaasV2: &LbaasV2{ + LoadBalancer{ + opts: LoadBalancerOpts{ + LBProvider: "ovn", + LBMethod: "SOURCE_IP_PORT", + }, + }, + }, + service: &corev1.Service{ + Spec: corev1.ServiceSpec{ + SessionAffinity: corev1.ServiceAffinityClientIP, + }, + }, + }, + want: pools.CreateOpts{ + Name: "test for session affinity client ip", + Protocol: pools.ProtocolHTTP, + LBMethod: "SOURCE_IP_PORT", + Persistence: &pools.SessionPersistence{Type: "SOURCE_IP"}, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := tt.args.lbaasV2.buildPoolCreateOpt(tt.args.protocol, tt.args.service, tt.args.svcConf, tt.name) + assert.Equal(t, got, tt.want) + }) + } +} + +func Test_getSecurityGroupName(t *testing.T) { + tests := []struct { + name string + service *corev1.Service + expected string + }{ + { + name: "regular test security group name and length", + service: &corev1.Service{ + ObjectMeta: v1.ObjectMeta{ + UID: "12345", + Namespace: "security-group-namespace", + Name: "security-group-name", + }, + }, + expected: "lb-sg-12345-security-group-namespace-security-group-name", + }, + { + name: "security group name longer than 255 byte", + service: &corev1.Service{ + ObjectMeta: v1.ObjectMeta{ + UID: "12345678-90ab-cdef-0123-456789abcdef", + Namespace: "security-group-longer-test-namespace", + Name: "security-group-longer-test-service-name-with-more-than-255-byte-this-test-should-be-longer-than-255-i-need-that-ijiojohoo-afhwefkbfk-jwebfwbifwbewifobiu-efbiobfoiqwebi-the-end-e-end-pardon-the-long-string-i-really-apologize-if-this-is-a-bad-thing-to-do", + }, + }, + expected: 
"lb-sg-12345678-90ab-cdef-0123-456789abcdef-security-group-longer-test-namespace-security-group-longer-test-service-name-with-more-than-255-byte-this-test-should-be-longer-than-255-i-need-that-ijiojohoo-afhwefkbfk-jwebfwbifwbewifobiu-efbiobfoiqwebi-the-end", + }, + { + name: "test the security group name with all empty param", + service: &corev1.Service{ + ObjectMeta: v1.ObjectMeta{}, + }, + expected: "lb-sg---", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := getSecurityGroupName(test.service) + + assert.Equal(t, test.expected, got) + }) + } +} + +func Test_getBoolFromServiceAnnotation(t *testing.T) { + type testargs struct { + service *corev1.Service + annotationKey string + defaultSetting bool + } + tests := []struct { + name string + testargs testargs + want bool + }{ + { + name: "Return default setting if no service annotation", + testargs: testargs{ + annotationKey: "bar", + defaultSetting: true, + service: &corev1.Service{ + ObjectMeta: v1.ObjectMeta{ + Annotations: map[string]string{"foo": "false"}, + }, + }, + }, + want: true, + }, + { + name: "Return annotation key if it exists in service annotation (true)", + testargs: testargs{ + annotationKey: "foo", + defaultSetting: false, + service: &corev1.Service{ + ObjectMeta: v1.ObjectMeta{ + Annotations: map[string]string{"foo": "true"}, + }, + }, + }, + want: true, + }, + { + name: "Return annotation key if it exists in service annotation (false)", + testargs: testargs{ + annotationKey: "foo", + defaultSetting: true, + service: &corev1.Service{ + ObjectMeta: v1.ObjectMeta{ + Annotations: map[string]string{"foo": "false"}, + }, + }, + }, + want: false, + }, + { + name: "Return default setting if key isn't a valid boolean value", + testargs: testargs{ + annotationKey: "foo", + defaultSetting: true, + service: &corev1.Service{ + ObjectMeta: v1.ObjectMeta{ + Annotations: map[string]string{"foo": "invalid"}, + }, + }, + }, + want: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := getBoolFromServiceAnnotation(tt.testargs.service, tt.testargs.annotationKey, tt.testargs.defaultSetting) + if got != tt.want { + t.Errorf("getBoolFromServiceAnnotation() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestLbaasV2_updateServiceAnnotations(t *testing.T) { + service := &corev1.Service{ + ObjectMeta: v1.ObjectMeta{ + Annotations: nil, + }, + } + + annotations := map[string]string{ + "key1": "value1", + "key2": "value2", + } + + lbaas := LbaasV2{} + lbaas.updateServiceAnnotations(service, annotations) + + serviceAnnotations := make([]map[string]string, 0) + for key, value := range service.ObjectMeta.Annotations { + serviceAnnotations = append(serviceAnnotations, map[string]string{key: value}) + } + + expectedAnnotations := []map[string]string{ + {"key1": "value1"}, + {"key2": "value2"}, + } + + assert.ElementsMatch(t, expectedAnnotations, serviceAnnotations) +} + +func Test_getStringFromServiceAnnotation(t *testing.T) { + type testArgs struct { + service *corev1.Service + annotationKey string + defaultSetting string + } + + tests := []struct { + name string + testArgs testArgs + expected string + }{ + { + name: "enter empty arguments", + testArgs: testArgs{ + service: &corev1.Service{ + ObjectMeta: v1.ObjectMeta{}, + }, + annotationKey: "", + defaultSetting: "", + }, + expected: "", + }, + { + name: "enter valid arguments with annotations", + testArgs: testArgs{ + service: &corev1.Service{ + ObjectMeta: v1.ObjectMeta{ + Namespace: "service-namespace", + Name: 
"service-name", + Annotations: map[string]string{"annotationKey": "annotation-Value"}, + }, + }, + annotationKey: "annotationKey", + defaultSetting: "default-setting", + }, + expected: "annotation-Value", + }, + { + name: "valid arguments without annotations", + testArgs: testArgs{ + service: &corev1.Service{ + ObjectMeta: v1.ObjectMeta{ + Namespace: "service-namespace", + Name: "service-name", + }, + }, + annotationKey: "annotationKey", + defaultSetting: "default-setting", + }, + expected: "default-setting", + }, + { + name: "enter argument without default-setting", + testArgs: testArgs{ + service: &corev1.Service{ + ObjectMeta: v1.ObjectMeta{ + Namespace: "service-namespace", + Name: "service-name", + Annotations: map[string]string{"annotationKey": "annotation-Value"}, + }, + }, + annotationKey: "annotationKey", + defaultSetting: "", + }, + expected: "annotation-Value", + }, + { + name: "enter argument without annotation and default-setting", + testArgs: testArgs{ + service: &corev1.Service{ + ObjectMeta: v1.ObjectMeta{ + Namespace: "service-namespace", + Name: "service-name", + }, + }, + annotationKey: "annotationKey", + defaultSetting: "", + }, + expected: "", + }, + { + name: "enter argument with a non-existing annotationKey with default setting", + testArgs: testArgs{ + service: &corev1.Service{ + ObjectMeta: v1.ObjectMeta{ + Namespace: "service-namespace", + Name: "service-name", + Annotations: map[string]string{"annotationKey": "annotation-Value"}, + }, + }, + annotationKey: "invalid-annotationKey", + defaultSetting: "default-setting", + }, + expected: "default-setting", + }, + { + name: "enter argument with a non-existing annotationKey without a default setting", + testArgs: testArgs{ + service: &corev1.Service{ + ObjectMeta: v1.ObjectMeta{ + Namespace: "service-namespace", + Name: "service-name", + Annotations: map[string]string{"annotationKey": "annotation-Value"}, + }, + }, + annotationKey: "invalid-annotationKey", + defaultSetting: "", + }, + expected: "", + }, + { + name: "no name-space and service name but valid annotations", + testArgs: testArgs{ + service: &corev1.Service{ + ObjectMeta: v1.ObjectMeta{ + Annotations: map[string]string{"annotationKey": "annotation-Value"}, + }, + }, + annotationKey: "annotationKey", + defaultSetting: "default-setting", + }, + expected: "annotation-Value", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := getStringFromServiceAnnotation(test.testArgs.service, test.testArgs.annotationKey, test.testArgs.defaultSetting) + + assert.Equal(t, test.expected, got) + }) + } +} + +func Test_nodeAddressForLB(t *testing.T) { + type testArgs struct { + node *corev1.Node + preferredIPFamily corev1.IPFamily + } + + tests := []struct { + name string + testArgs testArgs + expect string + expectedErr error + }{ + { + name: "Empty Address with IPv4 protocol family ", + testArgs: testArgs{ + node: &corev1.Node{ + Status: corev1.NodeStatus{ + Addresses: []corev1.NodeAddress{}, + }, + }, + preferredIPFamily: corev1.IPv4Protocol, + }, + expect: "", + expectedErr: cpoerrors.ErrNoAddressFound, + }, + { + name: "Empty Address with IPv6 protocol family ", + testArgs: testArgs{ + node: &corev1.Node{ + Status: corev1.NodeStatus{ + Addresses: []corev1.NodeAddress{}, + }, + }, + preferredIPFamily: corev1.IPv6Protocol, + }, + expect: "", + expectedErr: cpoerrors.ErrNoAddressFound, + }, + { + name: "valid address with IPv4 protocol family", + testArgs: testArgs{ + node: &corev1.Node{ + Status: corev1.NodeStatus{ + Addresses: 
[]corev1.NodeAddress{ + { + Type: corev1.NodeInternalIP, + Address: "192.168.1.1", + }, + }, + }, + }, + preferredIPFamily: corev1.IPv4Protocol, + }, + expect: "192.168.1.1", + expectedErr: nil, + }, + { + name: "valid address with IPv6 protocol family", + testArgs: testArgs{ + node: &corev1.Node{ + Status: corev1.NodeStatus{ + Addresses: []corev1.NodeAddress{ + { + Type: corev1.NodeInternalIP, + Address: "2001:0db8:85a3:0000:0000:8a2e:0370:7334", + }, + }, + }, + }, + preferredIPFamily: corev1.IPv6Protocol, + }, + expect: "2001:0db8:85a3:0000:0000:8a2e:0370:7334", + expectedErr: nil, + }, + { + name: "multiple IPv4 address", + testArgs: testArgs{ + node: &corev1.Node{ + Status: corev1.NodeStatus{ + Addresses: []corev1.NodeAddress{ + { + Type: corev1.NodeInternalIP, + Address: "192.168.1.1", + }, + { + Type: corev1.NodeExternalIP, + Address: "192.168.1.2", + }, + }, + }, + }, + preferredIPFamily: corev1.IPv4Protocol, + }, + expect: "192.168.1.1", + expectedErr: nil, + }, + { + name: "multiple IPv6 address", + testArgs: testArgs{ + node: &corev1.Node{ + Status: corev1.NodeStatus{ + Addresses: []corev1.NodeAddress{ + { + Type: corev1.NodeInternalIP, + Address: "2001:0db8:85a3:0000:0000:8a2e:0370:7334", + }, + { + Type: corev1.NodeExternalIP, + Address: "2001:0db8:85a3:3333:1111:8a2e:9999:8888", + }, + }, + }, + }, + preferredIPFamily: corev1.IPv6Protocol, + }, + expect: "2001:0db8:85a3:0000:0000:8a2e:0370:7334", + expectedErr: nil, + }, + { + name: "multiple mix addresses expecting IPv6 response", + testArgs: testArgs{ + node: &corev1.Node{ + Status: corev1.NodeStatus{ + Addresses: []corev1.NodeAddress{ + { + Type: corev1.NodeInternalIP, + Address: "192.168.1.1", + }, + { + Type: corev1.NodeInternalIP, + Address: "2001:0db8:85a3:0000:0000:8a2e:0370:7334", + }, + }, + }, + }, + preferredIPFamily: corev1.IPv6Protocol, + }, + expect: "2001:0db8:85a3:0000:0000:8a2e:0370:7334", + expectedErr: nil, + }, + { + name: "multiple mix addresses expecting IPv4 response", + testArgs: testArgs{ + node: &corev1.Node{ + Status: corev1.NodeStatus{ + Addresses: []corev1.NodeAddress{ + { + Type: corev1.NodeExternalIP, + Address: "2009:0db8:85a3:0003:0001:8a2e:0370:9999", + }, + + { + Type: corev1.NodeInternalIP, + Address: "2001:0db8:85a3:0000:0000:8a2e:0370:7334", + }, + + { + Type: corev1.NodeExternalIP, + Address: "2001:0db8:85a3:0000:1111:8a2e:9798:7334", + }, + + { + Type: corev1.NodeInternalIP, + Address: "192.168.1.1", + }, + + { + Type: corev1.NodeExternalIP, + Address: "2001:0db8:85a3:0000:0000:8a2e:0370:7334", + }, + }, + }, + }, + preferredIPFamily: corev1.IPv4Protocol, + }, + expect: "192.168.1.1", + expectedErr: nil, + }, + { + name: "single valid IPv4 address without preferred valid specification", + testArgs: testArgs{ + node: &corev1.Node{ + Status: corev1.NodeStatus{ + Addresses: []corev1.NodeAddress{ + { + Type: corev1.NodeInternalIP, + Address: "192.168.1.1", + }, + }, + }, + }, + }, + expect: "192.168.1.1", + expectedErr: nil, + }, + { + name: "single valid IPv6 address without preferred valid specification", + testArgs: testArgs{ + node: &corev1.Node{ + Status: corev1.NodeStatus{ + Addresses: []corev1.NodeAddress{ + { + Type: corev1.NodeInternalIP, + Address: "2001:0db8:85a3:0000:0000:8a2e:0370:7334", + }, + }, + }, + }, + }, + expect: "2001:0db8:85a3:0000:0000:8a2e:0370:7334", + expectedErr: nil, + }, + { + name: "multiple valid IPv6 address without preferred valid specification", + testArgs: testArgs{ + node: &corev1.Node{ + Status: corev1.NodeStatus{ + Addresses: []corev1.NodeAddress{ + 
{ + Type: corev1.NodeInternalIP, + Address: "2001:0db8:85a3:0000:0000:8a2e:0370:7334", + }, + { + Type: corev1.NodeInternalIP, + Address: "192.168.0.1", + }, + { + Type: corev1.NodeInternalIP, + Address: "2001:0db8:85a3:1111:2222:8a2e:6869:7334", + }, + }, + }, + }, + }, + expect: "2001:0db8:85a3:0000:0000:8a2e:0370:7334", + expectedErr: nil, + }, + { + name: "no IPv4 address matching the preferred family", + testArgs: testArgs{ + node: &corev1.Node{ + Status: corev1.NodeStatus{ + Addresses: []corev1.NodeAddress{ + { + Type: corev1.NodeInternalIP, + Address: "2001:0db8:85a3:0000:0000:8a2e:0370:7334", + }, + }, + }, + }, + preferredIPFamily: corev1.IPv4Protocol, + }, + expect: "", + expectedErr: cpoerrors.ErrNoAddressFound, + }, + { + name: "no IPv6 address matching the preferred family", + testArgs: testArgs{ + node: &corev1.Node{ + Status: corev1.NodeStatus{ + Addresses: []corev1.NodeAddress{ + { + Type: corev1.NodeInternalIP, + Address: "192.168.1.1", + }, + }, + }, + }, + preferredIPFamily: corev1.IPv6Protocol, + }, + expect: "", + expectedErr: cpoerrors.ErrNoAddressFound, + }, + { + name: "Ignore NodeExternalDNS address with IPv4 protocol family", + testArgs: testArgs{ + node: &corev1.Node{ + Status: corev1.NodeStatus{ + Addresses: []corev1.NodeAddress{ + { + Type: corev1.NodeExternalDNS, + Address: "example.com", + }, + }, + }, + }, + preferredIPFamily: corev1.IPv4Protocol, + }, + expect: "", + expectedErr: cpoerrors.ErrNoAddressFound, + }, + { + name: "Ignore NodeExternalDNS address with IPv6 protocol family", + testArgs: testArgs{ + node: &corev1.Node{ + Status: corev1.NodeStatus{ + Addresses: []corev1.NodeAddress{ + { + Type: corev1.NodeExternalDNS, + Address: "example.com", + }, + }, + }, + }, + preferredIPFamily: corev1.IPv6Protocol, + }, + expect: "", + expectedErr: cpoerrors.ErrNoAddressFound, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got, err := nodeAddressForLB(test.testArgs.node, test.testArgs.preferredIPFamily) + if test.expectedErr != nil { + assert.EqualError(t, err, test.expectedErr.Error()) + } else { + assert.NoError(t, err) + } + + assert.Equal(t, test.expect, got) + }) + } +} + +func TestLbaasV2_getMemberSubnetID(t *testing.T) { + lbaasOpts := LoadBalancerOpts{ + LBClasses: map[string]*LBClass{ + "lbclassKey": { + MemberSubnetID: "lb-class-member-subnet-id-5678", + }, + }, + MemberSubnetID: "default-memberSubnetId", + } + + tests := []struct { + name string + opts LoadBalancerOpts + service *corev1.Service + want string + wantErr string + }{ + { + name: "get member subnet id from service annotation", + opts: LoadBalancerOpts{}, + service: &corev1.Service{ + ObjectMeta: v1.ObjectMeta{ + Annotations: map[string]string{ + ServiceAnnotationLoadBalancerMemberSubnetID: "member-subnet-id", + ServiceAnnotationLoadBalancerClass: "svc-annotation-loadbalance-class", + }, + }, + }, + want: "member-subnet-id", + wantErr: "", + }, + { + name: "get member subnet id from config class", + opts: lbaasOpts, + service: &corev1.Service{ + ObjectMeta: v1.ObjectMeta{ + Annotations: map[string]string{ + ServiceAnnotationLoadBalancerClass: "lbclassKey", + }, + }, + }, + want: "lb-class-member-subnet-id-5678", + wantErr: "", + }, + { + name: "get member subnet id from default config", + opts: lbaasOpts, + service: &corev1.Service{}, + want: "default-memberSubnetId", + wantErr: "", + }, + { + name: "error when loadbalancer class not found", + opts: LoadBalancerOpts{}, + service: &corev1.Service{ + ObjectMeta: v1.ObjectMeta{ + Annotations: 
map[string]string{ + ServiceAnnotationLoadBalancerClass: "invalid-lb-class", + }, + }, + }, + want: "", + wantErr: "invalid loadbalancer class \"invalid-lb-class\"", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + lbaas := LbaasV2{ + LoadBalancer: LoadBalancer{ + opts: tt.opts, + }, + } + + got, err := lbaas.getMemberSubnetID(tt.service) + if tt.wantErr != "" { + assert.EqualError(t, err, tt.wantErr) + } else { + assert.NoError(t, err) + } + + assert.Equal(t, tt.want, got) + }) + } +} + +func TestBuildBatchUpdateMemberOpts(t *testing.T) { + // Sample Nodes + node1 := &corev1.Node{ + ObjectMeta: v1.ObjectMeta{ + Name: "node-1", + }, + Status: corev1.NodeStatus{ + Addresses: []corev1.NodeAddress{ + { + Type: corev1.NodeInternalIP, + Address: "192.168.1.1", + }, + }, + }, + } + node2 := &corev1.Node{ + ObjectMeta: v1.ObjectMeta{ + Name: "node-2", + }, + Status: corev1.NodeStatus{ + Addresses: []corev1.NodeAddress{ + { + Type: corev1.NodeInternalIP, + Address: "192.168.1.2", + }, + }, + }, + } + testCases := []struct { + name string + nodes []*corev1.Node + port corev1.ServicePort + svcConf *serviceConfig + expectedLen int + expectedNewMembersCount int + }{ + { + name: "NodePortequalszero", + nodes: []*corev1.Node{node1, node2}, + port: corev1.ServicePort{NodePort: 0}, + svcConf: &serviceConfig{ + preferredIPFamily: corev1.IPv4Protocol, + lbMemberSubnetID: "subnet-12345-test", + healthCheckNodePort: 8081, + }, + expectedLen: 0, + expectedNewMembersCount: 0, + }, + { + name: "Valid nodes, canUseHTTPMonitor=false", + nodes: []*corev1.Node{node1, node2}, + port: corev1.ServicePort{NodePort: 8080}, + svcConf: &serviceConfig{ + preferredIPFamily: corev1.IPv4Protocol, + lbMemberSubnetID: "subnet-12345-test", + healthCheckNodePort: 8081, + enableMonitor: false, + }, + expectedLen: 2, + expectedNewMembersCount: 2, + }, + { + name: "Valid nodes, canUseHTTPMonitor=true", + nodes: []*corev1.Node{node1, node2}, + port: corev1.ServicePort{NodePort: 8080}, + svcConf: &serviceConfig{ + preferredIPFamily: corev1.IPv4Protocol, + lbMemberSubnetID: "subnet-12345-test", + healthCheckNodePort: 8081, + enableMonitor: true, + }, + expectedLen: 2, + expectedNewMembersCount: 2, + }, + { + name: "Invalid preferred IP family, fallback to default", + nodes: []*corev1.Node{node1, node2}, + port: corev1.ServicePort{NodePort: 0}, + svcConf: &serviceConfig{ + preferredIPFamily: "invalid-family", + lbMemberSubnetID: "subnet-12345-test", + healthCheckNodePort: 8081, + }, + expectedLen: 0, + expectedNewMembersCount: 0, + }, + { + name: "ErrNoAddressFound happens and no member is created", + nodes: []*corev1.Node{ + { + ObjectMeta: v1.ObjectMeta{Name: "node-1"}, + Status: corev1.NodeStatus{ + Addresses: []corev1.NodeAddress{}, + }, + }, + }, + port: corev1.ServicePort{NodePort: 8080}, + svcConf: &serviceConfig{ + preferredIPFamily: corev1.IPv4Protocol, + lbMemberSubnetID: "subnet-12345-test", + healthCheckNodePort: 8081, + enableMonitor: false, + }, + expectedLen: 0, + expectedNewMembersCount: 0, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + lbaas := &LbaasV2{} + members, newMembers, err := lbaas.buildBatchUpdateMemberOpts(tc.port, tc.nodes, tc.svcConf) + assert.Len(t, members, tc.expectedLen) + assert.NoError(t, err) + + if tc.expectedNewMembersCount == 0 { + assert.Empty(t, newMembers) + } else { + assert.Len(t, newMembers, tc.expectedNewMembersCount) + } + }) + } +} + +func Test_getSubnetID(t *testing.T) { + type args struct { + svcConf *serviceConfig + 
service *corev1.Service + lbaasV2 *LbaasV2 + } + tests := []struct { + name string + args args + want string + expectedErr string + }{ + { + name: "test get subnet from service annotation", + args: args{ + svcConf: &serviceConfig{}, + lbaasV2: &LbaasV2{ + LoadBalancer{ + opts: LoadBalancerOpts{ + LBClasses: map[string]*LBClass{ + "test-class": { + SubnetID: "test-class-subnet-id", + }, + }, + }, + }, + }, + service: &corev1.Service{ + ObjectMeta: v1.ObjectMeta{ + Annotations: map[string]string{ + "loadbalancer.openstack.org/subnet-id": "annotation-test-id", + "loadbalancer.openstack.org/class": "test-class", + }, + }, + }, + }, + want: "annotation-test-id", + }, + { + name: "test get subnet from config class", + args: args{ + svcConf: &serviceConfig{}, + lbaasV2: &LbaasV2{ + LoadBalancer{ + opts: LoadBalancerOpts{ + LBClasses: map[string]*LBClass{ + "test-class": { + SubnetID: "test-class-subnet-id", + }, + }, + }, + }, + }, + service: &corev1.Service{ + ObjectMeta: v1.ObjectMeta{ + Annotations: map[string]string{ + "loadbalancer.openstack.org/class": "test-class", + }, + }, + }, + }, + want: "test-class-subnet-id", + }, + { + name: "test get subnet from config class with invalid loadbalancer class", + args: args{ + svcConf: &serviceConfig{}, + lbaasV2: &LbaasV2{ + LoadBalancer{ + opts: LoadBalancerOpts{ + LBClasses: map[string]*LBClass{ + "decoy-class": { + SubnetID: "test-id", + }, + }, + SubnetID: "test-subnet-id", + }, + }, + }, + service: &corev1.Service{ + ObjectMeta: v1.ObjectMeta{ + Annotations: map[string]string{ + "loadbalancer.openstack.org/class": "test-class", + }, + }, + }, + }, + want: "", + expectedErr: fmt.Sprintf("invalid loadbalancer class %q", "test-class"), + }, + { + name: "test get subnet from default config", + args: args{ + svcConf: &serviceConfig{}, + lbaasV2: &LbaasV2{ + LoadBalancer{ + opts: LoadBalancerOpts{ + LBClasses: map[string]*LBClass{ + "test-config-class-subnet-id": { + SubnetID: "test-id", + }, + }, + SubnetID: "test-default-subnet-id", + }, + }, + }, + service: &corev1.Service{}, + }, + want: "test-default-subnet-id", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := tt.args.lbaasV2.getSubnetID(tt.args.service, tt.args.svcConf) + if tt.expectedErr != "" { + assert.EqualError(t, err, tt.expectedErr) + } + if tt.expectedErr == "" { + assert.NoError(t, err) + } + assert.Equal(t, tt.want, got) + }) + } +} + +func TestLbaasV2_getNetworkID(t *testing.T) { + lbaas := LbaasV2{ + LoadBalancer: LoadBalancer{ + opts: LoadBalancerOpts{ + LBClasses: map[string]*LBClass{ + "lbclassKey": { + NetworkID: "lb-class-network-id-1234", + }, + }, + NetworkID: "default-lb-class-networkId", + }, + }, + } + + type testArg struct { + service *corev1.Service + } + tests := []struct { + name string + lbaas LbaasV2 + arg testArg + want string + wantErr string + }{ + { + name: "get network id from service annotation", + lbaas: LbaasV2{}, + arg: testArg{ + service: &corev1.Service{ + ObjectMeta: v1.ObjectMeta{ + Annotations: map[string]string{ + ServiceAnnotationLoadBalancerNetworkID: "subnet-id", + }, + }, + }, + }, + want: "subnet-id", + wantErr: "", + }, + { + name: "get network id from config class", + lbaas: lbaas, + arg: testArg{ + service: &corev1.Service{ + ObjectMeta: v1.ObjectMeta{ + Annotations: map[string]string{ + ServiceAnnotationLoadBalancerClass: "lbclassKey", + }, + }, + }, + }, + want: "lb-class-network-id-1234", + wantErr: "", + }, + { + name: "get network id from default config", + lbaas: lbaas, + arg: testArg{ + 
service: &corev1.Service{}, + }, + want: "default-lb-class-networkId", + wantErr: "", + }, + { + name: "error when loadbalancer class not found", + lbaas: LbaasV2{}, + arg: testArg{ + service: &corev1.Service{ + ObjectMeta: v1.ObjectMeta{ + Annotations: map[string]string{ + ServiceAnnotationLoadBalancerClass: "invalid-lb-class", + }, + }, + }, + }, + want: "", + wantErr: "invalid loadbalancer class \"invalid-lb-class\"", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := tt.lbaas.getNetworkID(tt.arg.service, &serviceConfig{}) + + if tt.wantErr != "" { + assert.EqualError(t, err, tt.wantErr) + } else { + assert.NoError(t, err) + } + + assert.Equal(t, tt.want, got) + }) + } +} + +func Test_buildMonitorCreateOpts(t *testing.T) { + type testArg struct { + lbaas *LbaasV2 + svcConf *serviceConfig + port corev1.ServicePort + } + tests := []struct { + name string + testArg testArg + want v2monitors.CreateOpts + }{ + { + name: "test for port protocol udp with ovn provider", + testArg: testArg{ + lbaas: &LbaasV2{ + LoadBalancer{ + opts: LoadBalancerOpts{ + LBProvider: "ovn", + }, + lb: &gophercloud.ServiceClient{}, + }, + }, + svcConf: &serviceConfig{ + healthMonitorDelay: 6, + healthMonitorTimeout: 5, + healthMonitorMaxRetries: 4, + healthMonitorMaxRetriesDown: 3, + healthCheckNodePort: 32100, + }, + port: corev1.ServicePort{ + Protocol: corev1.ProtocolUDP, + }, + }, + want: v2monitors.CreateOpts{ + Name: "test for port protocol udp with ovn provider", + Type: "UDP-CONNECT", + Delay: 6, + Timeout: 5, + MaxRetries: 4, + MaxRetriesDown: 3, + }, + }, + { + name: "using tcp with ovn provider", + testArg: testArg{ + lbaas: &LbaasV2{ + LoadBalancer{ + opts: LoadBalancerOpts{ + LBProvider: "ovn", + }, + }, + }, + svcConf: &serviceConfig{ + healthMonitorDelay: 3, + healthMonitorTimeout: 8, + healthMonitorMaxRetries: 6, + healthMonitorMaxRetriesDown: 2, + healthCheckNodePort: 31200, + }, + port: corev1.ServicePort{ + Protocol: corev1.ProtocolTCP, + }, + }, + want: v2monitors.CreateOpts{ + Name: "using tcp with ovn provider", + Type: "TCP", + Delay: 3, + Timeout: 8, + MaxRetries: 6, + MaxRetriesDown: 2, + }, + }, + { + name: "using node port zero", + testArg: testArg{ + lbaas: &LbaasV2{ + LoadBalancer{ + opts: LoadBalancerOpts{ + LBProvider: "ovn", + }, + }, + }, + svcConf: &serviceConfig{ + healthMonitorDelay: 3, + healthMonitorTimeout: 5, + healthMonitorMaxRetries: 1, + healthMonitorMaxRetriesDown: 2, + healthCheckNodePort: 0, + }, + port: corev1.ServicePort{ + Protocol: corev1.ProtocolTCP, + }, + }, + want: v2monitors.CreateOpts{ + Name: "using node port zero", + Type: "TCP", + Delay: 3, + Timeout: 5, + MaxRetries: 1, + MaxRetriesDown: 2, + }, + }, + { + name: "using tcp protocol with not-ovn provider", + testArg: testArg{ + lbaas: &LbaasV2{ + LoadBalancer{ + opts: LoadBalancerOpts{ + LBProvider: "amphora", + }, + lb: &gophercloud.ServiceClient{}, + }, + }, + svcConf: &serviceConfig{ + healthMonitorDelay: 3, + healthMonitorTimeout: 4, + healthMonitorMaxRetries: 1, + healthMonitorMaxRetriesDown: 5, + healthCheckNodePort: 310000, + }, + port: corev1.ServicePort{ + Protocol: corev1.ProtocolTCP, + }, + }, + want: v2monitors.CreateOpts{ + Name: "using tcp protocol with not-ovn provider", + Type: "HTTP", + Delay: 3, + Timeout: 4, + MaxRetries: 1, + MaxRetriesDown: 5, + + URLPath: "/healthz", + HTTPMethod: "GET", + ExpectedCodes: "200", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := 
tt.testArg.lbaas.buildMonitorCreateOpts(tt.testArg.svcConf, tt.testArg.port, tt.name) + assert.Equal(t, tt.want, result) + }) + } +} + +func TestBuildListenerCreateOpt(t *testing.T) { + svcConf := &serviceConfig{ + connLimit: 100, + lbName: "my-lb", + } + testCases := []struct { + name string + port corev1.ServicePort + svcConf *serviceConfig + expectedCreateOpt listeners.CreateOpts + }{ + { + name: "Test with basic configuration", + port: corev1.ServicePort{ + Protocol: "TCP", + Port: 80, + }, + svcConf: &serviceConfig{ + connLimit: 100, + lbName: "my-lb", + }, + expectedCreateOpt: listeners.CreateOpts{ + Name: "Test with basic configuration", + Protocol: listeners.ProtocolTCP, + ProtocolPort: 80, + ConnLimit: &svcConf.connLimit, + Tags: nil, + }, + }, + { + name: "Test with TLSContainerRef and X-Forwarded-For", + port: corev1.ServicePort{ + Protocol: "TCP", + Port: 443, + }, + svcConf: &serviceConfig{ + connLimit: 100, + lbName: "my-lb", + tlsContainerRef: "tls-container-ref", + keepClientIP: true, + }, + expectedCreateOpt: listeners.CreateOpts{ + Name: "Test with TLSContainerRef and X-Forwarded-For", + Protocol: listeners.ProtocolTerminatedHTTPS, + ProtocolPort: 443, + ConnLimit: &svcConf.connLimit, + DefaultTlsContainerRef: "tls-container-ref", + InsertHeaders: map[string]string{"X-Forwarded-For": "true"}, + Tags: nil, + }, + }, + { + name: "Test with TLSContainerRef but without X-Forwarded-For", + port: corev1.ServicePort{ + Protocol: "TCP", + Port: 443, + }, + svcConf: &serviceConfig{ + connLimit: 100, + lbName: "my-lb", + tlsContainerRef: "tls-container-ref", + keepClientIP: false, + }, + expectedCreateOpt: listeners.CreateOpts{ + Name: "Test with TLSContainerRef but without X-Forwarded-For", + Protocol: listeners.ProtocolTerminatedHTTPS, + ProtocolPort: 443, + ConnLimit: &svcConf.connLimit, + DefaultTlsContainerRef: "tls-container-ref", + Tags: nil, + }, + }, + { + name: "Test with supported CIDRs", + port: corev1.ServicePort{ + Protocol: "TCP", + Port: 8080, + }, + svcConf: &serviceConfig{ + connLimit: 100, + lbName: "my-lb", + tlsContainerRef: "tls-container-ref", + keepClientIP: true, + allowedCIDR: []string{"192.168.1.0/24", "10.0.0.0/8"}, + }, + expectedCreateOpt: listeners.CreateOpts{ + Name: "Test with supported CIDRs", + Protocol: listeners.ProtocolTerminatedHTTPS, + ProtocolPort: 8080, + ConnLimit: &svcConf.connLimit, + DefaultTlsContainerRef: "tls-container-ref", + InsertHeaders: map[string]string{"X-Forwarded-For": "true"}, + AllowedCIDRs: svcConf.allowedCIDR, + Tags: nil, + }, + }, + { + name: "Test with Protocol forced to HTTP", + port: corev1.ServicePort{ + Protocol: "TCP", + Port: 80, + }, + svcConf: &serviceConfig{ + connLimit: 100, + lbName: "my-lb", + keepClientIP: true, + tlsContainerRef: "", + }, + expectedCreateOpt: listeners.CreateOpts{ + Name: "Test with Protocol forced to HTTP", + Protocol: listeners.ProtocolHTTP, + ProtocolPort: 80, + ConnLimit: &svcConf.connLimit, + InsertHeaders: map[string]string{"X-Forwarded-For": "true"}, + Tags: nil, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + lbaas := &LbaasV2{ + LoadBalancer{ + opts: LoadBalancerOpts{ + LBProvider: "not-ovn", + }, + lb: &gophercloud.ServiceClient{ + ProviderClient: &gophercloud.ProviderClient{}, + Endpoint: "", + }, + }, + } + createOpt := lbaas.buildListenerCreateOpt(tc.port, tc.svcConf, tc.name) + assert.Equal(t, tc.expectedCreateOpt, createOpt) + + }) + } +} diff --git a/pkg/openstack/openstack.go b/pkg/openstack/openstack.go index 9f916166..aee84846 
100644 --- a/pkg/openstack/openstack.go +++ b/pkg/openstack/openstack.go @@ -27,6 +27,9 @@ import ( "github.com/gophercloud/gophercloud" "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/availabilityzones" "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/portsecurity" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/trunk_details" + neutronports "github.com/gophercloud/gophercloud/openstack/networking/v2/ports" "github.com/spf13/pflag" gcfg "gopkg.in/gcfg.v1" "k8s.io/apimachinery/pkg/types" @@ -34,8 +37,12 @@ import ( cloudprovider "k8s.io/cloud-provider" "k8s.io/klog/v2" + "k8s.io/api/core/v1" "k8s.io/client-go/informers" coreinformers "k8s.io/client-go/informers/core/v1" + "k8s.io/client-go/kubernetes/scheme" + v1core "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/tools/record" "k8s.io/cloud-provider-openstack/pkg/client" "k8s.io/cloud-provider-openstack/pkg/metrics" "k8s.io/cloud-provider-openstack/pkg/util" @@ -67,44 +74,55 @@ func AddExtraFlags(fs *pflag.FlagSet) { fs.StringArrayVar(&userAgentData, "user-agent", nil, "Extra data to add to gophercloud user-agent. Use multiple times to add more than one component.") } +type PortWithTrunkDetails struct { + neutronports.Port + trunk_details.TrunkDetailsExt +} + +type PortWithPortSecurity struct { + neutronports.Port + portsecurity.PortSecurityExt +} + // LoadBalancer is used for creating and maintaining load balancers type LoadBalancer struct { - secret *gophercloud.ServiceClient - network *gophercloud.ServiceClient - compute *gophercloud.ServiceClient - lb *gophercloud.ServiceClient - opts LoadBalancerOpts - kclient kubernetes.Interface + secret *gophercloud.ServiceClient + network *gophercloud.ServiceClient + lb *gophercloud.ServiceClient + opts LoadBalancerOpts + kclient kubernetes.Interface + eventRecorder record.EventRecorder } // LoadBalancerOpts have the options to talk to Neutron LBaaSV2 or Octavia type LoadBalancerOpts struct { - Enabled bool `gcfg:"enabled"` // if false, disables the controller - LBVersion string `gcfg:"lb-version"` // overrides autodetection. Only support v2. - SubnetID string `gcfg:"subnet-id"` // overrides autodetection. - MemberSubnetID string `gcfg:"member-subnet-id"` // overrides autodetection. - NetworkID string `gcfg:"network-id"` // If specified, will create virtual ip from a subnet in network which has available IP addresses - FloatingNetworkID string `gcfg:"floating-network-id"` // If specified, will create floating ip for loadbalancer, or do not create floating ip. - FloatingSubnetID string `gcfg:"floating-subnet-id"` // If specified, will create floating ip for loadbalancer in this particular floating pool subnetwork. - FloatingSubnet string `gcfg:"floating-subnet"` // If specified, will create floating ip for loadbalancer in one of the matching floating pool subnetworks. - FloatingSubnetTags string `gcfg:"floating-subnet-tags"` // If specified, will create floating ip for loadbalancer in one of the matching floating pool subnetworks. - LBClasses map[string]*LBClass // Predefined named Floating networks and subnets - LBMethod string `gcfg:"lb-method"` // default to ROUND_ROBIN. 
- LBProvider string `gcfg:"lb-provider"` - CreateMonitor bool `gcfg:"create-monitor"` - MonitorDelay util.MyDuration `gcfg:"monitor-delay"` - MonitorTimeout util.MyDuration `gcfg:"monitor-timeout"` - MonitorMaxRetries uint `gcfg:"monitor-max-retries"` - ManageSecurityGroups bool `gcfg:"manage-security-groups"` - NodeSecurityGroupIDs []string // Do not specify, get it automatically when enable manage-security-groups. TODO(FengyunPan): move it into cache - InternalLB bool `gcfg:"internal-lb"` // default false - CascadeDelete bool `gcfg:"cascade-delete"` - FlavorID string `gcfg:"flavor-id"` - AvailabilityZone string `gcfg:"availability-zone"` - EnableIngressHostname bool `gcfg:"enable-ingress-hostname"` // Used with proxy protocol by adding a dns suffix to the load balancer IP address. Default false. - IngressHostnameSuffix string `gcfg:"ingress-hostname-suffix"` // Used with proxy protocol by adding a dns suffix to the load balancer IP address. Default nip.io. - MaxSharedLB int `gcfg:"max-shared-lb"` // Number of Services in maximum can share a single load balancer. Default 2 - ContainerStore string `gcfg:"container-store"` // Used to specify the store of the tls-container-ref + Enabled bool `gcfg:"enabled"` // if false, disables the controller + LBVersion string `gcfg:"lb-version"` // overrides autodetection. Only support v2. + SubnetID string `gcfg:"subnet-id"` // overrides autodetection. + MemberSubnetID string `gcfg:"member-subnet-id"` // overrides autodetection. + NetworkID string `gcfg:"network-id"` // If specified, will create virtual ip from a subnet in network which has available IP addresses + FloatingNetworkID string `gcfg:"floating-network-id"` // If specified, will create floating ip for loadbalancer, or do not create floating ip. + FloatingSubnetID string `gcfg:"floating-subnet-id"` // If specified, will create floating ip for loadbalancer in this particular floating pool subnetwork. + FloatingSubnet string `gcfg:"floating-subnet"` // If specified, will create floating ip for loadbalancer in one of the matching floating pool subnetworks. + FloatingSubnetTags string `gcfg:"floating-subnet-tags"` // If specified, will create floating ip for loadbalancer in one of the matching floating pool subnetworks. + LBClasses map[string]*LBClass // Predefined named Floating networks and subnets + LBMethod string `gcfg:"lb-method"` // default to ROUND_ROBIN. + LBProvider string `gcfg:"lb-provider"` + CreateMonitor bool `gcfg:"create-monitor"` + MonitorDelay util.MyDuration `gcfg:"monitor-delay"` + MonitorTimeout util.MyDuration `gcfg:"monitor-timeout"` + MonitorMaxRetries uint `gcfg:"monitor-max-retries"` + MonitorMaxRetriesDown uint `gcfg:"monitor-max-retries-down"` + ManageSecurityGroups bool `gcfg:"manage-security-groups"` + InternalLB bool `gcfg:"internal-lb"` // default false + CascadeDelete bool `gcfg:"cascade-delete"` + FlavorID string `gcfg:"flavor-id"` + AvailabilityZone string `gcfg:"availability-zone"` + EnableIngressHostname bool `gcfg:"enable-ingress-hostname"` // Used with proxy protocol by adding a dns suffix to the load balancer IP address. Default false. + IngressHostnameSuffix string `gcfg:"ingress-hostname-suffix"` // Used with proxy protocol by adding a dns suffix to the load balancer IP address. Default nip.io. + MaxSharedLB int `gcfg:"max-shared-lb"` // Number of Services in maximum can share a single load balancer. 
Default 2 + ContainerStore string `gcfg:"container-store"` // Used to specify the store of the tls-container-ref + ProviderRequiresSerialAPICalls bool `gcfg:"provider-requires-serial-api-calls"` // default false, meaning the provider supports Octavia's batch ("bulk update") member API; set to true for providers that require one API call per member change // revive:disable:var-naming TlsContainerRef string `gcfg:"default-tls-container-ref"` // reference to a tls container // revive:enable:var-naming @@ -153,6 +171,9 @@ type OpenStack struct { useV1Instances bool // TODO: v1 instance apis can be deleted after the v2 is verified enough nodeInformer coreinformers.NodeInformer nodeInformerHasSynced func() bool + + eventBroadcaster record.EventBroadcaster + eventRecorder record.EventRecorder } // Config is used to read and store information from the cloud configuration file @@ -186,6 +207,9 @@ func init() { func (os *OpenStack) Initialize(clientBuilder cloudprovider.ControllerClientBuilder, stop <-chan struct{}) { clientset := clientBuilder.ClientOrDie("cloud-controller-manager") os.kclient = clientset + os.eventBroadcaster = record.NewBroadcaster() + os.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: os.kclient.CoreV1().Events("")}) + os.eventRecorder = os.eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloud-provider-openstack"}) } // ReadConfig reads values from the cloud.conf @@ -205,12 +229,14 @@ func ReadConfig(config io.Reader) (Config, error) { cfg.LoadBalancer.MonitorDelay = util.MyDuration{Duration: 5 * time.Second} cfg.LoadBalancer.MonitorTimeout = util.MyDuration{Duration: 3 * time.Second} cfg.LoadBalancer.MonitorMaxRetries = 1 + cfg.LoadBalancer.MonitorMaxRetriesDown = 3 cfg.LoadBalancer.CascadeDelete = true cfg.LoadBalancer.EnableIngressHostname = false cfg.LoadBalancer.IngressHostnameSuffix = defaultProxyHostnameSuffix cfg.LoadBalancer.TlsContainerRef = "" cfg.LoadBalancer.ContainerStore = "barbican" cfg.LoadBalancer.MaxSharedLB = 2 + cfg.LoadBalancer.ProviderRequiresSerialAPICalls = false err := gcfg.FatalOnly(gcfg.ReadInto(&cfg, config)) if err != nil { @@ -336,12 +362,6 @@ func (os *OpenStack) LoadBalancer() (cloudprovider.LoadBalancer, bool) { return nil, false } - compute, err := client.NewComputeV2(os.provider, os.epOpts) - if err != nil { - klog.Errorf("Failed to create an OpenStack Compute client: %v", err) - return nil, false - } - lb, err := client.NewLoadBalancerV2(os.provider, os.epOpts) if err != nil { klog.Errorf("Failed to create an OpenStack LoadBalancer client: %v", err) @@ -364,7 +384,7 @@ func (os *OpenStack) LoadBalancer() (cloudprovider.LoadBalancer, bool) { klog.V(1).Info("Claiming to support LoadBalancer") - return &LbaasV2{LoadBalancer{secret, network, compute, lb, os.lbOpts, os.kclient}}, true + return &LbaasV2{LoadBalancer{secret, network, lb, os.lbOpts, os.kclient, os.eventRecorder}}, true } // Zones indicates that we support zones @@ -463,7 +483,7 @@ func (os *OpenStack) Routes() (cloudprovider.Routes, bool) { return nil, false } - r, err := NewRoutes(os, network, netExts["extraroute-atomic"]) + r, err := NewRoutes(os, network, netExts["extraroute-atomic"], netExts["allowed-address-pairs"]) if err != nil { klog.Warningf("Error initialising Routes support: %v", err) return nil, false diff --git a/pkg/openstack/openstack_test.go b/pkg/openstack/openstack_test.go index f7d98438..1b436b50 100644 --- a/pkg/openstack/openstack_test.go +++ b/pkg/openstack/openstack_test.go @@ -27,8 +27,8 @@ import ( "time" "github.com/gophercloud/gophercloud" - 
"github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/attachinterfaces" "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" + neutronports "github.com/gophercloud/gophercloud/openstack/networking/v2/ports" "github.com/spf13/pflag" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -83,7 +83,7 @@ func ConfigFromEnv() Config { func TestReadConfig(t *testing.T) { _, err := ReadConfig(nil) if err == nil { - t.Errorf("Should fail when no config is provided: %s", err) + t.Errorf("Should fail when no config is provided: %v", err) } cfg, err := ReadConfig(strings.NewReader(` @@ -98,12 +98,13 @@ func TestReadConfig(t *testing.T) { create-monitor = yes monitor-delay = 1m monitor-timeout = 30s - monitor-max-retries = 3 + monitor-max-retries = 1 + monitor-max-retries-down = 3 [Metadata] search-order = configDrive, metadataService `)) if err != nil { - t.Fatalf("Should succeed when a valid config is provided: %s", err) + t.Fatalf("Should succeed when a valid config is provided: %v", err) } if cfg.Global.AuthURL != "http://auth.url" { t.Errorf("incorrect authurl: %s", cfg.Global.AuthURL) @@ -131,16 +132,19 @@ func TestReadConfig(t *testing.T) { } if !cfg.LoadBalancer.CreateMonitor { - t.Errorf("incorrect lb.createmonitor: %t", cfg.LoadBalancer.CreateMonitor) + t.Errorf("incorrect lb.create-monitor: %t", cfg.LoadBalancer.CreateMonitor) } if cfg.LoadBalancer.MonitorDelay.Duration != 1*time.Minute { - t.Errorf("incorrect lb.monitordelay: %s", cfg.LoadBalancer.MonitorDelay) + t.Errorf("incorrect lb.monitor-delay: %s", cfg.LoadBalancer.MonitorDelay) } if cfg.LoadBalancer.MonitorTimeout.Duration != 30*time.Second { - t.Errorf("incorrect lb.monitortimeout: %s", cfg.LoadBalancer.MonitorTimeout) + t.Errorf("incorrect lb.monitor-timeout: %s", cfg.LoadBalancer.MonitorTimeout) } - if cfg.LoadBalancer.MonitorMaxRetries != 3 { - t.Errorf("incorrect lb.monitormaxretries: %d", cfg.LoadBalancer.MonitorMaxRetries) + if cfg.LoadBalancer.MonitorMaxRetries != 1 { + t.Errorf("incorrect lb.monitor-max-retries: %d", cfg.LoadBalancer.MonitorMaxRetries) + } + if cfg.LoadBalancer.MonitorMaxRetriesDown != 3 { + t.Errorf("incorrect lb.monitor-max-retries-down: %d", cfg.LoadBalancer.MonitorMaxRetriesDown) } if cfg.Metadata.SearchOrder != "configDrive, metadataService" { t.Errorf("incorrect md.search-order: %v", cfg.Metadata.SearchOrder) @@ -187,13 +191,14 @@ clouds: create-monitor = yes monitor-delay = 1m monitor-timeout = 30s - monitor-max-retries = 3 + monitor-max-retries = 1 + monitor-max-retries-down = 3 [Metadata] search-order = configDrive, metadataService `)) if err != nil { - t.Fatalf("Should succeed when a valid config is provided: %s", err) + t.Fatalf("Should succeed when a valid config is provided: %v", err) } // config has priority @@ -227,7 +232,15 @@ clouds: // Make non-global sections dont get overwritten if !cfg.LoadBalancer.CreateMonitor { - t.Errorf("incorrect lb.createmonitor: %t", cfg.LoadBalancer.CreateMonitor) + t.Errorf("incorrect lb.create-monitor: %t", cfg.LoadBalancer.CreateMonitor) + } + + if cfg.LoadBalancer.MonitorMaxRetries != 1 { + t.Errorf("incorrect lb.monitor-max-retries: %d", cfg.LoadBalancer.MonitorMaxRetries) + } + + if cfg.LoadBalancer.MonitorMaxRetriesDown != 3 { + t.Errorf("incorrect lb.monitor-max-retries-down: %d", cfg.LoadBalancer.MonitorMaxRetriesDown) } } @@ -360,10 +373,10 @@ func TestNodeAddresses(t *testing.T) { PublicNetworkName: []string{"public"}, } - interfaces := []attachinterfaces.Interface{ - { - PortState: "ACTIVE", - FixedIPs: 
[]attachinterfaces.FixedIP{ + ports := []PortWithTrunkDetails{{ + Port: neutronports.Port{ + Status: "ACTIVE", + FixedIPs: []neutronports.IP{ { IPAddress: "10.0.0.32", }, @@ -372,9 +385,10 @@ func TestNodeAddresses(t *testing.T) { }, }, }, + }, } - addrs, err := nodeAddresses(&srv, interfaces, networkingOpts) + addrs, err := nodeAddresses(&srv, ports, nil, networkingOpts) if err != nil { t.Fatalf("nodeAddresses returned error: %v", err) } @@ -439,10 +453,10 @@ func TestNodeAddressesCustomPublicNetwork(t *testing.T) { PublicNetworkName: []string{"pub-net"}, } - interfaces := []attachinterfaces.Interface{ - { - PortState: "ACTIVE", - FixedIPs: []attachinterfaces.FixedIP{ + ports := []PortWithTrunkDetails{{ + Port: neutronports.Port{ + Status: "ACTIVE", + FixedIPs: []neutronports.IP{ { IPAddress: "10.0.0.32", }, @@ -451,9 +465,10 @@ func TestNodeAddressesCustomPublicNetwork(t *testing.T) { }, }, }, + }, } - addrs, err := nodeAddresses(&srv, interfaces, networkingOpts) + addrs, err := nodeAddresses(&srv, ports, nil, networkingOpts) if err != nil { t.Fatalf("nodeAddresses returned error: %v", err) } @@ -512,10 +527,10 @@ func TestNodeAddressesCustomPublicNetworkWithIntersectingFixedIP(t *testing.T) { PublicNetworkName: []string{"pub-net"}, } - interfaces := []attachinterfaces.Interface{ - { - PortState: "ACTIVE", - FixedIPs: []attachinterfaces.FixedIP{ + ports := []PortWithTrunkDetails{{ + Port: neutronports.Port{ + Status: "ACTIVE", + FixedIPs: []neutronports.IP{ { IPAddress: "10.0.0.32", }, @@ -528,9 +543,10 @@ func TestNodeAddressesCustomPublicNetworkWithIntersectingFixedIP(t *testing.T) { }, }, }, + }, } - addrs, err := nodeAddresses(&srv, interfaces, networkingOpts) + addrs, err := nodeAddresses(&srv, ports, nil, networkingOpts) if err != nil { t.Fatalf("nodeAddresses returned error: %v", err) } @@ -600,10 +616,10 @@ func TestNodeAddressesMultipleCustomInternalNetworks(t *testing.T) { InternalNetworkName: []string{"private", "also-private"}, } - interfaces := []attachinterfaces.Interface{ - { - PortState: "ACTIVE", - FixedIPs: []attachinterfaces.FixedIP{ + ports := []PortWithTrunkDetails{{ + Port: neutronports.Port{ + Status: "ACTIVE", + FixedIPs: []neutronports.IP{ { IPAddress: "10.0.0.32", }, @@ -612,9 +628,10 @@ func TestNodeAddressesMultipleCustomInternalNetworks(t *testing.T) { }, }, }, + }, } - addrs, err := nodeAddresses(&srv, interfaces, networkingOpts) + addrs, err := nodeAddresses(&srv, ports, nil, networkingOpts) if err != nil { t.Fatalf("nodeAddresses returned error: %v", err) } @@ -684,10 +701,10 @@ func TestNodeAddressesOneInternalNetwork(t *testing.T) { InternalNetworkName: []string{"also-private"}, } - interfaces := []attachinterfaces.Interface{ - { - PortState: "ACTIVE", - FixedIPs: []attachinterfaces.FixedIP{ + ports := []PortWithTrunkDetails{{ + Port: neutronports.Port{ + Status: "ACTIVE", + FixedIPs: []neutronports.IP{ { IPAddress: "10.0.0.32", }, @@ -696,9 +713,10 @@ func TestNodeAddressesOneInternalNetwork(t *testing.T) { }, }, }, + }, } - addrs, err := nodeAddresses(&srv, interfaces, networkingOpts) + addrs, err := nodeAddresses(&srv, ports, nil, networkingOpts) if err != nil { t.Fatalf("nodeAddresses returned error: %v", err) } @@ -760,10 +778,10 @@ func TestNodeAddressesIPv6Disabled(t *testing.T) { IPv6SupportDisabled: true, } - interfaces := []attachinterfaces.Interface{ - { - PortState: "ACTIVE", - FixedIPs: []attachinterfaces.FixedIP{ + ports := []PortWithTrunkDetails{{ + Port: neutronports.Port{ + Status: "ACTIVE", + FixedIPs: []neutronports.IP{ { IPAddress: 
"10.0.0.32", }, @@ -772,9 +790,10 @@ func TestNodeAddressesIPv6Disabled(t *testing.T) { }, }, }, + }, } - addrs, err := nodeAddresses(&srv, interfaces, networkingOpts) + addrs, err := nodeAddresses(&srv, ports, nil, networkingOpts) if err != nil { t.Fatalf("nodeAddresses returned error: %v", err) } @@ -841,10 +860,10 @@ func TestNodeAddressesWithAddressSortOrderOptions(t *testing.T) { AddressSortOrder: "10.0.0.0/8, 50.56.176.0/24, 2001:4800::/32", } - interfaces := []attachinterfaces.Interface{ - { - PortState: "ACTIVE", - FixedIPs: []attachinterfaces.FixedIP{ + ports := []PortWithTrunkDetails{{ + Port: neutronports.Port{ + Status: "ACTIVE", + FixedIPs: []neutronports.IP{ { IPAddress: "10.0.0.32", }, @@ -853,9 +872,10 @@ func TestNodeAddressesWithAddressSortOrderOptions(t *testing.T) { }, }, }, + }, } - addrs, err := nodeAddresses(&srv, interfaces, networkingOpts) + addrs, err := nodeAddresses(&srv, ports, nil, networkingOpts) if err != nil { t.Fatalf("nodeAddresses returned error: %v", err) } @@ -884,7 +904,7 @@ func TestNewOpenStack(t *testing.T) { _, err := NewOpenStack(cfg) if err != nil { - t.Fatalf("Failed to construct/authenticate OpenStack: %s", err) + t.Fatalf("Failed to construct/authenticate OpenStack: %v", err) } } @@ -900,7 +920,7 @@ func TestLoadBalancer(t *testing.T) { os, err := NewOpenStack(cfg) if err != nil { - t.Fatalf("Failed to construct/authenticate OpenStack: %s", err) + t.Fatalf("Failed to construct/authenticate OpenStack: %v", err) } lb, ok := os.LoadBalancer() @@ -910,7 +930,7 @@ func TestLoadBalancer(t *testing.T) { _, exists, err := lb.GetLoadBalancer(context.TODO(), testClusterName, &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "noexist"}}) if err != nil { - t.Fatalf("GetLoadBalancer(\"noexist\") returned error: %s", err) + t.Fatalf("GetLoadBalancer(\"noexist\") returned error: %v", err) } if exists { t.Fatalf("GetLoadBalancer(\"noexist\") returned exists") @@ -945,7 +965,7 @@ func TestZones(t *testing.T) { zone, err := z.GetZone(context.TODO()) if err != nil { - t.Fatalf("GetZone() returned error: %s", err) + t.Fatalf("GetZone() returned error: %v", err) } if zone.Region != "myRegion" { diff --git a/pkg/openstack/routes.go b/pkg/openstack/routes.go index 0be2c227..59cb5ba8 100644 --- a/pkg/openstack/routes.go +++ b/pkg/openstack/routes.go @@ -18,6 +18,7 @@ package openstack import ( "context" + openstackutil "k8s.io/cloud-provider-openstack/pkg/util/openstack" "net" "sync" @@ -43,6 +44,8 @@ type Routes struct { networkIDs []string // whether Neutron supports "extraroute-atomic" extension atomicRoutes bool + // whether Neutron supports "allowed-address-pairs" extension + allowedAddressPairs bool // Neutron with no "extraroute-atomic" extension can modify only one route at // once sync.Mutex @@ -51,15 +54,16 @@ type Routes struct { var _ cloudprovider.Routes = &Routes{} // NewRoutes creates a new instance of Routes -func NewRoutes(os *OpenStack, network *gophercloud.ServiceClient, atomicRoutes bool) (cloudprovider.Routes, error) { +func NewRoutes(os *OpenStack, network *gophercloud.ServiceClient, atomicRoutes bool, allowedAddressPairs bool) (cloudprovider.Routes, error) { if os.routeOpts.RouterID == "" { return nil, errors.ErrNoRouterID } return &Routes{ - network: network, - os: os, - atomicRoutes: atomicRoutes, + network: network, + os: os, + atomicRoutes: atomicRoutes, + allowedAddressPairs: allowedAddressPairs, }, nil } @@ -82,7 +86,7 @@ func (r *Routes) ListRoutes(ctx context.Context, clusterName string) ([]*cloudpr return nil, err } - var routes 
[]*cloudprovider.Route + routes := make([]*cloudprovider.Route, 0, len(router.Routes)) for _, item := range router.Routes { nodeName, foundNode := getNodeNameByAddr(item.NextHop, nodes) route := cloudprovider.Route{ @@ -239,7 +243,7 @@ func removeRoute(network *gophercloud.ServiceClient, routerID string, oldRoute [ return unwinder, nil } -func updateAllowedAddressPairs(network *gophercloud.ServiceClient, port *ports.Port, newPairs []ports.AddressPair) (func(), error) { +func updateAllowedAddressPairs(network *gophercloud.ServiceClient, port *PortWithPortSecurity, newPairs []ports.AddressPair) (func(), error) { origPairs := port.AllowedAddressPairs // shallow copy mc := metrics.NewMetricContext("port", "update") @@ -329,11 +333,22 @@ func (r *Routes) CreateRoute(ctx context.Context, clusterName string, nameHint s defer onFailure.call(unwind) } + if !r.allowedAddressPairs { + klog.V(4).Infof("Route created (skipping the allowed_address_pairs update): %v", route) + onFailure.disarm() + return nil + } + // get the port of addr on target node. port, err := getPortByIP(r.network, addr, r.networkIDs) if err != nil { return err } + if !port.PortSecurityEnabled { + klog.Warningf("Skipping allowed_address_pair for port: %s", port.ID) + onFailure.disarm() + return nil + } found := false for _, item := range port.AllowedAddressPairs { @@ -421,6 +436,9 @@ func (r *Routes) DeleteRoute(ctx context.Context, clusterName string, route *clo } else { // atomic route update blackhole := route.Blackhole + if blackhole { + addr = string(route.TargetNode) + } route := []routers.Route{{ DestinationCIDR: route.DestinationCIDR, NextHop: addr, @@ -434,11 +452,22 @@ func (r *Routes) DeleteRoute(ctx context.Context, clusterName string, route *clo defer onFailure.call(unwind) } + if !r.allowedAddressPairs { + klog.V(4).Infof("Route deleted (skipping the allowed_address_pairs update): %v", route) + onFailure.disarm() + return nil + } + // get the port of addr on target node. 
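+ // Rationale (assumed, not stated in the patch): Neutron rejects allowed_address_pairs updates on ports that have port security disabled, + // so the PortSecurityEnabled check below skips such ports instead of attempting an update that would fail with a conflict.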
port, err := getPortByIP(r.network, addr, r.networkIDs) if err != nil { return err } + if !port.PortSecurityEnabled { + klog.Warningf("Skipping allowed_address_pair for port: %s", port.ID) + onFailure.disarm() + return nil + } addrPairs := port.AllowedAddressPairs index := -1 @@ -466,7 +495,7 @@ func (r *Routes) DeleteRoute(ctx context.Context, clusterName string, route *clo return nil } -func getPortByIP(network *gophercloud.ServiceClient, addr string, networkIDs []string) (*ports.Port, error) { +func getPortByIP(network *gophercloud.ServiceClient, addr string, networkIDs []string) (*PortWithPortSecurity, error) { for _, networkID := range networkIDs { opts := ports.ListOpts{ FixedIPs: []ports.FixedIPOpts{ @@ -476,12 +505,7 @@ func getPortByIP(network *gophercloud.ServiceClient, addr string, networkIDs []s }, NetworkID: networkID, } - mc := metrics.NewMetricContext("port", "list") - pages, err := ports.List(network, opts).AllPages() - if mc.ObserveRequest(err) != nil { - return nil, err - } - ports, err := ports.ExtractPorts(pages) + ports, err := openstackutil.GetPorts[PortWithPortSecurity](network, opts) if err != nil { return nil, err } diff --git a/pkg/openstack/routes_test.go b/pkg/openstack/routes_test.go index 5ecd553e..350b7442 100644 --- a/pkg/openstack/routes_test.go +++ b/pkg/openstack/routes_test.go @@ -38,7 +38,7 @@ func TestRoutes(t *testing.T) { os, err := NewOpenStack(cfg) if err != nil { - t.Fatalf("Failed to construct/authenticate OpenStack: %s", err) + t.Fatalf("Failed to construct/authenticate OpenStack: %v", err) } vms := getServers(os) diff --git a/pkg/util/blockdevice/blockdevice_linux.go b/pkg/util/blockdevice/blockdevice_linux.go index 1ee683bb..738bc09d 100644 --- a/pkg/util/blockdevice/blockdevice_linux.go +++ b/pkg/util/blockdevice/blockdevice_linux.go @@ -49,7 +49,7 @@ func IsBlockDevice(path string) (bool, error) { var stat unix.Stat_t err := unix.Stat(path, &stat) if err != nil { - return false, fmt.Errorf("failed to stat() %q: %s", path, err) + return false, fmt.Errorf("failed to stat() %q: %v", path, err) } return (stat.Mode & unix.S_IFMT) == unix.S_IFBLK, nil @@ -64,7 +64,7 @@ func GetBlockDeviceSize(path string) (int64, error) { defer fd.Close() pos, err := fd.Seek(0, io.SeekEnd) if err != nil { - return 0, fmt.Errorf("error seeking to end of %s: %s", path, err) + return 0, fmt.Errorf("error seeking to end of %s: %v", path, err) } return pos, nil } diff --git a/pkg/util/errors/errors.go b/pkg/util/errors/errors.go index e55af59a..8be1a6e9 100644 --- a/pkg/util/errors/errors.go +++ b/pkg/util/errors/errors.go @@ -77,3 +77,17 @@ func IsInvalidError(err error) bool { return false } + +func IsConflictError(err error) bool { + if _, ok := err.(gophercloud.ErrDefault409); ok { + return true + } + + if errCode, ok := err.(gophercloud.ErrUnexpectedResponseCode); ok { + if errCode.Actual == http.StatusConflict { + return true + } + } + + return false +} diff --git a/pkg/util/metadata/metadata.go b/pkg/util/metadata/metadata.go index 7068e974..f78fbc64 100644 --- a/pkg/util/metadata/metadata.go +++ b/pkg/util/metadata/metadata.go @@ -189,11 +189,17 @@ func getFromConfigDrive(metadataVersion string) (*Metadata, error) { return parseMetadata(f) } +func noProxyHTTPClient() *http.Client { + noProxyTransport := http.DefaultTransport.(*http.Transport).Clone() + noProxyTransport.Proxy = nil + return &http.Client{Transport: noProxyTransport} +} + func getFromMetadataService(metadataVersion string) (*Metadata, error) { // Try to get JSON from metadata server. 
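+ // The proxy-less client matters here: the metadata service is reached at a link-local endpoint (conventionally 169.254.169.254) that an + // HTTP_PROXY/HTTPS_PROXY setting would otherwise intercept; cloning http.DefaultTransport keeps its defaults and only clears Proxy.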
metadataURL := getMetadataURL(metadataVersion) - klog.V(4).Infof("Attempting to fetch metadata from %s", metadataURL) - resp, err := http.Get(metadataURL) + klog.V(4).Infof("Attempting to fetch metadata from %s, ignoring proxy settings", metadataURL) + resp, err := noProxyHTTPClient().Get(metadataURL) if err != nil { return nil, fmt.Errorf("error fetching %s: %v", metadataURL, err) } diff --git a/pkg/util/metadata/metadata_test.go b/pkg/util/metadata/metadata_test.go index 29cf362b..63595237 100644 --- a/pkg/util/metadata/metadata_test.go +++ b/pkg/util/metadata/metadata_test.go @@ -18,6 +18,9 @@ package metadata import ( "fmt" + "net/http" + "net/http/httptest" + "os" "strings" "testing" ) @@ -31,7 +34,7 @@ var FakeMetadata = Metadata{ func TestParseMetadata(t *testing.T) { _, err := parseMetadata(strings.NewReader("bogus")) if err == nil { - t.Errorf("Should fail when bad data is provided: %s", err) + t.Errorf("Should fail when bad data is provided: %v", err) } data := strings.NewReader(` @@ -71,7 +74,7 @@ func TestParseMetadata(t *testing.T) { `) md, err := parseMetadata(data) if err != nil { - t.Fatalf("Should succeed when provided with valid data: %s", err) + t.Fatalf("Should succeed when provided with valid data: %v", err) } if md.Name != "test" { @@ -148,3 +151,27 @@ func TestCheckMetaDataOpts(t *testing.T) { } } } + +func TestGetFromMetadataService(t *testing.T) { + t.Run("ignores HTTP_PROXY", func(t *testing.T) { + // Here I spin up an HTTP server, set it as HTTP_PROXY, and + // assert that the request to the Metadata server doesn't hit + // it. + fakeProxy := httptest.NewServer(http.HandlerFunc(func(_ http.ResponseWriter, _ *http.Request) { + t.Errorf("the call to Metadata hit the proxy server") + })) + defer fakeProxy.Close() + + // defer resetting HTTP_PROXY to whatever it was before this test + defer func(originalValue string, wasSet bool) { + if wasSet { + os.Setenv("HTTP_PROXY", originalValue) + } else { + os.Unsetenv("HTTP_PROXY") + } + }(os.LookupEnv("HTTP_PROXY")) + + os.Setenv("HTTP_PROXY", fakeProxy.URL) + _, _ = getFromMetadataService("") + }) +} diff --git a/pkg/util/openstack/keymanager.go b/pkg/util/openstack/keymanager.go index ab3ef928..ce35ffd0 100644 --- a/pkg/util/openstack/keymanager.go +++ b/pkg/util/openstack/keymanager.go @@ -86,11 +86,11 @@ func CreateSecret(client *gophercloud.ServiceClient, name string, secretType str return secret.SecretRef, nil } -// ParseSecretID return secret ID from serectRef +// ParseSecretID return secret ID from secretRef func ParseSecretID(ref string) (string, error) { parts := strings.Split(ref, "/") if len(parts) < 2 { - return "", fmt.Errorf("Could not parse %s", ref) + return "", fmt.Errorf("could not parse %s", ref) } return parts[len(parts)-1], nil @@ -119,7 +119,7 @@ func DeleteSecrets(client *gophercloud.ServiceClient, partName string) error { } mc := metrics.NewMetricContext("secret", "delete") err = secrets.Delete(client, secretID).ExtractErr() - if mc.ObserveRequest(err) != nil { + if mc.ObserveRequest(err) != nil && !cpoerrors.IsNotFound(err) { return err } } diff --git a/pkg/util/openstack/loadbalancer.go b/pkg/util/openstack/loadbalancer.go index 0e12e002..9cdebd17 100644 --- a/pkg/util/openstack/loadbalancer.go +++ b/pkg/util/openstack/loadbalancer.go @@ -18,6 +18,8 @@ package openstack import ( "fmt" + "os" + "strconv" "time" "github.com/gophercloud/gophercloud" @@ -46,7 +48,7 @@ const ( waitLoadbalancerInitDelay = 1 * time.Second waitLoadbalancerFactor = 1.2 - waitLoadbalancerActiveSteps = 19 + 
waitLoadbalancerActiveSteps = 23 waitLoadbalancerDeleteSteps = 12 activeStatus = "ACTIVE" @@ -55,16 +57,6 @@ var ( octaviaVersion string - - // ErrNotFound is used to inform that the object is missing. - // Deprecated: use cpoerrors.ErrNotFound instead. - // TODO: remove in v1.27.0. - ErrNotFound = cpoerrors.ErrNotFound - - // ErrMultipleResults is used when we unexpectedly get back multiple results. - // Deprecated: use cpoerrors.ErrMultipleResults instead. - // TODO: remove in v1.27.0. - ErrMultipleResults = cpoerrors.ErrMultipleResults ) // getOctaviaVersion returns the current Octavia API version. @@ -159,13 +151,24 @@ func IsOctaviaFeatureSupported(client *gophercloud.ServiceClient, feature int, l return false } +func getTimeoutSteps(name string, steps int) int { + if v := os.Getenv(name); v != "" { + s, err := strconv.Atoi(v) + if err == nil && s >= 0 { + return s + } + } + return steps +} + // WaitActiveAndGetLoadBalancer wait for LB active then return the LB object for further usage func WaitActiveAndGetLoadBalancer(client *gophercloud.ServiceClient, loadbalancerID string) (*loadbalancers.LoadBalancer, error) { klog.InfoS("Waiting for load balancer ACTIVE", "lbID", loadbalancerID) + steps := getTimeoutSteps("OCCM_WAIT_LB_ACTIVE_STEPS", waitLoadbalancerActiveSteps) backoff := wait.Backoff{ Duration: waitLoadbalancerInitDelay, Factor: waitLoadbalancerFactor, - Steps: waitLoadbalancerActiveSteps, + Steps: steps, } var loadbalancer *loadbalancers.LoadBalancer @@ -174,13 +177,14 @@ func WaitActiveAndGetLoadBalancer(client *gophercloud.ServiceClient, loadbalance var err error loadbalancer, err = loadbalancers.Get(client, loadbalancerID).Extract() if mc.ObserveRequest(err) != nil { - return false, err + klog.Warningf("Failed to fetch loadbalancer status from OpenStack (lbID %q): %s", loadbalancerID, err) + return false, nil } if loadbalancer.ProvisioningStatus == activeStatus { klog.InfoS("Load balancer ACTIVE", "lbID", loadbalancerID) return true, nil } else if loadbalancer.ProvisioningStatus == errorStatus { - return true, fmt.Errorf("loadbalancer has gone into ERROR state") + return true, fmt.Errorf("loadbalancer %s has gone into ERROR state", loadbalancerID) } else { return false, nil } @@ -501,7 +505,7 @@ func GetPoolByListener(client *gophercloud.ServiceClient, lbID, listenerID strin return &listenerPools[0], nil } -// GetPools retrives the pools belong to the loadbalancer. +// GetPools retrieves the pools belonging to the load balancer. func GetPools(client *gophercloud.ServiceClient, lbID string) ([]pools.Pool, error) { var lbPools []pools.Pool @@ -655,13 +659,17 @@ func CreateL7Rule(client *gophercloud.ServiceClient, policyID string, opts l7pol } // UpdateHealthMonitor updates a health monitor. 
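+// The lbID parameter added below lets the call block until the owning load balancer is ACTIVE again; Octavia holds the load balancer +// in PENDING_UPDATE while the monitor changes, and follow-up API calls against it would be rejected until it settles (rationale inferred, not stated in the patch).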
-func UpdateHealthMonitor(client *gophercloud.ServiceClient, monitorID string, opts monitors.UpdateOpts) error { +func UpdateHealthMonitor(client *gophercloud.ServiceClient, monitorID string, opts monitors.UpdateOpts, lbID string) error { mc := metrics.NewMetricContext("loadbalancer_healthmonitor", "update") _, err := monitors.Update(client, monitorID, opts).Extract() if mc.ObserveRequest(err) != nil { return fmt.Errorf("failed to update healthmonitor: %v", err) } + if _, err := WaitActiveAndGetLoadBalancer(client, lbID); err != nil { + return fmt.Errorf("failed to wait for load balancer %s ACTIVE after updating healthmonitor: %v", lbID, err) + } + return nil } diff --git a/pkg/util/openstack/loadbalancer_serial.go b/pkg/util/openstack/loadbalancer_serial.go new file mode 100644 index 00000000..a49f2a73 --- /dev/null +++ b/pkg/util/openstack/loadbalancer_serial.go @@ -0,0 +1,99 @@ +package openstack + +import ( + "fmt" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/pools" + apiv1 "k8s.io/api/core/v1" + klog "k8s.io/klog/v2" + + cpoutil "k8s.io/cloud-provider-openstack/pkg/util" + cpoerrors "k8s.io/cloud-provider-openstack/pkg/util/errors" +) + +func memberExists(members []pools.Member, addr string, port int) bool { + for _, member := range members { + if member.Address == addr && member.ProtocolPort == port { + return true + } + } + + return false +} + +func popMember(members []pools.Member, addr string, port int) []pools.Member { + for i, member := range members { + if member.Address == addr && member.ProtocolPort == port { + members[i] = members[len(members)-1] + members = members[:len(members)-1] + } + } + + return members +} + +func getNodeAddressForLB(node *apiv1.Node) (string, error) { + addrs := node.Status.Addresses + if len(addrs) == 0 { + return "", cpoerrors.ErrNoAddressFound + } + + for _, addr := range addrs { + if addr.Type == apiv1.NodeInternalIP { + return addr.Address, nil + } + } + + return addrs[0].Address, nil +} + +func SeriallyReconcilePoolMembers(client *gophercloud.ServiceClient, pool *pools.Pool, nodePort int, lbID string, nodes []*apiv1.Node) error { + + members, err := GetMembersbyPool(client, pool.ID) + if err != nil && !cpoerrors.IsNotFound(err) { + return fmt.Errorf("error getting pool members %s: %v", pool.ID, err) + } + + for _, node := range nodes { + addr, err := getNodeAddressForLB(node) + if err != nil { + if err == cpoerrors.ErrNoAddressFound { + // Node has no usable address, do not create a member for it + klog.Warningf("Failed to create LB pool member for node %s: %v", node.Name, err) + continue + } else { + return fmt.Errorf("error getting address for node %s: %v", node.Name, err) + } + } + if !memberExists(members, addr, nodePort) { + klog.V(2).Infof("Creating member for pool %s", pool.ID) + _, err := pools.CreateMember(client, pool.ID, pools.CreateMemberOpts{ + Name: cpoutil.CutString255(fmt.Sprintf("member_%s_%s_%d", node.Name, addr, nodePort)), + ProtocolPort: nodePort, + Address: addr, + }).Extract() + if err != nil { + return fmt.Errorf("error creating LB pool member for node: %s, %v", node.Name, err) + } + if _, err := WaitActiveAndGetLoadBalancer(client, lbID); err != nil { + return err + } + } else { + // The member already exists: drop it from the local list so it is not deleted as obsolete below. 
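+ // Illustration: popMember([m1, m2, m3], addr(m2), port) -> [m1, m3]; the matching entry is overwritten by the last element and the + // slice is truncated, so member order is not preserved.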
+ members = popMember(members, addr, nodePort) + } + klog.V(2).Infof("Ensured pool %s has member for %s at %s", pool.ID, node.Name, addr) + } + for _, member := range members { + klog.V(2).Infof("Deleting obsolete member %s for pool %s address %s", member.ID, pool.ID, member.Address) + err := pools.DeleteMember(client, pool.ID, member.ID).ExtractErr() + if err != nil && !cpoerrors.IsNotFound(err) { + return fmt.Errorf("error deleting obsolete member %s for pool %s address %s: %v", member.ID, pool.ID, member.Address, err) + } + if _, err := WaitActiveAndGetLoadBalancer(client, lbID); err != nil { + return err + } + } + return nil +} diff --git a/pkg/util/openstack/network.go b/pkg/util/openstack/network.go index 0a811a2a..889086f4 100644 --- a/pkg/util/openstack/network.go +++ b/pkg/util/openstack/network.go @@ -140,15 +140,16 @@ func getSubnet(networkSubnet string, subnetList []subnets.Subnet) *subnets.Subne } // GetPorts gets all the filtered ports. -func GetPorts(client *gophercloud.ServiceClient, listOpts neutronports.ListOpts) ([]neutronports.Port, error) { +func GetPorts[PortType interface{}](client *gophercloud.ServiceClient, listOpts neutronports.ListOpts) ([]PortType, error) { mc := metrics.NewMetricContext("port", "list") allPages, err := neutronports.List(client, listOpts).AllPages() if mc.ObserveRequest(err) != nil { - return []neutronports.Port{}, err + return []PortType{}, err } - allPorts, err := neutronports.ExtractPorts(allPages) + var allPorts []PortType + err = neutronports.ExtractPortsInto(allPages, &allPorts) if err != nil { - return []neutronports.Port{}, err + return []PortType{}, err } return allPorts, nil diff --git a/pkg/util/openstack/security_group.go b/pkg/util/openstack/security_group.go new file mode 100644 index 00000000..0e31eb9e --- /dev/null +++ b/pkg/util/openstack/security_group.go @@ -0,0 +1,32 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
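Pausing on the GetPorts change above before the new security-group helper: making it generic lets the caller choose, at the call site, which extension fields ExtractPortsInto should populate. A hypothetical instantiation (PortWithBindingExt and listServerPorts are illustrative names, not part of this PR):

```go
package example

import (
	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/portsbinding"
	neutronports "github.com/gophercloud/gophercloud/openstack/networking/v2/ports"

	cpoutilopenstack "k8s.io/cloud-provider-openstack/pkg/util/openstack"
)

// PortWithBindingExt pairs the base port fields with the port-binding
// extension; ExtractPortsInto fills both in a single pass.
type PortWithBindingExt struct {
	neutronports.Port
	portsbinding.PortsBindingExt
}

// listServerPorts shows the concrete port shape being picked at the call site.
func listServerPorts(client *gophercloud.ServiceClient, serverID string) ([]PortWithBindingExt, error) {
	return cpoutilopenstack.GetPorts[PortWithBindingExt](client, neutronports.ListOpts{DeviceID: serverID})
}
```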
+*/ + +package openstack + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules" + "k8s.io/cloud-provider-openstack/pkg/metrics" +) + +func GetSecurityGroupRules(client *gophercloud.ServiceClient, opts rules.ListOpts) ([]rules.SecGroupRule, error) { + mc := metrics.NewMetricContext("security_group_rule", "list") + page, err := rules.List(client, opts).AllPages() + if mc.ObserveRequest(err) != nil { + return nil, err + } + return rules.ExtractRules(page) +} diff --git a/pkg/util/util.go b/pkg/util/util.go index 4ec3c995..40e05441 100644 --- a/pkg/util/util.go +++ b/pkg/util/util.go @@ -6,14 +6,30 @@ import ( "fmt" "time" + "github.com/container-storage-interface/spec/lib/go/csi" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/strategicpatch" clientset "k8s.io/client-go/kubernetes" + "k8s.io/klog/v2" ) +// CutString255 makes sure the string length doesn't exceed 255, which is usually the maximum string length in OpenStack. +func CutString255(original string) string { + ret := original + if len(original) > 255 { + ret = original[:255] + } + return ret +} + +// Sprintf255 formats according to a format specifier and returns the resulting string with a maximum length of 255 characters. +func Sprintf255(format string, args ...interface{}) string { + return CutString255(fmt.Sprintf(format, args...)) +} + // MyDuration is the encoding.TextUnmarshaler interface for time.Duration type MyDuration struct { time.Duration @@ -78,25 +94,50 @@ func RoundUpSize(volumeSizeBytes int64, allocationUnitBytes int64) int64 { func PatchService(ctx context.Context, client clientset.Interface, cur, mod *v1.Service) error { curJSON, err := json.Marshal(cur) if err != nil { - return fmt.Errorf("failed to serialize current service object: %s", err) + return fmt.Errorf("failed to serialize current service object: %v", err) } modJSON, err := json.Marshal(mod) if err != nil { - return fmt.Errorf("failed to serialize modified service object: %s", err) + return fmt.Errorf("failed to serialize modified service object: %v", err) } patch, err := strategicpatch.CreateTwoWayMergePatch(curJSON, modJSON, v1.Service{}) if err != nil { - return fmt.Errorf("failed to create 2-way merge patch: %s", err) + return fmt.Errorf("failed to create 2-way merge patch: %v", err) } if len(patch) == 0 || string(patch) == "{}" { return nil } _, err = client.CoreV1().Services(cur.Namespace).Patch(ctx, cur.Name, types.StrategicMergePatchType, patch, metav1.PatchOptions{}) if err != nil { - return fmt.Errorf("failed to patch service object %s/%s: %s", cur.Namespace, cur.Name, err) + return fmt.Errorf("failed to patch service object %s/%s: %v", cur.Namespace, cur.Name, err) } return nil } + +func GetAZFromTopology(topologyKey string, requirement *csi.TopologyRequirement) string { + var zone string + var exists bool + + defer func() { klog.V(1).Infof("detected AZ from the topology: %s", zone) }() + klog.V(4).Infof("preferred topology requirement: %+v", requirement.GetPreferred()) + klog.V(4).Infof("requisite topology requirement: %+v", requirement.GetRequisite()) + + for _, topology := range requirement.GetPreferred() { + zone, exists = topology.GetSegments()[topologyKey] + if exists { + return zone + } + } + + for _, topology := range requirement.GetRequisite() { + zone, exists = topology.GetSegments()[topologyKey] + if exists { + return zone + } + } + 
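A quick check of the 255-character helpers introduced in pkg/util above, inlined here so the sketch runs standalone (the format string mirrors the member-name pattern used in SeriallyReconcilePoolMembers; note the cut is byte-based, which matches OpenStack's byte-limited name columns):

```go
package main

import (
	"fmt"
	"strings"
)

func cutString255(s string) string {
	if len(s) > 255 {
		return s[:255]
	}
	return s
}

func sprintf255(format string, args ...interface{}) string {
	return cutString255(fmt.Sprintf(format, args...))
}

func main() {
	longNode := strings.Repeat("x", 300) // a pathologically long node name
	name := sprintf255("member_%s_%s_%d", longNode, "10.0.0.5", 31000)
	fmt.Println(len(name)) // 255 — the usual OpenStack name-length cap
}
```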
+ return zone } diff --git a/release-procedure.md index 6e851847..febf267d 100644 --- a/release-procedure.md +++ b/release-procedure.md @@ -1,32 +1,42 @@ # Release Procedure -Cloud Provider OpenStack Release is done in sync with kubernetes/kubernetes, Minor versions can be released intermittently for critical bug fixes. - -## Before Release - -Update cloud-provider-openstack to kubernetes/kubernetes latest release. Make Sure all CI check passed. +The Cloud Provider OpenStack release is done in sync with +kubernetes/kubernetes. Minor versions can be released intermittently for +critical bug fixes. ## Making a Release -1. Checkout the release branch +1. Check out the release branch. -``` +```bash $ git fetch upstream -$ git checkout -b release-X.Y upstream/release-X.Y +$ git checkout -b my-release upstream/release-X.Y ``` -2. Update manifests with new release images, create a PR against release branch to update. +2. Update the version strings to the expected release version. -3. Make tag and push to upstream repo. +Make changes in the `docs/manifests/tests/examples` directories using the +`hack/bump-release.sh` script by running the following command: +```bash +$ hack/bump-release.sh 28 29 0 ``` -$ git tag vX.Y.Z + +This will replace `1.28.x`/`2.28.x` with `1.29.0`/`2.29.0` strings in the +`docs/manifests/tests/examples` directories. Ensure that you double-check the +diff before committing the changes. Unrelated changes must not be shipped. + +3. Create a new pull request (PR) and make sure all CI checks have passed. + +4. Once the PR is merged, make a tag and push it to the upstream repository. + +```bash +$ git checkout -b release-X.Y upstream/release-X.Y +$ git pull upstream release-X.Y --tags +$ git tag -m "Release for cloud-provider-openstack to support Kubernetes release x" vX.Y.Z $ git push upstream vX.Y.Z ``` -4. [Github Actions](https://github.com/kubernetes/cloud-provider-openstack/actions/workflows/release-cpo.yaml) will make [new draft release](https://github.com/kubernetes/cloud-provider-openstack/releases) to repository. -Cloudbuild should build new images to gcr.io/k8s-staging-provider-os. - -5. Make PR https://github.com/kubernetes/k8s.io/blob/main/k8s.gcr.io/images/k8s-staging-provider-os/images.yaml to promote gcr.io images to registry.k8s.io. +5. [GitHub Actions](https://github.com/kubernetes/cloud-provider-openstack/actions/workflows/release-cpo.yaml) will create new [Docker images](https://console.cloud.google.com/gcr/images/k8s-staging-provider-os) and generate a [new draft release](https://github.com/kubernetes/cloud-provider-openstack/releases) in the repository. -6. Make release notes and publish the release after the new images are published. +6. Create release notes using the "Generate release notes" button in the GitHub "New release" UI and publish the release. diff --git a/tests/ci-csi-cinder-e2e.sh b/tests/ci-csi-cinder-e2e.sh index c1d40b9d..c3b6469c 100755 --- a/tests/ci-csi-cinder-e2e.sh +++ b/tests/ci-csi-cinder-e2e.sh @@ -29,10 +29,7 @@ set -o pipefail REPO_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
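Back to GetAZFromTopology, which closed just above the release-procedure hunk: preferred segments are consulted before requisite ones, so a zone present in both comes from Preferred. A trimmed, runnable sketch (klog elided; the zone key shown is the cinder CSI topology key):

```go
package main

import (
	"fmt"

	"github.com/container-storage-interface/spec/lib/go/csi"
)

// Trimmed copy of the helper above, logging removed.
func getAZFromTopology(topologyKey string, req *csi.TopologyRequirement) string {
	for _, t := range req.GetPreferred() {
		if zone, ok := t.GetSegments()[topologyKey]; ok {
			return zone
		}
	}
	for _, t := range req.GetRequisite() {
		if zone, ok := t.GetSegments()[topologyKey]; ok {
			return zone
		}
	}
	return ""
}

func main() {
	key := "topology.cinder.csi.openstack.org/zone"
	req := &csi.TopologyRequirement{
		Requisite: []*csi.Topology{{Segments: map[string]string{key: "nova-1"}}},
		Preferred: []*csi.Topology{{Segments: map[string]string{key: "nova-2"}}},
	}
	// Preferred wins over requisite when both carry the key.
	fmt.Println(getAZFromTopology(key, req)) // nova-2
}
```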
cd "${REPO_ROOT}" || exit 1 -# PULL_NUMBER and PULL_BASE_REF are Prow job environment variables -PR_NUMBER="${PULL_NUMBER:-}" -[[ -z $PR_NUMBER ]] && echo "PR_NUMBER is required" && exit 1 -PR_BRANCH="${PULL_BASE_REF:-master}" +GOPATH=${PWD%/*/*/*} # /home/prow/go will be 3 directories up from where code is CONFIG_ANSIBLE="${CONFIG_ANSIBLE:-"true"}" RESOURCE_TYPE="${RESOURCE_TYPE:-"gce-project"}" ARTIFACTS="${ARTIFACTS:-${PWD}/_artifacts}" @@ -44,7 +41,8 @@ cleanup() { } trap cleanup EXIT -python3 -m pip install requests ansible +apt-get update +apt-get install -y python3-requests ansible # If BOSKOS_HOST is set then acquire a resource of type ${RESOURCE_TYPE} from Boskos. if [ -n "${BOSKOS_HOST:-}" ]; then @@ -97,6 +95,11 @@ stdout_callback = debug EOF fi +# Upload CPO code +scp -i ~/.ssh/google_compute_engine \ + -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \ + -r ${GOPATH}/* ${USERNAME}@${PUBLIC_IP}:/root/ + # Run ansible playbook on the CI host, e.g. a VM in GCP # USERNAME and PUBLIC_IP are global env variables set after creating the CI host. ansible-playbook -v \ @@ -104,9 +107,7 @@ ansible-playbook -v \ --private-key ~/.ssh/google_compute_engine \ --inventory ${PUBLIC_IP}, \ --ssh-common-args "-o StrictHostKeyChecking=no" \ - tests/playbooks/test-csi-cinder-e2e.yaml \ - -e github_pr=${PR_NUMBER} \ - -e github_pr_branch=${PR_BRANCH} + tests/playbooks/test-csi-cinder-e2e.yaml exit_code=$? # Fetch cinder-csi tests logs for debugging purpose diff --git a/tests/ci-csi-manila-e2e.sh b/tests/ci-csi-manila-e2e.sh index dd9f146f..63bf7647 100755 --- a/tests/ci-csi-manila-e2e.sh +++ b/tests/ci-csi-manila-e2e.sh @@ -29,10 +29,7 @@ set -o pipefail REPO_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. cd "${REPO_ROOT}" || exit 1 -# PULL_NUMBER and PULL_BASE_REF are Prow job environment variables -PR_NUMBER="${PULL_NUMBER:-}" -[[ -z $PR_NUMBER ]] && echo "PR_NUMBER is required" && exit 1 -PR_BRANCH="${PULL_BASE_REF:-master}" +GOPATH=${PWD%/*/*/*} # /home/prow/go will be 3 directories up from where code is CONFIG_ANSIBLE="${CONFIG_ANSIBLE:-"true"}" RESOURCE_TYPE="${RESOURCE_TYPE:-"gce-project"}" ARTIFACTS="${ARTIFACTS:-${PWD}/_artifacts}" @@ -44,7 +41,8 @@ cleanup() { } trap cleanup EXIT -python3 -m pip install requests ansible +apt-get update +apt-get install -y python3-requests ansible # If BOSKOS_HOST is set then acquire a resource of type ${RESOURCE_TYPE} from Boskos. if [ -n "${BOSKOS_HOST:-}" ]; then @@ -97,6 +95,11 @@ stdout_callback = debug EOF fi +# Upload CPO code +scp -i ~/.ssh/google_compute_engine \ + -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \ + -r ${GOPATH}/* ${USERNAME}@${PUBLIC_IP}:/root/ + # Run ansible playbook on the CI host, e.g. a VM in GCP # USERNAME and PUBLIC_IP are global env variables set after creating the CI host. ansible-playbook -v \ @@ -104,9 +107,7 @@ ansible-playbook -v \ --private-key ~/.ssh/google_compute_engine \ --inventory ${PUBLIC_IP}, \ --ssh-common-args "-o StrictHostKeyChecking=no" \ - tests/playbooks/test-csi-manila-e2e.yaml \ - -e github_pr=${PR_NUMBER} \ - -e github_pr_branch=${PR_BRANCH} + tests/playbooks/test-csi-manila-e2e.yaml exit_code=$? # Fetch manila-csi tests results diff --git a/tests/ci-occm-e2e.sh b/tests/ci-occm-e2e.sh index f03f618b..71a6a9f8 100755 --- a/tests/ci-occm-e2e.sh +++ b/tests/ci-occm-e2e.sh @@ -29,13 +29,11 @@ set -o pipefail REPO_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. 
cd "${REPO_ROOT}" || exit 1 -# PULL_NUMBER and PULL_BASE_REF are Prow job environment variables -PR_NUMBER="${PULL_NUMBER:-}" -[[ -z $PR_NUMBER ]] && echo "PR_NUMBER is required" && exit 1 -PR_BRANCH="${PULL_BASE_REF:-master}" +GOPATH=${PWD%/*/*/*} # /home/prow/go will be 3 directories up from where code is CONFIG_ANSIBLE="${CONFIG_ANSIBLE:-"true"}" RESOURCE_TYPE="${RESOURCE_TYPE:-"gce-project"}" ARTIFACTS="${ARTIFACTS:-${PWD}/_artifacts}" +OCTAVIA_PROVIDER="${OCTAVIA_PROVIDER:-""}" mkdir -p "${ARTIFACTS}/logs" cleanup() { @@ -44,7 +42,8 @@ cleanup() { } trap cleanup EXIT -python3 -m pip install requests ansible +apt-get update +apt-get install -y python3-requests ansible # If BOSKOS_HOST is set then acquire a resource of type ${RESOURCE_TYPE} from Boskos. if [ -n "${BOSKOS_HOST:-}" ]; then @@ -97,6 +96,11 @@ stdout_callback = debug EOF fi +# Upload CPO code +scp -i ~/.ssh/google_compute_engine \ + -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \ + -r ${GOPATH}/* ${USERNAME}@${PUBLIC_IP}:/root/ + # Run ansible playbook on the CI host, e.g. a VM in GCP # USERNAME and PUBLIC_IP are global env variables set after creating the CI host. ansible-playbook -v \ @@ -105,14 +109,14 @@ ansible-playbook -v \ --inventory ${PUBLIC_IP}, \ --ssh-common-args "-o StrictHostKeyChecking=no" \ tests/playbooks/test-occm-e2e.yaml \ - -e github_pr=${PR_NUMBER} \ - -e github_pr_branch=${PR_BRANCH} + -e octavia_provider=${OCTAVIA_PROVIDER} \ + -e run_e2e=true exit_code=$? # Fetch devstack logs for debugging purpose -#scp -i ~/.ssh/google_compute_engine \ -# -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \ -# -r ${USERNAME}@${PUBLIC_IP}:/opt/stack/logs $ARTIFACTS/logs/devstack || true +# scp -i ~/.ssh/google_compute_engine \ +# -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \ +# -r ${USERNAME}@${PUBLIC_IP}:/opt/stack/logs $ARTIFACTS/logs/devstack || true # Fetch octavia amphora image build logs for debugging purpose scp -i ~/.ssh/google_compute_engine \ diff --git a/tests/e2e/cloudprovider/test-lb-service.sh b/tests/e2e/cloudprovider/test-lb-service.sh index c168aa2d..3a7a8dc9 100755 --- a/tests/e2e/cloudprovider/test-lb-service.sh +++ b/tests/e2e/cloudprovider/test-lb-service.sh @@ -18,14 +18,15 @@ CLUSTER_TENANT=${CLUSTER_TENANT:-"demo"} CLUSTER_USER=${CLUSTER_USER:-"demo"} LB_SUBNET_NAME=${LB_SUBNET_NAME:-"private-subnet"} AUTO_CLEAN_UP=${AUTO_CLEAN_UP:-"true"} +OCTAVIA_PROVIDER=${OCTAVIA_PROVIDER:-""} function delete_resources() { + ERROR_CODE="$?" + if [[ ${AUTO_CLEAN_UP} != "true" ]]; then exit ${ERROR_CODE} fi - ERROR_CODE="$?" 
- printf "\n>>>>>>> Deleting k8s services\n" kubectl -n ${NAMESPACE} get svc -o name | xargs -r kubectl -n $NAMESPACE delete printf "\n>>>>>>> Deleting k8s deployments\n" @@ -34,6 +35,12 @@ function delete_resources() { printf "\n>>>>>>> Deleting openstack load balancer \n" openstack loadbalancer delete test_shared_user_lb --cascade + printf "\n>>>>>>> Deleting openstack FIPs \n" + fips=$(openstack floating ip list --tag occm-test -f value -c ID) + for fip in $fips; do + openstack floating ip delete ${fip} + done + if [[ "$ERROR_CODE" != "0" ]]; then printf "\n>>>>>>> Dump openstack-cloud-controller-manager logs \n" pod_name=$(kubectl -n kube-system get pod -l k8s-app=openstack-cloud-controller-manager -o name | awk 'NR==1 {print}') @@ -284,6 +291,11 @@ function test_forwarded { local public_ip=$(curl -sS ifconfig.me) local local_ip=$(ip route get 8.8.8.8 | head -1 | awk '{print $7}') + if [[ ${OCTAVIA_PROVIDER} == "ovn" ]]; then + printf "\n>>>>>>> Skipping Service ${service} test for OVN provider\n" + return 0 + fi + printf "\n>>>>>>> Create the Service ${service}\n" cat <>>>>>> Creating openstack load balancer: --vip-subnet-id $subid \n" - lbID=$(openstack loadbalancer create --vip-subnet-id $subid --name test_shared_user_lb -f value -c id) + provider_option="" + if [[ ${OCTAVIA_PROVIDER} != "" ]]; then + provider_option="--provider=${OCTAVIA_PROVIDER}" + fi + lbID=$(openstack loadbalancer create --vip-subnet-id $subid --name test_shared_user_lb -f value -c id ${provider_option}) if [ $? -ne 0 ]; then printf "\n>>>>>>> FAIL: failed to create load balancer\n" exit 1 @@ -725,6 +736,20 @@ function test_shared_user_lb { printf "\n>>>>>>> Waiting for openstack load balancer $lbID ACTIVE after creating listener \n" wait_for_loadbalancer $lbID + printf "\n>>>>>>> Getting an external network \n" + extNetID=$(openstack network list --external -f value -c ID | head -1) + if [[ -z ${extNetID} ]]; then + printf "\n>>>>>>> FAIL: failed to find an external network\n" + exit 1 + fi + fip=$(openstack floating ip create --tag occm-test -f value -c id ${extNetID}) + if [ $? -ne 0 ]; then + printf "\n>>>>>>> FAIL: failed to create FIP\n" + exit 1 + fi + vip=$(openstack loadbalancer show $lbID -f value -c vip_port_id) + openstack floating ip set --port ${vip} ${fip} + local service1="test-shared-user-lb" printf "\n>>>>>>> Create Service ${service1}\n" cat <<EOF >> {{ ansible_user_dir }}/cloud.conf + lb-provider={{ octavia_provider }} + EOF + fi + + if [[ "{{ octavia_provider }}" == "ovn" ]]; then + cat <<EOF >> {{ ansible_user_dir }}/cloud.conf + lb-method=SOURCE_IP_PORT + EOF + fi + kubectl create secret -n kube-system generic cloud-config --from-file={{ ansible_user_dir }}/cloud.conf +- name: Replace manifests + shell: + executable: /bin/bash + cmd: | + VERSION="v0.0.99" # Fake version, but in proper format.
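The cloud.conf edits in the task above were partially garbled in transit, but the underlying pattern is an append-mode heredoc followed by recreating the config secret — roughly (assuming, as OCCM cloud.conf files usually have, that [LoadBalancer] is the section at the end of the file being extended):

```bash
# Append provider-specific options, then hand the file to the cluster as a secret.
cat <<EOF >> cloud.conf
lb-provider=ovn
lb-method=SOURCE_IP_PORT
EOF
kubectl create secret -n kube-system generic cloud-config --from-file=cloud.conf
```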
+ + cd $GOPATH/src/k8s.io/cloud-provider-openstack + # replace image with built image + sed -i "s#registry.k8s.io/provider-os/openstack-cloud-controller-manager:[^'\"]\+#{{ remote_registry_host }}/openstack-cloud-controller-manager:${VERSION}#" manifests/controller-manager/openstack-cloud-controller-manager-ds.yaml + sed -i "s#node-role.kubernetes.io/control-plane: \"\"#node-role.kubernetes.io/control-plane: \"true\"#" manifests/controller-manager/openstack-cloud-controller-manager-ds.yaml + sed -i "s#--v=1#--v=5#" manifests/controller-manager/openstack-cloud-controller-manager-ds.yaml + cat manifests/controller-manager/openstack-cloud-controller-manager-ds.yaml + - name: Deploy openstack-cloud-controller-manager shell: executable: /bin/bash cmd: | - set -x - - cat < /var/log/csi-pod/csi-cinder-controllerplugin.log kubectl logs daemonset/csi-cinder-nodeplugin -n kube-system -c cinder-csi-plugin > /var/log/csi-pod/csi-cinder-nodeplugin.log + kubectl -n kube-system exec $(kubectl -n kube-system get pod -l app=csi-cinder-nodeplugin -o name) -c cinder-csi-plugin -- dmesg -T > /var/log/csi-pod/dmesg.log + ignore_errors: true + +- name: Show dmesg logs + become: true + shell: + executable: /bin/bash + cmd: | + sudo dmesg -T > /var/log/csi-pod/dmesg_local.log ignore_errors: true - fail: msg="The execution has failed because of errors." diff --git a/tests/playbooks/roles/install-csi-manila/defaults/main.yaml b/tests/playbooks/roles/install-csi-manila/defaults/main.yaml index 796335d7..c455cdad 100644 --- a/tests/playbooks/roles/install-csi-manila/defaults/main.yaml +++ b/tests/playbooks/roles/install-csi-manila/defaults/main.yaml @@ -1,5 +1,4 @@ --- -github_pr: 123 devstack_workdir: "{{ ansible_user_dir }}/devstack" # Used for uploading image to local registry. diff --git a/tests/playbooks/roles/install-csi-manila/tasks/main.yaml b/tests/playbooks/roles/install-csi-manila/tasks/main.yaml index 6cbbe7c1..a95001e6 100644 --- a/tests/playbooks/roles/install-csi-manila/tasks/main.yaml +++ b/tests/playbooks/roles/install-csi-manila/tasks/main.yaml @@ -3,23 +3,22 @@ shell: executable: /bin/bash cmd: | - rm -rf {{ ansible_user_dir }}/src/k8s.io/cloud-provider-openstack - mkdir -p {{ ansible_user_dir }}/src/k8s.io; cd $_ - git clone https://github.com/kubernetes/cloud-provider-openstack - cd cloud-provider-openstack - git fetch origin +refs/pull/{{ github_pr }}/merge - git checkout FETCH_HEAD; git checkout -b PR{{ github_pr }} + if [ ! -d $GOPATH/src/k8s.io/cloud-provider-openstack ]; then + mkdir -p $GOPATH/src/k8s.io; cd $_ + git clone https://github.com/kubernetes/cloud-provider-openstack + fi - name: Build and upload manila-csi-plugin image shell: executable: /bin/bash cmd: | - cd {{ ansible_user_dir }}/src/k8s.io/cloud-provider-openstack + cd $GOPATH/src/k8s.io/cloud-provider-openstack + VERSION="v0.0.99" # Fake version, but in proper format. make push-multiarch-image-manila-csi-plugin \ ARCHS='amd64' \ - REGISTRY={{ image_registry_host }} \ - VERSION=v0.0.{{ github_pr }} + VERSION=${VERSION} \ + REGISTRY={{ image_registry_host }} - name: Prepare cloud config shell: @@ -118,12 +117,13 @@ shell: executable: /bin/bash cmd: | + VERSION="v0.0.99" # Fake version, but in proper format. 
cd {{ ansible_user_dir }}/src/k8s.io/cloud-provider-openstack/charts/manila-csi-plugin cat <> override-helm-values.yaml csimanila: image: repository: {{ remote_registry_host }}/manila-csi-plugin - tag: {{ github_pr }} + tag: ${VERSION} shareProtocols: - protocolSelector: NFS fsGroupPolicy: None @@ -224,7 +224,7 @@ --ginkgo.v \ --ginkgo.noColor \ --ginkgo.progress \ - --ginkgo.skip="\[Disruptive\]|\[sig-storage\]\s+\[manila-csi-e2e\]\s+CSI\s+Volumes\s+\[Driver:\s+nfs.manila.csi.openstack.org\]\s+\[Testpattern:\s+Dynamic\s+PV\s+\(default\s+fs\)\]\s+provisioning\s+should\s+provision\s+storage\s+with\s+any\s+volume\s+data\s+source\s+\[Serial\]|should\s+provision\s+storage\s+with\s+snapshot\s+data\s+source" \ + --ginkgo.skip="\[Disruptive\]|\[sig-storage\]\s+\[manila-csi-e2e\]\s+CSI\s+Volumes\s+\[Driver:\s+nfs.manila.csi.openstack.org\]\s+\[Testpattern:\s+Dynamic\s+PV\s+\(default\s+fs\)\]\s+provisioning\s+should\s+provision\s+storage\s+with\s+any\s+volume\s+data\s+source\s+\[Serial\]|should\s+provision\s+storage\s+with\s+snapshot\s+data\s+source|restoring\s+snapshot\s+to\s+larger\s+size" \ --ginkgo.focus="\[manila-csi-e2e\]" \ -report-dir /var/log/csi-pod \ -timeout=0 | tee "/var/log/csi-pod/manila-csi-e2e.log" @@ -238,8 +238,8 @@ set -x set -e - kubectl logs statefulset/manila-openstack-manila-csi-controllerplugin -n default -c nfs-nodeplugin > /var/log/csi-pod/csi-manila-controllerplugin.log - kubectl logs daemonset/manila-openstack-manila-csi-nodeplugin -n default -c nfs-nodeplugin > /var/log/csi-pod/csi-manila-nodeplugin.log + kubectl logs -l app=openstack-manila-csi,component=controllerplugin -n default -c nfs-nodeplugin --tail=-1 > /var/log/csi-pod/csi-manila-controllerplugin.log + kubectl logs -l app=openstack-manila-csi,component=nodeplugin -n default -c nfs-nodeplugin --tail=-1 > /var/log/csi-pod/csi-manila-nodeplugin.log ignore_errors: true - fail: msg="The execution has failed because of errors." 
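One detail in the log-collection change that closes the playbook above: when `kubectl logs` is given a label selector instead of a pod name, it defaults to the last 10 lines per pod, so `--tail=-1` is what restores complete logs:

```bash
# Selector-based logs default to --tail=10; -1 means "all lines".
kubectl logs -l app=openstack-manila-csi,component=nodeplugin \
    -n default -c nfs-nodeplugin --tail=-1 > csi-manila-nodeplugin.log
```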
diff --git a/tests/playbooks/roles/install-devstack/defaults/main.yaml b/tests/playbooks/roles/install-devstack/defaults/main.yaml index 00113444..d5d704f4 100644 --- a/tests/playbooks/roles/install-devstack/defaults/main.yaml +++ b/tests/playbooks/roles/install-devstack/defaults/main.yaml @@ -1,14 +1,15 @@ --- user: "stack" workdir: "/home/{{ user }}/devstack" -branch: "stable/zed" +branch: "stable/2023.1" enable_services: - nova - glance - cinder - neutron - octavia + - ovn-octavia - barbican -octavia_amphora_url: "https://tarballs.opendev.org/openstack/octavia/test-images/test-only-amphora-x64-haproxy-ubuntu-focal.qcow2" +octavia_amphora_url: "https://tarballs.opendev.org/openstack/octavia/test-images/test-only-amphora-x64-haproxy-ubuntu-jammy.qcow2" octavia_amphora_dir: /opt/octavia-amphora octavia_amphora_filename: amphora-x64-haproxy.qcow2 diff --git a/tests/playbooks/roles/install-devstack/tasks/main.yml b/tests/playbooks/roles/install-devstack/tasks/main.yml index 390e1359..fa58b4a1 100644 --- a/tests/playbooks/roles/install-devstack/tasks/main.yml +++ b/tests/playbooks/roles/install-devstack/tasks/main.yml @@ -17,6 +17,10 @@ line: "{{ user }} ALL=(ALL) NOPASSWD: ALL" validate: 'visudo -cf %s' +- name: Update repositories cache + ansible.builtin.apt: + update_cache: yes + - name: Check if devstack is already installed shell: executable: /bin/bash diff --git a/tests/playbooks/roles/install-devstack/templates/local.conf.j2 b/tests/playbooks/roles/install-devstack/templates/local.conf.j2 index 2a1afc61..b75b4598 100644 --- a/tests/playbooks/roles/install-devstack/templates/local.conf.j2 +++ b/tests/playbooks/roles/install-devstack/templates/local.conf.j2 @@ -51,21 +51,24 @@ enable_service c-sch # Neutron enable_plugin neutron ${GIT_BASE}/openstack/neutron.git {{ branch }} enable_service q-svc -enable_service q-agt -enable_service q-dhcp -enable_service q-l3 -enable_service q-meta +enable_service q-ovn-metadata-agent +enable_service q-trunk +enable_service q-qos +enable_service ovn-controller +enable_service ovn-northd +enable_service ovs-vswitchd +enable_service ovsdb-server + +ML2_L3_PLUGIN="ovn-router,trunk,qos" +OVN_L3_CREATE_PUBLIC_NETWORK="True" +PUBLIC_BRIDGE_MTU="1430" + IP_VERSION=4 IPV4_ADDRS_SAFE_TO_USE=10.1.0.0/26 FIXED_RANGE=10.1.0.0/26 NETWORK_GATEWAY=10.1.0.1 FLOATING_RANGE=172.24.5.0/24 PUBLIC_NETWORK_GATEWAY=172.24.5.1 -Q_PLUGIN=ml2 -Q_ML2_TENANT_NETWORK_TYPE=vxlan -Q_DVR_MODE=legacy -Q_AGENT=openvswitch -Q_ML2_PLUGIN_MECHANISM_DRIVERS=openvswitch {% endif %} {% if "octavia" in enable_services %} @@ -75,6 +78,7 @@ enable_service octavia enable_service o-cw enable_service o-hm enable_service o-hk +enable_service o-da enable_service o-api LIBS_FROM_GIT+=,python-octaviaclient @@ -89,6 +93,10 @@ OCTAVIA_AMP_IMAGE_FILE={{ octavia_amphora_dir }}/{{ octavia_amphora_filename }} {% endif %} {% endif %} +{% if "ovn-octavia" in enable_services %} +enable_plugin ovn-octavia-provider https://opendev.org/openstack/ovn-octavia-provider {{ branch }} +{% endif %} + {% if "barbican" in enable_services %} # Barbican enable_plugin barbican ${GIT_BASE}/openstack/barbican.git {{ branch }} @@ -130,6 +138,9 @@ global_physnet_mtu = 1430 [[post-config|$OCTAVIA_CONF]] [api_settings] allow_tls_terminated_listeners = True +{% if "ovn-octavia" in enable_services %} +enabled_provider_drivers = amphora:'Octavia Amphora driver',ovn:'Octavia OVN driver' +{% endif %} [controller_worker] loadbalancer_topology = SINGLE amp_active_retries = 60 diff --git 
a/tests/playbooks/roles/install-docker/defaults/main.yml b/tests/playbooks/roles/install-docker/defaults/main.yml index b2fd8a8d..2482005c 100644 --- a/tests/playbooks/roles/install-docker/defaults/main.yml +++ b/tests/playbooks/roles/install-docker/defaults/main.yml @@ -1,2 +1,2 @@ --- -docker_version: 5:19.03.15~3-0~ubuntu-focal \ No newline at end of file +docker_version: 5:20.10.18~3-0~ubuntu-focal diff --git a/tests/playbooks/roles/install-docker/tasks/main.yml b/tests/playbooks/roles/install-docker/tasks/main.yml index adbc25fe..a2792142 100644 --- a/tests/playbooks/roles/install-docker/tasks/main.yml +++ b/tests/playbooks/roles/install-docker/tasks/main.yml @@ -14,7 +14,7 @@ - curl # curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - -- name: Add Docker’s official GPG key +- name: Add Docker's official GPG key apt_key: url: https://download.docker.com/linux/ubuntu/gpg state: present @@ -28,7 +28,7 @@ # apt-get update; apt install -y docker-ce= - name: Install docker-ce apt: - name: docker-ce={{ docker_version }} + name: docker-ce #={{ docker_version }} state: present update_cache: yes diff --git a/tests/playbooks/roles/install-golang/defaults/main.yml b/tests/playbooks/roles/install-golang/defaults/main.yml index 6d187369..6af34fed 100644 --- a/tests/playbooks/roles/install-golang/defaults/main.yml +++ b/tests/playbooks/roles/install-golang/defaults/main.yml @@ -1,5 +1,5 @@ --- -go_version: '1.20' +go_version: '1.21.5' arch: 'amd64' go_tarball: 'go{{ go_version }}.linux-{{ arch }}.tar.gz' go_download_location: 'https://go.dev/dl/{{ go_tarball }}' diff --git a/tests/playbooks/roles/install-k3s/defaults/main.yaml b/tests/playbooks/roles/install-k3s/defaults/main.yaml index 71f6a6f3..32ac816b 100644 --- a/tests/playbooks/roles/install-k3s/defaults/main.yaml +++ b/tests/playbooks/roles/install-k3s/defaults/main.yaml @@ -1,11 +1,11 @@ --- -k3s_release: v1.26.1+k3s1 +k3s_release: v1.29.0+k3s1 worker_node_count: 1 -cluster_token: "9a08jv.c0izixklcxtmnze7" +cluster_token: "K1039d1cf76d1f8b0e8b0d48e7c60d9c4a43c2e7a56de5d86f346f2288a2677f1d7::server:2acba4e60918c0e2d1f1d1a7c4e81e7b" devstack_workdir: "{{ ansible_user_dir }}/devstack" flavor_name: "ds2G" sg_name: "k3s_sg" keypair_name: "k3s_keypair" -image_url: "https://cloud-images.ubuntu.com/releases/focal/release/ubuntu-20.04-server-cloudimg-amd64.img" -image_name: "ubuntu-focal" +image_url: "https://cloud-images.ubuntu.com/releases/jammy/release/ubuntu-22.04-server-cloudimg-amd64.img" +image_name: "ubuntu-jammy" master_port_name: "k3s_master" diff --git a/tests/playbooks/roles/install-k3s/tasks/main.yaml b/tests/playbooks/roles/install-k3s/tasks/main.yaml index efdc3600..cd9acb76 100644 --- a/tests/playbooks/roles/install-k3s/tasks/main.yaml +++ b/tests/playbooks/roles/install-k3s/tasks/main.yaml @@ -16,8 +16,17 @@ set +x; source openrc admin admin > /dev/null; set -x openstack image show {{ image_name }} > /dev/null 2>&1 + if [[ "$?" != "0" ]]; then - curl -sSL {{ image_url }} -o {{ image_name }}.img + # retry ubuntu image download on failure, + # e.g. 
"curl: (35) OpenSSL SSL_connect: Connection reset by peer in connection to cloud-images.ubuntu.com:443" + tries=0 + until [ "$tries" -ge 5 ]; do + curl -sSL {{ image_url }} -o {{ image_name }}.img && break + echo "Error downloading an image" + ((tries++)) + sleep 10 + done openstack image create {{ image_name }} --container-format bare --disk-format qcow2 --public --file {{ image_name }}.img fi @@ -157,7 +166,7 @@ mkdir -p {{ ansible_user_dir }}/.kube scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i {{ ansible_user_dir }}/.ssh/id_rsa ubuntu@{{ k3s_fip }}:/etc/rancher/k3s/k3s.yaml {{ ansible_user_dir }}/.kube/config - curl -sLO# https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl + curl -sLO# https://dl.k8s.io/release/$(curl -Ls https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl chmod +x ./kubectl; sudo mv ./kubectl /usr/local/bin/kubectl kubectl config set-cluster default --server=https://{{ k3s_fip }}:6443 --kubeconfig {{ ansible_user_dir }}/.kube/config diff --git a/tests/playbooks/test-csi-cinder-e2e.yaml b/tests/playbooks/test-csi-cinder-e2e.yaml index 08d5d2a3..52fec9d1 100644 --- a/tests/playbooks/test-csi-cinder-e2e.yaml +++ b/tests/playbooks/test-csi-cinder-e2e.yaml @@ -4,9 +4,8 @@ gather_facts: true vars: - e2e_test_version: v1.25.4 + e2e_test_version: v1.29.0 user: stack - github_pr: 123 global_env: {} devstack_workdir: /home/{{ user }}/devstack @@ -20,13 +19,11 @@ - cinder - role: install-k3s worker_node_count: 0 - k3s_release: v1.26.1+k3s1 - role: install-docker - role: install-docker-registry cert_hosts: ' ["{{ ansible_default_ipv4.address }}"]' - role: install-cpo-occm run_e2e: false - build_image: false environment: "{{ global_env }}" - role: install-csi-cinder environment: "{{ global_env }}" diff --git a/tests/playbooks/test-csi-manila-e2e.yaml b/tests/playbooks/test-csi-manila-e2e.yaml index 33d4ed64..38b4c904 100644 --- a/tests/playbooks/test-csi-manila-e2e.yaml +++ b/tests/playbooks/test-csi-manila-e2e.yaml @@ -5,7 +5,6 @@ vars: user: stack - github_pr: 123 global_env: {} devstack_workdir: /home/{{ user }}/devstack @@ -19,13 +18,11 @@ - manila - role: install-k3s worker_node_count: 0 - k3s_release: v1.26.1+k3s1 - role: install-docker - role: install-docker-registry cert_hosts: ' ["{{ ansible_default_ipv4.address }}"]' - role: install-cpo-occm run_e2e: false - build_image: false - role: install-helm - role: install-csi-manila environment: "{{ global_env }}" diff --git a/tests/playbooks/test-occm-e2e.yaml b/tests/playbooks/test-occm-e2e.yaml index fbb6d1ec..b5290477 100644 --- a/tests/playbooks/test-occm-e2e.yaml +++ b/tests/playbooks/test-occm-e2e.yaml @@ -5,9 +5,9 @@ vars: user: stack - github_pr: 123 global_env: {} devstack_workdir: /home/{{ user }}/devstack + octavia_provider: "" roles: - role: install-golang @@ -18,14 +18,14 @@ - cinder - neutron - octavia + - ovn-octavia - barbican - role: install-k3s worker_node_count: 0 - k3s_release: v1.26.1+k3s1 - role: install-docker - role: install-docker-registry cert_hosts: ' ["{{ ansible_default_ipv4.address }}"]' - role: install-cpo-occm - run_e2e: true - build_image: true + run_e2e: "{{ run_e2e }}" + octavia_provider: "{{ octavia_provider }}" environment: "{{ global_env }}" diff --git a/tests/sanity/cinder/fakecloud.go b/tests/sanity/cinder/fakecloud.go index e167ddca..d1bbed94 100644 --- a/tests/sanity/cinder/fakecloud.go +++ b/tests/sanity/cinder/fakecloud.go @@ -57,7 +57,7 @@ func 
(cloud *cloud) DeleteVolume(volumeID string) error { } func (cloud *cloud) AttachVolume(instanceID, volumeID string) (string, error) { - // update the volume with attachement + // update the volume with attachment vol, ok := cloud.volumes[volumeID] diff --git a/tests/sanity/manila/fakecsiclient.go b/tests/sanity/manila/fakecsiclient.go index f18bba8b..a85e72bc 100644 --- a/tests/sanity/manila/fakecsiclient.go +++ b/tests/sanity/manila/fakecsiclient.go @@ -99,11 +99,11 @@ func (c fakeNodeSvcClient) UnpublishVolume(ctx context.Context, req *csi.NodeUnp type fakeCSIClientBuilder struct{} func (b fakeCSIClientBuilder) NewConnection(string) (*grpc.ClientConn, error) { - return grpc.Dial("", grpc.WithTransportCredentials(insecure.NewCredentials())) + return grpc.Dial("localhost", grpc.WithTransportCredentials(insecure.NewCredentials())) } func (b fakeCSIClientBuilder) NewConnectionWithContext(context.Context, string) (*grpc.ClientConn, error) { - return grpc.Dial("", grpc.WithTransportCredentials(insecure.NewCredentials())) + return grpc.Dial("localhost", grpc.WithTransportCredentials(insecure.NewCredentials())) } func (b fakeCSIClientBuilder) NewNodeServiceClient(conn *grpc.ClientConn) csiclient.Node { diff --git a/tests/sanity/manila/sanity_test.go b/tests/sanity/manila/sanity_test.go index 66249f9b..8fb7b17b 100644 --- a/tests/sanity/manila/sanity_test.go +++ b/tests/sanity/manila/sanity_test.go @@ -23,7 +23,6 @@ import ( "github.com/kubernetes-csi/csi-test/v5/pkg/sanity" "k8s.io/cloud-provider-openstack/pkg/csi/manila" - "k8s.io/cloud-provider-openstack/pkg/csi/manila/options" ) func TestDriver(t *testing.T) { @@ -44,7 +43,6 @@ func TestDriver(t *testing.T) { FwdCSIEndpoint: fwdEndpoint, ManilaClientBuilder: &fakeManilaClientBuilder{}, CSIClientBuilder: &fakeCSIClientBuilder{}, - CompatOpts: &options.CompatibilityOptions{}, }) if err != nil { diff --git a/tools/csi-deps-check.sh b/tools/csi-deps-check.sh new file mode 100755 index 00000000..097c409e --- /dev/null +++ b/tools/csi-deps-check.sh @@ -0,0 +1,41 @@ +#!/bin/sh + +# Copyright 2022 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit + +# We will check all necessary utils in the image. +# They all have to launch without errors. + +# These utils are used by +# go mod k8s.io/mount-utils +/bin/mount -V +/bin/umount -V +/sbin/blkid -V +/sbin/blockdev -V +/sbin/dumpe2fs -V +/sbin/fsck --version +/sbin/mke2fs -V +/sbin/mkfs.ext4 -V +/sbin/mkfs.xfs -V +/usr/sbin/xfs_io -V +/sbin/xfs_repair -V +/usr/sbin/xfs_growfs -V +/bin/btrfs --version + +# These utils are used by +# go mod k8s.io/cloud-provider-openstack/pkg/util/mount +/bin/udevadm --version +/bin/findmnt -V diff --git a/tools/csi-deps.sh b/tools/csi-deps.sh new file mode 100755 index 00000000..58f81e38 --- /dev/null +++ b/tools/csi-deps.sh @@ -0,0 +1,116 @@ +#!/bin/sh -x + +# Copyright 2022 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit +set -o nounset + +# +# We will copy all dependencies for the CSI Node driver to the /dest directory; +# all utils are used by cinder-csi-plugin +# to format/mount/unmount/resize the volumes. +# +# It is very important to have a slim image, +# because it runs as root (privileged mode) on the nodes +# + +DEST=/dest + +copy_deps() { + PROG="$1" + + mkdir -p "${DEST}$(dirname $PROG)" + + if [ -d "${PROG}" ]; then + rsync -av "${PROG}/" "${DEST}${PROG}/" + else + cp -Lv "$PROG" "${DEST}${PROG}" + fi + + if [ -x ${PROG} -o $(/usr/bin/ldd "$PROG" >/dev/null) ]; then + DEPS="$(/usr/bin/ldd "$PROG" | /bin/grep '=>' | /usr/bin/awk '{ print $3 }')" + + for d in $DEPS; do + mkdir -p "${DEST}$(dirname $d)" + cp -Lv "$d" "${DEST}${d}" + done + fi +} + +# Common lib /lib64/ld-linux-*.so.2, +# needed by all utils +ARCH=$(uname -m) +if [ $ARCH = "aarch64" ] || [ $ARCH = "armv7l" ]; then + mkdir -p ${DEST}/lib && cp -Lv /lib/ld-linux-*.so.* ${DEST}/lib/ +elif [ $ARCH = "s390x" ]; then + mkdir -p ${DEST}/lib && cp -Lv /lib/ld64.so.* ${DEST}/lib/ +elif [ $ARCH = "ppc64le" ]; then + mkdir -p ${DEST}/lib64 && cp -Lv /lib64/ld64.so.* ${DEST}/lib64/ +else + mkdir -p ${DEST}/lib64 && cp -Lv /lib64/ld-linux-*.so.* ${DEST}/lib64/ +fi + +# To collect dmesg logs +copy_deps /usr/bin/dmesg || true +copy_deps /bin/dmesg || true + +# These utils are used by +# go mod k8s.io/mount-utils +copy_deps /etc/mke2fs.conf +copy_deps /bin/mount +copy_deps /bin/umount +copy_deps /sbin/blkid +copy_deps /sbin/blockdev +copy_deps /sbin/dumpe2fs +copy_deps /sbin/fsck +copy_deps /sbin/fsck.xfs +cp /sbin/fsck* ${DEST}/sbin/ +copy_deps /sbin/e2fsck +# from pkg e2fsprogs - e2image, e2label, e2scrub, etc.
+cp /sbin/e2* ${DEST}/sbin/ +copy_deps /sbin/mke2fs +copy_deps /sbin/resize2fs +cp /sbin/mkfs* ${DEST}/sbin/ +copy_deps /sbin/mkfs.xfs +copy_deps /sbin/xfs_repair +copy_deps /usr/sbin/xfs_growfs +copy_deps /usr/sbin/xfs_io +cp /usr/sbin/xfs* ${DEST}/usr/sbin/ +copy_deps /bin/btrfs +cp /bin/btrfs* ${DEST}/bin/ + +# These utils are used by +# go mod k8s.io/cloud-provider-openstack/pkg/util/mount +copy_deps /bin/udevadm +copy_deps /lib/udev/rules.d +copy_deps /bin/findmnt + +# Edgeless cryptsetup deps +cp /sbin/dmsetup ${DEST}/sbin/ +mkdir -p ${DEST}/lib/x86_64-linux-gnu/ +mkdir -p ${DEST}/usr/lib/x86_64-linux-gnu/ +cp -Lv /lib/x86_64-linux-gnu/libgcc_s.so.1 ${DEST}/lib/x86_64-linux-gnu/ +cp -Lv /lib/x86_64-linux-gnu/libcryptsetup.so* ${DEST}/lib/x86_64-linux-gnu/ +cp -Lv /lib/x86_64-linux-gnu/libdevmapper.so* ${DEST}/lib/x86_64-linux-gnu/ +cp -Lv /lib/x86_64-linux-gnu/libselinux.so* ${DEST}/lib/x86_64-linux-gnu/ +cp -Lv /lib/x86_64-linux-gnu/libm.so* ${DEST}/lib/x86_64-linux-gnu/ +cp -Lv /lib/x86_64-linux-gnu/libresolv.so* ${DEST}/lib/x86_64-linux-gnu/ +cp -Lv /usr/lib/x86_64-linux-gnu/libuuid.so* ${DEST}/usr/lib/x86_64-linux-gnu/ +cp -Lv /usr/lib/x86_64-linux-gnu/libcrypto.so* ${DEST}/usr/lib/x86_64-linux-gnu/ +cp -Lv /usr/lib/x86_64-linux-gnu/libargon2.so* ${DEST}/usr/lib/x86_64-linux-gnu/ +cp -Lv /usr/lib/x86_64-linux-gnu/libjson-c.so* ${DEST}/usr/lib/x86_64-linux-gnu/ +cp -Lv /usr/lib/x86_64-linux-gnu/libblkid.so* ${DEST}/usr/lib/x86_64-linux-gnu/ +cp -Lv /usr/lib/x86_64-linux-gnu/libudev.so* ${DEST}/usr/lib/x86_64-linux-gnu/ +cp -Lv /usr/lib/x86_64-linux-gnu/libpcre2-8.so* ${DEST}/usr/lib/x86_64-linux-gnu/ diff --git a/tools/test-setup.sh b/tools/test-setup.sh index 6330972d..96610a98 100755 --- a/tools/test-setup.sh +++ b/tools/test-setup.sh @@ -14,7 +14,7 @@ case $(uname -s) in if LSB_RELEASE=$(which lsb_release); then OS=$($LSB_RELEASE -s -c) else - # No lsb-release, trya hack or two + # No lsb-release, try a hack or two if which dpkg 1>/dev/null; then OS=debian elif which yum 1>/dev/null || which dnf 1>/dev/null; then
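Finally, a condensed view of the mechanism at the heart of tools/csi-deps.sh above: scrape `ldd` for a binary's shared-object dependencies and mirror each one under /dest, preserving the directory layout (single-binary version for illustration; paths as in the Debian base image):

```sh
#!/bin/sh
DEST=/dest
PROG=/sbin/mke2fs

# ldd prints "libX.so => /path/libX.so (0x...)" lines; field 3 is the resolved path.
/usr/bin/ldd "$PROG" | /bin/grep '=>' | /usr/bin/awk '{ print $3 }' |
while read -r lib; do
    mkdir -p "${DEST}$(dirname "$lib")"
    cp -Lv "$lib" "${DEST}${lib}"
done
```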