diff --git a/.dockerignore b/.dockerignore
index c055f7b3fb..2b1af21239 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -1,6 +1,9 @@
+# More info: https://docs.docker.com/engine/reference/builder/#dockerignore-file
+# Ignore build and test binaries.
+bin/
+
 *.md
 blogs
-bin
 .vscode
 .github
 .devcontainer
diff --git a/.gitignore b/.gitignore
index 4b7ccf15d1..a6f958bdc8 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,12 +1,35 @@
-bin/
-.vscode/
-.idea/
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+bin/*
+Dockerfile.cross
+
+# Test binary, built with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Go workspace file
+go.work
+
+# Kubernetes Generated files - skip generated files, except for vendored files
+!vendor/**/zz_generated.*
+
+# editor and IDE paraphernalia
+.idea
+.vscode
+*.swp
+*.swo
+*~
+
+# OADP
 tests/e2e/e2e.test
-cover.out
-.DS_Store
 tests/e2e/templates/*.yaml
+.DS_Store
+test-upgrade/
 # `operator-sdk run bundle` caches files in this directory
 cache/
-*.test
-
-test-upgrade/
diff --git a/Dockerfile b/Dockerfile
index 986a6ae2f8..d2d936e67c 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -13,13 +13,17 @@ COPY go.sum go.sum
 RUN go mod download
 
 # Copy the go source
-COPY main.go main.go
+COPY cmd/main.go cmd/main.go
 COPY api/ api/
+COPY internal/controller/ internal/controller/
 COPY pkg/ pkg/
-COPY controllers/ controllers/
 
 # Build
-RUN CGO_ENABLED=0 GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH} go build -mod=mod -a -o /go/src/manager main.go
+# GOARCH is left without a default value so that the binary is built for the
+# host platform where the command was invoked. For example, when make docker-build
+# runs on an Apple Silicon (arm64) machine the docker BUILDPLATFORM arg will be
+# linux/arm64, while on Apple x86 it will be linux/amd64. Leaving it empty
+# therefore ensures that the container and the binary shipped in it share the
+# same platform.
+RUN CGO_ENABLED=0 GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH} go build -mod=mod -a -o /go/src/manager cmd/main.go
 
 # Use distroless as minimal base image to package the manager binary
 # Refer to https://github.com/GoogleContainerTools/distroless for more details
@@ -27,4 +31,5 @@ FROM registry.access.redhat.com/ubi9-minimal
 WORKDIR /
 COPY --from=builder /go/src/manager .
 USER 65532:65532
+
 ENTRYPOINT ["/manager"]
diff --git a/Makefile b/Makefile
index f93536036a..f0434573e2 100644
--- a/Makefile
+++ b/Makefile
@@ -1,96 +1,3 @@
-OADP_TEST_NAMESPACE ?= openshift-adp
-
-# CONFIGS FOR CLOUD
-# bsl / blob storage cred dir
-OADP_CRED_DIR ?= /var/run/oadp-credentials
-# vsl / volume/cluster cred dir
-CLUSTER_PROFILE_DIR ?= /Users/drajds/.aws
-
-# bsl cred file
-OADP_CRED_FILE ?= ${OADP_CRED_DIR}/new-aws-credentials
-# vsl cred file
-CI_CRED_FILE ?= ${CLUSTER_PROFILE_DIR}/.awscred
-
-# aws configs - default
-BSL_REGION ?= us-east-1
-VSL_REGION ?= ${LEASED_RESOURCE}
-BSL_AWS_PROFILE ?= default
-# BSL_AWS_PROFILE ?= migration-engineering
-
-# bucket file
-OADP_BUCKET_FILE ?= ${OADP_CRED_DIR}/new-velero-bucket-name
-# azure cluster resource file - only in CI
-AZURE_RESOURCE_FILE ?= /var/run/secrets/ci.openshift.io/multi-stage/metadata.json
-AZURE_CI_JSON_CRED_FILE ?= ${CLUSTER_PROFILE_DIR}/osServicePrincipal.json
-AZURE_OADP_JSON_CRED_FILE ?= ${OADP_CRED_DIR}/azure-credentials
-
-# CONTAINER_TOOL defines the container tool to be used for building images.
-# By default, this Makefile uses docker, as the target commands have been tested primarily with it.
-# However, if docker is not available, the Makefile will attempt to use podman if it's installed.
-# You may also set CONTAINER_TOOL directly as an environment variable to specify a different tool.
-# If neither docker nor podman is found, or if the specified tool is unavailable, the Makefile will exit with an error.
-
-# Set CONTAINER_TOOL to Docker or Podman if not already defined by the user
-CONTAINER_TOOL ?= $(shell \
-	if command -v docker >/dev/null 2>&1; then echo docker; \
-	elif command -v podman >/dev/null 2>&1; then echo podman; \
-	else echo ""; \
-	fi \
-)
-ifeq ($(shell command -v $(CONTAINER_TOOL) >/dev/null 2>&1 && echo found),)
-	$(error The selected container tool '$(CONTAINER_TOOL)' is not available on this system. Please install it or choose a different tool.)
-endif
-$(info Using Container Tool: $(CONTAINER_TOOL))
-
-# Misc
-OPENSHIFT_CI ?= true
-VELERO_INSTANCE_NAME ?= velero-test
-ARTIFACT_DIR ?= /tmp
-OC_CLI = $(shell which oc)
-TEST_VIRT ?= false
-KVM_EMULATION ?= true
-TEST_UPGRADE ?= false
-HCO_UPSTREAM ?= false
-
-ifdef CLI_DIR
-	OC_CLI = ${CLI_DIR}/oc
-endif
-# makes CLUSTER_TYPE quieter when unauthenticated
-CLUSTER_TYPE_SHELL := $(shell $(OC_CLI) get infrastructures cluster -o jsonpath='{.status.platform}' 2> /dev/null | tr A-Z a-z)
-CLUSTER_TYPE ?= $(CLUSTER_TYPE_SHELL)
-CLUSTER_OS = $(shell $(OC_CLI) get node -o jsonpath='{.items[0].status.nodeInfo.operatingSystem}' 2> /dev/null)
-CLUSTER_ARCH = $(shell $(OC_CLI) get node -o jsonpath='{.items[0].status.nodeInfo.architecture}' 2> /dev/null)
-
-ifeq ($(CLUSTER_TYPE), gcp)
-	CI_CRED_FILE = ${CLUSTER_PROFILE_DIR}/gce.json
-	OADP_CRED_FILE = ${OADP_CRED_DIR}/gcp-credentials
-	OADP_BUCKET_FILE = ${OADP_CRED_DIR}/gcp-velero-bucket-name
-endif
-
-ifeq ($(CLUSTER_TYPE), azure4)
-	CLUSTER_TYPE = azure
-endif
-
-ifeq ($(CLUSTER_TYPE), azure)
-	CI_CRED_FILE = /tmp/ci-azure-credentials
-	OADP_CRED_FILE = /tmp/oadp-azure-credentials
-	OADP_BUCKET_FILE = ${OADP_CRED_DIR}/azure-velero-bucket-name
-endif
-
-VELERO_PLUGIN ?= ${CLUSTER_TYPE}
-
-ifeq ($(CLUSTER_TYPE), ibmcloud)
-	VELERO_PLUGIN = aws
-endif
-
-ifeq ($(CLUSTER_TYPE), openstack)
-	KVM_EMULATION = false
-endif
-
-# Kubernetes version from OpenShift 4.16.x https://openshift-release.apps.ci.l2s4.p1.openshiftapps.com/#4-stable
-# ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary.
-ENVTEST_K8S_VERSION = 1.29
-
 # VERSION defines the project version for the bundle.
 # Update this value when you upgrade the version of your project.
 # To re-generate a bundle for another specific version without changing the standard setup, you can:
@@ -131,9 +38,25 @@ IMAGE_TAG_BASE ?= openshift.io/oadp-operator
 # You can use it as an arg. (E.g make bundle-build BUNDLE_IMG=<some-registry>/<project-name-bundle>:<tag>)
 BUNDLE_IMG ?= $(IMAGE_TAG_BASE)-bundle:v$(VERSION)
 
+# BUNDLE_GEN_FLAGS are the flags passed to the operator-sdk generate bundle command
+BUNDLE_GEN_FLAGS ?= -q --extra-service-accounts "velero,non-admin-controller" --overwrite --version $(VERSION) $(BUNDLE_METADATA_OPTS)
+
+# USE_IMAGE_DIGESTS defines if images are resolved via tags or digests
+# You can enable this value if you would like to use SHA Based Digests
+# To enable it, set the flag to true
+USE_IMAGE_DIGESTS ?= false
+ifeq ($(USE_IMAGE_DIGESTS), true)
+	BUNDLE_GEN_FLAGS += --use-image-digests
+endif
+
+# Set the Operator SDK version to use. By default, what is installed on the system is used.
+# This is useful for CI or a project that needs a specific version of the operator-sdk toolkit.
+OPERATOR_SDK_VERSION ?= v1.34.2
+
 # Image URL to use all building/pushing image targets
 IMG ?= quay.io/konveyor/oadp-operator:latest
-CRD_OPTIONS ?= "crd"
+# ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary.
+ENVTEST_K8S_VERSION = 1.29 # Kubernetes version from OpenShift 4.16.x https://openshift-release.apps.ci.l2s4.p1.openshiftapps.com/#4-stable
 
 # Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set)
 ifeq (,$(shell go env GOBIN))
@@ -142,19 +65,37 @@ else
 GOBIN=$(shell go env GOBIN)
 endif
 
+# CONTAINER_TOOL defines the container tool to be used for building images.
+# By default, this Makefile uses docker, as the target commands have been tested primarily with it.
+# However, if docker is not available, the Makefile will attempt to use podman if it's installed.
+# You may also set CONTAINER_TOOL directly as an environment variable to specify a different tool.
+# If neither docker nor podman is found, or if the specified tool is unavailable, the Makefile will exit with an error.
+
+# Set CONTAINER_TOOL to Docker or Podman if not already defined by the user
+CONTAINER_TOOL ?= $(shell \
+	if command -v docker >/dev/null 2>&1; then echo docker; \
+	elif command -v podman >/dev/null 2>&1; then echo podman; \
+	else echo ""; \
+	fi \
+)
+ifeq ($(shell command -v $(CONTAINER_TOOL) >/dev/null 2>&1 && echo found),)
+	$(error The selected container tool '$(CONTAINER_TOOL)' is not available on this system. Please install it or choose a different tool.)
+endif
+$(info Using Container Tool: $(CONTAINER_TOOL))
+
 # Setting SHELL to bash allows bash commands to be executed by recipes.
-# This is a requirement for 'setup-envtest.sh' in the test target.
 # Options are set to exit when a recipe line exits non-zero or a piped command fails.
 SHELL = /usr/bin/env bash -o pipefail
 .SHELLFLAGS = -ec
 
+.PHONY: all
 all: build
 
 ##@ General
 
 # The help target prints out all targets with their descriptions organized
 # beneath their categories. The categories are represented by '##@' and the
-# target descriptions by '##'. The awk commands is responsible for reading the
+# target descriptions by '##'. The awk command is responsible for reading the
 # entire set of makefiles included in this invocation, looking for lines of the
 # file as xyz: ## something, and then pretty-format the target and help. Then,
 # if there's a line with ##@ something, that gets pretty-printed as a category.
@@ -163,39 +104,28 @@ all: build
 # More info on the awk command:
 # http://linuxcommand.org/lc3_adv_awk.php
 
+.PHONY: help
 help: ## Display this help.
	@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n  make \033[36m<target>\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf "  \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
 
 ##@ Development
 
+.PHONY: manifests
 manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects.
-	# Commenting out default which overwrites scoped config/rbac/role.yaml
-	# GOFLAGS="-mod=mod" $(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role webhook paths="./..." output:crd:artifacts:config=config/crd/bases
-	GOFLAGS="-mod=mod" $(CONTROLLER_GEN) $(CRD_OPTIONS) webhook paths="./..." output:crd:artifacts:config=config/crd/bases
-	# run make nullables to generate nullable fields after all manifest changesin dependent targets.
-	# It's not included here because `test` and `bundle` target have different yaml styes.
-	# To keep dpa CRD the same, nullables have been added to test and bundle target separately.
+	GOFLAGS="-mod=mod" $(CONTROLLER_GEN) rbac:roleName=manager-role crd webhook paths="./..." output:crd:artifacts:config=config/crd/bases
 
+.PHONY: generate
 generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations.
	GOFLAGS="-mod=mod" $(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..."
 
+.PHONY: fmt
 fmt: ## Run go fmt against code.
	go fmt -mod=mod ./...
 
+.PHONY: vet
 vet: ## Run go vet against code.
	go vet -mod=mod ./...
 
-ENVTEST := $(shell pwd)/bin/setup-envtest
-ENVTESTPATH = $(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(shell pwd)/bin -p path)
-ifeq ($(shell $(ENVTEST) list | grep $(ENVTEST_K8S_VERSION)),)
-	ENVTESTPATH = $(shell $(ENVTEST) --arch=amd64 use $(ENVTEST_K8S_VERSION) -p path)
-endif
-$(ENVTEST): ## Download envtest-setup locally if necessary.
-	$(call go-install-tool,$(ENVTEST),sigs.k8s.io/controller-runtime/tools/setup-envtest@v0.0.0-20240320141353-395cfc7486e6)
-
-.PHONY: envtest
-envtest: $(ENVTEST)
-
 # If test results in prow are different, it is because the environment used.
 # You can simulate their env by running
 # docker run --platform linux/amd64 -w $PWD -v $PWD:$PWD -it registry.ci.openshift.org/ocp/builder:rhel-8-golang-1.20-openshift-4.14 sh -c "make test"
@@ -204,32 +134,36 @@ envtest: $(ENVTEST)
 # If bin/ contains binaries of different arch, you may remove them so the container can install their arch.
 .PHONY: test
 test: vet envtest ## Run unit tests; run Go linters checks; check if api and bundle folders are up to date; and check if go dependencies are valid
-	KUBEBUILDER_ASSETS="$(ENVTESTPATH)" go test -mod=mod $(shell go list -mod=mod ./... | grep -v /tests/e2e) -coverprofile cover.out
+	KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" go test -mod=mod $(shell go list -mod=mod ./... | grep -v /tests/e2e) -coverprofile cover.out
	@make lint
	@make api-isupdated
	@make bundle-isupdated
	@make check-go-dependencies
 
-.PHONY: api-isupdated
-api-isupdated: TEMP:= $(shell mktemp -d)
-api-isupdated:
-	@cp -r ./ $(TEMP) && cd $(TEMP) && make generate && cd - && diff -ruN api/ $(TEMP)/api/ && echo "api is up to date" || (echo "api is out of date, run 'make generate' to update" && exit 1)
-	@chmod -R 777 $(TEMP) && rm -rf $(TEMP)
+GOLANGCI_LINT = $(shell pwd)/bin/golangci-lint
+GOLANGCI_LINT_VERSION ?= v1.55.2
+.PHONY: golangci-lint
+golangci-lint: $(GOLANGCI_LINT) ## Download golangci-lint locally if necessary.
+$(GOLANGCI_LINT): $(LOCALBIN)
+	$(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/cmd/golangci-lint@$(GOLANGCI_LINT_VERSION))
 
-.PHONY: bundle-isupdated
-bundle-isupdated: TEMP:= $(shell mktemp -d)
-bundle-isupdated: VERSION:= $(DEFAULT_VERSION) #prevent VERSION overrides from https://github.com/openshift/release/blob/f1a388ab05d493b6d95b8908e28687b4c0679498/clusters/build-clusters/01_cluster/ci/_origin-release-build/golang-1.19/Dockerfile#LL9C1-L9C1
-bundle-isupdated:
-	@cp -r ./ $(TEMP) && cd $(TEMP) && make bundle && cd - && diff -ruN bundle/ $(TEMP)/bundle/ && echo "bundle is up to date" || (echo "bundle is out of date, run 'make bundle' to update" && exit 1)
-	@chmod -R 777 $(TEMP) && rm -rf $(TEMP)
+.PHONY: lint
+lint: golangci-lint ## Run Go linters checks against all project's Go files.
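+# As an illustrative sketch (not an upstream target), the checks that `make test`
+# chains together can also be run one at a time while iterating, assuming the
+# tools were already fetched into ./bin by the targets in this file:
+#
+#   ./bin/golangci-lint run
+#   KUBEBUILDER_ASSETS="$(./bin/setup-envtest use 1.29 --bin-dir ./bin -p path)" \
+#     go test -mod=mod ./... -coverprofile cover.out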
+	$(GOLANGCI_LINT) run
+
+.PHONY: lint-fix
+lint-fix: golangci-lint ## Fix Go linters issues.
+	$(GOLANGCI_LINT) run --fix
 
 ##@ Build
 
-build: generate fmt vet ## Build manager binary.
-	go build -o bin/manager main.go
+.PHONY: build
+build: manifests generate fmt vet ## Build manager binary.
+	go build -o bin/manager cmd/main.go
 
+.PHONY: run
 run: manifests generate fmt vet ## Run a controller from your host.
-	go run ./main.go
+	go run ./cmd/main.go
 
 # If using podman machine, and host platform is not linux/amd64 run
 # - podman machine ssh sudo rpm-ostree install qemu-user-static && sudo systemctl reboot
@@ -239,50 +173,186 @@ DOCKER_BUILD_ARGS ?= --platform=linux/amd64
 ifneq ($(CLUSTER_TYPE),)
	DOCKER_BUILD_ARGS = --platform=$(CLUSTER_OS)/$(CLUSTER_ARCH)
 endif
+.PHONY: docker-build
 docker-build: ## Build docker image with the manager.
	$(CONTAINER_TOOL) build --load -t $(IMG) . $(DOCKER_BUILD_ARGS)
 
+.PHONY: docker-push
 docker-push: ## Push docker image with the manager.
	$(CONTAINER_TOOL) push ${IMG}
 
+# PLATFORMS defines the target platforms for the manager image to be built for, to provide support for multiple
+# architectures. (e.g. make docker-buildx IMG=myregistry/myoperator:0.0.1). To use this option you need to:
+# - be able to use docker buildx. More info: https://docs.docker.com/build/buildx/
+# - have enabled BuildKit. More info: https://docs.docker.com/develop/develop-images/build_enhancements/
+# - be able to push the image to your registry (i.e. if you do not set a valid value via IMG=<myregistry/image:tag> then the export will fail)
+# To adequately provide solutions that are compatible with multiple platforms, you should consider using this option.
+PLATFORMS ?= linux/arm64,linux/amd64,linux/s390x,linux/ppc64le
+.PHONY: docker-buildx
+docker-buildx: ## Build and push docker image for the manager for cross-platform support
+	# copy existing Dockerfile and insert --platform=${BUILDPLATFORM} into Dockerfile.cross, and preserve the original Dockerfile
+	sed -e '1 s/\(^FROM\)/FROM --platform=\$$\{BUILDPLATFORM\}/; t' -e ' 1,// s//FROM --platform=\$$\{BUILDPLATFORM\}/' Dockerfile > Dockerfile.cross
+	- $(CONTAINER_TOOL) buildx create --name project-v3-builder
+	$(CONTAINER_TOOL) buildx use project-v3-builder
+	- $(CONTAINER_TOOL) buildx build --push --platform=$(PLATFORMS) --tag ${IMG} -f Dockerfile.cross .
+	- $(CONTAINER_TOOL) buildx rm project-v3-builder
+	rm Dockerfile.cross
+
 ##@ Deployment
 
+ifndef ignore-not-found
+	ignore-not-found = false
+endif
+
+.PHONY: install
 install: manifests kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config.
-	$(KUSTOMIZE) build config/crd | kubectl apply -f -
-
-uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config.
-	$(KUSTOMIZE) build config/crd | kubectl delete -f -
-
-VELERO_ROLE_TMP?=/tmp/oadp-make-deploy
-velero-role-tmp: kustomize
-	mkdir -p $(VELERO_ROLE_TMP)
-	sed -e 's/namespace: system/namespace: $(OADP_TEST_NAMESPACE)/g' config/velero/velero-service_account.yaml > $(VELERO_ROLE_TMP)/velero-service_account.yaml
-	sed -e 's/namespace: system/namespace: $(OADP_TEST_NAMESPACE)/g' config/velero/velero-role.yaml > $(VELERO_ROLE_TMP)/velero-role.yaml
-	sed -e 's/namespace: system/namespace: $(OADP_TEST_NAMESPACE)/g' config/velero/velero-role_binding.yaml > $(VELERO_ROLE_TMP)/velero-role_binding.yaml
-velero-role-tmp-cleanup:
-	rm -rf $(VELERO_ROLE_TMP)
-apply-velerosa-role: velero-role-tmp
-	kubectl apply -f $(VELERO_ROLE_TMP)/velero-service_account.yaml
-	kubectl apply -f $(VELERO_ROLE_TMP)/velero-role.yaml
-	kubectl apply -f $(VELERO_ROLE_TMP)/velero-role_binding.yaml
-	VELERO_ROLE_TMP=$(VELERO_ROLE_TMP) make velero-role-tmp-cleanup
-unapply-velerosa-role: velero-role-tmp
-	kubectl delete -f $(VELERO_ROLE_TMP)/velero-service_account.yaml
-	kubectl delete -f $(VELERO_ROLE_TMP)/velero-role.yaml
-	kubectl delete -f $(VELERO_ROLE_TMP)/velero-role_binding.yaml
-	VELERO_ROLE_TMP=$(VELERO_ROLE_TMP) make velero-role-tmp-cleanup
-
-build-deploy: THIS_IMAGE=ttl.sh/oadp-operator-$(shell git rev-parse --short HEAD):1h # Set target specific variable
-build-deploy: ## Build current branch image and deploy controller to the k8s cluster specified in ~/.kube/config.
-	IMG=$(THIS_IMAGE) make docker-build docker-push deploy
-
-CONTROLLER_GEN = $(shell pwd)/bin/controller-gen
-controller-gen: ## Download controller-gen locally if necessary.
-	$(call go-install-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@v0.14.0)
-
-KUSTOMIZE = $(shell pwd)/bin/kustomize
-kustomize: ## Download kustomize locally if necessary.
-	$(call go-install-tool,$(KUSTOMIZE),sigs.k8s.io/kustomize/kustomize/v4@v4.5.5)
+	$(KUSTOMIZE) build config/crd | $(KUBECTL) apply -f -
+
+.PHONY: uninstall
+uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
+	$(KUSTOMIZE) build config/crd | $(KUBECTL) delete --ignore-not-found=$(ignore-not-found) -f -
+
+.PHONY: deploy
+deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config.
+	cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
+	$(KUSTOMIZE) build config/default | $(KUBECTL) apply -f -
+
+.PHONY: undeploy
+undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
+	$(KUSTOMIZE) build config/default | $(KUBECTL) delete --ignore-not-found=$(ignore-not-found) -f -
+
+##@ Build Dependencies
+
+## Location to install dependencies to
+LOCALBIN ?= $(shell pwd)/bin
+$(LOCALBIN):
+	mkdir -p $(LOCALBIN)
+
+## Tool Binaries
+KUBECTL ?= kubectl
+KUSTOMIZE ?= $(LOCALBIN)/kustomize
+CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen
+ENVTEST ?= $(LOCALBIN)/setup-envtest
+
+## Tool Versions
+KUSTOMIZE_VERSION ?= v5.2.1
+CONTROLLER_TOOLS_VERSION ?= v0.14.0
+
+.PHONY: kustomize
+kustomize: $(KUSTOMIZE) ## Download kustomize locally if necessary. If wrong version is installed, it will be removed before downloading.
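+# A typical flow against the cluster from ~/.kube/config, using the Deployment
+# targets above (illustrative sketch; the image reference is a placeholder):
+#
+#   make install
+#   make deploy IMG=quay.io/my-org/oadp-operator:dev
+#   make undeploy ignore-not-found=true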
+$(KUSTOMIZE): $(LOCALBIN)
+	$(call go-install-tool,$(KUSTOMIZE),sigs.k8s.io/kustomize/kustomize/v5@$(KUSTOMIZE_VERSION))
+
+.PHONY: controller-gen
+controller-gen: $(CONTROLLER_GEN) ## Download controller-gen locally if necessary. If wrong version is installed, it will be overwritten.
+$(CONTROLLER_GEN): $(LOCALBIN)
+	$(call go-install-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@$(CONTROLLER_TOOLS_VERSION))
+
+.PHONY: envtest
+envtest: $(ENVTEST) ## Download envtest-setup locally if necessary.
+$(ENVTEST): $(LOCALBIN)
+	$(call go-install-tool,$(ENVTEST),sigs.k8s.io/controller-runtime/tools/setup-envtest@v0.0.0-20240320141353-395cfc7486e6)
+
+.PHONY: operator-sdk
+OPERATOR_SDK ?= $(LOCALBIN)/operator-sdk
+operator-sdk: ## Download operator-sdk locally if necessary.
+ifeq (,$(wildcard $(OPERATOR_SDK)))
+ifeq (, $(shell which operator-sdk 2>/dev/null))
+	@{ \
+	set -e ;\
+	mkdir -p $(dir $(OPERATOR_SDK)) ;\
+	OS=$(shell go env GOOS) && ARCH=$(shell go env GOARCH) && \
+	curl -sSLo $(OPERATOR_SDK) https://github.com/operator-framework/operator-sdk/releases/download/$(OPERATOR_SDK_VERSION)/operator-sdk_$${OS}_$${ARCH} ;\
+	chmod +x $(OPERATOR_SDK) ;\
+	}
+else
+OPERATOR_SDK = $(shell which operator-sdk)
+endif
+endif
+
+.PHONY: bundle
+bundle: manifests kustomize operator-sdk ## Generate bundle manifests and metadata, then validate generated files.
+	GOFLAGS="-mod=mod" $(OPERATOR_SDK) generate kustomize manifests -q
+	cd config/manager && GOFLAGS="-mod=mod" $(KUSTOMIZE) edit set image controller=$(IMG)
+	GOFLAGS="-mod=mod" $(KUSTOMIZE) build config/manifests | GOFLAGS="-mod=mod" $(OPERATOR_SDK) generate bundle $(BUNDLE_GEN_FLAGS)
+	@make nullables
+	# Copy updated bundle.Dockerfile to CI's Dockerfile.bundle
+	# TODO: update CI to use generated one
+	cp bundle.Dockerfile build/Dockerfile.bundle
+	GOFLAGS="-mod=mod" $(OPERATOR_SDK) bundle validate ./bundle
+	sed -e 's/ createdAt: .*/$(shell grep -I '^ createdAt: ' bundle/manifests/oadp-operator.clusterserviceversion.yaml)/' bundle/manifests/oadp-operator.clusterserviceversion.yaml > bundle/manifests/oadp-operator.clusterserviceversion.yaml.tmp
+	mv bundle/manifests/oadp-operator.clusterserviceversion.yaml.tmp bundle/manifests/oadp-operator.clusterserviceversion.yaml
+
+.PHONY: bundle-build
+bundle-build: ## Build the bundle image.
+	$(CONTAINER_TOOL) build --load -f bundle.Dockerfile -t $(BUNDLE_IMG) . $(DOCKER_BUILD_ARGS)
+
+.PHONY: bundle-push
+bundle-push: ## Push the bundle image.
+	$(MAKE) docker-push IMG=$(BUNDLE_IMG)
+
+.PHONY: opm
+OPM = $(LOCALBIN)/opm
+opm: ## Download opm locally if necessary.
+ifeq (,$(wildcard $(OPM)))
+ifeq (,$(shell which opm 2>/dev/null))
+	@{ \
+	set -e ;\
+	mkdir -p $(dir $(OPM)) ;\
+	OS=$(shell go env GOOS) && ARCH=$(shell go env GOARCH) && \
+	curl -sSLo $(OPM) https://github.com/operator-framework/operator-registry/releases/download/v1.23.0/$${OS}-$${ARCH}-opm ;\
+	chmod +x $(OPM) ;\
+	}
+else
+OPM = $(shell which opm)
+endif
+endif
+
+# A comma-separated list of bundle images (e.g. make catalog-build BUNDLE_IMGS=example.com/operator-bundle:v0.1.0,example.com/operator-bundle:v0.2.0).
+# These images MUST exist in a registry and be pull-able.
+BUNDLE_IMGS ?= $(BUNDLE_IMG)
+
+# The image tag given to the resulting catalog image (e.g. make catalog-build CATALOG_IMG=example.com/operator-catalog:v0.2.0).
+CATALOG_IMG ?= $(IMAGE_TAG_BASE)-catalog:v$(VERSION)
+
+# Set CATALOG_BASE_IMG to an existing catalog image tag to add $BUNDLE_IMGS to that image.
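+# For example, to append the freshly built bundle to an existing catalog and
+# push the result (illustrative sketch; all image references are placeholders):
+#
+#   make catalog-build CATALOG_BASE_IMG=quay.io/my-org/oadp-catalog:v1.4 \
+#     BUNDLE_IMGS=quay.io/my-org/oadp-bundle:v1.5 \
+#     CATALOG_IMG=quay.io/my-org/oadp-catalog:v1.5
+#   make catalog-push CATALOG_IMG=quay.io/my-org/oadp-catalog:v1.5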
+ifneq ($(origin CATALOG_BASE_IMG), undefined)
+FROM_INDEX_OPT := --from-index $(CATALOG_BASE_IMG)
+endif
+
+# Build a catalog image by adding bundle images to an empty catalog using the operator package manager tool, 'opm'.
+# This recipe invokes 'opm' in 'semver' bundle add mode. For more information on add modes, see:
+# https://github.com/operator-framework/community-operators/blob/7f1438c/docs/packaging-operator.md#updating-your-existing-operator
+.PHONY: catalog-build
+catalog-build: opm ## Build a catalog image.
+	$(OPM) index add --container-tool $(CONTAINER_TOOL) --mode semver --tag $(CATALOG_IMG) --bundles $(BUNDLE_IMGS) $(FROM_INDEX_OPT)
+
+# Push the catalog image.
+.PHONY: catalog-push
+catalog-push: ## Push a catalog image.
+	$(MAKE) docker-push IMG=$(CATALOG_IMG)
+
+##@ oadp specifics
+
+.PHONY: api-isupdated
+api-isupdated: TEMP:= $(shell mktemp -d)
+api-isupdated:
+	@cp -r ./ $(TEMP) && cd $(TEMP) && make generate && cd - && diff -ruN api/ $(TEMP)/api/ && echo "api is up to date" || (echo "api is out of date, run 'make generate' to update" && exit 1)
+	@chmod -R 777 $(TEMP) && rm -rf $(TEMP)
+
+.PHONY: bundle-isupdated
+bundle-isupdated: TEMP:= $(shell mktemp -d)
+bundle-isupdated: VERSION:= $(DEFAULT_VERSION) #prevent VERSION overrides from https://github.com/openshift/release/blob/f1a388ab05d493b6d95b8908e28687b4c0679498/clusters/build-clusters/01_cluster/ci/_origin-release-build/golang-1.19/Dockerfile#LL9C1-L9C1
+bundle-isupdated:
+	@cp -r ./ $(TEMP) && cd $(TEMP) && make bundle && cd - && diff -ruN bundle/ $(TEMP)/bundle/ && echo "bundle is up to date" || (echo "bundle is out of date, run 'make bundle' to update" && exit 1)
+	@chmod -R 777 $(TEMP) && rm -rf $(TEMP)
+
+.PHONY: check-go-dependencies
+check-go-dependencies: TEMP:= $(shell mktemp -d)
+check-go-dependencies:
+	@cp -r ./ $(TEMP) && cd $(TEMP) && go mod tidy && cd - && diff -ruN ./ $(TEMP)/ && echo "go dependencies checked" || (echo "go dependencies are out of date, run 'go mod tidy' to update" && exit 1)
+	@chmod -R 777 $(TEMP) && rm -rf $(TEMP)
+	go mod verify
 
 # Codecov OS String for use in download url
 ifeq ($(OS),Windows_NT)
@@ -316,36 +386,14 @@ rm -rf $$TMP_DIR ;\
 }
 endef
 
-YQ = $(shell pwd)/bin/yq
+YQ = $(LOCALBIN)/yq
 yq: ## Download yq locally if necessary.
	# 4.28.1 is latest with go 1.17 go.mod
	$(call go-install-tool,$(YQ),github.com/mikefarah/yq/v4@v4.28.1)
 
-OPERATOR_SDK = $(shell pwd)/bin/operator-sdk
-operator-sdk:
-	# Download operator-sdk locally if does not exist
-	if [ ! -f $(OPERATOR_SDK) ]; then \
-		mkdir -p bin ;\
-		curl -Lo $(OPERATOR_SDK) https://github.com/operator-framework/operator-sdk/releases/download/v1.34.2/operator-sdk_$(shell go env GOOS)_$(shell go env GOARCH) ; \
-		chmod +x $(OPERATOR_SDK); \
-	fi
-
-.PHONY: bundle
-bundle: manifests kustomize operator-sdk ## Generate bundle manifests and metadata, then validate generated files.
-	GOFLAGS="-mod=mod" $(OPERATOR_SDK) generate kustomize manifests -q
-	cd config/manager && GOFLAGS="-mod=mod" $(KUSTOMIZE) edit set image controller=$(IMG)
-	GOFLAGS="-mod=mod" $(KUSTOMIZE) build config/manifests | GOFLAGS="-mod=mod" $(OPERATOR_SDK) generate bundle -q --extra-service-accounts "velero,non-admin-controller" --overwrite --version $(VERSION) $(BUNDLE_METADATA_OPTS)
-	@make nullables
-	# Copy updated bundle.Dockerfile to CI's Dockerfile.bundle
-	# TODO: update CI to use generated one
-	cp bundle.Dockerfile build/Dockerfile.bundle
-	GOFLAGS="-mod=mod" $(OPERATOR_SDK) bundle validate ./bundle
-	sed -e 's/ createdAt: .*/$(shell grep -I '^ createdAt: ' bundle/manifests/oadp-operator.clusterserviceversion.yaml)/' bundle/manifests/oadp-operator.clusterserviceversion.yaml > bundle/manifests/oadp-operator.clusterserviceversion.yaml.tmp
-	mv bundle/manifests/oadp-operator.clusterserviceversion.yaml.tmp bundle/manifests/oadp-operator.clusterserviceversion.yaml
-
 .PHONY: nullables
-nullables:
-	@make nullable-crds-bundle nullable-crds-config # patch nullables in CRDs
+nullables: ## patch nullables in CRDs
+	@make nullable-crds-bundle nullable-crds-config
 
 .PHONY: nullable-crds-bundle
 nullable-crds-bundle: DPA_SPEC_CONFIG_PROP = .spec.versions.0.schema.openAPIV3Schema.properties.spec.properties.configuration.properties
@@ -382,20 +430,32 @@ nullable-crds-config: DPA_CRD_YAML ?= config/crd/bases/oadp.openshift.io_datapro
 nullable-crds-config:
	@ DPA_CRD_YAML=$(DPA_CRD_YAML) make nullable-crds-bundle
 
-.PHONY: bundle-build
-bundle-build: ## Build the bundle image.
-	$(CONTAINER_TOOL) build --load -f bundle.Dockerfile -t $(BUNDLE_IMG) . $(DOCKER_BUILD_ARGS)
+OC_CLI ?= $(shell which oc)
 
-.PHONY: bundle-push
-bundle-push: ## Push the bundle image.
-	$(MAKE) docker-push IMG=$(BUNDLE_IMG)
+# makes CLUSTER_TYPE quieter when unauthenticated
+CLUSTER_TYPE_SHELL := $(shell $(OC_CLI) get infrastructures cluster -o jsonpath='{.status.platform}' 2> /dev/null | tr A-Z a-z)
+CLUSTER_TYPE ?= $(CLUSTER_TYPE_SHELL)
+CLUSTER_OS = $(shell $(OC_CLI) get node -o jsonpath='{.items[0].status.nodeInfo.operatingSystem}' 2> /dev/null)
+CLUSTER_ARCH = $(shell $(OC_CLI) get node -o jsonpath='{.items[0].status.nodeInfo.architecture}' 2> /dev/null)
+
+.PHONY: login-required
+login-required:
+ifeq ($(CLUSTER_TYPE),)
+	$(error You must be logged in to a cluster to run this command)
+else
+	$(info $$CLUSTER_TYPE is [${CLUSTER_TYPE}])
+endif
 
 GIT_REV:=$(shell git rev-parse --short HEAD)
+
+# Namespace to deploy OADP operator, used by Makefile commands
+OADP_TEST_NAMESPACE ?= openshift-adp
+
 .PHONY: deploy-olm
 deploy-olm: THIS_OPERATOR_IMAGE?=ttl.sh/oadp-operator-$(GIT_REV):1h # Set target specific variable
 deploy-olm: THIS_BUNDLE_IMAGE?=ttl.sh/oadp-operator-bundle-$(GIT_REV):1h # Set target specific variable
 deploy-olm: DEPLOY_TMP:=$(shell mktemp -d)/ # Set target specific variable
-deploy-olm: undeploy-olm ## Build current branch operator image, bundle image, push and install via OLM
+deploy-olm: undeploy-olm ## Build current branch operator image, bundle image, push and install via OLM. For more information, check docs/developer/install_from_source.md
	@echo "DEPLOY_TMP: $(DEPLOY_TMP)"
	# build and push operator and bundle image
	# use $(OPERATOR_SDK) to install bundle to authenticated cluster
@@ -411,52 +471,6 @@ undeploy-olm: login-required operator-sdk ## Uninstall current branch operator v
	$(OC_CLI) create namespace $(OADP_TEST_NAMESPACE) || true
	$(OPERATOR_SDK) cleanup oadp-operator --namespace $(OADP_TEST_NAMESPACE)
 
-.PHONY: opm
-OPM = ./bin/opm
-opm: ## Download opm locally if necessary.
-ifeq (,$(wildcard $(OPM)))
-ifeq (,$(shell which opm 2>/dev/null))
-	@{ \
-	set -e ;\
-	mkdir -p $(dir $(OPM)) ;\
-	OS=$(shell go env GOOS) && ARCH=$(shell go env GOARCH) && \
-	curl -sSLo $(OPM) https://github.com/operator-framework/operator-registry/releases/download/v1.15.1/$${OS}-$${ARCH}-opm ;\
-	chmod +x $(OPM) ;\
-	}
-else
-OPM = $(shell which opm)
-endif
-endif
-
-# A comma-separated list of bundle images (e.g. make catalog-build BUNDLE_IMGS=example.com/operator-bundle:v0.1.0,example.com/operator-bundle:v0.2.0).
-# These images MUST exist in a registry and be pull-able.
-BUNDLE_IMGS ?= $(BUNDLE_IMG)
-
-# The image tag given to the resulting catalog image (e.g. make catalog-build CATALOG_IMG=example.com/operator-catalog:v0.2.0).
-CATALOG_IMG ?= $(IMAGE_TAG_BASE)-catalog:v$(VERSION)
-
-# Set CATALOG_BASE_IMG to an existing catalog image tag to add $BUNDLE_IMGS to that image.
-ifneq ($(origin CATALOG_BASE_IMG), undefined)
-FROM_INDEX_OPT := --from-index $(CATALOG_BASE_IMG)
-endif
-
-# Build a catalog image by adding bundle images to an empty catalog using the operator package manager tool, 'opm'.
-# This recipe invokes 'opm' in 'semver' bundle add mode. For more information on add modes, see:
-# https://github.com/operator-framework/community-operators/blob/7f1438c/docs/packaging-operator.md#updating-your-existing-operator
-.PHONY: catalog-build
-catalog-build: opm ## Build a catalog image.
-	$(OPM) index add --container-tool $(CONTAINER_TOOL) --mode semver --tag $(CATALOG_IMG) --bundles $(BUNDLE_IMGS) $(FROM_INDEX_OPT)
-
-# For testing oeprator upgrade
-# opm upgrade
-catalog-build-replaces: opm ## Build a catalog image using replace mode
-	$(OPM) index add --container-tool $(CONTAINER_TOOL) --mode replaces --tag $(CATALOG_IMG) --bundles $(BUNDLE_IMGS) $(FROM_INDEX_OPT)
-
-# Push the catalog image.
-.PHONY: catalog-push
-catalog-push: ## Push a catalog image.
-	$(MAKE) docker-push IMG=$(CATALOG_IMG)
-
 # A valid Git branch from https://github.com/openshift/oadp-operator
 PREVIOUS_CHANNEL ?= oadp-1.4
 # Go version in go.mod in that branch
@@ -468,7 +482,7 @@ catalog-test-upgrade: PREVIOUS_BUNDLE_IMAGE?=ttl.sh/oadp-operator-previous-bundl
 catalog-test-upgrade: THIS_OPERATOR_IMAGE?=ttl.sh/oadp-operator-$(GIT_REV):1h
 catalog-test-upgrade: THIS_BUNDLE_IMAGE?=ttl.sh/oadp-operator-bundle-$(GIT_REV):1h
 catalog-test-upgrade: CATALOG_IMAGE?=ttl.sh/oadp-operator-catalog-$(GIT_REV):1h
-catalog-test-upgrade: opm login-required ## Prepare a catalog image with two channels: PREVIOUS_CHANNEL and from current branch
+catalog-test-upgrade: opm login-required ## Prepare a catalog image with two channels: PREVIOUS_CHANNEL and from current branch. For more information, check docs/developer/testing/test_oadp_version_upgrade.md
	mkdir test-upgrade && rsync -a --exclude=test-upgrade ./ test-upgrade/current
	git clone --depth=1 git@github.com:openshift/oadp-operator.git -b $(PREVIOUS_CHANNEL) test-upgrade/$(PREVIOUS_CHANNEL)
	cd test-upgrade/$(PREVIOUS_CHANNEL) && \
@@ -486,32 +500,64 @@ catalog-test-upgrade: opm login-required ## Prepare a catalog image with two cha
	echo -e "apiVersion: operators.coreos.com/v1alpha1\nkind: CatalogSource\nmetadata:\n  name: oadp-operator-catalog-test-upgrade\n  namespace: openshift-marketplace\nspec:\n  sourceType: grpc\n  image: $(CATALOG_IMAGE)" | $(OC_CLI) create -f -
	chmod -R 777 test-upgrade && rm -rf test-upgrade && $(CONTAINER_TOOL) image rm catalog-test-upgrade
 
-.PHONY: login-required
-login-required:
-ifeq ($(CLUSTER_TYPE),)
-	$(error You must be logged in to a cluster to run this command)
-else
-	$(info $$CLUSTER_TYPE is [${CLUSTER_TYPE}])
-endif
-
 .PHONY: install-ginkgo
-install-ginkgo: # Make sure ginkgo is in $GOPATH/bin
+install-ginkgo: ## Make sure ginkgo is in $GOPATH/bin
	go install -v -mod=mod github.com/onsi/ginkgo/v2/ginkgo
 
-OADP_BUCKET ?= $(shell cat $(OADP_BUCKET_FILE))
-TEST_FILTER = (($(shell echo '! aws && ! gcp && ! azure && ! ibmcloud' | \
-sed -r "s/[&]* [!] $(CLUSTER_TYPE)|[!] $(CLUSTER_TYPE) [&]*//")) || $(CLUSTER_TYPE))
-#TEST_FILTER := $(shell echo '! aws && ! gcp && ! azure' | sed -r "s/[&]* [!] $(CLUSTER_TYPE)|[!] $(CLUSTER_TYPE) [&]*//")
-ifeq ($(TEST_VIRT),true)
-	TEST_FILTER += && (virt)
-else
-	TEST_FILTER += && (! virt)
+# CONFIGS FOR CLOUD
+# bsl / blob storage cred dir
+OADP_CRED_DIR ?= /var/run/oadp-credentials
+# vsl / volume/cluster cred dir
+CLUSTER_PROFILE_DIR ?= /Users/drajds/.aws
+
+# bsl cred file
+OADP_CRED_FILE ?= ${OADP_CRED_DIR}/new-aws-credentials
+# vsl cred file
+CI_CRED_FILE ?= ${CLUSTER_PROFILE_DIR}/.awscred
+
+# aws configs - default
+BSL_REGION ?= us-east-1
+VSL_REGION ?= ${LEASED_RESOURCE}
+BSL_AWS_PROFILE ?= default
+# BSL_AWS_PROFILE ?= migration-engineering
+
+# bucket file
+OADP_BUCKET_FILE ?= ${OADP_CRED_DIR}/new-velero-bucket-name
+# azure cluster resource file - only in CI
+AZURE_RESOURCE_FILE ?= /var/run/secrets/ci.openshift.io/multi-stage/metadata.json
+AZURE_CI_JSON_CRED_FILE ?= ${CLUSTER_PROFILE_DIR}/osServicePrincipal.json
+AZURE_OADP_JSON_CRED_FILE ?= ${OADP_CRED_DIR}/azure-credentials
+
+ifeq ($(CLUSTER_TYPE), gcp)
+	CI_CRED_FILE = ${CLUSTER_PROFILE_DIR}/gce.json
+	OADP_CRED_FILE = ${OADP_CRED_DIR}/gcp-credentials
+	OADP_BUCKET_FILE = ${OADP_CRED_DIR}/gcp-velero-bucket-name
 endif
-
-ifeq ($(TEST_UPGRADE),true)
-	TEST_FILTER += && (upgrade)
-else
-	TEST_FILTER += && (! upgrade)
+
+ifeq ($(CLUSTER_TYPE), azure4)
+	CLUSTER_TYPE = azure
 endif
+
+ifeq ($(CLUSTER_TYPE), azure)
+	CI_CRED_FILE = /tmp/ci-azure-credentials
+	OADP_CRED_FILE = /tmp/oadp-azure-credentials
+	OADP_BUCKET_FILE = ${OADP_CRED_DIR}/azure-velero-bucket-name
+endif
+
+VELERO_PLUGIN ?= ${CLUSTER_TYPE}
+
+ifeq ($(CLUSTER_TYPE), ibmcloud)
+	VELERO_PLUGIN = aws
+endif
+
+KVM_EMULATION ?= true
+
+ifeq ($(CLUSTER_TYPE), openstack)
+	KVM_EMULATION = false
+endif
+
+OPENSHIFT_CI ?= true
+OADP_BUCKET ?= $(shell cat $(OADP_BUCKET_FILE))
 SETTINGS_TMP=/tmp/test-settings
 
 .PHONY: test-e2e-setup
@@ -531,8 +577,27 @@ test-e2e-setup: login-required
	BSL_AWS_PROFILE="$(BSL_AWS_PROFILE)" \
	/bin/bash "tests/e2e/scripts/$(CLUSTER_TYPE)_settings.sh"
 
+VELERO_INSTANCE_NAME ?= velero-test
+ARTIFACT_DIR ?= /tmp
+HCO_UPSTREAM ?= false
+TEST_VIRT ?= false
+TEST_UPGRADE ?= false
+TEST_FILTER = (($(shell echo '! aws && ! gcp && ! azure && ! ibmcloud' | \
+sed -r "s/[&]* [!] $(CLUSTER_TYPE)|[!] $(CLUSTER_TYPE) [&]*//")) || $(CLUSTER_TYPE))
+#TEST_FILTER := $(shell echo '! aws && ! gcp && ! azure' | sed -r "s/[&]* [!] $(CLUSTER_TYPE)|[!] $(CLUSTER_TYPE) [&]*//")
+ifeq ($(TEST_VIRT),true)
+	TEST_FILTER += && (virt)
+else
+	TEST_FILTER += && (! virt)
+endif
+ifeq ($(TEST_UPGRADE),true)
+	TEST_FILTER += && (upgrade)
+else
+	TEST_FILTER += && (! upgrade)
+endif
+
 .PHONY: test-e2e
-test-e2e: test-e2e-setup install-ginkgo
+test-e2e: test-e2e-setup install-ginkgo ## Run E2E tests against OADP operator installed in cluster. For more information, check docs/developer/testing/TESTING.md
	ginkgo run -mod=mod tests/e2e/ -- \
	-settings=$(SETTINGS_TMP)/oadpcreds \
	-provider=$(CLUSTER_TYPE) \
@@ -564,27 +629,6 @@ test-e2e-cleanup: login-required
	for restore_name in $(shell $(OC_CLI) get restore -n $(OADP_TEST_NAMESPACE) -o name);do $(OC_CLI) patch "$$restore_name" -n $(OADP_TEST_NAMESPACE) -p '{"metadata":{"finalizers":null}}' --type=merge;done
	rm -rf $(SETTINGS_TMP)
 
-GOLANGCI_LINT = $(shell pwd)/bin/golangci-lint
-
-.PHONY: golangci-lint
-golangci-lint: ## Download golangci-lint locally if necessary.
-	$(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/cmd/golangci-lint@v1.55.2)
-
-.PHONY: lint
-lint: golangci-lint ## Run Go linters checks against all project's Go files.
-	$(GOLANGCI_LINT) run
-
-.PHONY: lint-fix
-lint-fix: golangci-lint ## Fix Go linters issues.
-	$(GOLANGCI_LINT) run --fix
-
-.PHONY: check-go-dependencies
-check-go-dependencies: TEMP:= $(shell mktemp -d)
-check-go-dependencies:
-	@cp -r ./ $(TEMP) && cd $(TEMP) && go mod tidy && cd - && diff -ruN ./ $(TEMP)/ && echo "go dependencies checked" || (echo "go dependencies are out of date, run 'go mod tidy' to update" && exit 1)
-	@chmod -R 777 $(TEMP) && rm -rf $(TEMP)
-	go mod verify
-
 .PHONY: update-non-admin-manifests
 update-non-admin-manifests: NON_ADMIN_CONTROLLER_IMG?=quay.io/konveyor/oadp-non-admin:latest
 update-non-admin-manifests: ## Update Non Admin Controller (NAC) manifests shipped with OADP, from NON_ADMIN_CONTROLLER_PATH
diff --git a/PROJECT b/PROJECT
index 77a72ae276..878e660b95 100644
--- a/PROJECT
+++ b/PROJECT
@@ -1,9 +1,32 @@
-domain: quay.io
+# Code generated by tool. DO NOT EDIT.
+# This file is used to track the info used to scaffold your project
+# and allow the plugins properly work.
+# More info: https://book.kubebuilder.io/reference/project-config.html
+domain: openshift.io
 layout:
-- go.kubebuilder.io/v3
+- go.kubebuilder.io/v4
 plugins:
   manifests.sdk.operatorframework.io/v2: {}
   scorecard.sdk.operatorframework.io/v2: {}
 projectName: oadp-operator
 repo: github.com/openshift/oadp-operator
+resources:
+- api:
+    crdVersion: v1
+    namespaced: true
+  controller: true
+  domain: openshift.io
+  group: oadp
+  kind: DataProtectionApplication
+  path: github.com/openshift/oadp-operator/api/v1alpha1
+  version: v1alpha1
+- api:
+    crdVersion: v1
+    namespaced: true
+  controller: true
+  domain: openshift.io
+  group: oadp
+  kind: CloudStorage
+  path: github.com/openshift/oadp-operator/api/v1alpha1
+  version: v1alpha1
 version: "3"
diff --git a/api/v1alpha1/cloud_storage_types.go b/api/v1alpha1/cloudstorage_types.go
similarity index 64%
rename from api/v1alpha1/cloud_storage_types.go
rename to api/v1alpha1/cloudstorage_types.go
index 295be54ff5..5ea1b3d612 100644
--- a/api/v1alpha1/cloud_storage_types.go
+++ b/api/v1alpha1/cloudstorage_types.go
@@ -1,3 +1,19 @@
+/*
+Copyright 2021.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
 package v1alpha1
 
 import (
@@ -5,19 +21,6 @@ import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
-// CloudStorage types are APIs for automatic bucket creation at cloud providers if defined name do not exists.
-
-//+kubebuilder:object:root=true
-//+kubebuilder:subresource:status
-
-type CloudStorage struct {
-	metav1.TypeMeta   `json:",inline"`
-	metav1.ObjectMeta `json:"metadata,omitempty"`
-
-	Spec   CloudStorageSpec   `json:"spec,omitempty"`
-	Status CloudStorageStatus `json:"status,omitempty"`
-}
-
 type CloudStorageProvider string
 
 const (
@@ -48,18 +51,38 @@ type CloudStorageSpec struct {
	// need storage account name and key to create azure container
	// az storage container create -n <name> --account-name <account-name> --account-key <account-key>
	// azure account key will use CreationSecret to store key and account name
-
 }
 
 type CloudStorageStatus struct {
-	Name       string       `json:"name"`
+	// Name is the name requested for the bucket (aws, gcp) or container (azure)
+	// +operator-sdk:csv:customresourcedefinitions:type=status
+	Name string `json:"name"`
+	// LastSyncTimestamp is the last time the contents of the CloudStorage was synced
+	// +operator-sdk:csv:customresourcedefinitions:type=status,displayName="LastSyncTimestamp"
	LastSynced *metav1.Time `json:"lastSyncTimestamp,omitempty"`
 }
 
-//+kubebuilder:object:root=true
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
 
+// The CloudStorage API automates the creation of a bucket for object storage.
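+//
+// As an illustrative sketch (not generated documentation), a minimal CR can be
+// applied like this; the bucket name, namespace and secret reference below are
+// placeholder values:
+//
+//	cat <<EOF | kubectl apply -f -
+//	apiVersion: oadp.openshift.io/v1alpha1
+//	kind: CloudStorage
+//	metadata:
+//	  name: my-bucket
+//	  namespace: openshift-adp
+//	spec:
+//	  name: my-bucket
+//	  provider: aws
+//	  region: us-east-1
+//	  creationSecret:
+//	    name: cloud-credentials
+//	    key: cloud
+//	EOF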
+type CloudStorage struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec   CloudStorageSpec   `json:"spec,omitempty"`
+	Status CloudStorageStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// CloudStorageList contains a list of CloudStorage
 type CloudStorageList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []CloudStorage `json:"items"`
 }
+
+func init() {
+	SchemeBuilder.Register(&CloudStorage{}, &CloudStorageList{})
+}
diff --git a/api/v1alpha1/oadp_types.go b/api/v1alpha1/dataprotectionapplication_types.go
similarity index 98%
rename from api/v1alpha1/oadp_types.go
rename to api/v1alpha1/dataprotectionapplication_types.go
index c174b3dfdf..d6e642638b 100644
--- a/api/v1alpha1/oadp_types.go
+++ b/api/v1alpha1/dataprotectionapplication_types.go
@@ -597,6 +597,8 @@ type DataProtectionApplicationSpec struct {
 
 // DataProtectionApplicationStatus defines the observed state of DataProtectionApplication
 type DataProtectionApplicationStatus struct {
+	// Conditions defines the observed state of DataProtectionApplication
+	//+operator-sdk:csv:customresourcedefinitions:type=status
	Conditions []metav1.Condition `json:"conditions,omitempty"`
 }
 
@@ -604,7 +606,9 @@ type DataProtectionApplicationStatus struct {
 //+kubebuilder:subresource:status
 //+kubebuilder:resource:path=dataprotectionapplications,shortName=dpa
 
-// DataProtectionApplication is the Schema for the dpa API
+// DataProtectionApplication represents configuration to install a data protection
+// application to safely backup and restore, perform disaster recovery and migrate
+// Kubernetes cluster resources and persistent volumes.
 type DataProtectionApplication struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
@@ -615,13 +619,17 @@ type DataProtectionApplication struct {
 
 //+kubebuilder:object:root=true
 
-// DataProtectionApplicationList contains a list of Velero
+// DataProtectionApplicationList contains a list of DataProtectionApplication
 type DataProtectionApplicationList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []DataProtectionApplication `json:"items"`
 }
 
+func init() {
+	SchemeBuilder.Register(&DataProtectionApplication{}, &DataProtectionApplicationList{})
+}
+
 // Default BackupImages behavior when nil to true
 func (dpa *DataProtectionApplication) BackupImages() bool {
	return dpa.Spec.BackupImages == nil || *dpa.Spec.BackupImages
@@ -644,10 +652,6 @@ func (veleroConfig *VeleroConfig) HasFeatureFlag(flag string) bool {
	return false
 }
 
-func init() {
-	SchemeBuilder.Register(&DataProtectionApplication{}, &DataProtectionApplicationList{}, &CloudStorage{}, &CloudStorageList{})
-}
-
 // AutoCorrect is a collection of auto-correction functions for the DPA CR
 // These auto corrects are in-memory only and do not persist to the CR
 // There should not be another place where these auto-corrects are done
diff --git a/api/v1alpha1/groupversion_info.go b/api/v1alpha1/groupversion_info.go
index 636f70642e..88d3716506 100644
--- a/api/v1alpha1/groupversion_info.go
+++ b/api/v1alpha1/groupversion_info.go
@@ -28,8 +28,6 @@ var (
	// GroupVersion is group version used to register these objects
	GroupVersion = schema.GroupVersion{Group: "oadp.openshift.io", Version: "v1alpha1"}
 
-	Kind = "DataProtectionApplication"
-
	// SchemeBuilder is used to add go types to the GroupVersionKind scheme
	SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
 
diff --git a/build/Dockerfile.bundle b/build/Dockerfile.bundle
index 7b8f794607..2560612d87 100644
--- a/build/Dockerfile.bundle
+++ b/build/Dockerfile.bundle
@@ -9,7 +9,7 @@ LABEL operators.operatorframework.io.bundle.channels.v1=stable
 LABEL operators.operatorframework.io.bundle.channel.default.v1=stable
 LABEL operators.operatorframework.io.metrics.builder=operator-sdk-v1.34.2
 LABEL operators.operatorframework.io.metrics.mediatype.v1=metrics+v1
-LABEL operators.operatorframework.io.metrics.project_layout=go.kubebuilder.io/v3
+LABEL operators.operatorframework.io.metrics.project_layout=go.kubebuilder.io/v4
 
 # Labels for testing.
 LABEL operators.operatorframework.io.test.mediatype.v1=scorecard+v1
diff --git a/bundle.Dockerfile b/bundle.Dockerfile
index 7b8f794607..2560612d87 100644
--- a/bundle.Dockerfile
+++ b/bundle.Dockerfile
@@ -9,7 +9,7 @@ LABEL operators.operatorframework.io.bundle.channels.v1=stable
 LABEL operators.operatorframework.io.bundle.channel.default.v1=stable
 LABEL operators.operatorframework.io.metrics.builder=operator-sdk-v1.34.2
 LABEL operators.operatorframework.io.metrics.mediatype.v1=metrics+v1
-LABEL operators.operatorframework.io.metrics.project_layout=go.kubebuilder.io/v3
+LABEL operators.operatorframework.io.metrics.project_layout=go.kubebuilder.io/v4
 
 # Labels for testing.
 LABEL operators.operatorframework.io.test.mediatype.v1=scorecard+v1
diff --git a/bundle/manifests/dpa-editor-role_rbac.authorization.k8s.io_v1_clusterrole.yaml b/bundle/manifests/dpa-editor-role_rbac.authorization.k8s.io_v1_clusterrole.yaml
deleted file mode 100644
index 6bc04f7498..0000000000
--- a/bundle/manifests/dpa-editor-role_rbac.authorization.k8s.io_v1_clusterrole.yaml
+++ /dev/null
@@ -1,24 +0,0 @@
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  creationTimestamp: null
-  name: dpa-editor-role
-rules:
-- apiGroups:
-  - oadp.openshift.io
-  resources:
-  - dataprotectionapplications
-  verbs:
-  - create
-  - delete
-  - get
-  - list
-  - patch
-  - update
-  - watch
-- apiGroups:
-  - oadp.openshift.io
-  resources:
-  - dataprotectionapplications/status
-  verbs:
-  - get
diff --git a/bundle/manifests/dpa-viewer-role_rbac.authorization.k8s.io_v1_clusterrole.yaml b/bundle/manifests/dpa-viewer-role_rbac.authorization.k8s.io_v1_clusterrole.yaml
deleted file mode 100644
index 695074ee00..0000000000
--- a/bundle/manifests/dpa-viewer-role_rbac.authorization.k8s.io_v1_clusterrole.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  creationTimestamp: null
-  name: dpa-viewer-role
-rules:
-- apiGroups:
-  - oadp.openshift.io
-  resources:
-  - dataprotectionapplications
-  verbs:
-  - get
-  - list
-  - watch
-- apiGroups:
-  - oadp.openshift.io
-  resources:
-  - dataprotectionapplications/status
-  verbs:
-  - get
diff --git a/bundle/manifests/oadp-operator.clusterserviceversion.yaml b/bundle/manifests/oadp-operator.clusterserviceversion.yaml
index 08fe838b19..c8ee37616e 100644
--- a/bundle/manifests/oadp-operator.clusterserviceversion.yaml
+++ b/bundle/manifests/oadp-operator.clusterserviceversion.yaml
@@ -208,7 +208,7 @@ metadata:
     operators.openshift.io/valid-subscription: '["OpenShift Kubernetes Engine",
       "OpenShift Container Platform", "OpenShift Platform Plus"]'
     operators.operatorframework.io/builder: operator-sdk-v1.34.2
-    operators.operatorframework.io/project_layout: go.kubebuilder.io/v3
+    operators.operatorframework.io/project_layout: go.kubebuilder.io/v4
     repository: https://github.com/openshift/oadp-operator
     support: Red Hat
   labels:
@@ -311,7 +311,7 @@ spec:
       version: v1
     - description: The CloudStorage API automates the creation of a bucket for object
         storage.
-      displayName: CloudStorage
+      displayName: Cloud Storage
       kind: CloudStorage
       name: cloudstorages.oadp.openshift.io
       statusDescriptors:
@@ -357,7 +357,7 @@ spec:
     - description: DataProtectionApplication represents configuration to install a
        data protection application to safely backup and restore, perform disaster
        recovery and migrate Kubernetes cluster resources and persistent volumes.
-      displayName: DataProtectionApplication
+      displayName: Data Protection Application
       kind: DataProtectionApplication
       name: dataprotectionapplications.oadp.openshift.io
       statusDescriptors:
@@ -724,38 +724,41 @@ spec:
       serviceAccountName: non-admin-controller
     - rules:
       - apiGroups:
-        - config.openshift.io
+        - ""
         resources:
-        - infrastructures
+        - configmaps
+        - endpoints
+        - events
+        - persistentvolumeclaims
+        - pods
+        - secrets
+        - serviceaccounts
+        - services
        verbs:
+        - create
+        - delete
+        - deletecollection
        - get
        - list
-        - watch
-      - apiGroups:
-        - cloudcredential.openshift.io
-        resources:
-        - credentialsrequests
-        verbs:
-        - create
+        - patch
        - update
-        - get
+        - watch
      - apiGroups:
-        - oadp.openshift.io
+        - ""
        resources:
-        - '*'
+        - namespaces
        verbs:
        - create
-        - delete
        - get
        - list
        - patch
        - update
        - watch
      - apiGroups:
-        - coordination.k8s.io
-        - corev1
+        - apps
        resources:
-        - secrets
+        - daemonsets
+        - deployments
        verbs:
        - create
        - delete
@@ -765,35 +768,38 @@ spec:
        - update
        - watch
      - apiGroups:
-        - oadp.openshift.io
+        - cloudcredential.openshift.io
        resources:
-        - buckets
+        - credentialsrequests
        verbs:
        - create
-        - delete
        - get
-        - list
-        - patch
        - update
-        - watch
      - apiGroups:
-        - oadp.openshift.io
+        - config.openshift.io
        resources:
-        - buckets/finalizers
+        - infrastructures
        verbs:
-        - update
+        - get
+        - list
+        - watch
      - apiGroups:
-        - oadp.openshift.io
+        - coordination.k8s.io
+        - corev1
        resources:
-        - buckets/status
+        - secrets
        verbs:
+        - create
+        - delete
        - get
+        - list
        - patch
        - update
+        - watch
      - apiGroups:
-        - oadp.openshift.io
+        - monitoring.coreos.com
        resources:
-        - leases
+        - servicemonitors
        verbs:
        - create
        - delete
@@ -803,7 +809,7 @@ spec:
        - update
        - watch
      - apiGroups:
-        - velero.io
+        - oadp.openshift.io
        resources:
        - '*'
        verbs:
@@ -815,61 +821,61 @@ spec:
        - update
        - watch
      - apiGroups:
-        - security.openshift.io
+        - oadp.openshift.io
        resources:
-        - securitycontextconstraints
+        - cloudstorages
        verbs:
-        - list
-        - get
        - create
        - delete
+        - get
+        - list
        - patch
        - update
        - watch
      - apiGroups:
-        - security.openshift.io
-        resourceNames:
-        - privileged
+        - oadp.openshift.io
        resources:
-        - securitycontextconstraints
+        - cloudstorages/finalizers
        verbs:
-        - use
+        - update
      - apiGroups:
-        - ""
+        - oadp.openshift.io
        resources:
-        - secrets
-        - configmaps
-        - pods
-        - services
-        - serviceaccounts
-        - endpoints
-        - persistentvolumeclaims
-        - events
+        - cloudstorages/status
        verbs:
-        - list
        - get
+        - patch
+        - update
+      - apiGroups:
+        - oadp.openshift.io
+        resources:
+        - dataprotectionapplications
+        verbs:
        - create
        - delete
-        - deletecollection
+        - get
+        - list
        - patch
        - update
        - watch
      - apiGroups:
-        - ""
+        - oadp.openshift.io
        resources:
-        - namespaces
+        - dataprotectionapplications/finalizers
+        verbs:
+        - update
+      - apiGroups:
+        - oadp.openshift.io
+        resources:
+        - dataprotectionapplications/status
        verbs:
-        - list
        - get
-        - create
        - patch
        - update
-        - watch
      - apiGroups:
-        - apps
+        - route.openshift.io
        resources:
-        - deployments
-        - daemonsets
+        - routes
        verbs:
        - create
        - delete
@@ -879,9 +885,9 @@ spec:
        - update
        - watch
      - apiGroups:
-        - route.openshift.io
+        - security.openshift.io
        resources:
-        - routes
+        - securitycontextconstraints
        verbs:
        - create
        - delete
@@ -891,16 +897,24 @@ spec:
        - update
        - watch
      - apiGroups:
-        - monitoring.coreos.com
+        - security.openshift.io
+        resourceNames:
+        - privileged
        resources:
-        - servicemonitors
+        - securitycontextconstraints
+        verbs:
+        - use
+      - apiGroups:
+        - velero.io
+        resources:
+        - '*'
        verbs:
-        - get
        - create
-        - list
        - delete
-        - update
+        - get
+        - list
        - patch
+        - update
        - watch
      - apiGroups:
        - authentication.k8s.io
diff --git a/bundle/manifests/oadp.openshift.io_cloudstorages.yaml b/bundle/manifests/oadp.openshift.io_cloudstorages.yaml
index 660b747356..acbad84f11 100644
--- a/bundle/manifests/oadp.openshift.io_cloudstorages.yaml
+++ b/bundle/manifests/oadp.openshift.io_cloudstorages.yaml
@@ -17,6 +17,8 @@ spec:
   - name: v1alpha1
     schema:
       openAPIV3Schema:
+        description: The CloudStorage API automates the creation of a bucket for object
+          storage.
         properties:
           apiVersion:
             description: |-
@@ -93,9 +95,13 @@ spec:
           status:
             properties:
               lastSyncTimestamp:
+                description: LastSyncTimestamp is the last time the contents of the
+                  CloudStorage was synced
                 format: date-time
                 type: string
               name:
+                description: Name is the name requested for the bucket (aws, gcp)
+                  or container (azure)
                 type: string
             required:
             - name
diff --git a/bundle/manifests/oadp.openshift.io_dataprotectionapplications.yaml b/bundle/manifests/oadp.openshift.io_dataprotectionapplications.yaml
index b7edc634a0..4be84d16cb 100644
--- a/bundle/manifests/oadp.openshift.io_dataprotectionapplications.yaml
+++ b/bundle/manifests/oadp.openshift.io_dataprotectionapplications.yaml
@@ -19,7 +19,10 @@ spec:
   - name: v1alpha1
     schema:
       openAPIV3Schema:
-        description: DataProtectionApplication is the Schema for the dpa API
+        description: |-
+          DataProtectionApplication represents configuration to install a data protection
+          application to safely backup and restore, perform disaster recovery and migrate
+          Kubernetes cluster resources and persistent volumes.
         properties:
           apiVersion:
             description: |-
@@ -2238,6 +2241,7 @@ spec:
             conditions:
+              description: Conditions defines the observed state of DataProtectionApplication
               items:
                 description: "Condition contains details for one aspect of the current
                   state of this API Resource.\n---\nThis struct is intended for direct
                   use as an array at the field path .status.conditions.  For example,\n\n\n\ttype
                   FooStatus struct{\n\t    // Represents the observations of a foo's
                   current state.\n\t    // Known .status.conditions.type are: \"Available\",
                   \"Progressing\", and \"Degraded\"\n\t    // +patchMergeKey=type\n\t
                   \   // +patchStrategy=merge\n\t    // +listType=map\n\t    // +listMapKey=type\n\t
                   \   Conditions []metav1.Condition `json:\"conditions,omitempty\"
                   patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t
                   // other fields\n\t}"
                 properties:
diff --git a/bundle/manifests/openshift-adp-controller-manager-metrics-service_v1_service.yaml b/bundle/manifests/openshift-adp-controller-manager-metrics-service_v1_service.yaml
index de24f72ae3..6e380c820c 100644
--- a/bundle/manifests/openshift-adp-controller-manager-metrics-service_v1_service.yaml
+++ b/bundle/manifests/openshift-adp-controller-manager-metrics-service_v1_service.yaml
@@ -9,6 +9,7 @@ spec:
   ports:
   - name: https
     port: 8443
+    protocol: TCP
     targetPort: https
   selector:
     control-plane: controller-manager
diff --git a/bundle/manifests/openshift-adp-manager-config_v1_configmap.yaml b/bundle/manifests/openshift-adp-manager-config_v1_configmap.yaml
deleted file mode 100644
index 0db142f4ce..0000000000
--- a/bundle/manifests/openshift-adp-manager-config_v1_configmap.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
-apiVersion: v1
-data:
-  controller_manager_config.yaml: |
-    apiVersion: controller-runtime.sigs.k8s.io/v1alpha1
-    kind: ControllerManagerConfig
-    health:
-      healthProbeBindAddress: :8081
-    metrics:
-      bindAddress: 127.0.0.1:8080
-    webhook:
-      port: 9443
-    leaderElection:
-      leaderElect: true
-      resourceName: 8b4defce.openshift.io
-kind: ConfigMap
-metadata:
-  name: openshift-adp-manager-config
diff --git a/bundle/metadata/annotations.yaml b/bundle/metadata/annotations.yaml
index d6cb10e5ff..836b9fe075 100644
--- a/bundle/metadata/annotations.yaml
+++ b/bundle/metadata/annotations.yaml
@@ -8,7 +8,7 @@ annotations:
   operators.operatorframework.io.bundle.channel.default.v1: stable
   operators.operatorframework.io.metrics.builder: operator-sdk-v1.34.2
   operators.operatorframework.io.metrics.mediatype.v1: metrics+v1
-  operators.operatorframework.io.metrics.project_layout: go.kubebuilder.io/v3
+  operators.operatorframework.io.metrics.project_layout: go.kubebuilder.io/v4
 
   # Annotations for testing.
operators.operatorframework.io.test.mediatype.v1: scorecard+v1 diff --git a/bundle/tests/scorecard/config.yaml b/bundle/tests/scorecard/config.yaml index 8cba919ca9..de2979f631 100644 --- a/bundle/tests/scorecard/config.yaml +++ b/bundle/tests/scorecard/config.yaml @@ -8,7 +8,7 @@ stages: - entrypoint: - scorecard-test - basic-check-spec - image: quay.io/operator-framework/scorecard-test:v1.10.0 + image: quay.io/operator-framework/scorecard-test:v1.34.2 labels: suite: basic test: basic-check-spec-test @@ -18,7 +18,7 @@ stages: - entrypoint: - scorecard-test - olm-bundle-validation - image: quay.io/operator-framework/scorecard-test:v1.10.0 + image: quay.io/operator-framework/scorecard-test:v1.34.2 labels: suite: olm test: olm-bundle-validation-test @@ -28,7 +28,7 @@ stages: - entrypoint: - scorecard-test - olm-crds-have-validation - image: quay.io/operator-framework/scorecard-test:v1.10.0 + image: quay.io/operator-framework/scorecard-test:v1.34.2 labels: suite: olm test: olm-crds-have-validation-test @@ -38,7 +38,7 @@ stages: - entrypoint: - scorecard-test - olm-crds-have-resources - image: quay.io/operator-framework/scorecard-test:v1.10.0 + image: quay.io/operator-framework/scorecard-test:v1.34.2 labels: suite: olm test: olm-crds-have-resources-test @@ -48,7 +48,7 @@ stages: - entrypoint: - scorecard-test - olm-spec-descriptors - image: quay.io/operator-framework/scorecard-test:v1.10.0 + image: quay.io/operator-framework/scorecard-test:v1.34.2 labels: suite: olm test: olm-spec-descriptors-test @@ -58,7 +58,7 @@ stages: - entrypoint: - scorecard-test - olm-status-descriptors - image: quay.io/operator-framework/scorecard-test:v1.10.0 + image: quay.io/operator-framework/scorecard-test:v1.34.2 labels: suite: olm test: olm-status-descriptors-test diff --git a/main.go b/cmd/main.go similarity index 90% rename from main.go rename to cmd/main.go index 4caa206296..1be17513d9 100644 --- a/main.go +++ b/cmd/main.go @@ -18,6 +18,7 @@ package main import ( "context" + "crypto/tls" "encoding/json" "flag" "fmt" @@ -51,9 +52,9 @@ import ( metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" "sigs.k8s.io/controller-runtime/pkg/webhook" - //+kubebuilder:scaffold:imports oadpv1alpha1 "github.com/openshift/oadp-operator/api/v1alpha1" - "github.com/openshift/oadp-operator/controllers" + "github.com/openshift/oadp-operator/internal/controller" + //+kubebuilder:scaffold:imports "github.com/openshift/oadp-operator/pkg/common" "github.com/openshift/oadp-operator/pkg/leaderelection" ) @@ -91,11 +92,17 @@ func main() { var metricsAddr string var enableLeaderElection bool var probeAddr string + var secureMetrics bool + var enableHTTP2 bool flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.") flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") flag.BoolVar(&enableLeaderElection, "leader-elect", false, "Enable leader election for controller manager. "+ "Enabling this will ensure there is only one active controller manager.") + flag.BoolVar(&secureMetrics, "metrics-secure", false, + "If set the metrics endpoint is served securely") + flag.BoolVar(&enableHTTP2, "enable-http2", false, + "If set, HTTP/2 will be enabled for the metrics and webhook servers") opts := zap.Options{ Development: true, } @@ -155,16 +162,34 @@ func main() { } } + // if the enable-http2 flag is false (the default), http/2 should be disabled + // due to its vulnerabilities. 
More specifically, disabling http/2 will + prevent the servers from being vulnerable to the HTTP/2 Stream Cancellation and + Rapid Reset CVEs. For more information see: + // - https://github.com/advisories/GHSA-qppj-fm5r-hxr3 + // - https://github.com/advisories/GHSA-4374-p667-p6c8 + disableHTTP2 := func(c *tls.Config) { + setupLog.Info("disabling http/2") + c.NextProtos = []string{"http/1.1"} + } + + tlsOpts := []func(*tls.Config){} + if !enableHTTP2 { + tlsOpts = append(tlsOpts, disableHTTP2) + } + + webhookServer := webhook.NewServer(webhook.Options{ + TLSOpts: tlsOpts, + }) + mgr, err := ctrl.NewManager(kubeconf, ctrl.Options{ Scheme: scheme, Metrics: metricsserver.Options{ - BindAddress: metricsAddr, - }, - WebhookServer: &webhook.DefaultServer{ - Options: webhook.Options{ - Port: 9443, - }, + BindAddress: metricsAddr, + SecureServing: secureMetrics, + TLSOpts: tlsOpts, }, + WebhookServer: webhookServer, HealthProbeBindAddress: probeAddr, LeaderElection: enableLeaderElection, LeaseDuration: &leConfig.LeaseDuration.Duration, @@ -229,7 +254,7 @@ func main() { os.Exit(1) } - if err = (&controllers.DPAReconciler{ + if err = (&controller.DataProtectionApplicationReconciler{ Client: mgr.GetClient(), Scheme: mgr.GetScheme(), EventRecorder: mgr.GetEventRecorderFor("DPA-controller"), @@ -239,12 +264,12 @@ func main() { os.Exit(1) } - if err = (&controllers.BucketReconciler{ + if err = (&controller.CloudStorageReconciler{ Client: mgr.GetClient(), Scheme: mgr.GetScheme(), - EventRecorder: mgr.GetEventRecorderFor("bucket-controller"), + EventRecorder: mgr.GetEventRecorderFor("CloudStorage-controller"), }).SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "Bucket") + setupLog.Error(err, "unable to create controller", "controller", "CloudStorage") os.Exit(1) } //+kubebuilder:scaffold:builder diff --git a/main_test.go b/cmd/main_test.go similarity index 100% rename from main_test.go rename to cmd/main_test.go diff --git a/config/crd/bases/oadp.openshift.io_cloudstorages.yaml b/config/crd/bases/oadp.openshift.io_cloudstorages.yaml index 88342ee4d6..28073c6849 100644 --- a/config/crd/bases/oadp.openshift.io_cloudstorages.yaml +++ b/config/crd/bases/oadp.openshift.io_cloudstorages.yaml @@ -17,6 +17,8 @@ spec: - name: v1alpha1 schema: openAPIV3Schema: + description: The CloudStorage API automates the creation of a bucket for object + storage. properties: apiVersion: description: |- @@ -93,9 +95,13 @@ spec: status: properties: lastSyncTimestamp: + description: LastSyncTimestamp is the last time the contents of the + CloudStorage was synced format: date-time type: string name: + description: Name is the name requested for the bucket (aws, gcp) + or container (azure) type: string required: - name diff --git a/config/crd/bases/oadp.openshift.io_dataprotectionapplications.yaml b/config/crd/bases/oadp.openshift.io_dataprotectionapplications.yaml index 2bd65c054d..fae5e415f2 100644 --- a/config/crd/bases/oadp.openshift.io_dataprotectionapplications.yaml +++ b/config/crd/bases/oadp.openshift.io_dataprotectionapplications.yaml @@ -19,7 +19,10 @@ spec: - name: v1alpha1 schema: openAPIV3Schema: - description: DataProtectionApplication is the Schema for the dpa API + description: |- + DataProtectionApplication represents configuration to install a data protection + application to safely backup and restore, perform disaster recovery and migrate + Kubernetes cluster resources and persistent volumes.
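Returning to the HTTP/2 hardening in cmd/main.go above: forcing `NextProtos` to `http/1.1` works because ALPN lets the server pick the application protocol, so a server that never advertises `h2` cannot be pushed into HTTP/2, Rapid Reset included, even by a client that offers it. A standalone illustration of the mechanism (not part of the patch):

```go
package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
	"net/http/httptest"
)

func main() {
	srv := httptest.NewUnstartedServer(http.HandlerFunc(
		func(w http.ResponseWriter, r *http.Request) {}))
	// Same effect as the disableHTTP2 TLS option: only http/1.1 in ALPN.
	srv.TLS = &tls.Config{NextProtos: []string{"http/1.1"}}
	srv.StartTLS()
	defer srv.Close()

	conn, err := tls.Dial("tcp", srv.Listener.Addr().String(), &tls.Config{
		InsecureSkipVerify: true,                       // the test server's cert is self-signed
		NextProtos:         []string{"h2", "http/1.1"}, // client would prefer HTTP/2
	})
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	// Prints "http/1.1": the server never offered h2.
	fmt.Println(conn.ConnectionState().NegotiatedProtocol)
}
```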
properties: apiVersion: description: |- @@ -2238,6 +2241,7 @@ spec: description: DataProtectionApplicationStatus defines the observed state of DataProtectionApplication properties: conditions: + description: Conditions defines the observed state of DataProtectionApplication items: description: "Condition contains details for one aspect of the current state of this API Resource.\n---\nThis struct is intended for direct use as an array at the field path .status.conditions. For example,\n\n\n\ttype FooStatus struct{\n\t // Represents the observations of a foo's current state.\n\t // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t // other fields\n\t}" properties: diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index 0da74a2160..c1e79c1680 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -21,17 +21,21 @@ resources: - bases/oadp.openshift.io_nonadminrestores.yaml #+kubebuilder:scaffold:crdkustomizeresource -patchesStrategicMerge: +patches: # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. # patches here are for enabling the conversion webhook for each CRD -#- patches/webhook_in_veleroes.yaml +#- path: patches/webhook_in_dataprotectionapplications.yaml +#- path: patches/webhook_in_cloudstorages.yaml #+kubebuilder:scaffold:crdkustomizewebhookpatch # [CERTMANAGER] To enable cert-manager, uncomment all the sections with [CERTMANAGER] prefix. # patches here are for enabling the CA injection for each CRD -#- patches/cainjection_in_veleroes.yaml +#- path: patches/cainjection_in_dataprotectionapplications.yaml +#- path: patches/cainjection_in_cloudstorages.yaml #+kubebuilder:scaffold:crdkustomizecainjectionpatch +# [WEBHOOK] To enable webhook, uncomment the following section # the following config is for teaching kustomize how to do kustomization for CRDs. 
-configurations: -- kustomizeconfig.yaml + +#configurations: +#- kustomizeconfig.yaml diff --git a/config/crd/patches/cainjection_in_dpas.yaml b/config/crd/patches/cainjection_in_dpas.yaml deleted file mode 100644 index 81723dbdc9..0000000000 --- a/config/crd/patches/cainjection_in_dpas.yaml +++ /dev/null @@ -1,7 +0,0 @@ -# The following patch adds a directive for certmanager to inject CA into the CRD -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) - name: dataprotectionapplications.oadp.openshift.io diff --git a/config/crd/patches/webhook_in_dpas.yaml b/config/crd/patches/webhook_in_dpas.yaml deleted file mode 100644 index 30dee432ff..0000000000 --- a/config/crd/patches/webhook_in_dpas.yaml +++ /dev/null @@ -1,16 +0,0 @@ -# The following patch enables a conversion webhook for the CRD -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: dataprotectionapplications.oadp.openshift.io -spec: - conversion: - strategy: Webhook - webhook: - clientConfig: - service: - namespace: system - name: webhook-service - path: /convert - conversionReviewVersions: - - v1 diff --git a/config/default/kustomization.yaml b/config/default/kustomization.yaml index ae2ce7b241..997b5592c6 100644 --- a/config/default/kustomization.yaml +++ b/config/default/kustomization.yaml @@ -9,10 +9,12 @@ namespace: openshift-adp namePrefix: openshift-adp- # Labels to add to all resources and selectors. -#commonLabels: -# someName: someValue +#labels: +#- includeSelectors: true +# pairs: +# someName: someValue -bases: +resources: - ../crd - ../rbac - ../manager @@ -24,51 +26,117 @@ bases: # [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. #- ../prometheus -patchesStrategicMerge: +patches: # Protect the /metrics endpoint by putting it behind auth. # If you want your controller-manager to expose the /metrics # endpoint w/o any authn/z, please comment the following line. -#- manager_auth_proxy_patch.yaml - -# Mount the controller config file for loading manager configurations -# through a ComponentConfig type -#- manager_config_patch.yaml +#- path: manager_auth_proxy_patch.yaml # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in # crd/kustomization.yaml -#- manager_webhook_patch.yaml +#- path: manager_webhook_patch.yaml # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. # Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks. # 'CERTMANAGER' needs to be enabled to use ca injection -#- webhookcainjection_patch.yaml +#- path: webhookcainjection_patch.yaml -# the following config is for teaching kustomize how to do var substitution -vars: # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix. 
-#- name: CERTIFICATE_NAMESPACE # namespace of the certificate CR -# objref: -# kind: Certificate -# group: cert-manager.io -# version: v1 -# name: serving-cert # this name should match the one in certificate.yaml -# fieldref: -# fieldpath: metadata.namespace -#- name: CERTIFICATE_NAME -# objref: -# kind: Certificate -# group: cert-manager.io -# version: v1 -# name: serving-cert # this name should match the one in certificate.yaml -#- name: SERVICE_NAMESPACE # namespace of the service -# objref: -# kind: Service -# version: v1 -# name: webhook-service -# fieldref: -# fieldpath: metadata.namespace -#- name: SERVICE_NAME -# objref: -# kind: Service -# version: v1 -# name: webhook-service +# Uncomment the following replacements to add the cert-manager CA injection annotations +#replacements: +# - source: # Add cert-manager annotation to ValidatingWebhookConfiguration, MutatingWebhookConfiguration and CRDs +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert # this name should match the one in certificate.yaml +# fieldPath: .metadata.namespace # namespace of the certificate CR +# targets: +# - select: +# kind: ValidatingWebhookConfiguration +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 0 +# create: true +# - select: +# kind: MutatingWebhookConfiguration +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 0 +# create: true +# - select: +# kind: CustomResourceDefinition +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 0 +# create: true +# - source: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert # this name should match the one in certificate.yaml +# fieldPath: .metadata.name +# targets: +# - select: +# kind: ValidatingWebhookConfiguration +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 1 +# create: true +# - select: +# kind: MutatingWebhookConfiguration +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 1 +# create: true +# - select: +# kind: CustomResourceDefinition +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 1 +# create: true +# - source: # Add cert-manager annotation to the webhook Service +# kind: Service +# version: v1 +# name: webhook-service +# fieldPath: .metadata.name # namespace of the service +# targets: +# - select: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# fieldPaths: +# - .spec.dnsNames.0 +# - .spec.dnsNames.1 +# options: +# delimiter: '.' +# index: 0 +# create: true +# - source: +# kind: Service +# version: v1 +# name: webhook-service +# fieldPath: .metadata.namespace # namespace of the service +# targets: +# - select: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# fieldPaths: +# - .spec.dnsNames.0 +# - .spec.dnsNames.1 +# options: +# delimiter: '.' 
+# index: 1 +# create: true diff --git a/config/default/manager_auth_proxy_patch.yaml b/config/default/manager_auth_proxy_patch.yaml index a224be19ea..70c3437f4b 100644 --- a/config/default/manager_auth_proxy_patch.yaml +++ b/config/default/manager_auth_proxy_patch.yaml @@ -10,15 +10,28 @@ spec: spec: containers: - name: kube-rbac-proxy - image: gcr.io/kubebuilder/kube-rbac-proxy:v0.8.0 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - "ALL" + image: gcr.io/kubebuilder/kube-rbac-proxy:v0.15.0 args: - "--secure-listen-address=0.0.0.0:8443" - "--upstream=http://127.0.0.1:8080/" - "--logtostderr=true" - - "--v=10" + - "--v=0" ports: - containerPort: 8443 + protocol: TCP name: https + resources: + limits: + cpu: 500m + memory: 128Mi + requests: + cpu: 5m + memory: 64Mi - name: manager args: - "--health-probe-bind-address=:8081" diff --git a/config/default/manager_config_patch.yaml b/config/default/manager_config_patch.yaml index 6c400155cf..f6f5891692 100644 --- a/config/default/manager_config_patch.yaml +++ b/config/default/manager_config_patch.yaml @@ -8,13 +8,3 @@ spec: spec: containers: - name: manager - args: - - "--config=controller_manager_config.yaml" - volumeMounts: - - name: manager-config - mountPath: /controller_manager_config.yaml - subPath: controller_manager_config.yaml - volumes: - - name: manager-config - configMap: - name: manager-config diff --git a/config/manager/controller_manager_config.yaml b/config/manager/controller_manager_config.yaml deleted file mode 100644 index 5a80ada64c..0000000000 --- a/config/manager/controller_manager_config.yaml +++ /dev/null @@ -1,11 +0,0 @@ -apiVersion: controller-runtime.sigs.k8s.io/v1alpha1 -kind: ControllerManagerConfig -health: - healthProbeBindAddress: :8081 -metrics: - bindAddress: 127.0.0.1:8080 -webhook: - port: 9443 -leaderElection: - leaderElect: true - resourceName: 8b4defce.openshift.io diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml index 3a9e916204..d16337ed00 100644 --- a/config/manager/kustomization.yaml +++ b/config/manager/kustomization.yaml @@ -1,13 +1,5 @@ resources: - manager.yaml - -generatorOptions: - disableNameSuffixHash: true - -configMapGenerator: -- files: - - controller_manager_config.yaml - name: manager-config apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization images: diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index 6fc8c60c5d..17a236f6eb 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -22,6 +22,26 @@ spec: labels: control-plane: controller-manager spec: + # TODO(user): Uncomment the following code to configure the nodeAffinity expression + # according to the platforms which are supported by your solution. + # It is considered best practice to support multiple architectures. You can + # build your manager image using the makefile target docker-buildx. 
+ # affinity: + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: kubernetes.io/arch + # operator: In + # values: + # - amd64 + # - arm64 + # - ppc64le + # - s390x + # - key: kubernetes.io/os + # operator: In + # values: + # - linux securityContext: runAsNonRoot: true containers: diff --git a/config/manifests/bases/oadp-operator.clusterserviceversion.yaml b/config/manifests/bases/oadp-operator.clusterserviceversion.yaml index 6b1b039e62..2d708fd764 100644 --- a/config/manifests/bases/oadp-operator.clusterserviceversion.yaml +++ b/config/manifests/bases/oadp-operator.clusterserviceversion.yaml @@ -38,17 +38,6 @@ spec: apiservicedefinitions: {} customresourcedefinitions: owned: - - description: DataProtectionApplication represents configuration to install a - data protection application to safely backup and restore, perform disaster - recovery and migrate Kubernetes cluster resources and persistent volumes. - displayName: DataProtectionApplication - kind: DataProtectionApplication - name: dataprotectionapplications.oadp.openshift.io - statusDescriptors: - - description: Conditions defines the observed state of DataProtectionApplication - displayName: Conditions - path: conditions - version: v1alpha1 - description: A backup repository is an indicator of a connection from the restic/kopia server to the backupstoragelocation. displayName: BackupRepository @@ -386,9 +375,19 @@ spec: displayName: StartTimestamp path: startTimestamp version: v2alpha1 + - description: NonAdminBackup is the Schema for the nonadminbackups API + displayName: Non Admin Backup + kind: NonAdminBackup + name: nonadminbackups.oadp.openshift.io + version: v1alpha1 + - description: NonAdminRestore is the Schema for the nonadminrestores API + displayName: Non Admin Restore + kind: NonAdminRestore + name: nonadminrestores.oadp.openshift.io + version: v1alpha1 - description: The CloudStorage API automates the creation of a bucket for object storage. - displayName: CloudStorage + displayName: Cloud Storage kind: CloudStorage name: cloudstorages.oadp.openshift.io statusDescriptors: @@ -401,15 +400,16 @@ spec: displayName: Name path: name version: v1alpha1 - - description: NonAdminBackup is the Schema for the nonadminbackups API - displayName: Non Admin Backup - kind: NonAdminBackup - name: nonadminbackups.oadp.openshift.io - version: v1alpha1 - - description: NonAdminRestore is the Schema for the nonadminrestores API - displayName: Non Admin Restore - kind: NonAdminRestore - name: nonadminrestores.oadp.openshift.io + - description: DataProtectionApplication represents configuration to install a + data protection application to safely backup and restore, perform disaster + recovery and migrate Kubernetes cluster resources and persistent volumes. 
+ displayName: Data Protection Application + kind: DataProtectionApplication + name: dataprotectionapplications.oadp.openshift.io + statusDescriptors: + - description: Conditions defines the observed state of DataProtectionApplication + displayName: Conditions + path: conditions version: v1alpha1 description: | **OpenShift API for Data Protection (OADP)** operator sets up and installs diff --git a/config/manifests/kustomization.yaml b/config/manifests/kustomization.yaml index 788a21845a..5a625e6754 100644 --- a/config/manifests/kustomization.yaml +++ b/config/manifests/kustomization.yaml @@ -22,7 +22,7 @@ resources: # # Remove the manager container's "cert" volumeMount, since OLM will create and mount a set of certs. # # Update the indices in this path if adding or removing containers/volumeMounts in the manager's Deployment. # - op: remove -# path: /spec/template/spec/containers/1/volumeMounts/0 +# path: /spec/template/spec/containers/0/volumeMounts/0 # # Remove the "cert" volume, since OLM will create and mount a set of certs. # # Update the indices in this path if adding or removing volumes in the manager's Deployment. # - op: remove diff --git a/config/prometheus/kustomization.yaml b/config/prometheus/kustomization.yaml new file mode 100644 index 0000000000..ed137168a1 --- /dev/null +++ b/config/prometheus/kustomization.yaml @@ -0,0 +1,2 @@ +resources: +- monitor.yaml diff --git a/config/prometheus/monitor.yaml b/config/prometheus/monitor.yaml new file mode 100644 index 0000000000..8920996a4c --- /dev/null +++ b/config/prometheus/monitor.yaml @@ -0,0 +1,25 @@ +# Prometheus Monitor Service (Metrics) +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + labels: + control-plane: controller-manager + app.kubernetes.io/name: servicemonitor + app.kubernetes.io/instance: controller-manager-metrics-monitor + app.kubernetes.io/component: metrics + app.kubernetes.io/created-by: oadp-operator + app.kubernetes.io/part-of: oadp-operator + app.kubernetes.io/managed-by: kustomize + name: controller-manager-metrics-monitor + namespace: system +spec: + endpoints: + - path: /metrics + port: https + scheme: https + bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + tlsConfig: + insecureSkipVerify: true + selector: + matchLabels: + control-plane: controller-manager diff --git a/config/rbac/auth_proxy_service.yaml b/config/rbac/auth_proxy_service.yaml index 6cf656be14..71f1797279 100644 --- a/config/rbac/auth_proxy_service.yaml +++ b/config/rbac/auth_proxy_service.yaml @@ -9,6 +9,7 @@ spec: ports: - name: https port: 8443 + protocol: TCP targetPort: https selector: control-plane: controller-manager diff --git a/config/rbac/cloudstorage_editor_role.yaml b/config/rbac/cloudstorage_editor_role.yaml new file mode 100644 index 0000000000..3cf5181caa --- /dev/null +++ b/config/rbac/cloudstorage_editor_role.yaml @@ -0,0 +1,31 @@ +# permissions for end users to edit cloudstorages. 
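Worth a word on the new config/prometheus manifests above: the ServiceMonitor scrapes the controller-manager metrics service's `https` port with the pod's service-account token, and anything registered with controller-runtime's global registry is exposed there. A hypothetical custom metric (the name `oadp_dpa_reconcile_total` is invented for illustration) would surface in that scrape like so:

```go
package controller

import (
	"github.com/prometheus/client_golang/prometheus"
	"sigs.k8s.io/controller-runtime/pkg/metrics"
)

// reconcileTotal is a hypothetical counter; incrementing it inside
// Reconcile would make each pass visible to the ServiceMonitor scrape.
var reconcileTotal = prometheus.NewCounter(prometheus.CounterOpts{
	Name: "oadp_dpa_reconcile_total",
	Help: "Total number of DataProtectionApplication reconcile passes.",
})

func init() {
	// metrics.Registry backs the endpoint the metrics Service points at.
	metrics.Registry.MustRegister(reconcileTotal)
}
```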
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: cloudstorage-editor-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: oadp-operator + app.kubernetes.io/part-of: oadp-operator + app.kubernetes.io/managed-by: kustomize + name: cloudstorage-editor-role +rules: +- apiGroups: + - oadp.openshift.io + resources: + - cloudstorages + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - oadp.openshift.io + resources: + - cloudstorages/status + verbs: + - get diff --git a/config/rbac/cloudstorage_viewer_role.yaml b/config/rbac/cloudstorage_viewer_role.yaml new file mode 100644 index 0000000000..0b6cde2979 --- /dev/null +++ b/config/rbac/cloudstorage_viewer_role.yaml @@ -0,0 +1,27 @@ +# permissions for end users to view cloudstorages. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: cloudstorage-viewer-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: oadp-operator + app.kubernetes.io/part-of: oadp-operator + app.kubernetes.io/managed-by: kustomize + name: cloudstorage-viewer-role +rules: +- apiGroups: + - oadp.openshift.io + resources: + - cloudstorages + verbs: + - get + - list + - watch +- apiGroups: + - oadp.openshift.io + resources: + - cloudstorages/status + verbs: + - get diff --git a/config/rbac/dataprotectionapplication_editor_role.yaml b/config/rbac/dataprotectionapplication_editor_role.yaml new file mode 100644 index 0000000000..ee4f27746c --- /dev/null +++ b/config/rbac/dataprotectionapplication_editor_role.yaml @@ -0,0 +1,31 @@ +# permissions for end users to edit dataprotectionapplications. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: dataprotectionapplication-editor-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: oadp-operator + app.kubernetes.io/part-of: oadp-operator + app.kubernetes.io/managed-by: kustomize + name: dataprotectionapplication-editor-role +rules: +- apiGroups: + - oadp.openshift.io + resources: + - dataprotectionapplications + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - oadp.openshift.io + resources: + - dataprotectionapplications/status + verbs: + - get diff --git a/config/rbac/dataprotectionapplication_viewer_role.yaml b/config/rbac/dataprotectionapplication_viewer_role.yaml new file mode 100644 index 0000000000..2ae3f163d5 --- /dev/null +++ b/config/rbac/dataprotectionapplication_viewer_role.yaml @@ -0,0 +1,27 @@ +# permissions for end users to view dataprotectionapplications. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: dataprotectionapplication-viewer-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: oadp-operator + app.kubernetes.io/part-of: oadp-operator + app.kubernetes.io/managed-by: kustomize + name: dataprotectionapplication-viewer-role +rules: +- apiGroups: + - oadp.openshift.io + resources: + - dataprotectionapplications + verbs: + - get + - list + - watch +- apiGroups: + - oadp.openshift.io + resources: + - dataprotectionapplications/status + verbs: + - get diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index d6ec7acd9d..7f99d89c78 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -1,44 +1,45 @@ - --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - creationTimestamp: null name: manager-role rules: - apiGroups: - - config.openshift.io + - "" resources: - - infrastructures + - configmaps + - endpoints + - events + - persistentvolumeclaims + - pods + - secrets + - serviceaccounts + - services verbs: + - create + - delete + - deletecollection - get - list - - watch -- apiGroups: - - cloudcredential.openshift.io - resources: - - credentialsrequests - verbs: - - create + - patch - update - - get + - watch - apiGroups: - - oadp.openshift.io + - "" resources: - - '*' + - namespaces verbs: - create - - delete - get - list - patch - update - watch - apiGroups: - - coordination.k8s.io - - corev1 + - apps resources: - - secrets + - daemonsets + - deployments verbs: - create - delete @@ -48,35 +49,38 @@ rules: - update - watch - apiGroups: - - oadp.openshift.io + - cloudcredential.openshift.io resources: - - buckets + - credentialsrequests verbs: - create - - delete - get - - list - - patch - update - - watch - apiGroups: - - oadp.openshift.io + - config.openshift.io resources: - - buckets/finalizers + - infrastructures verbs: - - update + - get + - list + - watch - apiGroups: - - oadp.openshift.io + - coordination.k8s.io + - corev1 resources: - - buckets/status + - secrets verbs: + - create + - delete - get + - list - patch - update + - watch - apiGroups: - - oadp.openshift.io + - monitoring.coreos.com resources: - - leases + - servicemonitors verbs: - create - delete @@ -86,7 +90,7 @@ rules: - update - watch - apiGroups: - - velero.io + - oadp.openshift.io resources: - '*' verbs: @@ -98,61 +102,61 @@ rules: - update - watch - apiGroups: - - security.openshift.io + - oadp.openshift.io resources: - - securitycontextconstraints + - cloudstorages verbs: - - list - - get - create - delete + - get + - list - patch - update - watch - apiGroups: - - security.openshift.io + - oadp.openshift.io resources: - - securitycontextconstraints + - cloudstorages/finalizers verbs: - - use - resourceNames: - - privileged + - update - apiGroups: - - "" + - oadp.openshift.io resources: - - secrets - - configmaps - - pods - - services - - serviceaccounts - - endpoints - - persistentvolumeclaims - - events + - cloudstorages/status verbs: - - list - get + - patch + - update +- apiGroups: + - oadp.openshift.io + resources: + - dataprotectionapplications + verbs: - create - delete - - deletecollection + - get + - list - patch - update - watch - apiGroups: - - "" + - oadp.openshift.io resources: - - namespaces + - dataprotectionapplications/finalizers + verbs: + - update +- apiGroups: + - oadp.openshift.io + resources: + - dataprotectionapplications/status verbs: - - list - get - - create - patch - update - - watch 
- apiGroups: - - apps + - route.openshift.io resources: - - deployments - - daemonsets + - routes verbs: - create - delete @@ -162,9 +166,9 @@ rules: - update - watch - apiGroups: - - route.openshift.io + - security.openshift.io resources: - - routes + - securitycontextconstraints verbs: - create - delete @@ -174,14 +178,22 @@ rules: - update - watch - apiGroups: - - monitoring.coreos.com + - security.openshift.io + resourceNames: + - privileged resources: - - servicemonitors + - securitycontextconstraints + verbs: + - use +- apiGroups: + - velero.io + resources: + - '*' verbs: - - get - create - - list - delete - - update + - get + - list - patch + - update - watch diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml index f0f7e27a29..68ad0a2277 100644 --- a/config/samples/kustomization.yaml +++ b/config/samples/kustomization.yaml @@ -1,6 +1,6 @@ -## Append samples you want in your CSV to this file as resources ## +## Append samples of your project ## resources: -- oadp_v1alpha1_dpa.yaml +- oadp_v1alpha1_dataprotectionapplication.yaml - backupstoragelocation.yaml - deletebackuprequest.yaml - downloadrequest.yaml diff --git a/config/samples/oadp_v1alpha1_dpa.yaml b/config/samples/oadp_v1alpha1_dataprotectionapplication.yaml similarity index 100% rename from config/samples/oadp_v1alpha1_dpa.yaml rename to config/samples/oadp_v1alpha1_dataprotectionapplication.yaml diff --git a/config/scorecard/patches/basic.config.yaml b/config/scorecard/patches/basic.config.yaml index ab07c06fdf..0da8e36a6c 100644 --- a/config/scorecard/patches/basic.config.yaml +++ b/config/scorecard/patches/basic.config.yaml @@ -4,7 +4,7 @@ entrypoint: - scorecard-test - basic-check-spec - image: quay.io/operator-framework/scorecard-test:v1.10.0 + image: quay.io/operator-framework/scorecard-test:v1.34.2 labels: suite: basic test: basic-check-spec-test diff --git a/config/scorecard/patches/olm.config.yaml b/config/scorecard/patches/olm.config.yaml index b1ba10d62f..6797ad842d 100644 --- a/config/scorecard/patches/olm.config.yaml +++ b/config/scorecard/patches/olm.config.yaml @@ -4,7 +4,7 @@ entrypoint: - scorecard-test - olm-bundle-validation - image: quay.io/operator-framework/scorecard-test:v1.10.0 + image: quay.io/operator-framework/scorecard-test:v1.34.2 labels: suite: olm test: olm-bundle-validation-test @@ -14,7 +14,7 @@ entrypoint: - scorecard-test - olm-crds-have-validation - image: quay.io/operator-framework/scorecard-test:v1.10.0 + image: quay.io/operator-framework/scorecard-test:v1.34.2 labels: suite: olm test: olm-crds-have-validation-test @@ -24,7 +24,7 @@ entrypoint: - scorecard-test - olm-crds-have-resources - image: quay.io/operator-framework/scorecard-test:v1.10.0 + image: quay.io/operator-framework/scorecard-test:v1.34.2 labels: suite: olm test: olm-crds-have-resources-test @@ -34,7 +34,7 @@ entrypoint: - scorecard-test - olm-spec-descriptors - image: quay.io/operator-framework/scorecard-test:v1.10.0 + image: quay.io/operator-framework/scorecard-test:v1.34.2 labels: suite: olm test: olm-spec-descriptors-test @@ -44,7 +44,7 @@ entrypoint: - scorecard-test - olm-status-descriptors - image: quay.io/operator-framework/scorecard-test:v1.10.0 + image: quay.io/operator-framework/scorecard-test:v1.34.2 labels: suite: olm test: olm-status-descriptors-test diff --git a/config/velero/dpa_editor_role.yaml b/config/velero/dpa_editor_role.yaml deleted file mode 100644 index bf0b59406a..0000000000 --- a/config/velero/dpa_editor_role.yaml +++ /dev/null @@ -1,24 +0,0 @@ -# permissions for 
end users to edit veleroes. -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: dpa-editor-role -rules: -- apiGroups: - - oadp.openshift.io - resources: - - dataprotectionapplications - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - oadp.openshift.io - resources: - - dataprotectionapplications/status - verbs: - - get diff --git a/config/velero/dpa_viewer_role.yaml b/config/velero/dpa_viewer_role.yaml deleted file mode 100644 index 4264b63dad..0000000000 --- a/config/velero/dpa_viewer_role.yaml +++ /dev/null @@ -1,20 +0,0 @@ -# permissions for end users to view veleroes. -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: dpa-viewer-role -rules: -- apiGroups: - - oadp.openshift.io - resources: - - dataprotectionapplications - verbs: - - get - - list - - watch -- apiGroups: - - oadp.openshift.io - resources: - - dataprotectionapplications/status - verbs: - - get diff --git a/config/velero/kustomization.yaml b/config/velero/kustomization.yaml index d06650c43e..70f09e93e1 100644 --- a/config/velero/kustomization.yaml +++ b/config/velero/kustomization.yaml @@ -3,5 +3,3 @@ resources: - velero-service_account.yaml - velero-role.yaml - velero-role_binding.yaml -- dpa_editor_role.yaml -- dpa_viewer_role.yaml diff --git a/docs/developer/update_operator_sdk.md b/docs/developer/update_operator_sdk.md new file mode 100644 index 0000000000..6c41bc91b4 --- /dev/null +++ b/docs/developer/update_operator_sdk.md @@ -0,0 +1,39 @@ +## Upgrade Operator SDK version + +To upgrade the Operator SDK version, scaffold the project structure twice, in two separate folders: once with the current Operator SDK version and once with the upgrade version (the Operator SDK executables are available at https://github.com/operator-framework/operator-sdk/releases), running the same commands that were originally used to scaffold the project. + +The project was generated using Operator SDK version v1.34.2, running the following commands: +```sh +operator-sdk init \ + --project-name=oadp-operator \ + --repo=github.com/openshift/oadp-operator \ + --domain=openshift.io +operator-sdk create api \ + --group oadp \ + --version v1alpha1 \ + --kind DataProtectionApplication \ + --resource --controller +operator-sdk create api \ + --group oadp \ + --version v1alpha1 \ + --kind CloudStorage \ + --resource --controller +``` +> **NOTE:** The plugin and project versions, as well as the project name, repo, and domain, are stored in the [PROJECT](../../PROJECT) file. + +Then generate a `diff` file from the two folders and apply the changes to the project code. + +Example: +```sh +mkdir current +mkdir new +cd current +# Run the Operator SDK commands above, pointing to the Operator SDK executable with the current version +cd .. +cd new +# Run the Operator SDK commands above, pointing to the Operator SDK executable with the new version +cd ..
+diff -ruN current new > operator-sdk-upgrade.diff +patch -p1 --verbose -d ./ -i operator-sdk-upgrade.diff +# Resolve possible conflicts +``` \ No newline at end of file diff --git a/controllers/bsl.go b/internal/controller/bsl.go similarity index 92% rename from controllers/bsl.go rename to internal/controller/bsl.go index 29f864ff63..5ab09e8ace 100644 --- a/controllers/bsl.go +++ b/internal/controller/bsl.go @@ -1,4 +1,4 @@ -package controllers +package controller import ( "errors" @@ -18,7 +18,7 @@ import ( "github.com/openshift/oadp-operator/pkg/storage/aws" ) -func (r *DPAReconciler) ValidateBackupStorageLocations() (bool, error) { +func (r *DataProtectionApplicationReconciler) ValidateBackupStorageLocations() (bool, error) { // Ensure BSL is a valid configuration // First, check for provider and then call functions based on the cloud provider for each backupstoragelocation configured dpa := r.dpa @@ -98,7 +98,7 @@ func (r *DPAReconciler) ValidateBackupStorageLocations() (bool, error) { return true, nil } -func (r *DPAReconciler) ReconcileBackupStorageLocations(log logr.Logger) (bool, error) { +func (r *DataProtectionApplicationReconciler) ReconcileBackupStorageLocations(log logr.Logger) (bool, error) { dpa := r.dpa dpaBSLNames := []string{} @@ -229,7 +229,7 @@ func (r *DPAReconciler) ReconcileBackupStorageLocations(log logr.Logger) (bool, return true, nil } -func (r *DPAReconciler) UpdateCredentialsSecretLabels(secretName string, namespace string, dpaName string) (bool, error) { +func (r *DataProtectionApplicationReconciler) UpdateCredentialsSecretLabels(secretName string, namespace string, dpaName string) (bool, error) { var secret corev1.Secret secret, err := r.getProviderSecret(secretName) if err != nil { @@ -262,7 +262,7 @@ func (r *DPAReconciler) UpdateCredentialsSecretLabels(secretName string, namespa return true, nil } -func (r *DPAReconciler) updateBSLFromSpec(bsl *velerov1.BackupStorageLocation, bslSpec velerov1.BackupStorageLocationSpec) error { +func (r *DataProtectionApplicationReconciler) updateBSLFromSpec(bsl *velerov1.BackupStorageLocation, bslSpec velerov1.BackupStorageLocationSpec) error { // Set controller reference to Velero controller err := controllerutil.SetControllerReference(r.dpa, bsl, r.Scheme) if err != nil { @@ -312,7 +312,7 @@ func (r *DPAReconciler) updateBSLFromSpec(bsl *velerov1.BackupStorageLocation, b return nil } -func (r *DPAReconciler) validateAWSBackupStorageLocation(bslSpec velerov1.BackupStorageLocationSpec) error { +func (r *DataProtectionApplicationReconciler) validateAWSBackupStorageLocation(bslSpec velerov1.BackupStorageLocationSpec) error { // validate provider plugin and secret err := r.validateProviderPluginAndSecret(bslSpec) if err != nil { @@ -346,7 +346,7 @@ func (r *DPAReconciler) validateAWSBackupStorageLocation(bslSpec velerov1.Backup return nil } -func (r *DPAReconciler) validateAzureBackupStorageLocation(bslSpec velerov1.BackupStorageLocationSpec) error { +func (r *DataProtectionApplicationReconciler) validateAzureBackupStorageLocation(bslSpec velerov1.BackupStorageLocationSpec) error { // validate provider plugin and secret err := r.validateProviderPluginAndSecret(bslSpec) if err != nil { @@ -377,7 +377,7 @@ func (r *DPAReconciler) validateAzureBackupStorageLocation(bslSpec velerov1.Back return nil } -func (r *DPAReconciler) validateGCPBackupStorageLocation(bslSpec velerov1.BackupStorageLocationSpec) error { +func (r *DataProtectionApplicationReconciler) validateGCPBackupStorageLocation(bslSpec 
velerov1.BackupStorageLocationSpec) error { // validate provider plugin and secret err := r.validateProviderPluginAndSecret(bslSpec) if err != nil { @@ -408,7 +408,7 @@ func pluginExistsInVeleroCR(configuredPlugins []oadpv1alpha1.DefaultPlugin, expe return false } -func (r *DPAReconciler) validateProviderPluginAndSecret(bslSpec velerov1.BackupStorageLocationSpec) error { +func (r *DataProtectionApplicationReconciler) validateProviderPluginAndSecret(bslSpec velerov1.BackupStorageLocationSpec) error { if r.dpa.Spec.Configuration.Velero.HasFeatureFlag("no-secret") { return nil } @@ -428,7 +428,7 @@ func (r *DPAReconciler) validateProviderPluginAndSecret(bslSpec velerov1.BackupS return nil } -func (r *DPAReconciler) ensureBackupLocationHasVeleroOrCloudStorage(bsl *oadpv1alpha1.BackupLocation) error { +func (r *DataProtectionApplicationReconciler) ensureBackupLocationHasVeleroOrCloudStorage(bsl *oadpv1alpha1.BackupLocation) error { if bsl.CloudStorage == nil && bsl.Velero == nil { return fmt.Errorf("BackupLocation must have velero or bucket configuration") } @@ -439,7 +439,7 @@ func (r *DPAReconciler) ensureBackupLocationHasVeleroOrCloudStorage(bsl *oadpv1a return nil } -func (r *DPAReconciler) ensurePrefixWhenBackupImages(bsl *oadpv1alpha1.BackupLocation) error { +func (r *DataProtectionApplicationReconciler) ensurePrefixWhenBackupImages(bsl *oadpv1alpha1.BackupLocation) error { if bsl.Velero != nil && bsl.Velero.ObjectStorage != nil && bsl.Velero.ObjectStorage.Prefix == "" && r.dpa.BackupImages() { return fmt.Errorf("BackupLocation must have velero prefix when backupImages is not set to false") @@ -452,7 +452,7 @@ func (r *DPAReconciler) ensurePrefixWhenBackupImages(bsl *oadpv1alpha1.BackupLoc return nil } -func (r *DPAReconciler) ensureSecretDataExists(bsl *oadpv1alpha1.BackupLocation) error { +func (r *DataProtectionApplicationReconciler) ensureSecretDataExists(bsl *oadpv1alpha1.BackupLocation) error { // Check if the Velero feature flag 'no-secret' is not set if !(r.dpa.Spec.Configuration.Velero.HasFeatureFlag("no-secret")) { // Check if the user specified credential under velero diff --git a/controllers/bsl_test.go b/internal/controller/bsl_test.go similarity index 99% rename from controllers/bsl_test.go rename to internal/controller/bsl_test.go index 91c676b563..de69fbd7c8 100644 --- a/controllers/bsl_test.go +++ b/internal/controller/bsl_test.go @@ -1,4 +1,4 @@ -package controllers +package controller import ( "context" @@ -1493,7 +1493,7 @@ func TestDPAReconciler_ValidateBackupStorageLocations(t *testing.T) { if err != nil { t.Errorf("error in creating fake client, likely programmer error") } - r := &DPAReconciler{ + r := &DataProtectionApplicationReconciler{ Client: fakeClient, Scheme: fakeClient.Scheme(), Log: logr.Discard(), @@ -1779,7 +1779,7 @@ func TestDPAReconciler_updateBSLFromSpec(t *testing.T) { if err != nil { t.Errorf("error getting scheme for the test: %#v", err) } - r := &DPAReconciler{ + r := &DataProtectionApplicationReconciler{ Scheme: scheme, dpa: tt.dpa, } @@ -1912,7 +1912,7 @@ func TestDPAReconciler_ensureBackupLocationHasVeleroOrCloudStorage(t *testing.T) if err != nil { t.Errorf("error getting scheme for the test: %#v", err) } - r := &DPAReconciler{ + r := &DataProtectionApplicationReconciler{ Scheme: scheme, dpa: tt.dpa, } @@ -2087,7 +2087,7 @@ func TestDPAReconciler_ensurePrefixWhenBackupImages(t *testing.T) { if err != nil { t.Errorf("error getting scheme for the test: %#v", err) } - r := &DPAReconciler{ + r := &DataProtectionApplicationReconciler{ Scheme: 
scheme, dpa: tt.dpa, } @@ -2201,7 +2201,7 @@ func TestDPAReconciler_ReconcileBackupStorageLocations(t *testing.T) { if err != nil { t.Errorf("error in creating fake client, likely programmer error") } - r := &DPAReconciler{ + r := &DataProtectionApplicationReconciler{ Client: fakeClient, Scheme: fakeClient.Scheme(), Log: logr.Discard(), @@ -2553,7 +2553,7 @@ func TestDPAReconciler_ReconcileBackupStorageLocations(t *testing.T) { if err != nil { t.Errorf("error in creating fake client, likely programmer error") } - r := &DPAReconciler{ + r := &DataProtectionApplicationReconciler{ Client: fakeClient, Scheme: fakeClient.Scheme(), Log: logr.Discard(), diff --git a/controllers/bucket_controller.go b/internal/controller/cloudstorage_controller.go similarity index 85% rename from controllers/bucket_controller.go rename to internal/controller/cloudstorage_controller.go index d050df83ec..de37f2d076 100644 --- a/controllers/bucket_controller.go +++ b/internal/controller/cloudstorage_controller.go @@ -1,4 +1,20 @@ -package controllers +/* +Copyright 2021. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller import ( "context" @@ -31,31 +47,21 @@ const ( oadpCloudStorageDeleteAnnotation = "oadp.openshift.io/cloudstorage-delete" ) -// VeleroReconciler reconciles a Velero object -type BucketReconciler struct { +// CloudStorageReconciler reconciles a CloudStorage object +type CloudStorageReconciler struct { Client client.Client Scheme *runtime.Scheme Log logr.Logger EventRecorder record.EventRecorder } -//TODO!!! FIX THIS!!!! - -//+kubebuilder:rbac:groups=oadp.openshift.io,resources=buckets,verbs=get;list;watch;create;update;patch;delete -//+kubebuilder:rbac:groups=corev1,resources=secrets,verbs=get;list;watch;create;update;patch;delete -//+kubebuilder:rbac:groups=oadp.openshift.io,resources=buckets/status,verbs=get;update;patch -//+kubebuilder:rbac:groups=oadp.openshift.io,resources=buckets/finalizers,verbs=update +//+kubebuilder:rbac:groups=oadp.openshift.io,resources=cloudstorages,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=oadp.openshift.io,resources=cloudstorages/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=oadp.openshift.io,resources=cloudstorages/finalizers,verbs=update // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state. -// TODO(user): Modify the Reconcile function to compare the state specified by -// the Velero object against the actual cluster state, and then -// perform operations to make the cluster state reflect the state specified by -// the user. 
-// -// For more details, check Reconcile and its Result here: -// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.9.2/pkg/reconcile -func (b BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { +func (b CloudStorageReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { b.Log = log.FromContext(ctx) logger := b.Log.WithValues("bucket", req.NamespacedName) result := ctrl.Result{} @@ -159,8 +165,7 @@ func (b BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl } // SetupWithManager sets up the controller with the Manager. -func (b *BucketReconciler) SetupWithManager(mgr ctrl.Manager) error { - +func (b *CloudStorageReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&oadpv1alpha1.CloudStorage{}). WithEventFilter(bucketPredicate()). @@ -206,7 +211,7 @@ func removeKey(slice []string, s string) []string { return slice } -func (b *BucketReconciler) WaitForSecret(namespace, name string) (*corev1.Secret, error) { +func (b *CloudStorageReconciler) WaitForSecret(namespace, name string) (*corev1.Secret, error) { // set a timeout of 10 minutes timeout := 10 * time.Minute diff --git a/controllers/common.go b/internal/controller/common.go similarity index 99% rename from controllers/common.go rename to internal/controller/common.go index e5e4987c45..51f7cac4c3 100644 --- a/controllers/common.go +++ b/internal/controller/common.go @@ -1,4 +1,4 @@ -package controllers +package controller import ( corev1 "k8s.io/api/core/v1" diff --git a/controllers/dpa_controller.go b/internal/controller/dataprotectionapplication_controller.go similarity index 79% rename from controllers/dpa_controller.go rename to internal/controller/dataprotectionapplication_controller.go index 5396286d4c..9c1aaccb4b 100644 --- a/controllers/dpa_controller.go +++ b/internal/controller/dataprotectionapplication_controller.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package controllers +package controller import ( "context" @@ -42,8 +42,8 @@ import ( oadpclient "github.com/openshift/oadp-operator/pkg/client" ) -// DPAReconciler reconciles a Velero object -type DPAReconciler struct { +// DataProtectionApplicationReconciler reconciles a DataProtectionApplication object +type DataProtectionApplicationReconciler struct { client.Client Scheme *runtime.Scheme Log logr.Logger @@ -56,25 +56,26 @@ type DPAReconciler struct { var debugMode = os.Getenv("DEBUG") == "true" -//TODO!!! FIX THIS!!!! 
- -//+kubebuilder:rbac:groups=*,resources=*,verbs=* //+kubebuilder:rbac:groups=oadp.openshift.io,resources=dataprotectionapplications,verbs=get;list;watch;create;update;patch;delete -//+kubebuilder:rbac:groups=security.openshift.io,resources=securitycontextconstraints,verbs=use,resourceNames=privileged;velero-privileged -//+kubebuilder:rbac:groups=velero.io,resources=backups;restores;backupstoragelocations;volumesnapshotlocations,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups=oadp.openshift.io,resources=dataprotectionapplications/status,verbs=get;update;patch //+kubebuilder:rbac:groups=oadp.openshift.io,resources=dataprotectionapplications/finalizers,verbs=update +//+kubebuilder:rbac:groups=config.openshift.io,resources=infrastructures,verbs=get;list;watch +//+kubebuilder:rbac:groups=cloudcredential.openshift.io,resources=credentialsrequests,verbs=get;create;update +//+kubebuilder:rbac:groups=oadp.openshift.io,resources=*,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=corev1;coordination.k8s.io,resources=secrets,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=velero.io,resources=*,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=security.openshift.io,resources=securitycontextconstraints,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=security.openshift.io,resources=securitycontextconstraints,verbs=use,resourceNames=privileged +//+kubebuilder:rbac:groups="",resources=secrets;configmaps;pods;services;serviceaccounts;endpoints;persistentvolumeclaims;events,verbs=get;list;watch;create;update;patch;delete;deletecollection +//+kubebuilder:rbac:groups="",resources=namespaces,verbs=get;list;watch;create;update;patch +//+kubebuilder:rbac:groups=apps,resources=deployments;daemonsets,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=route.openshift.io,resources=routes,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=monitoring.coreos.com,resources=servicemonitors,verbs=get;list;watch;create;update;patch;delete + // Reconcile is part of the main Kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state. -// TODO(user): Modify the Reconcile function to compare the state specified by -// the DataProtectionApplication object against the actual cluster state, and then -// perform operations to make the cluster state reflect the state specified by -// the user. -// -// For more details, check Reconcile and its Result here: -// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.9.2/pkg/reconcile -func (r *DPAReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { +func (r *DataProtectionApplicationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { r.Log = log.FromContext(ctx) logger := r.Log.WithValues("dpa", req.NamespacedName) result := ctrl.Result{} @@ -137,7 +138,7 @@ func (r *DPAReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.R } // SetupWithManager sets up the controller with the Manager. -func (r *DPAReconciler) SetupWithManager(mgr ctrl.Manager) error { +func (r *DataProtectionApplicationReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&oadpv1alpha1.DataProtectionApplication{}). Owns(&appsv1.Deployment{}). 
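The scoped markers above are the source of truth for the narrowed config/rbac/role.yaml earlier in this diff: `make manifests` runs controller-gen, which compiles each `//+kubebuilder:rbac` comment into one PolicyRule, replacing the old blanket `groups=*,resources=*,verbs=*` grant. A condensed sketch of the mapping (two markers only; the real controller carries the full list shown above):

```go
// Illustrative fragment, not a file in the patch.
package controller

// On `make manifests`, controller-gen turns each marker below into one
// PolicyRule in config/rbac/role.yaml; the first marker, for example,
// produces the route.openshift.io/routes entry in the regenerated role.
//
//+kubebuilder:rbac:groups=route.openshift.io,resources=routes,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=monitoring.coreos.com,resources=servicemonitors,verbs=get;list;watch;create;update;patch;delete
```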
@@ -153,8 +154,7 @@ func (r *DPAReconciler) SetupWithManager(mgr ctrl.Manager) error { Complete(r) } -type labelHandler struct { -} +type labelHandler struct{} func (l *labelHandler) Create(ctx context.Context, evt event.CreateEvent, q workqueue.RateLimitingInterface) { // check for the label & add it to the queue diff --git a/controllers/monitor.go b/internal/controller/monitor.go similarity index 87% rename from controllers/monitor.go rename to internal/controller/monitor.go index a7c41b1f5b..52ef29a1f6 100644 --- a/controllers/monitor.go +++ b/internal/controller/monitor.go @@ -1,4 +1,4 @@ -package controllers +package controller import ( "fmt" @@ -10,7 +10,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" ) -func (r *DPAReconciler) ReconcileVeleroMetricsSVC(log logr.Logger) (bool, error) { +func (r *DataProtectionApplicationReconciler) ReconcileVeleroMetricsSVC(log logr.Logger) (bool, error) { svc := corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "openshift-adp-velero-metrics-svc", @@ -39,7 +39,7 @@ func (r *DPAReconciler) ReconcileVeleroMetricsSVC(log logr.Logger) (bool, error) return true, nil } -func (r *DPAReconciler) updateVeleroMetricsSVC(svc *corev1.Service) error { +func (r *DataProtectionApplicationReconciler) updateVeleroMetricsSVC(svc *corev1.Service) error { // Setting controller owner reference on the metrics svc err := controllerutil.SetControllerReference(r.dpa, svc, r.Scheme) if err != nil { diff --git a/controllers/monitor_test.go b/internal/controller/monitor_test.go similarity index 98% rename from controllers/monitor_test.go rename to internal/controller/monitor_test.go index 7f18f93255..d65952ec2c 100644 --- a/controllers/monitor_test.go +++ b/internal/controller/monitor_test.go @@ -1,4 +1,4 @@ -package controllers +package controller import ( "fmt" @@ -148,7 +148,7 @@ func TestDPAReconciler_updateVeleroMetricsSVC(t *testing.T) { if err != nil { t.Errorf("error in creating fake client, likely programmer error") } - r := &DPAReconciler{ + r := &DataProtectionApplicationReconciler{ Client: fakeClient, Scheme: fakeClient.Scheme(), Log: logr.Discard(), diff --git a/controllers/nodeagent.go b/internal/controller/nodeagent.go similarity index 95% rename from controllers/nodeagent.go rename to internal/controller/nodeagent.go index 49a3dce876..4a8a4bf3fb 100644 --- a/controllers/nodeagent.go +++ b/internal/controller/nodeagent.go @@ -1,4 +1,4 @@ -package controllers +package controller import ( "context" @@ -92,7 +92,7 @@ func getPluginsHostPath(platformType string) string { } } -func getNodeAgentObjectMeta(r *DPAReconciler) metav1.ObjectMeta { +func getNodeAgentObjectMeta(r *DataProtectionApplicationReconciler) metav1.ObjectMeta { return metav1.ObjectMeta{ Name: common.NodeAgent, Namespace: r.NamespacedName.Namespace, @@ -100,7 +100,7 @@ func getNodeAgentObjectMeta(r *DPAReconciler) metav1.ObjectMeta { } } -func (r *DPAReconciler) ReconcileNodeAgentDaemonset(log logr.Logger) (bool, error) { +func (r *DataProtectionApplicationReconciler) ReconcileNodeAgentDaemonset(log logr.Logger) (bool, error) { dpa := r.dpa var deleteDaemonSet bool = true // Define "static" portion of daemonset @@ -212,7 +212,7 @@ func (r *DPAReconciler) ReconcileNodeAgentDaemonset(log logr.Logger) (bool, erro * ds - pointer to daemonset with objectMeta defined * returns: (pointer to daemonset, nil) if successful */ -func (r *DPAReconciler) buildNodeAgentDaemonset(ds *appsv1.DaemonSet) (*appsv1.DaemonSet, error) { +func (r *DataProtectionApplicationReconciler) 
diff --git a/controllers/nodeagent.go b/internal/controller/nodeagent.go
similarity index 95%
rename from controllers/nodeagent.go
rename to internal/controller/nodeagent.go
index 49a3dce876..4a8a4bf3fb 100644
--- a/controllers/nodeagent.go
+++ b/internal/controller/nodeagent.go
@@ -1,4 +1,4 @@
-package controllers
+package controller
 
 import (
 	"context"
@@ -92,7 +92,7 @@ func getPluginsHostPath(platformType string) string {
 	}
 }
 
-func getNodeAgentObjectMeta(r *DPAReconciler) metav1.ObjectMeta {
+func getNodeAgentObjectMeta(r *DataProtectionApplicationReconciler) metav1.ObjectMeta {
 	return metav1.ObjectMeta{
 		Name:      common.NodeAgent,
 		Namespace: r.NamespacedName.Namespace,
@@ -100,7 +100,7 @@ func getNodeAgentObjectMeta(r *DPAReconciler) metav1.ObjectMeta {
 	}
 }
 
-func (r *DPAReconciler) ReconcileNodeAgentDaemonset(log logr.Logger) (bool, error) {
+func (r *DataProtectionApplicationReconciler) ReconcileNodeAgentDaemonset(log logr.Logger) (bool, error) {
 	dpa := r.dpa
 	var deleteDaemonSet bool = true
 	// Define "static" portion of daemonset
@@ -212,7 +212,7 @@ func (r *DPAReconciler) ReconcileNodeAgentDaemonset(log logr.Logger) (bool, erro
 * ds - pointer to daemonset with objectMeta defined
 * returns: (pointer to daemonset, nil) if successful
 */
-func (r *DPAReconciler) buildNodeAgentDaemonset(ds *appsv1.DaemonSet) (*appsv1.DaemonSet, error) {
+func (r *DataProtectionApplicationReconciler) buildNodeAgentDaemonset(ds *appsv1.DaemonSet) (*appsv1.DaemonSet, error) {
 	dpa := r.dpa
 	if dpa == nil {
 		return nil, fmt.Errorf("dpa cannot be nil")
@@ -255,7 +255,7 @@ func (r *DPAReconciler) buildNodeAgentDaemonset(ds *appsv1.DaemonSet) (*appsv1.D
 	return r.customizeNodeAgentDaemonset(ds)
 }
 
-func (r *DPAReconciler) customizeNodeAgentDaemonset(ds *appsv1.DaemonSet) (*appsv1.DaemonSet, error) {
+func (r *DataProtectionApplicationReconciler) customizeNodeAgentDaemonset(ds *appsv1.DaemonSet) (*appsv1.DaemonSet, error) {
 	dpa := r.dpa
 	if dpa.Spec.Configuration == nil || (dpa.Spec.Configuration.Restic == nil && dpa.Spec.Configuration.NodeAgent == nil) {
 		// if restic and nodeAgent are not configured, therefore not enabled, return early.
@@ -453,7 +453,7 @@ func (r *DPAReconciler) customizeNodeAgentDaemonset(ds *appsv1.DaemonSet) (*apps
 	return ds, nil
 }
 
-func (r *DPAReconciler) ReconcileFsRestoreHelperConfig(log logr.Logger) (bool, error) {
+func (r *DataProtectionApplicationReconciler) ReconcileFsRestoreHelperConfig(log logr.Logger) (bool, error) {
 	fsRestoreHelperCM := corev1.ConfigMap{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      FsRestoreHelperCM,
@@ -496,7 +496,7 @@ func (r *DPAReconciler) ReconcileFsRestoreHelperConfig(log logr.Logger) (bool, e
 	return true, nil
 }
 
-func (r *DPAReconciler) updateFsRestoreHelperCM(fsRestoreHelperCM *corev1.ConfigMap) error {
+func (r *DataProtectionApplicationReconciler) updateFsRestoreHelperCM(fsRestoreHelperCM *corev1.ConfigMap) error {
 
 	// Setting controller owner reference on the FS restore helper CM
 	err := controllerutil.SetControllerReference(r.dpa, fsRestoreHelperCM, r.Scheme)
@@ -518,7 +518,7 @@ func (r *DPAReconciler) updateFsRestoreHelperCM(fsRestoreHelperCM *corev1.Config
 }
 
 // getPlatformType fetches the cluster infrastructure object and returns the platform type.
-func (r *DPAReconciler) getPlatformType() (string, error) {
+func (r *DataProtectionApplicationReconciler) getPlatformType() (string, error) {
 	infra := &configv1.Infrastructure{}
 	key := types.NamespacedName{Name: Cluster}
 	if err := r.Get(r.Context, key, infra); err != nil {
diff --git a/controllers/nodeagent_test.go b/internal/controller/nodeagent_test.go
similarity index 99%
rename from controllers/nodeagent_test.go
rename to internal/controller/nodeagent_test.go
index 52634bd31c..8bebec9388 100644
--- a/controllers/nodeagent_test.go
+++ b/internal/controller/nodeagent_test.go
@@ -1,4 +1,4 @@
-package controllers
+package controller
 
 import (
 	"context"
@@ -151,7 +151,7 @@ var _ = ginkgo.Describe("Test ReconcileNodeAgentDaemonSet function", func() {
 			os.Setenv(scenario.envVar.Name, scenario.envVar.Value)
 
 			event := record.NewFakeRecorder(5)
-			r := &DPAReconciler{
+			r := &DataProtectionApplicationReconciler{
 				Client:  k8sClient,
 				Scheme:  testEnv.Scheme,
 				Context: ctx,
@@ -1154,7 +1154,7 @@ func TestDPAReconciler_buildNodeAgentDaemonset(t *testing.T) {
 			if err != nil {
 				t.Errorf("error in creating fake client, likely programmer error")
 			}
-			r := &DPAReconciler{Client: fakeClient, dpa: test.dpa}
+			r := &DataProtectionApplicationReconciler{Client: fakeClient, dpa: test.dpa}
 			if result, err := r.buildNodeAgentDaemonset(test.nodeAgentDaemonSet); err != nil {
 				if test.errorMessage != err.Error() {
 					t.Errorf("buildNodeAgentDaemonset() error = %v, errorMessage %v", err, test.errorMessage)
@@ -1219,7 +1219,7 @@ func TestDPAReconciler_updateFsRestoreHelperCM(t *testing.T) {
 			t.Errorf("error in creating fake client, likely programmer error")
 		}
 		t.Run(tt.name, func(t *testing.T) {
-			r := &DPAReconciler{
+			r := &DataProtectionApplicationReconciler{
 				Client: fakeClient,
 				Scheme: fakeClient.Scheme(),
 				Log:    logr.Discard(),
@@ -1293,7 +1293,7 @@ func TestDPAReconciler_getPlatformType(t *testing.T) {
 			t.Errorf("error in creating fake client, likely programmer error")
 		}
 		t.Run(tt.name, func(t *testing.T) {
-			r := &DPAReconciler{
+			r := &DataProtectionApplicationReconciler{
 				Client: fakeClient,
 				Scheme: fakeClient.Scheme(),
 				Log:    logr.Discard(),
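Note: `getPlatformType` (renamed above but otherwise untouched) reads the cluster-scoped `Infrastructure` singleton; the hunk shows the `Get` on `types.NamespacedName{Name: Cluster}`. A minimal sketch of the same lookup outside the reconciler, assuming the singleton is named `"cluster"` (the value behind the `Cluster` constant is not shown in this patch) and that the platform comes from `status.platformStatus`:

```go
package sketch

import (
	"context"

	configv1 "github.com/openshift/api/config/v1"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// platformType fetches the cluster-scoped Infrastructure object and reports
// the platform (AWS, Azure, GCP, ...). Error handling mirrors the hunk above;
// the nil guard on PlatformStatus is a defensive addition of this sketch.
func platformType(ctx context.Context, c client.Client) (string, error) {
	infra := &configv1.Infrastructure{}
	if err := c.Get(ctx, types.NamespacedName{Name: "cluster"}, infra); err != nil {
		return "", err
	}
	if infra.Status.PlatformStatus == nil {
		return "", nil
	}
	return string(infra.Status.PlatformStatus.Type), nil
}
```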
diff --git a/controllers/nonadmin_controller.go b/internal/controller/nonadmin_controller.go
similarity index 95%
rename from controllers/nonadmin_controller.go
rename to internal/controller/nonadmin_controller.go
index 7d93fe21de..9405b86a7b 100644
--- a/controllers/nonadmin_controller.go
+++ b/internal/controller/nonadmin_controller.go
@@ -1,4 +1,4 @@
-package controllers
+package controller
 
 import (
 	"fmt"
@@ -48,7 +48,7 @@ var (
 	dpaRestoreSpecResourceVersion = ""
 )
 
-func (r *DPAReconciler) ReconcileNonAdminController(log logr.Logger) (bool, error) {
+func (r *DataProtectionApplicationReconciler) ReconcileNonAdminController(log logr.Logger) (bool, error) {
 	nonAdminDeployment := &appsv1.Deployment{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      nonAdminObjectName,
@@ -123,7 +123,7 @@ func (r *DPAReconciler) ReconcileNonAdminController(log logr.Logger) (bool, erro
 	return true, nil
 }
 
-func (r *DPAReconciler) buildNonAdminDeployment(deploymentObject *appsv1.Deployment) error {
+func (r *DataProtectionApplicationReconciler) buildNonAdminDeployment(deploymentObject *appsv1.Deployment) error {
 	nonAdminImage := r.getNonAdminImage()
 	imagePullPolicy, err := common.GetImagePullPolicy(r.dpa.Spec.ImagePullPolicy, nonAdminImage)
 	if err != nil {
@@ -221,14 +221,14 @@ func ensureRequiredSpecs(deploymentObject *appsv1.Deployment, dpa *oadpv1alpha1.
 	return nil
 }
 
-func (r *DPAReconciler) checkNonAdminEnabled() bool {
+func (r *DataProtectionApplicationReconciler) checkNonAdminEnabled() bool {
 	if r.dpa.Spec.NonAdmin != nil && r.dpa.Spec.NonAdmin.Enable != nil {
 		return *r.dpa.Spec.NonAdmin.Enable && r.dpa.Spec.UnsupportedOverrides[oadpv1alpha1.TechPreviewAck] == TrueVal
 	}
 	return false
 }
 
-func (r *DPAReconciler) getNonAdminImage() string {
+func (r *DataProtectionApplicationReconciler) getNonAdminImage() string {
 	dpa := r.dpa
 	unsupportedOverride := dpa.Spec.UnsupportedOverrides[oadpv1alpha1.NonAdminControllerImageKey]
 	if unsupportedOverride != "" {
diff --git a/controllers/nonadmin_controller_test.go b/internal/controller/nonadmin_controller_test.go
similarity index 98%
rename from controllers/nonadmin_controller_test.go
rename to internal/controller/nonadmin_controller_test.go
index 1adfe651ad..51bfde44d2 100644
--- a/controllers/nonadmin_controller_test.go
+++ b/internal/controller/nonadmin_controller_test.go
@@ -1,4 +1,4 @@
-package controllers
+package controller
 
 import (
 	"context"
@@ -102,7 +102,7 @@ func runReconcileNonAdminControllerTest(
 	os.Setenv("RELATED_IMAGE_NON_ADMIN_CONTROLLER", envVarValue)
 
 	event := record.NewFakeRecorder(5)
-	r := &DPAReconciler{
+	r := &DataProtectionApplicationReconciler{
 		Client:  k8sClient,
 		Scheme:  testEnv.Scheme,
 		Context: ctx,
@@ -242,7 +242,7 @@ var _ = ginkgo.Describe("Test ReconcileNonAdminController function", func() {
 })
 
 func TestDPAReconcilerBuildNonAdminDeployment(t *testing.T) {
-	r := &DPAReconciler{dpa: &oadpv1alpha1.DataProtectionApplication{
+	r := &DataProtectionApplicationReconciler{dpa: &oadpv1alpha1.DataProtectionApplication{
 		Spec: oadpv1alpha1.DataProtectionApplicationSpec{
 			NonAdmin: &oadpv1alpha1.NonAdmin{
 				Enable: ptr.To(true),
@@ -464,7 +464,7 @@ func TestDPAReconcilerCheckNonAdminEnabled(t *testing.T) {
 	}
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
-			r := &DPAReconciler{dpa: test.dpa}
+			r := &DataProtectionApplicationReconciler{dpa: test.dpa}
 			result := r.checkNonAdminEnabled()
 			if result != test.result {
 				t.Errorf("Results differ: got '%v' but expected '%v'", result, test.result)
@@ -511,7 +511,7 @@ func TestDPAReconcilerGetNonAdminImage(t *testing.T) {
 	}
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
-			r := &DPAReconciler{dpa: test.dpa}
+			r := &DataProtectionApplicationReconciler{dpa: test.dpa}
 			if len(test.env) > 0 {
 				t.Setenv("RELATED_IMAGE_NON_ADMIN_CONTROLLER", test.env)
 			}
diff --git a/controllers/predicate.go b/internal/controller/predicate.go
similarity index 93%
rename from controllers/predicate.go
rename to internal/controller/predicate.go
index 4ed1adad0b..e187208a04 100644
--- a/controllers/predicate.go
+++ b/internal/controller/predicate.go
@@ -1,4 +1,4 @@
-package controllers
+package controller
 
 import (
 	"k8s.io/apimachinery/pkg/runtime"
@@ -41,7 +41,7 @@ func isObjectOurs(scheme *runtime.Scheme, object client.Object) bool {
 		return false
 	}
 	gvk := objGVKs[0]
-	if gvk.Group == oadpv1alpha1.GroupVersion.Group && gvk.Version == oadpv1alpha1.GroupVersion.Version && gvk.Kind == oadpv1alpha1.Kind {
+	if gvk.Group == oadpv1alpha1.GroupVersion.Group && gvk.Version == oadpv1alpha1.GroupVersion.Version && gvk.Kind == "DataProtectionApplication" {
 		return true
 	}
 	return object.GetLabels()[oadpv1alpha1.OadpOperatorLabel] != ""
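Note: the predicate.go hunk swaps the `oadpv1alpha1.Kind` constant for the string literal `"DataProtectionApplication"`; the filtering logic itself is unchanged. A self-contained sketch of that logic, with the group string and label key passed in as parameters since their concrete values (`oadpv1alpha1.GroupVersion.Group`, `oadpv1alpha1.OadpOperatorLabel`) are defined outside this patch:

```go
package sketch

import (
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// isOurs accepts an event if the object is the DataProtectionApplication kind
// itself, or if it carries the operator's ownership label (i.e. it is a
// secondary object the operator created).
func isOurs(scheme *runtime.Scheme, obj client.Object, group, labelKey string) bool {
	gvks, _, err := scheme.ObjectKinds(obj)
	if err != nil || len(gvks) == 0 {
		return false
	}
	gvk := gvks[0]
	if gvk.Group == group && gvk.Kind == "DataProtectionApplication" {
		return true
	}
	return obj.GetLabels()[labelKey] != ""
}
```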
diff --git a/controllers/registry.go b/internal/controller/registry.go
similarity index 93%
rename from controllers/registry.go
rename to internal/controller/registry.go
index 87b2542721..536bc2be35 100644
--- a/controllers/registry.go
+++ b/internal/controller/registry.go
@@ -1,4 +1,4 @@
-package controllers
+package controller
 
 import (
 	"context"
@@ -158,7 +158,7 @@ type azureCredentials struct {
 	strorageAccountKey string
 }
 
-func (r *DPAReconciler) ReconcileRegistries(log logr.Logger) (bool, error) {
+func (r *DataProtectionApplicationReconciler) ReconcileRegistries(log logr.Logger) (bool, error) {
 	bslLabels := map[string]string{
 		"app.kubernetes.io/name":       common.OADPOperatorVelero,
 		"app.kubernetes.io/managed-by": common.OADPOperator,
@@ -207,7 +207,7 @@ func registryName(bsl *velerov1.BackupStorageLocation) string {
 	return "oadp-" + bsl.Name + "-" + bsl.Spec.Provider + "-registry"
 }
 
-func (r *DPAReconciler) getProviderSecret(secretName string) (corev1.Secret, error) {
+func (r *DataProtectionApplicationReconciler) getProviderSecret(secretName string) (corev1.Secret, error) {
 
 	secret := corev1.Secret{}
 	key := types.NamespacedName{
@@ -237,7 +237,7 @@ func replaceCarriageReturn(data map[string][]byte, logger logr.Logger) map[strin
 	return data
 }
 
-func (r *DPAReconciler) getSecretNameAndKeyFromCloudStorage(cloudStorage *oadpv1alpha1.CloudStorageLocation) (string, string, error) {
+func (r *DataProtectionApplicationReconciler) getSecretNameAndKeyFromCloudStorage(cloudStorage *oadpv1alpha1.CloudStorageLocation) (string, string, error) {
 	if cloudStorage.Credential != nil {
 		// Check if user specified empty credential key
 		if cloudStorage.Credential.Key == "" {
@@ -254,7 +254,7 @@ func (r *DPAReconciler) getSecretNameAndKeyFromCloudStorage(cloudStorage *oadpv1
 	return "", "", nil
 }
 
-func (r *DPAReconciler) getSecretNameAndKey(config map[string]string, credential *corev1.SecretKeySelector, plugin oadpv1alpha1.DefaultPlugin) (string, string, error) {
+func (r *DataProtectionApplicationReconciler) getSecretNameAndKey(config map[string]string, credential *corev1.SecretKeySelector, plugin oadpv1alpha1.DefaultPlugin) (string, string, error) {
 	// Assume default values unless user has overriden them
 	secretName := credentials.PluginSpecificFields[plugin].SecretName
 	secretKey := credentials.PluginSpecificFields[plugin].PluginSecretKey
@@ -283,7 +283,7 @@ func (r *DPAReconciler) getSecretNameAndKey(config map[string]string, credential
 	return secretName, secretKey, nil
 }
 
-func (r *DPAReconciler) parseAWSSecret(secret corev1.Secret, secretKey string, matchProfile string) (string, string, error) {
+func (r *DataProtectionApplicationReconciler) parseAWSSecret(secret corev1.Secret, secretKey string, matchProfile string) (string, string, error) {
 
 	AWSAccessKey, AWSSecretKey, profile := "", "", ""
 	splitString := strings.Split(string(secret.Data[secretKey]), "\n")
@@ -368,7 +368,7 @@ func (r *DPAReconciler) parseAWSSecret(secret corev1.Secret, secretKey string, m
 	return AWSAccessKey, AWSSecretKey, nil
 }
 
-func (r *DPAReconciler) parseAzureSecret(secret corev1.Secret, secretKey string) (azureCredentials, error) {
+func (r *DataProtectionApplicationReconciler) parseAzureSecret(secret corev1.Secret, secretKey string) (azureCredentials, error) {
 
 	azcreds := azureCredentials{}
 
@@ -459,7 +459,7 @@ func (r *DPAReconciler) parseAzureSecret(secret corev1.Secret, secretKey string)
 }
 
 // Return value to the right of = sign with quotations and spaces removed.
-func (r *DPAReconciler) getMatchedKeyValue(key string, s string) (string, error) {
+func (r *DataProtectionApplicationReconciler) getMatchedKeyValue(key string, s string) (string, error) {
 	for _, removeChar := range []string{"\"", "'", " "} {
 		s = strings.ReplaceAll(s, removeChar, "")
 	}
@@ -473,7 +473,7 @@ func (r *DPAReconciler) getMatchedKeyValue(key string, s string) (string, error)
 	return s, nil
 }
 
-func (r *DPAReconciler) ReconcileRegistrySVCs(log logr.Logger) (bool, error) {
+func (r *DataProtectionApplicationReconciler) ReconcileRegistrySVCs(log logr.Logger) (bool, error) {
 	// fetch the bsl instances
 	bslList := velerov1.BackupStorageLocationList{}
 	if err := r.List(r.Context, &bslList, &client.ListOptions{
@@ -516,7 +516,7 @@ func (r *DPAReconciler) ReconcileRegistrySVCs(log logr.Logger) (bool, error) {
 	return true, nil
 }
 
-func (r *DPAReconciler) ReconcileRegistryRoutes(log logr.Logger) (bool, error) {
+func (r *DataProtectionApplicationReconciler) ReconcileRegistryRoutes(log logr.Logger) (bool, error) {
 	// fetch the bsl instances
 	bslList := velerov1.BackupStorageLocationList{}
 	if err := r.List(r.Context, &bslList, &client.ListOptions{
@@ -567,7 +567,7 @@ func (r *DPAReconciler) ReconcileRegistryRoutes(log logr.Logger) (bool, error) {
 	return true, nil
 }
 
-func (r *DPAReconciler) ReconcileRegistryRouteConfigs(log logr.Logger) (bool, error) {
+func (r *DataProtectionApplicationReconciler) ReconcileRegistryRouteConfigs(log logr.Logger) (bool, error) {
 	// Now for each of these bsl instances, create a registry route cm for each of them
 	registryRouteCM := corev1.ConfigMap{
 		ObjectMeta: metav1.ObjectMeta{
@@ -598,7 +598,7 @@ func (r *DPAReconciler) ReconcileRegistryRouteConfigs(log logr.Logger) (bool, er
 }
 
 // Create secret for registry to be parsed by openshift-velero-plugin
-func (r *DPAReconciler) ReconcileRegistrySecrets(log logr.Logger) (bool, error) {
+func (r *DataProtectionApplicationReconciler) ReconcileRegistrySecrets(log logr.Logger) (bool, error) {
 	dpa := r.dpa
 	// fetch the bsl instances
 	bslList := velerov1.BackupStorageLocationList{}
@@ -672,7 +672,7 @@ func (r *DPAReconciler) ReconcileRegistrySecrets(log logr.Logger) (bool, error)
 	return true, nil
 }
 
-func (r *DPAReconciler) patchRegistrySecret(secret *corev1.Secret, bsl *velerov1.BackupStorageLocation) error {
+func (r *DataProtectionApplicationReconciler) patchRegistrySecret(secret *corev1.Secret, bsl *velerov1.BackupStorageLocation) error {
 	// Setting controller owner reference on the registry secret
 	err := controllerutil.SetControllerReference(r.dpa, secret, r.Scheme)
 	if err != nil {
@@ -696,7 +696,7 @@ func (r *DPAReconciler) patchRegistrySecret(secret *corev1.Secret, bsl *velerov1
 	return nil
 }
 
-func (r *DPAReconciler) populateAWSRegistrySecret(bsl *velerov1.BackupStorageLocation, registrySecret *corev1.Secret) error {
+func (r *DataProtectionApplicationReconciler) populateAWSRegistrySecret(bsl *velerov1.BackupStorageLocation, registrySecret *corev1.Secret) error {
 	// Check for secret name
 	secretName, secretKey, _ := r.getSecretNameAndKey(bsl.Spec.Config, bsl.Spec.Credential, oadpv1alpha1.DefaultPluginAWS)
 
@@ -725,7 +725,7 @@ func (r *DPAReconciler) populateAWSRegistrySecret(bsl *velerov1.BackupStorageLoc
 	return nil
 }
 
-func (r *DPAReconciler) populateAzureRegistrySecret(bsl *velerov1.BackupStorageLocation, registrySecret *corev1.Secret) error {
+func (r *DataProtectionApplicationReconciler) populateAzureRegistrySecret(bsl *velerov1.BackupStorageLocation, registrySecret *corev1.Secret) error {
 	// Check for secret name
 	secretName, secretKey, _ := r.getSecretNameAndKey(bsl.Spec.Config, bsl.Spec.Credential, oadpv1alpha1.DefaultPluginMicrosoftAzure)
 
@@ -770,7 +770,7 @@ func (r *DPAReconciler) populateAzureRegistrySecret(bsl *velerov1.BackupStorageL
 	return nil
 }
 
-func (r *DPAReconciler) verifySecretContent(secretName string, secretKey string) error {
+func (r *DataProtectionApplicationReconciler) verifySecretContent(secretName string, secretKey string) error {
 	secret, err := r.getProviderSecret(secretName)
 	if err != nil {
 		return err
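Note: `getMatchedKeyValue` is documented above as "return value to the right of = sign with quotations and spaces removed", and the hunk shows the character-stripping loop. The rest of its body is not in this patch, so the sketch below is a simplified reconstruction under that contract, not the operator's exact implementation (the prefix check and error message are mine):

```go
package sketch

import (
	"fmt"
	"strings"
)

// matchKeyValue returns the value to the right of the = sign for the given
// key, with quotes and spaces removed, e.g. `AZURE_CLIENT_ID = "abc"` -> "abc".
func matchKeyValue(key, line string) (string, error) {
	// Strip quotation marks and spaces, as in the hunk above.
	for _, ch := range []string{`"`, `'`, " "} {
		line = strings.ReplaceAll(line, ch, "")
	}
	if !strings.HasPrefix(line, key+"=") {
		return "", fmt.Errorf("no value found for key %q", key)
	}
	return strings.TrimPrefix(line, key+"="), nil
}
```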
diff --git a/controllers/registry_test.go b/internal/controller/registry_test.go
similarity index 98%
rename from controllers/registry_test.go
rename to internal/controller/registry_test.go
index c511024496..74a96e36c5 100644
--- a/controllers/registry_test.go
+++ b/internal/controller/registry_test.go
@@ -1,4 +1,4 @@
-package controllers
+package controller
 
 import (
 	"context"
@@ -217,7 +217,7 @@ func TestDPAReconciler_getSecretNameAndKey(t *testing.T) {
 			if err != nil {
 				t.Errorf("error in creating fake client, likely programmer error")
 			}
-			r := &DPAReconciler{
+			r := &DataProtectionApplicationReconciler{
 				Client: fakeClient,
 				Scheme: fakeClient.Scheme(),
 				Log:    logr.Discard(),
@@ -306,7 +306,7 @@ func TestDPAReconciler_getSecretNameAndKeyFromCloudStorage(t *testing.T) {
 			if err != nil {
 				t.Errorf("error in creating fake client, likely programmer error")
 			}
-			r := &DPAReconciler{
+			r := &DataProtectionApplicationReconciler{
 				Client: fakeClient,
 				Scheme: fakeClient.Scheme(),
 				Log:    logr.Discard(),
@@ -397,7 +397,7 @@ func TestDPAReconciler_populateAWSRegistrySecret(t *testing.T) {
 			if err != nil {
 				t.Errorf("error in creating fake client, likely programmer error")
 			}
-			r := &DPAReconciler{
+			r := &DataProtectionApplicationReconciler{
 				Client: fakeClient,
 				Scheme: fakeClient.Scheme(),
 				Log:    logr.Discard(),
@@ -487,7 +487,7 @@ func TestDPAReconciler_populateAzureRegistrySecret(t *testing.T) {
 			if err != nil {
 				t.Errorf("error in creating fake client, likely programmer error")
 			}
-			r := &DPAReconciler{
+			r := &DataProtectionApplicationReconciler{
 				Client: fakeClient,
 				Scheme: fakeClient.Scheme(),
 				Log:    logr.Discard(),
diff --git a/controllers/restore_resource_version_priority.go b/internal/controller/restore_resource_version_priority.go
similarity index 92%
rename from controllers/restore_resource_version_priority.go
rename to internal/controller/restore_resource_version_priority.go
index 047cba3a1b..3e13c8f5cc 100644
--- a/controllers/restore_resource_version_priority.go
+++ b/internal/controller/restore_resource_version_priority.go
@@ -1,4 +1,4 @@
-package controllers
+package controller
 
 import (
 	"fmt"
@@ -14,7 +14,7 @@ const (
 )
 
 // If RestoreResourcesVersionPriority is defined, configmap is created or updated and feature flag for EnableAPIGroupVersions is added to velero
-func (r *DPAReconciler) ReconcileRestoreResourcesVersionPriority() (bool, error) {
+func (r *DataProtectionApplicationReconciler) ReconcileRestoreResourcesVersionPriority() (bool, error) {
 	dpa := r.dpa
 	if len(dpa.Spec.Configuration.Velero.RestoreResourcesVersionPriority) == 0 {
 		return true, nil
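Note: the comment in restore_resource_version_priority.go describes the behavior (a ConfigMap plus Velero's `EnableAPIGroupVersions` feature flag) but the construction itself is outside this patch. A hedged sketch of the ConfigMap side only; the object name and data key here are hypothetical placeholders, not values from the operator:

```go
package sketch

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// versionPriorityCM builds the ConfigMap that carries the user's
// RestoreResourcesVersionPriority string for Velero's EnableAPIGroupVersions
// feature. Name and key are illustrative, not taken from the diff.
func versionPriorityCM(namespace, priorities string) *corev1.ConfigMap {
	return &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "enableapigroupversions", // hypothetical name
			Namespace: namespace,
		},
		Data: map[string]string{
			"restoreResourcesVersionPriority": priorities, // hypothetical key
		},
	}
}
```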
diff --git a/controllers/suite_test.go b/internal/controller/suite_test.go
similarity index 90%
rename from controllers/suite_test.go
rename to internal/controller/suite_test.go
index 7266ed2342..923e89600c 100644
--- a/controllers/suite_test.go
+++ b/internal/controller/suite_test.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package controllers
+package controller
 
 import (
 	"fmt"
@@ -25,6 +25,7 @@ import (
 	"github.com/onsi/ginkgo/v2"
 	"github.com/onsi/gomega"
 	"k8s.io/client-go/kubernetes/scheme"
+	"k8s.io/client-go/rest"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/envtest"
 	logf "sigs.k8s.io/controller-runtime/pkg/log"
@@ -36,6 +37,7 @@ import (
 // These tests use Ginkgo (BDD-style Go testing framework). Refer to
 // http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
 
+var cfg *rest.Config
 var k8sClient client.Client
 var testEnv *envtest.Environment
 
@@ -50,7 +52,7 @@ var _ = ginkgo.BeforeSuite(func() {
 
 	ginkgo.By("bootstrapping test environment")
 	testEnv = &envtest.Environment{
-		CRDDirectoryPaths:     []string{filepath.Join("..", "config", "crd", "bases"), filepath.Join("..", "hack", "extra-crds")},
+		CRDDirectoryPaths:     []string{filepath.Join("..", "..", "config", "crd", "bases"), filepath.Join("..", "..", "hack", "extra-crds")},
 		ErrorIfCRDPathMissing: true,
 
 		// The BinaryAssetsDirectory is only required if you want to run the tests directly
@@ -62,7 +64,9 @@ var _ = ginkgo.BeforeSuite(func() {
 			fmt.Sprintf("1.29.3-%s-%s", runtime.GOOS, runtime.GOARCH)),
 	}
 
-	cfg, err := testEnv.Start()
+	var err error
+	// cfg is defined in this file globally.
+	cfg, err = testEnv.Start()
 	gomega.Expect(err).NotTo(gomega.HaveOccurred())
 	gomega.Expect(cfg).NotTo(gomega.BeNil())
 
@@ -74,7 +78,6 @@ var _ = ginkgo.BeforeSuite(func() {
 	k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
 	gomega.Expect(err).NotTo(gomega.HaveOccurred())
 	gomega.Expect(k8sClient).NotTo(gomega.BeNil())
 })
 
 var _ = ginkgo.AfterSuite(func() {
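Note on the suite_test.go change above: replacing `cfg, err := testEnv.Start()` with `var err error` plus `cfg, err = testEnv.Start()` matters because `:=` would declare a new local `cfg` that shadows the package-level `var cfg *rest.Config` being introduced, leaving the global nil for any other test file that reads it. A tiny standalone illustration of that shadowing pitfall (the types are simplified stand-ins):

```go
package main

import "fmt"

var cfg *string // package-level, like the *rest.Config global in suite_test.go

func start() (*string, error) {
	s := "config"
	return &s, nil
}

func main() {
	// `cfg, err := start()` here would create a NEW local cfg and leave the
	// package-level cfg nil; assignment with a pre-declared err avoids that.
	var err error
	cfg, err = start()
	fmt.Println(cfg != nil, err) // true <nil>
}
```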
diff --git a/controllers/validator.go b/internal/controller/validator.go
similarity index 95%
rename from controllers/validator.go
rename to internal/controller/validator.go
index f7845c48ae..2dd5d8c73a 100644
--- a/controllers/validator.go
+++ b/internal/controller/validator.go
@@ -1,4 +1,4 @@
-package controllers
+package controller
 
 import (
 	"errors"
@@ -18,7 +18,7 @@ import (
 
 // ValidateDataProtectionCR function validates the DPA CR, returns true if valid, false otherwise
 // it calls other validation functions to validate the DPA CR
-func (r *DPAReconciler) ValidateDataProtectionCR(log logr.Logger) (bool, error) {
+func (r *DataProtectionApplicationReconciler) ValidateDataProtectionCR(log logr.Logger) (bool, error) {
 	dpaList := &oadpv1alpha1.DataProtectionApplicationList{}
 	err := r.List(r.Context, dpaList, &client.ListOptions{Namespace: r.NamespacedName.Namespace})
 	if err != nil {
@@ -94,7 +94,7 @@ func (r *DPAReconciler) ValidateDataProtectionCR(log logr.Logger) (bool, error)
 		return false, err
 	}
 	for _, dpa := range dpaList.Items {
-		if dpa.Namespace != r.NamespacedName.Namespace && (&DPAReconciler{dpa: &dpa}).checkNonAdminEnabled() {
+		if dpa.Namespace != r.NamespacedName.Namespace && (&DataProtectionApplicationReconciler{dpa: &dpa}).checkNonAdminEnabled() {
 			nonAdminDeployment := &appsv1.Deployment{
 				ObjectMeta: metav1.ObjectMeta{
 					Name:      nonAdminObjectName,
@@ -122,7 +122,7 @@ func (r *DPAReconciler) ValidateDataProtectionCR(log logr.Logger) (bool, error)
 
 // For later: Move this code into validator.go when more need for validation arises
 // TODO: if multiple default plugins exist, ensure we validate all of them.
 // Right now its sequential validation
-func (r *DPAReconciler) ValidateVeleroPlugins(log logr.Logger) (bool, error) {
+func (r *DataProtectionApplicationReconciler) ValidateVeleroPlugins(log logr.Logger) (bool, error) {
 	dpa := r.dpa
 
 	providerNeedsDefaultCreds, hasCloudStorage, err := r.noDefaultCredentials()
diff --git a/controllers/validator_test.go b/internal/controller/validator_test.go
similarity index 99%
rename from controllers/validator_test.go
rename to internal/controller/validator_test.go
index 2bb180a52c..1eb97199cb 100644
--- a/controllers/validator_test.go
+++ b/internal/controller/validator_test.go
@@ -1,4 +1,4 @@
-package controllers
+package controller
 
 import (
 	"testing"
@@ -1603,7 +1603,7 @@ func TestDPAReconciler_ValidateDataProtectionCR(t *testing.T) {
 			if err != nil {
 				t.Errorf("error in creating fake client, likely programmer error")
 			}
-			r := &DPAReconciler{
+			r := &DataProtectionApplicationReconciler{
 				Client:            fakeClient,
 				ClusterWideClient: fakeClient,
 				Scheme:            fakeClient.Scheme(),
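Note: the validator hunk above builds a throwaway `&DataProtectionApplicationReconciler{dpa: &dpa}` per listed DPA just to reuse `checkNonAdminEnabled`, whose gate is visible in nonadmin_controller.go: the feature is on only when `spec.nonAdmin.enable` is true and the tech-preview ack is present in `unsupportedOverrides`. A standalone sketch of that gate with simplified stand-in types (the real code reads the DPA spec and the `oadpv1alpha1.TechPreviewAck` key):

```go
package sketch

// nonAdminSpec is a simplified stand-in for the DPA's NonAdmin section.
type nonAdminSpec struct {
	Enable    *bool
	Overrides map[string]string // unsupportedOverrides
}

// nonAdminEnabled mirrors checkNonAdminEnabled: both the explicit enable flag
// and the tech-preview acknowledgement must be set.
func nonAdminEnabled(s nonAdminSpec, ackKey string) bool {
	if s.Enable != nil {
		return *s.Enable && s.Overrides[ackKey] == "true"
	}
	return false
}
```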
diff --git a/controllers/velero.go b/internal/controller/velero.go
similarity index 95%
rename from controllers/velero.go
rename to internal/controller/velero.go
index 972b48c7ad..c3cbca48b5 100644
--- a/controllers/velero.go
+++ b/internal/controller/velero.go
@@ -1,4 +1,4 @@
-package controllers
+package controller
 
 import (
 	"fmt"
@@ -70,7 +70,7 @@ var (
 	}
 )
 
-func (r *DPAReconciler) ReconcileVeleroDeployment(log logr.Logger) (bool, error) {
+func (r *DataProtectionApplicationReconciler) ReconcileVeleroDeployment(log logr.Logger) (bool, error) {
 
 	dpa := r.dpa
 
@@ -136,21 +136,21 @@ func (r *DPAReconciler) ReconcileVeleroDeployment(log logr.Logger) (bool, error)
 	return true, nil
 }
 
-func (r *DPAReconciler) veleroServiceAccount() (*corev1.ServiceAccount, error) {
+func (r *DataProtectionApplicationReconciler) veleroServiceAccount() (*corev1.ServiceAccount, error) {
 	annotations := make(map[string]string)
 	sa := install.ServiceAccount(r.dpa.Namespace, annotations)
 	sa.Labels = getDpaAppLabels(r.dpa)
 	return sa, nil
 }
 
-func (r *DPAReconciler) veleroClusterRoleBinding() (*rbacv1.ClusterRoleBinding, error) {
+func (r *DataProtectionApplicationReconciler) veleroClusterRoleBinding() (*rbacv1.ClusterRoleBinding, error) {
 	crb := install.ClusterRoleBinding(r.dpa.Namespace)
 	crb.Labels = getDpaAppLabels(r.dpa)
 	return crb, nil
 }
 
 // Build VELERO Deployment
-func (r *DPAReconciler) buildVeleroDeployment(veleroDeployment *appsv1.Deployment) error {
+func (r *DataProtectionApplicationReconciler) buildVeleroDeployment(veleroDeployment *appsv1.Deployment) error {
 	dpa := r.dpa
 	if dpa == nil {
 		return fmt.Errorf("DPA CR cannot be nil")
@@ -209,7 +209,7 @@ func (r *DPAReconciler) buildVeleroDeployment(veleroDeployment *appsv1.Deploymen
 	return r.customizeVeleroDeployment(veleroDeployment)
 }
 
-func (r *DPAReconciler) customizeVeleroDeployment(veleroDeployment *appsv1.Deployment) error {
+func (r *DataProtectionApplicationReconciler) customizeVeleroDeployment(veleroDeployment *appsv1.Deployment) error {
 	dpa := r.dpa
 	//append dpa labels
 	var err error
@@ -400,7 +400,7 @@ func (r *DPAReconciler) customizeVeleroDeployment(veleroDeployment *appsv1.Deplo
 }
 
 // add plugin specific specs to velero deployment
-func (r *DPAReconciler) appendPluginSpecificSpecs(veleroDeployment *appsv1.Deployment, veleroContainer *corev1.Container, providerNeedsDefaultCreds map[string]bool, hasCloudStorage bool) {
+func (r *DataProtectionApplicationReconciler) appendPluginSpecificSpecs(veleroDeployment *appsv1.Deployment, veleroContainer *corev1.Container, providerNeedsDefaultCreds map[string]bool, hasCloudStorage bool) {
 	dpa := r.dpa
 	init_container_resources := veleroContainer.Resources
 
@@ -500,7 +500,7 @@ func (r *DPAReconciler) appendPluginSpecificSpecs(veleroDeployment *appsv1.Deplo
 	}
 }
 
-func (r *DPAReconciler) customizeVeleroContainer(veleroDeployment *appsv1.Deployment, veleroContainer *corev1.Container, prometheusPort *int) error {
+func (r *DataProtectionApplicationReconciler) customizeVeleroContainer(veleroDeployment *appsv1.Deployment, veleroContainer *corev1.Container, prometheusPort *int) error {
 	dpa := r.dpa
 	if veleroContainer == nil {
 		return fmt.Errorf("could not find velero container in Deployment")
@@ -608,7 +608,7 @@ func disableInformerCacheValue(dpa *oadpv1alpha1.DataProtectionApplication) stri
 	return FalseVal
 }
 
-func (r *DPAReconciler) isSTSTokenNeeded(bsls []oadpv1alpha1.BackupLocation, ns string) bool {
+func (r *DataProtectionApplicationReconciler) isSTSTokenNeeded(bsls []oadpv1alpha1.BackupLocation, ns string) bool {
 
 	for _, bsl := range bsls {
 		if bsl.CloudStorage != nil {
@@ -712,7 +712,7 @@ func getResourceReqs(dpa *corev1.ResourceRequirements) (corev1.ResourceRequireme
 }
 
 // Get Velero Resource Requirements
-func (r *DPAReconciler) getVeleroResourceReqs() (corev1.ResourceRequirements, error) {
+func (r *DataProtectionApplicationReconciler) getVeleroResourceReqs() (corev1.ResourceRequirements, error) {
 	dpa := r.dpa
 	if dpa.Spec.Configuration.Velero != nil && dpa.Spec.Configuration.Velero.PodConfig != nil {
 		return getResourceReqs(&dpa.Spec.Configuration.Velero.PodConfig.ResourceAllocations)
@@ -741,7 +741,7 @@ func getNodeAgentResourceReqs(dpa *oadpv1alpha1.DataProtectionApplication) (core
 // noDefaultCredentials determines if a provider needs the default credentials.
 // This returns a map of providers found to if they need a default credential,
 // a boolean if Cloud Storage backup storage location was used and an error if any occured.
-func (r DPAReconciler) noDefaultCredentials() (map[string]bool, bool, error) {
+func (r DataProtectionApplicationReconciler) noDefaultCredentials() (map[string]bool, bool, error) {
 	dpa := r.dpa
 	providerNeedsDefaultCreds := map[string]bool{}
 	hasCloudStorage := false
diff --git a/controllers/velero_test.go b/internal/controller/velero_test.go
similarity index 98%
rename from controllers/velero_test.go
rename to internal/controller/velero_test.go
index f1b1588d65..816ba6e164 100644
--- a/controllers/velero_test.go
+++ b/internal/controller/velero_test.go
@@ -1,4 +1,4 @@
-package controllers
+package controller
 
 import (
 	"context"
@@ -242,7 +242,7 @@ var _ = ginkgo.Describe("Test ReconcileVeleroDeployment function", func() {
 			os.Setenv(scenario.envVar.Name, scenario.envVar.Value)
 
 			event := record.NewFakeRecorder(5)
-			r := &DPAReconciler{
+			r := &DataProtectionApplicationReconciler{
 				Client:  k8sClient,
 				Scheme:  testEnv.Scheme,
 				Context: ctx,
@@ -1749,7 +1749,7 @@ func TestDPAReconciler_buildVeleroDeployment(t *testing.T) {
 			if err != nil {
 				t.Errorf("error in creating fake client, likely programmer error")
 			}
-			r := DPAReconciler{Client: fakeClient, dpa: test.dpa}
+			r := DataProtectionApplicationReconciler{Client: fakeClient, dpa: test.dpa}
 			oadpclient.SetClient(fakeClient)
 			if test.testProxy {
 				t.Setenv(proxyEnvKey, proxyEnvValue)
@@ -1977,7 +1977,7 @@ func Test_validateVeleroPlugins(t *testing.T) {
 			if err != nil {
 				t.Errorf("error in creating fake client, likely programmer error")
 			}
-			r := &DPAReconciler{
+			r := &DataProtectionApplicationReconciler{
 				Client: fakeClient,
 				Scheme: fakeClient.Scheme(),
 				Log:    logr.Discard(),
@@ -2067,20 +2067,20 @@ func TestDPAReconciler_noDefaultCredentials(t *testing.T) {
 			if err != nil {
 				t.Errorf("error in creating fake client, likely programmer error")
 			}
-			r := DPAReconciler{
+			r := DataProtectionApplicationReconciler{
 				Client: fakeClient,
 				dpa:    &tt.args.dpa,
 			}
 			got, got1, err := r.noDefaultCredentials()
 			if (err != nil) != tt.wantErr {
-				t.Errorf("DPAReconciler.noDefaultCredentials() error = %v, wantErr %v", err, tt.wantErr)
+				t.Errorf("DataProtectionApplicationReconciler.noDefaultCredentials() error = %v, wantErr %v", err, tt.wantErr)
 				return
 			}
 			if !reflect.DeepEqual(got, tt.want) {
-				t.Errorf("DPAReconciler.noDefaultCredentials() got = \n%v, \nwant \n%v", got, tt.want)
+				t.Errorf("DataProtectionApplicationReconciler.noDefaultCredentials() got = \n%v, \nwant \n%v", got, tt.want)
 			}
 			if got1 != tt.wantHasCloudStorage {
-				t.Errorf("DPAReconciler.noDefaultCredentials() got1 = %v, want %v", got1, tt.wantHasCloudStorage)
+				t.Errorf("DataProtectionApplicationReconciler.noDefaultCredentials() got1 = %v, want %v", got1, tt.wantHasCloudStorage)
 			}
 		})
 	}
@@ -2140,13 +2140,13 @@ func TestDPAReconciler_VeleroDebugEnvironment(t *testing.T) {
 			if err != nil {
 				t.Errorf("error in creating fake client, likely programmer error")
 			}
-			r := DPAReconciler{
+			r := DataProtectionApplicationReconciler{
 				Client: fakeClient,
 				dpa:    dpa,
 			}
 			err = r.buildVeleroDeployment(deployment)
 			if (err != nil) != tt.wantErr {
-				t.Errorf("DPAReconciler.VeleroDebugEnvironment error = %v, wantErr %v", err, tt.wantErr)
+				t.Errorf("DataProtectionApplicationReconciler.VeleroDebugEnvironment error = %v, wantErr %v", err, tt.wantErr)
 				return
 			}
 			if deployment.Spec.Replicas == nil {
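Note: `noDefaultCredentials` is documented above as returning a provider-to-needs-default-credential map plus a cloud-storage flag, but its body is outside this patch. The sketch below is a speculative reconstruction of only the map-building shape under that doc comment, with simplified stand-in types; it is not the operator's implementation:

```go
package sketch

// bsl is a simplified stand-in for a backup storage location entry.
type bsl struct {
	Provider      string
	HasCredential bool // an inline credential secret was specified
}

// needsDefaultCreds records, per provider, whether any of its locations lacks
// an inline credential and therefore falls back to the default secret.
func needsDefaultCreds(locations []bsl) map[string]bool {
	out := map[string]bool{}
	for _, l := range locations {
		if !l.HasCredential {
			// one credential-less BSL is enough to require the default secret
			out[l.Provider] = true
		} else if _, seen := out[l.Provider]; !seen {
			out[l.Provider] = false
		}
	}
	return out
}
```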
diff --git a/controllers/vsl.go b/internal/controller/vsl.go
similarity index 95%
rename from controllers/vsl.go
rename to internal/controller/vsl.go
index 242c34830f..f436c75ae6 100644
--- a/controllers/vsl.go
+++ b/internal/controller/vsl.go
@@ -1,4 +1,4 @@
-package controllers
+package controller
 
 import (
 	"errors"
@@ -51,7 +51,7 @@ var validAzureKeys = map[string]bool{
 	AzureResourceGroup: true,
 }
 
-func (r *DPAReconciler) LabelVSLSecrets(log logr.Logger) (bool, error) {
+func (r *DataProtectionApplicationReconciler) LabelVSLSecrets(log logr.Logger) (bool, error) {
 	dpa := r.dpa
 	for _, vsl := range dpa.Spec.SnapshotLocations {
 		provider := strings.TrimPrefix(vsl.Velero.Provider, veleroIOPrefix)
@@ -104,7 +104,7 @@ func (r *DPAReconciler) LabelVSLSecrets(log logr.Logger) (bool, error) {
 	return true, nil
 }
 
-func (r *DPAReconciler) ValidateVolumeSnapshotLocations() (bool, error) {
+func (r *DataProtectionApplicationReconciler) ValidateVolumeSnapshotLocations() (bool, error) {
 	dpa := r.dpa
 	for i, vslSpec := range dpa.Spec.SnapshotLocations {
 		vslYAMLPath := fmt.Sprintf("spec.snapshotLocations[%v]", i)
@@ -183,7 +183,7 @@ func (r *DPAReconciler) ValidateVolumeSnapshotLocations() (bool, error) {
 	return true, nil
 }
 
-func (r *DPAReconciler) ReconcileVolumeSnapshotLocations(log logr.Logger) (bool, error) {
+func (r *DataProtectionApplicationReconciler) ReconcileVolumeSnapshotLocations(log logr.Logger) (bool, error) {
 	dpa := r.dpa
 	dpaVSLNames := []string{}
 	// Loop through all configured VSLs
@@ -283,7 +283,7 @@ func containsPlugin(d []oadpv1alpha1.DefaultPlugin, value string) bool {
 	return false
 }
 
-func (r *DPAReconciler) ensureVslSecretDataExists(vsl *oadpv1alpha1.SnapshotLocation) error {
+func (r *DataProtectionApplicationReconciler) ensureVslSecretDataExists(vsl *oadpv1alpha1.SnapshotLocation) error {
 	// Check if the Velero feature flag 'no-secret' is not set
 	if !(r.dpa.Spec.Configuration.Velero.HasFeatureFlag("no-secret")) {
 		// Check if the user specified credential under velero
diff --git a/controllers/vsl_test.go b/internal/controller/vsl_test.go
similarity index 99%
rename from controllers/vsl_test.go
rename to internal/controller/vsl_test.go
index 7be09a1b34..79dd1c8fb9 100644
--- a/controllers/vsl_test.go
+++ b/internal/controller/vsl_test.go
@@ -1,4 +1,4 @@
-package controllers
+package controller
 
 import (
 	"reflect"
@@ -686,7 +686,7 @@ func TestDPAReconciler_ValidateVolumeSnapshotLocation(t *testing.T) {
 			if err != nil {
 				t.Errorf("error in creating fake client, likely programmer error")
 			}
-			r := &DPAReconciler{
+			r := &DataProtectionApplicationReconciler{
 				Client: fakeClient,
 				Scheme: fakeClient.Scheme(),
 				Log:    logr.Discard(),
@@ -751,7 +751,7 @@ func TestDPAReconciler_ReconcileVolumeSnapshotLocations(t *testing.T) {
 			if err != nil {
 				t.Errorf("error in creating fake client, likely programmer error")
 			}
-			r := &DPAReconciler{
+			r := &DataProtectionApplicationReconciler{
 				Client: fakeClient,
 				Scheme: fakeClient.Scheme(),
 				Log:    logr.Discard(),
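Note: the `LabelVSLSecrets` hunk shows providers being normalized with `strings.TrimPrefix(vsl.Velero.Provider, veleroIOPrefix)`, so snapshot locations may name a provider either fully qualified or bare. A tiny runnable illustration, assuming `veleroIOPrefix` is `"velero.io/"` (the constant's value is not shown in this patch):

```go
package main

import (
	"fmt"
	"strings"
)

// normalizeProvider mirrors the TrimPrefix call above: "velero.io/aws" and
// plain "aws" both normalize to "aws" before provider-specific handling.
func normalizeProvider(p string) string {
	return strings.TrimPrefix(p, "velero.io/") // assumed prefix value
}

func main() {
	fmt.Println(normalizeProvider("velero.io/aws")) // aws
	fmt.Println(normalizeProvider("gcp"))           // gcp
}
```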