diff --git a/.github/workflows/e2e-backend-switching-reuseable.yml b/.github/workflows/e2e-backend-switching-reuseable.yml index 2c59658f7..ea9757640 100644 --- a/.github/workflows/e2e-backend-switching-reuseable.yml +++ b/.github/workflows/e2e-backend-switching-reuseable.yml @@ -111,7 +111,7 @@ jobs: run: | make -C hack/ci/ wait-istio-cr-ready make -C hack/ci/ wait-api-gateway-cr-ready - make -C hack/ci/ wait-nats-cr-ready + make -C hack/ci/ wait-nats-cr-warning make -C hack/ci/ wait-eventing-cr-ready-with-backend ACTIVE_BACKEND=NATS - name: Setup eventing tests diff --git a/config/ui-extensions/eventing/general b/config/ui-extensions/eventing/general index 1484f69ee..79f7b011b 100644 --- a/config/ui-extensions/eventing/general +++ b/config/ui-extensions/eventing/general @@ -11,5 +11,5 @@ features: disableCreate: false disableDelete: false description: >- - {{[Eventing CR](https://github.com/kyma-project/eventing-manager/blob/main/config/samples/default.yaml)}} - specifies eventing module. + {{[Eventing custom resource](https://kyma-project.io/#/eventing-manager/user/02-configuration)}} + configures the Eventing module. diff --git a/hack/ci/Makefile b/hack/ci/Makefile index 8951681fc..318d0aafc 100644 --- a/hack/ci/Makefile +++ b/hack/ci/Makefile @@ -51,6 +51,10 @@ apply-peerauthentication-crd: wait-nats-cr-ready: kubectl wait nats.operator.kyma-project.io -n kyma-system eventing-nats --timeout=300s --for=jsonpath='{.status.state}'=Ready +.PHONY: wait-nats-cr-warning +wait-nats-cr-warning: + kubectl wait nats.operator.kyma-project.io -n kyma-system eventing-nats --timeout=300s --for=jsonpath='{.status.state}'=Warning + .PHONY: wait-eventing-cr-ready wait-eventing-cr-ready: kubectl wait eventing.operator.kyma-project.io -n kyma-system eventing --timeout=300s --for=jsonpath='{.status.state}'=Ready @@ -69,7 +73,7 @@ wait-api-gateway-cr-ready: .PHONY: provision-gardener-cluster provision-gardener-cluster: - GARDENER_CLUSTER_VERSION="${GARDENER_KUBE_VERSION}" PROJECT_ROOT="${PROJECT_ROOT}" KYMA_CLI="${KYMA_CLI}" ${PROJECT_ROOT}/scripts/gardener/aws/provision.sh + GARDENER_CLUSTER_VERSION="${GARDENER_KUBE_VERSION}" PROJECT_ROOT="${PROJECT_ROOT}" ${PROJECT_ROOT}/scripts/gardener/aws/provision.sh .PHONY: deprovision-gardener-cluster deprovision-gardener-cluster: diff --git a/scripts/gardener/aws/provision.sh b/scripts/gardener/aws/provision.sh index 17471ef81..a94d124c6 100755 --- a/scripts/gardener/aws/provision.sh +++ b/scripts/gardener/aws/provision.sh @@ -62,9 +62,6 @@ gardener::validate_and_default() { export GARDENER_CLUSTER_VERSION=$(kubectl --kubeconfig="${GARDENER_KUBECONFIG}" get cloudprofiles.core.gardener.cloud aws -o go-template='{{range .spec.kubernetes.versions}}{{if eq .classification "supported"}}{{.version}}{{break}}{{end}}{{end}}') fi - # Detect supported linux version. 
-  GARDEN_LINUX_VERSION=$(kubectl --kubeconfig="${GARDENER_KUBECONFIG}" get cloudprofiles.core.gardener.cloud aws -o go-template='{{range .spec.machineImages}}{{if eq .name "gardenlinux"}}{{range .versions}}{{if eq .classification "supported"}}{{.version}}{{end}}{{end}}{{end}}{{end}}')
-
   # print configurations for debugging purposes:
   log::banner "Configurations:"
   echo "CLUSTER_NAME: ${CLUSTER_NAME}"
@@ -75,28 +72,73 @@ gardener::validate_and_default() {
   echo "SCALER_MAX: ${SCALER_MAX}"
   echo "GARDENER_CLUSTER_VERSION: ${GARDENER_CLUSTER_VERSION}"
   echo "RETRY_ATTEMPTS ${RETRY_ATTEMPTS}"
-  echo "GARDEN_LINUX_VERSION ${GARDEN_LINUX_VERSION}"
 }
 
 gardener::provision_cluster() {
   log::banner "Provision cluster: \"${CLUSTER_NAME}\""
-  # decreasing attempts to 2 because we will try to create new cluster from scratch on exit code other than 0
-  ${KYMA_CLI} provision gardener aws \
-      --secret "${GARDENER_PROVIDER_SECRET_NAME}" \
-      --name "${CLUSTER_NAME}" \
-      --project "${GARDENER_PROJECT_NAME}" \
-      --credentials "${GARDENER_KUBECONFIG}" \
-      --region "${GARDENER_REGION}" \
-      --zones "${GARDENER_ZONES}" \
-      --type "${MACHINE_TYPE}" \
-      --scaler-min ${SCALER_MIN} \
-      --scaler-max ${SCALER_MAX} \
-      --kube-version="${GARDENER_CLUSTER_VERSION}" \
-      --gardenlinux-version "${GARDEN_LINUX_VERSION}" \
-      --attempts ${RETRY_ATTEMPTS} \
-      --verbose \
-      --hibernation-start ""
+  cat << EOF | kubectl apply --kubeconfig="${GARDENER_KUBECONFIG}" -f -
+apiVersion: core.gardener.cloud/v1beta1
+kind: Shoot
+metadata:
+  name: ${CLUSTER_NAME}
+spec:
+  secretBindingName: ${GARDENER_PROVIDER_SECRET_NAME}
+  cloudProfileName: aws
+  region: ${GARDENER_REGION}
+  purpose: evaluation
+  provider:
+    type: aws
+    infrastructureConfig:
+      apiVersion: aws.provider.extensions.gardener.cloud/v1alpha1
+      kind: InfrastructureConfig
+      networks:
+        vpc:
+          cidr: 10.250.0.0/16
+        zones:
+          - name: ${GARDENER_REGION}a
+            internal: 10.250.112.0/22
+            public: 10.250.96.0/22
+            workers: 10.250.0.0/19
+    workers:
+      - name: cpu-worker
+        minimum: ${SCALER_MIN}
+        maximum: ${SCALER_MAX}
+        machine:
+          type: ${MACHINE_TYPE}
+        volume:
+          type: gp2
+          size: 50Gi
+        zones:
+          - ${GARDENER_REGION}a
+  networking:
+    type: calico
+    pods: 100.96.0.0/11
+    nodes: 10.250.0.0/16
+    services: 100.64.0.0/13
+  kubernetes:
+    version: ${GARDENER_CLUSTER_VERSION}
+  hibernation:
+    enabled: false
+  addons:
+    nginxIngress:
+      enabled: false
+EOF
+
+  echo "waiting for the cluster to be ready..."
+  kubectl wait --kubeconfig="${GARDENER_KUBECONFIG}" --for=condition=EveryNodeReady shoot/${CLUSTER_NAME} --timeout=17m
+
+  # request an admin kubeconfig for the shoot; the returned kubeconfig is valid for one day
+  kubectl create --kubeconfig="${GARDENER_KUBECONFIG}" \
+      -f <(printf '{"spec":{"expirationSeconds":86400}}') \
+      --raw /apis/core.gardener.cloud/v1beta1/namespaces/garden-${GARDENER_PROJECT_NAME}/shoots/${CLUSTER_NAME}/adminkubeconfig | \
+      jq -r ".status.kubeconfig" | \
+      base64 -d > ${CLUSTER_NAME}_kubeconfig.yaml
+
+  # merge with the existing kubeconfig settings; ~ is not expanded inside quotes, so use ${HOME}
+  mkdir -p ~/.kube
+  KUBECONFIG="${HOME}/.kube/config:${CLUSTER_NAME}_kubeconfig.yaml" kubectl config view --flatten --merge > merged_kubeconfig.yaml
+  mv merged_kubeconfig.yaml ~/.kube/config
 }
 
 ## MAIN Logic
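
Quick manual check of the behavior above (a sketch, not part of the diff; it assumes the names already used in it: the `eventing-nats` NATS CR in `kyma-system`, and the admin kubeconfig merged into ~/.kube/config):

  # The NATS CR should report the state the new wait targets expect
  # (Ready before switching, Warning after the backend switch):
  kubectl get nats.operator.kyma-project.io -n kyma-system eventing-nats -o jsonpath='{.status.state}{"\n"}'

  # The merged admin kubeconfig should reach the freshly provisioned shoot:
  kubectl get nodes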