Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Update e2e with managed #1451

Open
wants to merge 35 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
35 commits
Select commit Hold shift + click to select a range
54001ea
add managed cluster to e2e
coleenquadros Apr 30, 2024
63dbb4b
lint
coleenquadros Apr 30, 2024
ee1d641
Merge remote-tracking branch 'origin/main' into add_mc_e2e
coleenquadros Apr 30, 2024
7f26568
refactor to accomodate managed cluster
coleenquadros May 2, 2024
edf7c7e
fix
coleenquadros May 3, 2024
a50d02d
refactor
coleenquadros May 3, 2024
486aa70
syntax
coleenquadros May 3, 2024
57804ac
update kind tests to add new managed cluster
coleenquadros May 23, 2024
062218a
remove files
coleenquadros May 23, 2024
4cda1a7
[ACM-10812]: fix addon status not reported in hub (#1420)
thibaultmg Apr 30, 2024
e90911c
[ACM-10706] Add support for custom alertmanager url (#1419)
douglascamata May 2, 2024
6edaad8
Add required permissions for simulated cluster & various fixes for me…
jacobbaungard May 7, 2024
462d444
[ACM-10812]: retry status update on conflict (#1427)
thibaultmg May 14, 2024
c92cfb5
Tests: Don't check remote_write_requests on spokes (#1433)
jacobbaungard May 14, 2024
fcc1b80
retry manifest work (#1434)
thibaultmg May 15, 2024
7632583
[ACM-11093]: apply security context for microshift (#1422)
thibaultmg May 15, 2024
2c9cd9e
Add `cluster:node_cpu:ratio` to allowlist (#1409)
jacobbaungard May 16, 2024
caf27f3
handle update of missing kinds (#1426)
thibaultmg May 16, 2024
a0174e8
relocate kubeconfig to /workspace for non-root user access (#1437)
subbarao-meduri May 16, 2024
0bf59b1
Upgrade to Go 1.21 (#1440)
douglascamata May 18, 2024
103e6d4
Validate url for external remote write endpoint (#1432)
philipgough May 20, 2024
071c7f9
precreate and set permissions for /workspace/.kube directory (#1443)
subbarao-meduri May 21, 2024
0402d97
[ACM-11722] Add support for alertmanager path prefix and validate URL…
douglascamata May 21, 2024
2d62170
Improve e2e logs (#1435)
thibaultmg May 22, 2024
537c28a
Bump MCO memory limit to 3Gi (#1447)
jacobbaungard May 22, 2024
2c65c5d
Expose new promql-engine opt-out option in CR (#1446)
saswatamcode May 22, 2024
5f607dc
[ACM-11543] Do not add custom obs api url to certs SAN (#1441)
douglascamata May 22, 2024
29ed654
Add integration tests to github workflows (#1436)
thibaultmg May 22, 2024
b950a05
update prometheus operator crd (#1449)
coleenquadros May 22, 2024
ae41f18
refactor to accomodate managed cluster
coleenquadros May 2, 2024
0fdcef9
Merge branch 'main' into update_e2e_with_managed
coleenquadros May 23, 2024
3d38695
remove yaml additions
coleenquadros May 23, 2024
c78637e
refactor
coleenquadros May 23, 2024
b445cd2
lint
coleenquadros May 23, 2024
bf28bc3
refacter
coleenquadros May 23, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -98,6 +98,7 @@ kind-env:
@echo "Setting up KinD cluster"
@./scripts/bootstrap-kind-env.sh
@echo "Cluster has been created"
kind export kubeconfig --name=managed
kind export kubeconfig --name=hub
kubectl label node hub-control-plane node-role.kubernetes.io/master=''

Expand All @@ -108,6 +109,8 @@ mco-kind-env: kind-env
@echo "Local environment has been set up"
@echo "Installing MCO"
@kind get kubeconfig --name hub > /tmp/hub.yaml
@kind get kubeconfig --name managed > /tmp/managed.yaml
@kind get kubeconfig --name hub --internal > ./.hub-kubeconfig
KUBECONFIG=/tmp/hub.yaml IS_KIND_ENV=true KUSTOMIZE_VERSION=${KUSTOMIZE_VERSION} ./cicd-scripts/setup-e2e-tests.sh


Expand Down
16 changes: 14 additions & 2 deletions cicd-scripts/run-e2e-tests.sh
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@ else
fi

kubecontext=$(kubectl config current-context)
cluster_name="local-cluster"
hub_cluster_name="local-cluster"

if [[ -n ${IS_KIND_ENV} ]]; then
clusterServerURL="https://127.0.0.1:32806"
Expand Down Expand Up @@ -67,14 +67,26 @@ if [[ -n ${IS_KIND_ENV} ]]; then
printf "\n grafanaHost: grafana-test" >>${OPTIONSFILE}
fi
printf "\n clusters:" >>${OPTIONSFILE}
printf "\n - name: ${cluster_name}" >>${OPTIONSFILE}
printf "\n - name: ${hub_cluster_name}" >>${OPTIONSFILE}
if [[ -n ${IS_KIND_ENV} ]]; then
printf "\n clusterServerURL: ${clusterServerURL}" >>${OPTIONSFILE}
fi
printf "\n baseDomain: ${base_domain}" >>${OPTIONSFILE}
printf "\n kubeconfig: ${kubeconfig_hub_path}" >>${OPTIONSFILE}
printf "\n kubecontext: ${kubecontext}" >>${OPTIONSFILE}

kubeconfig_managed_path="${SHARED_DIR}/managed-1.kc"
if [[ -z ${IS_KIND_ENV} && -f ${kubeconfig_managed_path} ]]; then
managed_cluster_name="managed-cluster-1"
kubecontext_managed=$(kubectl --kubeconfig="${kubeconfig_managed_path}" config current-context)
app_domain_managed=$(kubectl -n openshift-ingress-operator --kubeconfig="${kubeconfig_managed_path}" get ingresscontrollers default -ojsonpath='{.status.domain}')
base_domain_managed="${app_domain_managed#apps.}"
printf "\n - name: ${managed_cluster_name}" >>${OPTIONSFILE}
printf "\n baseDomain: ${base_domain_managed}" >>${OPTIONSFILE}
printf "\n kubeconfig: ${kubeconfig_managed_path}" >>${OPTIONSFILE}
printf "\n kubecontext: ${kubecontext_managed}" >>${OPTIONSFILE}
fi

if command -v ginkgo &>/dev/null; then
GINKGO_CMD=ginkgo
else
Expand Down
100 changes: 79 additions & 21 deletions cicd-scripts/setup-e2e-tests.sh
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ AGENT_NS="open-cluster-management-agent"
HUB_NS="open-cluster-management-hub"
OBSERVABILITY_NS="open-cluster-management-observability"
IMAGE_REPO="quay.io/stolostron"
export MANAGED_CLUSTER="local-cluster" # registration-operator needs this
#export MANAGED_CLUSTER="local-cluster" # registration-operator needs this

SED_COMMAND=${SED}' -i-e -e'

Expand All @@ -43,32 +43,40 @@ deploy_hub_spoke_core() {
REGISTRATION_LATEST_SNAPSHOT='2.4.9-SNAPSHOT-2022-11-17-20-19-31'
make cluster-ip IMAGE_REGISTRY=quay.io/stolostron IMAGE_TAG=${REGISTRATION_LATEST_SNAPSHOT} WORK_TAG=${REGISTRATION_LATEST_SNAPSHOT} REGISTRATION_TAG=${REGISTRATION_LATEST_SNAPSHOT} PLACEMENT_TAG=${REGISTRATION_LATEST_SNAPSHOT}
make deploy IMAGE_REGISTRY=quay.io/stolostron IMAGE_TAG=${REGISTRATION_LATEST_SNAPSHOT} WORK_TAG=${REGISTRATION_LATEST_SNAPSHOT} REGISTRATION_TAG=${REGISTRATION_LATEST_SNAPSHOT} PLACEMENT_TAG=${REGISTRATION_LATEST_SNAPSHOT}

# wait until hub and spoke are ready
wait_for_deployment_ready 10 60s ${HUB_NS} cluster-manager-registration-controller cluster-manager-registration-webhook cluster-manager-work-webhook
wait_for_deployment_ready 10 60s ${AGENT_NS} klusterlet-registration-agent klusterlet-work-agent

}

# approve the CSR for cluster join request
approve_csr_joinrequest() {
echo "wait for CSR for cluster join reqest is created..."
for i in {1..60}; do
# TODO(morvencao): remove the hard-coded cluster label
csrs=$(kubectl get csr -lopen-cluster-management.io/cluster-name=${MANAGED_CLUSTER})
if [[ -n ${csrs} ]]; then
csrnames=$(kubectl get csr -lopen-cluster-management.io/cluster-name=${MANAGED_CLUSTER} -o jsonpath={.items..metadata.name})
for csrname in ${csrnames}; do
echo "approve CSR: ${csrname}"
kubectl certificate approve ${csrname}
done
break
fi
if [[ ${i} -eq 60 ]]; then
echo "timeout wait for CSR is created."
exit 1
fi
echo "retrying in 10s..."
sleep 10
managed_clusters=("local-cluster" "managed-cluster-1")

KUBECONFIG=/tmp/hub.yaml IS_KIND_ENV=true
#kubectl config use-context kind-hub
for MANAGED_CLUSTER in "${managed_clusters[@]}"; do
echo "Processing CSRs for ${MANAGED_CLUSTER}..."
for i in {1..60}; do
# TODO(morvencao): remove the hard-coded cluster label
# for loop for the case that multiple clusters are created
csrs=$(kubectl get csr -lopen-cluster-management.io/cluster-name=${MANAGED_CLUSTER})
if [[ -n ${csrs} ]]; then
csrnames=$(kubectl get csr -lopen-cluster-management.io/cluster-name=${MANAGED_CLUSTER} -o jsonpath={.items..metadata.name})
for csrname in ${csrnames}; do
echo "approve CSR: ${csrname}"
kubectl certificate approve ${csrname}
done
break
fi
if [[ ${i} -eq 60 ]]; then
echo "timeout wait for CSR is created."
exit 1
fi
echo "retrying in 10s..."
sleep 10
done
done

for i in {1..20}; do
Expand Down Expand Up @@ -130,6 +138,7 @@ EOF

# deploy the MCO operator via the kustomize resources
deploy_mco_operator() {
kubectl config use-context kind-hub
if [[ -n ${MULTICLUSTER_OBSERVABILITY_OPERATOR_IMAGE_REF} ]]; then
cd ${ROOTDIR}/operators/multiclusterobservability/config/manager && kustomize edit set image quay.io/stolostron/multicluster-observability-operator=${MULTICLUSTER_OBSERVABILITY_OPERATOR_IMAGE_REF}
else
Expand Down Expand Up @@ -216,10 +225,59 @@ wait_for_deployment_ready() {
done
}

# deploy_managed_cluster deploys the klusterlet (registration + work agents)
# onto the "managed" KinD cluster so it can join the hub.
# Relies on: ROOTDIR, SED_COMMAND, AGENT_NS, wait_for_deployment_ready.
deploy_managed_cluster() {
  echo "Setting Kubernetes context to the managed cluster..."

  # BUG FIX: a bare `VAR=x VAR2=y` line with no command only assigns
  # unexported shell variables, so kubectl/make never saw this kubeconfig.
  # Export so child processes actually target the managed cluster.
  export KUBECONFIG=/tmp/managed.yaml
  export IS_KIND_ENV=true
  kubectl config use-context kind-managed
  export MANAGED_CLUSTER="managed-cluster-1"

  cd "${ROOTDIR}"
  # we are pinned here so no need to re-fetch if we have the project locally.
  if [[ ! -d "registration-operator" ]]; then
    git clone --depth 1 -b release-2.4 https://github.com/stolostron/registration-operator.git
  fi
  cd registration-operator
  REGISTRATION_LATEST_SNAPSHOT='2.4.9-SNAPSHOT-2022-11-17-20-19-31'
  # Rename the sample klusterlet CR's cluster from "cluster1" to our managed cluster name.
  ${SED_COMMAND} "s~clusterName: cluster1$~clusterName: ${MANAGED_CLUSTER}~g" deploy/klusterlet/config/samples/operator_open-cluster-management_klusterlets.cr.yaml
  make deploy-spoke IMAGE_REGISTRY=quay.io/stolostron IMAGE_TAG=${REGISTRATION_LATEST_SNAPSHOT} WORK_TAG=${REGISTRATION_LATEST_SNAPSHOT} REGISTRATION_TAG=${REGISTRATION_LATEST_SNAPSHOT} PLACEMENT_TAG=${REGISTRATION_LATEST_SNAPSHOT}
  wait_for_deployment_ready 10 60s ${AGENT_NS} klusterlet-registration-agent klusterlet-work-agent
}

# deploy_hub_and_managed_cluster bootstraps OCM with clusteradm: it
# initializes the hub, joins the hub to itself (as "local-cluster") and the
# "managed" KinD cluster to the hub, then accepts both join requests.
deploy_hub_and_managed_cluster() {
  cd "$(dirname "${BASH_SOURCE}")"

  set -e

  hub=${CLUSTER1:-hub}
  hub_name="local-cluster"
  # BUG FIX: this previously fell back off CLUSTER1 as well, so setting
  # CLUSTER1 collapsed hub and managed onto the same cluster. CLUSTER2 is
  # almost certainly what was intended — confirm against CI env vars.
  c1=${CLUSTER2:-managed}

  hubctx="kind-${hub}"
  c1ctx="kind-${c1}"

  # ./.hub-kubeconfig is the default value of HUB_KUBECONFIG
  # (plain echo: the old trailing "\n" printed a literal backslash-n)
  echo "Initialize the ocm hub cluster"
  clusteradm init --wait --context ${hubctx}
  joincmd=$(clusteradm get token --context ${hubctx} | grep clusteradm)

  echo "Join hub to hub"
  $(echo ${joincmd} --force-internal-endpoint-lookup --wait --context ${hubctx} | sed "s/<cluster_name>/$hub_name/g")

  # BUG FIX: a bare `KUBECONFIG=... IS_KIND_ENV=...` line (no command) only
  # sets unexported shell variables; export so clusteradm/kubectl use them.
  export KUBECONFIG=/tmp/managed.yaml
  export IS_KIND_ENV=true
  echo "Join cluster1 to hub"
  $(echo ${joincmd} --force-internal-endpoint-lookup --wait --context ${c1ctx} | sed "s/<cluster_name>/$c1/g")

  echo "Accept join of hub,cluster1"
  export KUBECONFIG=/tmp/hub.yaml
  clusteradm accept --context ${hubctx} --clusters ${c1},${hub_name} --skip-approve-check

  kubectl get managedclusters --all-namespaces --context ${hubctx}
}
# function execute is the main routine to do the actual work
execute() {
deploy_hub_spoke_core
approve_csr_joinrequest
# deploy_hub_spoke_core
# approve_csr_joinrequest
# deploy_managed_cluster
deploy_hub_and_managed_cluster
deploy_mco_operator
deploy_grafana_test
echo "OCM and MCO are installed successfully..."
Expand Down
10 changes: 10 additions & 0 deletions scripts/bootstrap-kind-env.sh
Original file line number Diff line number Diff line change
Expand Up @@ -39,12 +39,22 @@ deploy_openshift_router() {
kubectl apply -f ${WORKDIR}/router/
}

# create_kind_cluster_managed (re)creates a KinD cluster to act as a managed
# (spoke) cluster. $1 is the cluster name; the kubeconfig is written to
# $HOME/.kube/kind-config-$1 and the KinD config is read from
# ${WORKDIR}/kind/kind-$1.config.yaml.
create_kind_cluster_managed() {
  # BUG FIX: removed leftover author-debug text ("Coleen ...") from the log
  # message; also quote expansions so names with spaces can't split.
  echo "Delete the KinD cluster if exists"
  kind delete cluster --name "$1" || true
  rm -rf "$HOME/.kube/kind-config-$1"

  echo "Start KinD cluster with the default cluster name - $1"
  kind create cluster --kubeconfig "$HOME/.kube/kind-config-$1" --name "$1" --config "${WORKDIR}/kind/kind-$1.config.yaml"
}

# run is this bootstrap script's entry point: create the hub KinD cluster,
# install the hub-side prerequisites (CRDs, templates, service-ca operator,
# openshift router), then create the second ("managed") KinD cluster.
# Order matters: everything between the two create calls targets the hub.
run() {
create_kind_cluster hub
deploy_crds
deploy_templates
deploy_service_ca_operator
deploy_openshift_router
# NOTE(review): the managed cluster gets none of the hub add-ons above —
# presumably intentional for a bare spoke; confirm.
create_kind_cluster_managed managed
}

run
9 changes: 0 additions & 9 deletions tests/pkg/tests/observability_addon_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -19,15 +19,6 @@ import (

var _ = Describe("Observability:", func() {
BeforeEach(func() {
hubClient = utils.NewKubeClient(
testOptions.HubCluster.ClusterServerURL,
testOptions.KubeConfig,
testOptions.HubCluster.KubeContext)

dynClient = utils.NewKubeClientDynamic(
testOptions.HubCluster.ClusterServerURL,
testOptions.KubeConfig,
testOptions.HubCluster.KubeContext)
if utils.GetManagedClusterName(testOptions) == hubManagedClusterName {
Skip("Skip the case for local-cluster since no observability addon")
}
Expand Down
5 changes: 0 additions & 5 deletions tests/pkg/tests/observability_alert_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -39,11 +39,6 @@ var _ = Describe("Observability:", func() {
testOptions.HubCluster.ClusterServerURL,
testOptions.KubeConfig,
testOptions.HubCluster.KubeContext)

dynClient = utils.NewKubeClientDynamic(
testOptions.HubCluster.ClusterServerURL,
testOptions.KubeConfig,
testOptions.HubCluster.KubeContext)
})
statefulsetLabels := [...]string{
ALERTMANAGER_LABEL,
Expand Down
22 changes: 9 additions & 13 deletions tests/pkg/tests/observability_certrenew_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -15,19 +15,9 @@ import (
"github.com/stolostron/multicluster-observability-operator/tests/pkg/utils"
)

var _ = Describe("Observability:", func() {
func runCertRenewTests(clusterConfig utils.Cluster) {
BeforeEach(func() {
hubClient = utils.NewKubeClient(
testOptions.HubCluster.ClusterServerURL,
testOptions.KubeConfig,
testOptions.HubCluster.KubeContext)

dynClient = utils.NewKubeClientDynamic(
testOptions.HubCluster.ClusterServerURL,
testOptions.KubeConfig,
testOptions.HubCluster.KubeContext)
clusterName := utils.GetManagedClusterName(testOptions)
if clusterName == hubManagedClusterName {
if clusterConfig.Name == hubManagedClusterName {
namespace = hubMetricsCollectorNamespace
isHub = false
}
Expand Down Expand Up @@ -165,7 +155,7 @@ var _ = Describe("Observability:", func() {

// debug code to check label "cert/time-restarted"
deployment, err := utils.GetDeployment(
testOptions,
clusterConfig,
isHub,
"metrics-collector-deployment",
namespace,
Expand All @@ -189,4 +179,10 @@ var _ = Describe("Observability:", func() {
namespace = MCO_ADDON_NAMESPACE
isHub = false
})
}

// Register the cert-renew test suite once per configured managed cluster.
// NOTE(review): this loop executes during Ginkgo spec-tree construction
// (package init), so testOptions.ManagedClusters must already be populated
// at that point — if it is only filled in a BeforeSuite, this range sees an
// empty slice and silently registers no specs. TODO: confirm init order.
var _ = Describe("Observability:", func() {
for _, clusterConfig := range testOptions.ManagedClusters {
runCertRenewTests(clusterConfig)
}
})
14 changes: 1 addition & 13 deletions tests/pkg/tests/observability_dashboard_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -19,18 +19,6 @@ const (
)

var _ = Describe("Observability:", func() {
BeforeEach(func() {
hubClient = utils.NewKubeClient(
testOptions.HubCluster.ClusterServerURL,
testOptions.KubeConfig,
testOptions.HubCluster.KubeContext)

dynClient = utils.NewKubeClientDynamic(
testOptions.HubCluster.ClusterServerURL,
testOptions.KubeConfig,
testOptions.HubCluster.KubeContext)
})

It("[P2][Sev2][observability][Stable] Should have custom dashboard which defined in configmap (dashboard/g0)", func() {
By("Creating custom dashboard configmap")
yamlB, _ := kustomize.Render(
Expand Down Expand Up @@ -71,7 +59,7 @@ var _ = Describe("Observability:", func() {

It("[P2][Sev2][observability][Stable] Should have no custom dashboard in grafana after related configmap removed (dashboard/g0)", func() {
By("Deleting custom dashboard configmap")
err = utils.DeleteConfigMap(testOptions, true, dashboardName, MCO_NAMESPACE)
err = utils.DeleteConfigMap(testOptions.ManagedClusters[0], true, dashboardName, MCO_NAMESPACE)
Expect(err).ToNot(HaveOccurred())
Eventually(func() bool {
_, result := utils.ContainDashboard(testOptions, updateDashboardTitle)
Expand Down
Loading
Loading