Update macos-podman.yml #471

Workflow file for this run

name: Create a kind cluster, install kubevirt to test it
on:
  workflow_dispatch:
  push:
    branches:
      - main
env:
  KUBEVIRT_VERSION: v1.2.1
  KUBEVIRT_CDI_VERSION: v1.59.0
  TEKTON_VERSION: v0.59.0
  TEKTON_CLIENT: 0.37.0
  ARGOCD_VERSION: v0.10.0
  GITEA_VERSION: 1.22.0
  QUAY_ORG: snowdrop
jobs:
  kubevirt:
    runs-on: ubuntu-latest
    if: ${{ github.event_name == 'push' && contains(github.event.head_commit.message, 'virt') }}
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Create a kind cluster running the ingress controller
        run: |
          # kind create cluster
          # kubectl wait pod -n ingress-nginx -ljob-name=ingress-nginx-admission-patch --for condition=PodReadyToStartContainers=false --timeout=90s
          # kubectl wait pod -n ingress-nginx -ljob-name=ingress-nginx-admission-create --for condition=PodReadyToStartContainers=false --timeout=90s
          # kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/main/deploy/static/provider/kind/deploy.yaml
          curl -s -L "https://raw.githubusercontent.com/snowdrop/k8s-infra/main/kind/kind.sh" | bash -s install
          kubectl wait deployment -n ingress ingress-nginx-controller --for condition=Available=True --timeout=90s
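          # Hedged smoke test (a sketch, not executed by this run): nginx should
          # answer on localhost once the controller is Available; an unknown host
          # typically returns a 404 status code.
          #   curl -s -o /dev/null -w '%{http_code}\n' http://localhost/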
      #- name: Install OLM
      #  run: |
      #    curl -sL https://github.com/operator-framework/operator-lifecycle-manager/releases/download/$OLM_VERSION/install.sh -o install.sh
      #    chmod +x install.sh
      #    ./install.sh $OLM_VERSION
      - name: Install Argocd client
        run: |
          curl -sSL -o argocd-linux-amd64 https://github.com/argoproj/argo-cd/releases/latest/download/argocd-linux-amd64
          sudo install -m 555 argocd-linux-amd64 /usr/local/bin/argocd
          rm argocd-linux-amd64
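          # Optional sanity check (a suggestion, not in the original run):
          #   argocd version --client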
      - name: Install and configure ArgoCD
        run: |
          # Install ArgoCD Operator subscription
          # kubectl apply -f https://raw.githubusercontent.com/ch007m/test-github-action/main/.github/resources/argocd/subscription.yaml
          # function csv_succeeded {
          #   kubectl get csv argocd-operator.${ARGOCD_VERSION} -o jsonpath='{.status.phase}'
          # }
          #
          # # Loop until the CSV is Succeeded
          # until [[ $(csv_succeeded) == "Succeeded" ]]; do
          #   echo "Waiting for CSV 'argocd-operator.${ARGOCD_VERSION}' to reach Succeeded phase..."
          #   sleep 5
          # done
          #
          # # Create the gitops namespace and deploy the argocd instance
          # kubectl create ns gitops
          # kubectl apply -n gitops -f https://raw.githubusercontent.com/ch007m/test-github-action/main/.github/resources/argocd/argocd.yaml
          # kubectl wait deployment -n gitops argocd-gitops-server --for condition=Available=True --timeout=90s
          # kubectl wait deployment -n gitops argocd-gitops-applicationset-controller --for condition=Available=True --timeout=90s
          cd .github/resources/argocd/helm
          helm repo add argo-cd https://argoproj.github.io/argo-helm
          helm dep update my-chart
          kubectl create ns gitops
          helm install argo-cd ./my-chart -f ./values.yaml -n gitops
          kubectl wait pod -n gitops -lapp.kubernetes.io/name=argocd-redis --for condition=Ready --timeout=90s
          kubectl wait pod -n gitops -lapp.kubernetes.io/name=argocd-repo-server --for condition=Ready --timeout=90s
          kubectl wait pod -n gitops -lapp.kubernetes.io/name=argocd-server --for condition=Ready --timeout=90s
          kubectl wait pod -n gitops -lapp.kubernetes.io/name=argocd-applicationset-controller --for condition=Ready --timeout=90s
          echo "Update /etc/hosts"
          echo "127.0.0.1 argocd.localtest.me" | sudo tee -a /etc/hosts
      - name: Install Tekton
        run: |
          kubectl apply -f https://github.com/tektoncd/pipeline/releases/download/${TEKTON_VERSION}/release.yaml
          kubectl wait deployment -n tekton-pipelines tekton-pipelines-controller --for condition=Available=True --timeout=90s
          kubectl wait deployment -n tekton-pipelines tekton-pipelines-webhook --for condition=Available=True --timeout=90s
          echo "Disabling the affinity-assistant to avoid the error: more than one PersistentVolumeClaim is bound to a TaskRun = pod"
          kubectl patch cm feature-flags -n tekton-pipelines -p '{"data":{"disable-affinity-assistant":"true"}}'
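          # Hedged verification (a suggestion, not in the original run): read the
          # flag back to confirm the patch landed; it should print "true".
          #   kubectl get cm feature-flags -n tekton-pipelines -o jsonpath='{.data.disable-affinity-assistant}'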
      - name: Install Kubevirt
        run: |
          function is_nested_virt_enabled() {
            kvm_nested="unknown"
            if [ -f "/sys/module/kvm_intel/parameters/nested" ]; then
              kvm_nested=$( cat /sys/module/kvm_intel/parameters/nested )
            elif [ -f "/sys/module/kvm_amd/parameters/nested" ]; then
              kvm_nested=$( cat /sys/module/kvm_amd/parameters/nested )
            fi
            [ "$kvm_nested" == "1" ] || [ "$kvm_nested" == "Y" ] || [ "$kvm_nested" == "y" ]
          }
          echo "Deploying KubeVirt"
          kubectl apply -f "https://github.com/kubevirt/kubevirt/releases/download/${KUBEVIRT_VERSION}/kubevirt-operator.yaml"
          kubectl apply -f "https://github.com/kubevirt/kubevirt/releases/download/${KUBEVIRT_VERSION}/kubevirt-cr.yaml"
          echo "Configuring Kubevirt to use emulation if needed"
          if ! is_nested_virt_enabled; then
            kubectl -n kubevirt patch kubevirt kubevirt --type=merge --patch '{"spec":{"configuration":{"developerConfiguration":{"useEmulation":true}}}}'
          fi
          echo "Deploying KubeVirt containerized-data-importer"
          kubectl apply -f "https://github.com/kubevirt/containerized-data-importer/releases/download/${KUBEVIRT_CDI_VERSION}/cdi-operator.yaml"
          kubectl apply -f "https://github.com/kubevirt/containerized-data-importer/releases/download/${KUBEVIRT_CDI_VERSION}/cdi-cr.yaml"
          echo "Waiting for KubeVirt to be ready"
          kubectl wait --for=condition=Available kubevirt kubevirt --namespace=kubevirt --timeout=5m
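          # Hedged check (a suggestion, not in the original run): when nested
          # virt is unavailable, the emulation flag patched above should read back:
          #   kubectl get kubevirt kubevirt -n kubevirt -o jsonpath='{.spec.configuration.developerConfiguration.useEmulation}'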
      - name: Give more RBAC to Virt resources and RW for PVC
        run: |
          echo "Patch the StorageProfile to use the storageclass standard and give ReadWrite access"
          kubectl get StorageProfile
          kubectl patch --type merge -p '{"spec": {"claimPropertySets": [{"accessModes": ["ReadWriteOnce"], "volumeMode": "Filesystem"}]}}' StorageProfile standard
          kubectl create clusterrolebinding pod-kubevirt-viewer --clusterrole=kubevirt.io:view --serviceaccount=default:default
          kubectl create clusterrolebinding cdi-kubevirt-viewer --clusterrole=cdi.kubevirt.io:view --serviceaccount=default:default
          kubectl create clusterrolebinding quarkus-dev --clusterrole=admin --serviceaccount=default:default
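          # Hedged verification (a suggestion, not in the original run): the
          # default ServiceAccount should now be allowed to list VMIs:
          #   kubectl auth can-i list virtualmachineinstances --as=system:serviceaccount:default:default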
      - name: Install our Fedora podman image
        run: |
          kubectl create ns vm-images
          kubectl apply -n vm-images -f https://raw.githubusercontent.com/ch007m/test-github-action/main/.github/resources/quay-to-pvc-datavolume.yml
          kubectl wait datavolume -n vm-images podman-remote --for condition=Ready=True --timeout=90s
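          # Hedged check (a suggestion, not in the original run): the DataVolume
          # should have produced a bound PVC holding the imported image:
          #   kubectl get pvc -n vm-images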
      - name: Create ssh key, secret and VM
        run: |
          ssh-keygen -N "" -f id_rsa
          kubectl create secret generic quarkus-dev-ssh-key -n default --from-file=key=id_rsa.pub
          kubectl apply -f https://raw.githubusercontent.com/ch007m/test-github-action/main/.github/resources/quarkus-dev-vm.yml
          kubectl wait --for=condition=Ready vm/quarkus-dev --timeout=360s
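          # Hedged sketch (assumptions: the VM runs Fedora with a 'fedora' user
          # and the public key injected above; not executed by this workflow):
          #   VM_IP=$(kubectl get vmi quarkus-dev -o jsonpath='{.status.interfaces[0].ipAddress}')
          #   ssh -i id_rsa -o StrictHostKeyChecking=no fedora@$VM_IP 'podman --version'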
      # - name: Log OS, kube tools, container versions
      #   run: |
      #     echo "########### OS ###########"
      #     cat /etc/os-release
      #
      #     echo "########### Kind version ###########"
      #     kind --version
      #
      #     echo "########### Docker version ###########"
      #     docker -v
      #
      #     echo "########### kubectl version ###########"
      #     kubectl version --client
      #
      #     echo "########### Kubevirt version: $KUBEVIRT_VERSION ###########"
      #
      - name: Log on to ArgoCD Server
        run: |
          echo "Get Argocd admin secret"
          ARGOCD_PWD=$(kubectl get secret/argocd-initial-admin-secret -n gitops -ojson | jq -r '.data.password' | base64 -d)
          echo "Argocd password: $ARGOCD_PWD. End"
          ARGOCD_SERVER=argocd.localtest.me
          ARGOCD_USER=admin
          echo "Logging in to the server with the argocd client ..."
          argocd login --insecure $ARGOCD_SERVER --username $ARGOCD_USER --password $ARGOCD_PWD --grpc-web
          echo "List the applications ... should be empty"
          argocd app list
      - name: Wait till podman remote replies
        run: |
          VM_IP=$(kubectl get vmi -o jsonpath='{.items[0].status.interfaces[0].ipAddress}')
          echo "VM IP is: $VM_IP"
          # Launch the podman client
          kubectl run podname-client --image=quay.io/podman/stable -- "sleep" "1000000"
          # Wait for the pod to be in the Running state
          kubectl wait --for condition=Ready pod/podname-client --timeout=360s
          echo "Wait for the command within the pod to succeed"
          echo ">>>> Command to be executed to check healthiness of podman & socat"
          echo ">>>> kubectl exec podname-client -- podman \"-r\" \"--url=tcp://$VM_IP:2376\" \"version\""
          # The step runs under 'bash -e', so guard the probe with 'if' to keep a
          # failing exec from aborting the loop before the retry.
          while true; do
            if kubectl exec podname-client -- podman "-r" "--url=tcp://$VM_IP:2376" "version"; then
              echo "Command within the pod succeeded."
              break
            else
              echo "Remote podman is not yet ready to reply ..."
            fi
            sleep 20
          done
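          # Hedged follow-up (a suggestion, not in the original run): once the
          # endpoint replies, it serves regular remote podman commands, e.g.
          #   kubectl exec podname-client -- podman -r --url=tcp://$VM_IP:2376 images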
      - name: Install Gitea
        run: |
          curl -L https://dl.gitea.com/gitea/${GITEA_VERSION}/gitea-${GITEA_VERSION}-linux-amd64 --output gitea
          chmod +x gitea
          sudo mv gitea /usr/local/bin
          gitea cert --host gitea.localtest.me --ca
          mv cert.pem gitea-localtest-me.pem
          mv key.pem gitea-localtest-me.key
          kubectl create ns gitea
          kubectl -n gitea create secret tls gitea-tls --cert=gitea-localtest-me.pem --key=gitea-localtest-me.key
          helm repo add gitea-charts https://dl.gitea.com/charts/
          helm repo update
          helm install gitea gitea-charts/gitea -n gitea -f https://raw.githubusercontent.com/ch007m/test-github-action/main/.github/resources/gitea/values.yml
          kubectl wait -n gitea deployment gitea --for condition=Available=True --timeout=90s
          echo "127.0.0.1 gitea.localtest.me" | sudo tee -a /etc/hosts
      - name: Create a git org to test
        run: |
          export GITEA_API_URL="https://gitea.localtest.me/api"
          export GITEA_USERNAME="gitea_admin"
          export GITEA_PASSWORD="gitea_admin"
          export ORG_NAME="dummy"
          curl -ks \
            "$GITEA_API_URL/v1/orgs" \
            -H 'accept: application/json' \
            -H 'Content-Type: application/json' \
            -u "$GITEA_USERNAME:$GITEA_PASSWORD" \
            -d '{"username": "'"$ORG_NAME"'"}'
          curl -ks \
            "$GITEA_API_URL/v1/orgs/$ORG_NAME" \
            -H 'accept: application/json' \
            -H 'Content-Type: application/json' \
            -u "$GITEA_USERNAME:$GITEA_PASSWORD" | jq -r
      - name: Log errors
        if: failure()
        run: |
          echo "#### List Pods ... ####"
          kubectl get po -A
          echo "#### Logs of the argocd server ... ####"
          kubectl logs -n gitops -lapp.kubernetes.io/name=argocd-server
          echo "#### List Services ... ####"
          kubectl get svc -A
          echo "#### List Ingress ... ####"
          kubectl get ingress -A
          echo "#### Get VM ... ####"
          kubectl get vm -A
          echo "#### Check status of the VM ... ####"
          kubectl describe -n default vm/quarkus-dev
          kubectl get -n default vm/quarkus-dev -oyaml
          echo "#### Get VMI ... ####"
          kubectl get vmi -A