diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index ffb0f57..889e187 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -78,6 +78,9 @@ jobs:
       - uses: helm/kind-action@v1.10.0
         with:
           cluster_name: kind
+      - uses: eifinger/setup-rye@v4
+        with:
+          version: '0.37.0'
       - uses: actions/download-artifact@v4
         with:
           name: pod-graceful-drain.tar
@@ -91,6 +94,8 @@ jobs:
           --set experimentalGeneralIngress=true \
           --set logLevel=info\\,pod_graceful_drain=trace \
           --wait=true --timeout=1m
+      - run: rye sync
+      - run: rye test
       - name: Dump
         if: always()
         run: |
diff --git a/.gitignore b/.gitignore
index 96ef862..089ff51 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,2 +1,7 @@
+.venv/
+*.egg-info/
+__pycache__/
+
 target/
+
 .idea/
diff --git a/.python-version b/.python-version
new file mode 100644
index 0000000..871f80a
--- /dev/null
+++ b/.python-version
@@ -0,0 +1 @@
+3.12.3
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000..98b7b10
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,16 @@
+[project]
+name = "pod-graceful-drain-integ-test"
+version = "0.1.0"
+description = "Integration test for pod-graceful-drain"
+authors = [
+    { name = "SeongChan Lee", email = "foriequal@gmail.com" }
+]
+dependencies = []
+readme = "README.md"
+requires-python = ">= 3.8"
+
+[tool.rye]
+managed = true
+dev-dependencies = [
+    "pytest>=8.3.1",
+]
diff --git a/requirements-dev.lock b/requirements-dev.lock
new file mode 100644
index 0000000..2ee8d19
--- /dev/null
+++ b/requirements-dev.lock
@@ -0,0 +1,20 @@
+# generated by rye
+# use `rye lock` or `rye sync` to update this lockfile
+#
+# last locked with the following flags:
+#   pre: false
+#   features: []
+#   all-features: false
+#   with-sources: false
+#   generate-hashes: false
+
+-e file:.
+colorama==0.4.6
+    # via pytest
+iniconfig==2.0.0
+    # via pytest
+packaging==24.1
+    # via pytest
+pluggy==1.5.0
+    # via pytest
+pytest==8.3.1
diff --git a/requirements.lock b/requirements.lock
new file mode 100644
index 0000000..d795436
--- /dev/null
+++ b/requirements.lock
@@ -0,0 +1,11 @@
+# generated by rye
+# use `rye lock` or `rye sync` to update this lockfile
+#
+# last locked with the following flags:
+#   pre: false
+#   features: []
+#   all-features: false
+#   with-sources: false
+#   generate-hashes: false
+
+-e file:.
diff --git a/tests/test_smoke.py b/tests/test_smoke.py
new file mode 100644
index 0000000..41fe627
--- /dev/null
+++ b/tests/test_smoke.py
@@ -0,0 +1,119 @@
+import subprocess
+import sys
+from datetime import datetime, timedelta
+import random
+import time
+
+namespace = ""  # randomized per test module in setup_module()
+
+
+def setup_module():
+    global namespace
+    namespace = f"pgd-test-{random.randrange(10000, 99999)}"
+    kubectl("create", "namespace", namespace)
+    kubectl("label", "namespace", namespace, "test=true")
+    print("testing on namespace: ", namespace)
+
+
+def teardown_module():
+    global namespace
+    kubectl("delete", "namespace", namespace)
+
+
+def eprint(*args, **kwargs):
+    print(*args, file=sys.stderr, **kwargs)
+
+
+def kubectl(*args):  # run kubectl in the test namespace; raise on failure
+    global namespace
+
+    result = subprocess.run(
+        ["kubectl", "--namespace", namespace, *args],
+        capture_output=True, encoding="utf-8")  # decode as text so failures print readably (matches kubectl_stdin)
+
+    if result.returncode != 0:
+        eprint("stdout:")
+        eprint(result.stdout)
+        eprint("stderr:")
+        eprint(result.stderr)
+        raise Exception(f"'kubectl {" ".join(args)}' failed with exit code '{result.returncode}'")
+
+
+def kubectl_stdin(args, /, input):  # like kubectl(), but pipes `input` to stdin (e.g. `apply -f -`)
+    global namespace
+
+    result = subprocess.run(
+        ["kubectl", "--namespace", namespace, *args],
+        capture_output=True,
+        input=input, encoding="utf-8")
+
+    if result.returncode != 0:
+        eprint("stdout:")
+        eprint(result.stdout)
+        eprint("stderr:")
+        eprint(result.stderr)
+        raise Exception(f"'kubectl {" ".join(args)}' failed with exit code '{result.returncode}'")
+
+
+def kubectl_nowait(args):  # fire-and-forget kubectl; returns the Popen handle
+    global namespace
+
+    child = subprocess.Popen(
+        ["kubectl", "--namespace", namespace, *args])
+
+    return child
+
+
+def pod_is_alive(name):  # True iff the resource exists and has no deletionTimestamp
+    global namespace
+
+    result = subprocess.run(
+        ["kubectl", "--namespace", namespace, "get", name, "-o", "jsonpath={.metadata.deletionTimestamp}"],
+        capture_output=True, encoding="utf-8")
+
+    if result.returncode != 0:
+        return False
+
+    stdout = result.stdout.strip()
+    return not stdout
+
+
+def test_can_delete_pod_without_delay_if_no_ingress():
+    kubectl("run", "busybox-sleep", "--image=public.ecr.aws/docker/library/busybox", "--", "sleep", "1000")
+    kubectl("wait", "pod/busybox-sleep", "--for=condition=Ready")
+    start = datetime.now()
+    kubectl("delete", "pod/busybox-sleep", "--wait=false")
+    diff = datetime.now() - start
+    assert diff < timedelta(seconds=10), "it should be quick"
+    assert not pod_is_alive("pod/busybox-sleep")
+
+
+def test_delete_is_delayed_with_ingress():
+    kubectl("run", "nginx", "--image=nginx")
+    kubectl("expose", "pod", "nginx", "--port=80", "--target-port=8000")  # NOTE(review): nginx listens on 80; target-port 8000 looks off — confirm intended
+    kubectl_stdin(["apply", "-f", "-"], input="""
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: nginx
+spec:
+  rules:
+    - http:
+        paths:
+          - path: /
+            pathType: Prefix
+            backend:
+              service:
+                name: nginx
+                port:
+                  number: 80
+    """)
+    kubectl("wait", "pod/nginx", "--for=condition=Ready")
+
+    time.sleep(1)  # give some time to settle down
+
+    kubectl_nowait(["delete", "pod/nginx", "--wait=false"])
+
+    for _ in range(0, 20 - 5):
+        assert pod_is_alive("pod/nginx"), "pod should be alive for approx. 20s"
+        time.sleep(1)