From 462a0c74690e23838ebce9a9e44f0340c55c8b69 Mon Sep 17 00:00:00 2001
From: Mark Laing
Date: Wed, 7 Aug 2024 09:21:00 +0100
Subject: [PATCH] WIP

---
 .github/workflows/tests.yml | 100 ++----------------------------------
 tests/vm-migration          |  58 ++++++++++-----------
 2 files changed, 34 insertions(+), 124 deletions(-)

diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 832c0c5b0..500f49505 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -79,103 +79,10 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        os: [20.04, 22.04, 24.04]
-        track: ${{ fromJSON(inputs.snap-tracks || '["latest/edge", "5.21/edge", "5.0/edge", "4.0/edge"]') }}
+        os: [24.04]
+        track: ${{ fromJSON(inputs.snap-tracks || '["latest/edge"]') }}
         test:
-          - cgroup
-          - cluster
-          - container
-          - container-copy
-          - conversion
-          - cpu-vm
-          - devlxd-vm
-          - devlxd-container
-          - docker
-          - efi-vars-editor-vm
-          - interception
-          - pylxd
-          - network-bridge-firewall
-          - network-ovn
-          - network-routed
-          - snapd
-          - storage-buckets
-          - storage-disks-vm
-          - "storage-vm dir"
-          - "storage-vm btrfs"
-          - "storage-vm ceph"
-          - "storage-vm lvm"
-          - "storage-vm lvm-thin"
-          - "storage-vm zfs"
-          - storage-volumes-vm
-          - tpm-vm
-          - vm-nesting
           - vm-migration
-        include:
-          - test: qemu-external-vm
-            track: "latest/edge"
-            os: "24.04"
-        exclude:
-          # not compatible with 4.0/*
-          - test: container-copy
-            track: "4.0/edge"
-          - test: conversion
-            track: "4.0/edge"
-          - test: cpu-vm
-            track: "4.0/edge"
-          - test: devlxd-vm
-            track: "4.0/edge"
-          - test: efi-vars-editor-vm
-            track: "4.0/edge"
-          - test: network-bridge-firewall
-            os: 20.04
-            track: "4.0/edge"
-          - test: network-ovn
-            track: "4.0/edge"
-          # https://github.com/canonical/pylxd/issues/590
-          - test: pylxd
-            track: "4.0/edge"
-          - test: storage-buckets
-            track: "4.0/edge"
-          - test: storage-disks-vm
-            track: "4.0/edge"
-          - test: "storage-vm dir"
-            track: "4.0/edge"
-          - test: "storage-vm btrfs"
-            track: "4.0/edge"
-          - test: "storage-vm ceph"
-            track: "4.0/edge"
-          - test: "storage-vm lvm"
-            track: "4.0/edge"
-          - test: "storage-vm lvm-thin"
-            track: "4.0/edge"
-          - test: "storage-vm zfs"
-            track: "4.0/edge"
-          - test: storage-volumes-vm
-            track: "4.0/edge"
-          - test: tpm-vm
-            track: "4.0/edge"
-          # not compatible with 5.0/*
-          - test: efi-vars-editor-vm # not compatible with 5.0/*
-            track: "5.0/edge"
-          # waiting for integration with microceph
-          - test: "storage-vm ceph"
-          # skip track/os combinaisons that are too far appart
-          - track: "4.0/edge"
-            os: "24.04"
-          - track: "5.0/edge"
-            os: "24.04"
-          - track: "5.0/edge"
-            os: "20.04"
-          - track: "5.21/edge"
-            os: "20.04"
-          - track: "latest/edge"
-            os: "20.04"
-          - track: "latest/edge"
-            os: "22.04"
-          - test: "vm-migration"
-            track: "4.0/edge"
-          - test: "vm-migration"
-            track: "5.0/edge"
 
     steps:
       - name: Performance tuning
@@ -255,6 +162,9 @@ jobs:
           fi
           sudo --preserve-env=PURGE_LXD,TEST_IMG ./bin/local-run "tests/${TEST_SCRIPT}" ${{ matrix.track }} ${EXTRA_ARGS:-}
 
+      - name: Setup tmate session
+        uses: mxschmitt/action-tmate@v3
+
       # always update cache as we have our own logic of
       # cache invalidation and updates in addition to a date check
       - name: Delete previous cache
diff --git a/tests/vm-migration b/tests/vm-migration
index 7bdd40aec..7f07ceb37 100644
--- a/tests/vm-migration
+++ b/tests/vm-migration
@@ -56,11 +56,11 @@ done
 # Launch two instances for our LXD cluster and wait for them to be ready. If the host supports `devlxd_images_vm` then
 # set `security.devlxd.images=true` so that we don't have to download the image again.
 if hasNeededAPIExtension devlxd_images_vm; then
-  lxc launch "${TEST_IMG:-ubuntu-minimal-daily:24.04}" member1 --vm -c limits.memory=2GiB -c security.devlxd.images=true
-  lxc launch "${TEST_IMG:-ubuntu-minimal-daily:24.04}" member2 --vm -c limits.memory=2GiB -c security.devlxd.images=true
+  lxc launch "${TEST_IMG:-ubuntu-minimal-daily:24.04}" member1 --vm -c security.devlxd.images=true
+  lxc launch "${TEST_IMG:-ubuntu-minimal-daily:24.04}" member2 --vm -c security.devlxd.images=true
 else
-  lxc launch "${TEST_IMG:-ubuntu-minimal-daily:24.04}" member1 --vm -c limits.memory=2GiB
-  lxc launch "${TEST_IMG:-ubuntu-minimal-daily:24.04}" member2 --vm -c limits.memory=2GiB
+  lxc launch "${TEST_IMG:-ubuntu-minimal-daily:24.04}" member1 --vm
+  lxc launch "${TEST_IMG:-ubuntu-minimal-daily:24.04}" member2 --vm
 fi
 
 waitInstanceReady member1
@@ -136,35 +136,35 @@ lxc exec member1 -- lxc storage create ceph ceph
 lxc exec member1 -- lxc storage volume create ceph vol1 --type=block size=500MiB
 
 # Create a VM in the cluster, on member1.
-lxc exec member1 -- lxc init "${TEST_IMG:-ubuntu-minimal-daily:24.04}" v1 --vm --storage ceph --target member1 -c migration.stateful=true -c limits.memory=1GiB
+lxc exec member1 -- lxc init "${TEST_IMG:-ubuntu-minimal-daily:24.04}" v1 --vm --storage ceph --target member1 -c migration.stateful=true -c limits.memory=500MiB
 
 # Add vol1 as a disk device to the VM.
 lxc exec member1 -- lxc config device add v1 vol1-disk disk pool=ceph source=vol1
 
-# Start the VM.
-lxc exec member1 -- lxc start v1
-
-# Wait for a long time for it to boot (doubly nested VM takes a while).
-while [ "$(lxc exec member1 -- lxc info v1 | awk '{if ($1 == "Processes:") print $2}')" -le 1 ]; do
-  sleep 30
-done
-
-# vol1 should be available as /dev/sdb. Format it as ext4. Then mount it and create a file.
-lxc exec member1 -- lxc exec v1 -- mkfs -t ext4 /dev/sdb
-lxc exec member1 -- lxc exec v1 -- mkdir /mnt/vol1
-lxc exec member1 -- lxc exec v1 -- mount -t ext4 /dev/sdb /mnt/vol1
-lxc exec member1 -- lxc exec v1 -- cp /etc/hostname /mnt/vol1/bar
-
-# Move the instance
-lxc exec member1 -- lxc move v1 --target member2
-
-# The VM is slow. So the agent isn't immediately available after the live migration.
-while [ "$(lxc exec member1 -- lxc info v1 | awk '{if ($1 == "Processes:") print $2}')" -le 1 ]; do
-  sleep 5
-done
-
-# The volume should be functional, still mounted, and the file we created should still be there with the same contents.
-[ "$(lxc exec member2 -- lxc exec v1 -- cat /mnt/vol1/bar)" = "v1" ]
+## Start the VM.
+#lxc exec member1 -- lxc start v1
+
+## Wait for a long time for it to boot (doubly nested VM takes a while).
+#while [ "$(lxc exec member1 -- lxc info v1 | awk '{if ($1 == "Processes:") print $2}')" -le 1 ]; do
+#  sleep 30
+#done
+#
+## vol1 should be available as /dev/sdb. Format it as ext4. Then mount it and create a file.
+#lxc exec member1 -- lxc exec v1 -- mkfs -t ext4 /dev/sdb
+#lxc exec member1 -- lxc exec v1 -- mkdir /mnt/vol1
+#lxc exec member1 -- lxc exec v1 -- mount -t ext4 /dev/sdb /mnt/vol1
+#lxc exec member1 -- lxc exec v1 -- cp /etc/hostname /mnt/vol1/bar
+#
+## Move the instance
+#lxc exec member1 -- lxc move v1 --target member2
+#
+## The VM is slow. So the agent isn't immediately available after the live migration.
+#while [ "$(lxc exec member1 -- lxc info v1 | awk '{if ($1 == "Processes:") print $2}')" -le 1 ]; do
+#  sleep 5
+#done
+#
+## The volume should be functional, still mounted, and the file we created should still be there with the same contents.
+#[ "$(lxc exec member2 -- lxc exec v1 -- cat /mnt/vol1/bar)" = "v1" ]
 
 # shellcheck disable=SC2034
 FAIL=0
\ No newline at end of file