From 04fd8ff8612e5641c149e162b14b6ab25e9f6ee2 Mon Sep 17 00:00:00 2001
From: Simon Deziel
Date: Tue, 8 Oct 2024 18:18:27 -0400
Subject: [PATCH 01/19] github: use setup-microceph action from LXD repo

Signed-off-by: Simon Deziel
---
 .github/workflows/tests.yml | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index d0710d4c..3adf32d3 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -266,6 +266,10 @@ jobs:
           path: /home/runner/work/cache
           key: cache-${{ steps.get-date.outputs.date }}
 
+      - name: Setup MicroCeph
+        if: ${{ matrix.test == 'storage-buckets' || matrix.test == 'storage-vm ceph' || matrix.test == 'storage-volumes-vm' }}
+        uses: canonical/lxd/.github/actions/setup-microceph@main
+
       - name: ${{ matrix.test }} (${{ matrix.track }})
         run: |
           set -eux

From fcb0373b70340bb4fcef586a22f2bd181ff056ee Mon Sep 17 00:00:00 2001
From: Simon Deziel
Date: Thu, 10 Oct 2024 16:35:48 -0400
Subject: [PATCH 02/19] github: don't reclaim space for storage-buckets

This test doesn't need much disk space.

Signed-off-by: Simon Deziel
---
 .github/workflows/tests.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 3adf32d3..889c40e0 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -200,7 +200,7 @@ jobs:
           echo "force-unsafe-io" | sudo tee /etc/dpkg/dpkg.cfg.d/force-unsafe-io
 
       - name: Reclaim some space (storage tests only)
-        if: ${{ startsWith(matrix.test, 'storage') || matrix.test == 'vm-nesting' || matrix.test == 'conversion' }}
+        if: ${{ startsWith(matrix.test, 'storage-vm') || matrix.test == 'storage-volumes-vm' || matrix.test == 'vm-nesting' || matrix.test == 'conversion' }}
         run: |
           set -eux
           df -h

From 3d206b9e6d5967f529b51b08c58211a17042fa00 Mon Sep 17 00:00:00 2001
From: Simon Deziel
Date: Tue, 8 Oct 2024 18:27:42 -0400
Subject: [PATCH 03/19] tests/storage-vm: re-add ceph to pool driver list

Signed-off-by: Simon Deziel
---
 tests/storage-vm | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/tests/storage-vm b/tests/storage-vm
index f1f8532c..1454edfb 100755
--- a/tests/storage-vm
+++ b/tests/storage-vm
@@ -11,8 +11,7 @@ install_lxd
 
 IMAGE="${TEST_IMG:-ubuntu-minimal-daily:24.04}"
 
-# XXX: skip ceph for now
-poolDriverList="${1:-dir btrfs lvm lvm-thin zfs}"
+poolDriverList="${1:-ceph dir btrfs lvm lvm-thin zfs}"
 
 # Configure LXD
 lxc network create lxdbr0

From 85522d075e454662cec59a3443195f79c8329d28 Mon Sep 17 00:00:00 2001
From: Simon Deziel
Date: Wed, 9 Oct 2024 17:32:02 -0400
Subject: [PATCH 04/19] tests/storage-volumes-vm: re-add ceph to pool driver list

Signed-off-by: Simon Deziel
---
 tests/storage-volumes-vm | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/tests/storage-volumes-vm b/tests/storage-volumes-vm
index 890e4434..843df15c 100755
--- a/tests/storage-volumes-vm
+++ b/tests/storage-volumes-vm
@@ -9,8 +9,7 @@ install_lxd
 
 IMAGE="${TEST_IMG:-ubuntu-minimal-daily:24.04}"
 
-# XXX: skip ceph for now
-poolDriverList="${1:-dir btrfs lvm lvm-thin zfs}"
+poolDriverList="${1:-ceph dir btrfs lvm lvm-thin zfs}"
 
 # Configure LXD
 lxc project switch default
From 9218875fa6a22368f4f13a610ddd9252fe7af41b Mon Sep 17 00:00:00 2001
From: Simon Deziel
Date: Thu, 10 Oct 2024 16:13:51 -0400
Subject: [PATCH 05/19] tests/storage-buckets: re-add ceph to pool driver list

Signed-off-by: Simon Deziel
---
 tests/storage-buckets | 16 +---------------
 1 file changed, 1 insertion(+), 15 deletions(-)

diff --git a/tests/storage-buckets b/tests/storage-buckets
index 8d3be2d0..0920d288 100755
--- a/tests/storage-buckets
+++ b/tests/storage-buckets
@@ -27,21 +27,7 @@ elif [ "${arch}" = "riscv64" ] ; then
   exit 0
 fi
 
-poolDriverList="${1:-dir btrfs lvm lvm-thin zfs ceph}"
-
-if echo "${poolDriverList}" | grep -qwF "ceph"; then
-  echo "::warning::Skipping test on ceph until we can integrate with microceph"
-  # shellcheck disable=SC2001
-  poolDriverList="$(echo "${poolDriverList}" | sed 's/ \?\bceph\b//')"
-fi
-
-if echo "${poolDriverList}" | grep -qwF "ceph"; then
-  # XXX: LXD will be reloaded further down
-  snap set lxd ceph.external=true
-
-  # Install dependencies
-  install_deps ceph-common
-fi
+poolDriverList="${1:-ceph dir btrfs lvm lvm-thin zfs}"
 
 # Clean up the build dir in case it hung around from a failed test.
 rm -rf /opt/minio

From e29e22b6ac3af488e3974b50981b7ed132664ade Mon Sep 17 00:00:00 2001
From: Simon Deziel
Date: Thu, 10 Oct 2024 16:22:58 -0400
Subject: [PATCH 06/19] tests/storage-buckets: consistently use GiB

Signed-off-by: Simon Deziel
---
 tests/storage-buckets | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/tests/storage-buckets b/tests/storage-buckets
index 0920d288..6d36e3fb 100755
--- a/tests/storage-buckets
+++ b/tests/storage-buckets
@@ -57,15 +57,15 @@ for poolDriver in $poolDriverList
 do
   echo "==> Create storage pool using driver ${poolDriver}"
   if [ "${poolDriver}" = "dir" ]; then
-    lxc storage create "${poolName}" "${poolDriver}" volume.size=5GB
+    lxc storage create "${poolName}" "${poolDriver}" volume.size=5GiB
   elif [ "${poolDriver}" = "ceph" ]; then
     lxc storage create "${poolName}" cephobject cephobject.radosgw.endpoint="${LXD_CEPH_CEPHOBJECT_RADOSGW}"
   elif [ "${poolDriver}" = "lvm" ]; then
-    lxc storage create "${poolName}" "${poolDriver}" size=40GiB lvm.use_thinpool=false volume.size=5GB
+    lxc storage create "${poolName}" "${poolDriver}" size=40GiB lvm.use_thinpool=false volume.size=5GiB
   elif [ "${poolDriver}" = "lvm-thin" ]; then
-    lxc storage create "${poolName}" lvm size=20GiB volume.size=5GB
+    lxc storage create "${poolName}" lvm size=20GiB volume.size=5GiB
   else
-    lxc storage create "${poolName}" "${poolDriver}" size=20GB volume.size=5GB
+    lxc storage create "${poolName}" "${poolDriver}" size=20GiB volume.size=5GiB
   fi
 
 if [ "${poolDriver}" != "ceph" ]; then

From 6f765676237effe55716ab55d045bf357b5d10a4 Mon Sep 17 00:00:00 2001
From: Simon Deziel
Date: Thu, 10 Oct 2024 16:38:09 -0400
Subject: [PATCH 07/19] tests/storage-buckets: provide a default value to
 LXD_CEPH_CEPHOBJECT_RADOSGW

Signed-off-by: Simon Deziel
---
 tests/storage-buckets | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/storage-buckets b/tests/storage-buckets
index 6d36e3fb..a1199012 100755
--- a/tests/storage-buckets
+++ b/tests/storage-buckets
@@ -59,7 +59,7 @@ do
   if [ "${poolDriver}" = "dir" ]; then
     lxc storage create "${poolName}" "${poolDriver}" volume.size=5GiB
   elif [ "${poolDriver}" = "ceph" ]; then
-    lxc storage create "${poolName}" cephobject cephobject.radosgw.endpoint="${LXD_CEPH_CEPHOBJECT_RADOSGW}"
+    lxc storage create "${poolName}" cephobject cephobject.radosgw.endpoint="${LXD_CEPH_CEPHOBJECT_RADOSGW:-http://127.0.0.1}"
   elif [ "${poolDriver}" = "lvm" ]; then
     lxc storage create "${poolName}" "${poolDriver}" size=40GiB lvm.use_thinpool=false volume.size=5GiB
   elif [ "${poolDriver}" = "lvm-thin" ]; then
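For context, the driver-list patches (03-05) and the endpoint default in patch 07 both lean on POSIX parameter expansion. A short illustration of those expansions (not part of the series; the echo lines are only for demonstration):

```sh
#!/bin/sh
set -eu

# ${1:-default}: use the first positional argument when given, else the
# default list. This is what lets a single driver be exercised manually,
# e.g.: ./tests/storage-buckets ceph
poolDriverList="${1:-ceph dir btrfs lvm lvm-thin zfs}"
echo "drivers under test: ${poolDriverList}"

# ${VAR:-default}: falls back when VAR is unset OR empty, so the workflow
# may omit LXD_CEPH_CEPHOBJECT_RADOSGW entirely (even under `set -u`) and
# still get a usable endpoint, as patch 07 arranges:
endpoint="${LXD_CEPH_CEPHOBJECT_RADOSGW:-http://127.0.0.1}"
echo "radosgw endpoint: ${endpoint}"
```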
From 9865e1b3f19ee44d2aef229e975380d506e23c94 Mon Sep 17 00:00:00 2001
From: Simon Deziel
Date: Fri, 11 Oct 2024 16:08:44 -0400
Subject: [PATCH 08/19] tests/conversion: add ceph to the driver list

Signed-off-by: Simon Deziel
---
 tests/conversion | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tests/conversion b/tests/conversion
index 7b08179d..3d3ffa20 100755
--- a/tests/conversion
+++ b/tests/conversion
@@ -155,7 +155,7 @@ conversion() {
   lxdMigrateCmd="lxd-migrate --conversion=${conversionOptions}"
 
   # Create storage pool.
-  if [ "$poolType" = "dir" ]; then
+  if [ "$poolType" = "ceph" ] || [ "$poolType" = "dir" ]; then
     lxc storage create "${poolName}" "${poolType}"
   else
     lxc storage create "${poolName}" "${poolType}" size=11GiB
@@ -255,7 +255,7 @@ IMAGE_PATH="${tmpdir}/backup/virtual-machine.img"
 # Test VM migration using conversion mode. If server does not support
 # conversion API extension, lxd-migrate must fallback to migration
 # mode and successfully transfer the VM disk.
-for driver in btrfs lvm zfs dir; do
+for driver in btrfs ceph dir lvm zfs; do
     conversion_vm alpine-raw "${driver}" "${IMAGE_PATH}" "no"
 done

From 6f765676237effe55716ab55d045bf357b5d10a4 Mon Sep 17 00:00:00 2001
From: Simon Deziel
Date: Fri, 11 Oct 2024 16:09:42 -0400
Subject: [PATCH 09/19] github: add microceph for tests/conversion

Signed-off-by: Simon Deziel
---
 .github/workflows/tests.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 889c40e0..56859250 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -267,7 +267,7 @@ jobs:
           key: cache-${{ steps.get-date.outputs.date }}
 
       - name: Setup MicroCeph
-        if: ${{ matrix.test == 'storage-buckets' || matrix.test == 'storage-vm ceph' || matrix.test == 'storage-volumes-vm' }}
+        if: ${{ matrix.test == 'conversion' || matrix.test == 'storage-buckets' || matrix.test == 'storage-vm ceph' || matrix.test == 'storage-volumes-vm' }}
         uses: canonical/lxd/.github/actions/setup-microceph@main
 
       - name: ${{ matrix.test }} (${{ matrix.track }})
         run: |
           set -eux

From d7f54e7947d56a7327cfd2074f648fd0a1c241a4 Mon Sep 17 00:00:00 2001
From: Simon Deziel
Date: Thu, 10 Oct 2024 17:12:55 -0400
Subject: [PATCH 10/19] github: re-enable `storage-vm ceph` test

Signed-off-by: Simon Deziel
---
 .github/workflows/tests.yml | 2 --
 1 file changed, 2 deletions(-)

diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 56859250..a77df865 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -166,8 +166,6 @@ jobs:
           # not compatible with 5.0/*
          - test: efi-vars-editor-vm # not compatible with 5.0/*
            track: "5.0/edge"
-          # waiting for integration with microceph
-          - test: "storage-vm ceph"
           # skip track/os combinaisons that are too far appart
          - track: "4.0/edge"
            os: "24.04"

From fcd3daf7d09972768119fcc3cbd68fe88e9d7d02 Mon Sep 17 00:00:00 2001
From: Simon Deziel
Date: Thu, 10 Oct 2024 17:15:23 -0400
Subject: [PATCH 11/19] github: sort tests and their exclusions

Be more consistent with quoting.
Signed-off-by: Simon Deziel
---
 .github/workflows/tests.yml | 48 ++++++++++++++++++-------------------
 1 file changed, 24 insertions(+), 24 deletions(-)

diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index a77df865..42a01726 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -108,12 +108,12 @@ jobs:
          - snapd
          - storage-buckets
          - storage-disks-vm
-          - "storage-vm btrfs"
-          - "storage-vm ceph"
-          - "storage-vm dir"
-          - "storage-vm lvm"
-          - "storage-vm lvm-thin"
-          - "storage-vm zfs"
+          - storage-vm btrfs
+          - storage-vm ceph
+          - storage-vm dir
+          - storage-vm lvm
+          - storage-vm lvm-thin
+          - storage-vm zfs
          - storage-volumes-vm
          - tpm-vm
          - vm
@@ -122,7 +122,7 @@
        include:
          - test: qemu-external-vm
            track: "latest/edge"
-            os: "24.04"
+            os: 24.04
        exclude:
          # not compatible with 4.0/*
          - test: container-copy
            track: "4.0/edge"
          - test: network-ovn
            track: "4.0/edge"
          - test: storage-buckets
            track: "4.0/edge"
          - test: storage-disks-vm
            track: "4.0/edge"
-          - test: "storage-vm dir"
+          - test: storage-vm btrfs
            track: "4.0/edge"
-          - test: "storage-vm btrfs"
+          - test: storage-vm ceph
            track: "4.0/edge"
-          - test: "storage-vm ceph"
+          - test: storage-vm dir
            track: "4.0/edge"
-          - test: "storage-vm lvm"
+          - test: storage-vm lvm
            track: "4.0/edge"
-          - test: "storage-vm lvm-thin"
+          - test: storage-vm lvm-thin
            track: "4.0/edge"
-          - test: "storage-vm zfs"
+          - test: storage-vm zfs
            track: "4.0/edge"
          - test: storage-volumes-vm
            track: "4.0/edge"
          - test: tpm-vm
            track: "4.0/edge"
+          - test: vm-migration
+            track: "4.0/edge"
          # not compatible with 5.0/*
-          - test: efi-vars-editor-vm # not compatible with 5.0/*
+          - test: efi-vars-editor-vm
+            track: "5.0/edge"
+          - test: vm-migration
            track: "5.0/edge"
          # skip track/os combinaisons that are too far appart
          - track: "4.0/edge"
-            os: "24.04"
+            os: 24.04
          - track: "5.0/edge"
-            os: "24.04"
+            os: 24.04
          - track: "5.0/edge"
-            os: "20.04"
+            os: 20.04
          - track: "5.21/edge"
-            os: "20.04"
+            os: 20.04
          - track: "latest/edge"
-            os: "20.04"
+            os: 20.04
          - track: "latest/edge"
-            os: "22.04"
-          - test: "vm-migration"
-            track: "4.0/edge"
-          - test: "vm-migration"
-            track: "5.0/edge"
+            os: 22.04
 
     steps:
      - name: Performance tuning

From 285199be55e30d5e501a8c8bc8fd31cc732b9365 Mon Sep 17 00:00:00 2001
From: Simon Deziel
Date: Thu, 10 Oct 2024 17:20:04 -0400
Subject: [PATCH 12/19] github: re-enable pylxd test with `4.0/edge`

https://github.com/canonical/pylxd/issues/590 is fixed.
Signed-off-by: Simon Deziel
---
 .github/workflows/tests.yml | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 42a01726..1f4830b9 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -140,9 +140,6 @@ jobs:
            track: "4.0/edge"
          - test: network-ovn
            track: "4.0/edge"
-          # https://github.com/canonical/pylxd/issues/590
-          - test: pylxd
-            track: "4.0/edge"
          - test: storage-buckets
            track: "4.0/edge"

From 785a375254e8678d62be87d407077c5dcfda71c3 Mon Sep 17 00:00:00 2001
From: Simon Deziel
Date: Fri, 11 Oct 2024 15:20:51 -0400
Subject: [PATCH 13/19] github: reclaim disk to avoid issue with kernel 6.5.0-1025-azure

With this kernel the ephemeral device remains active when microceph
tries to wipe it:

```
jbd2/sdb1   537 root  cwd  DIR  8,1  4096  2  /
jbd2/sdb1   537 root  rtd  DIR  8,1  4096  2  /
jbd2/sdb1   537 root  txt  unknown  /proc/537/exe
```

`microceph`'s error signature looks like this:

```
+ sudo microceph disk add --wipe /dev/sdb
+----------+---------+
|   PATH   | STATUS  |
+----------+---------+
| /dev/sdb | Failure |
+----------+---------+
Error: failed to bootstrap OSD: Failed to run: ceph-osd --mkfs --no-mon-config -i 1: exit status 250
(2024-10-11T18:58:48.321+0000 7f5652ac1600 -1 bluestore(/var/lib/ceph/osd/ceph-1/block) _read_bdev_label unable to decode label /var/lib/ceph/osd/ceph-1/block at offset 102: void bluestore_bdev_label_t::decode(ceph::buffer::v15_2_0::list::const_iterator&) decode past end of struct encoding: Malformed input [buffer:3]
2024-10-11T18:58:48.325+0000 7f5652ac1600 -1 bluestore(/var/lib/ceph/osd/ceph-1/block) _read_bdev_label unable to decode label /var/lib/ceph/osd/ceph-1/block at offset 102: void bluestore_bdev_label_t::decode(ceph::buffer::v15_2_0::list::const_iterator&) decode past end of struct encoding: Malformed input [buffer:3]
2024-10-11T18:58:48.325+0000 7f5652ac1600 -1 bluestore(/var/lib/ceph/osd/ceph-1/block) _read_bdev_label unable to decode label /var/lib/ceph/osd/ceph-1/block at offset 102: void bluestore_bdev_label_t::decode(ceph::buffer::v15_2_0::list::const_iterator&) decode past end of struct encoding: Malformed input [buffer:3]
2024-10-11T18:58:48.345+0000 7f5652ac1600 -1 bdev(0x55573ad72000 /var/lib/ceph/osd/ceph-1/block) open open got: (16) Device or resource busy
2024-10-11T18:58:48.345+0000 7f5652ac1600 -1 bluestore(/var/lib/ceph/osd/ceph-1) mkfs failed, (16) Device or resource busy
dmesg logs
```

Hypothesis: when `/mnt` is unmounted, the filesystem is not yet done
with its lazy initialization, which causes the jbd2 thread to stick
around and prevents the release of the partition.

Signed-off-by: Simon Deziel
---
 .github/workflows/tests.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 1f4830b9..f43cb4de 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -194,8 +194,8 @@ jobs:
           # disable dpkg from calling sync()
           echo "force-unsafe-io" | sudo tee /etc/dpkg/dpkg.cfg.d/force-unsafe-io
 
-      - name: Reclaim some space (storage tests only)
-        if: ${{ startsWith(matrix.test, 'storage-vm') || matrix.test == 'storage-volumes-vm' || matrix.test == 'vm-nesting' || matrix.test == 'conversion' }}
+      - name: Reclaim some space
+        if: ${{ matrix.test == 'conversion' || startsWith(matrix.test, 'storage-') || matrix.test == 'vm-nesting' }}
         run: |
           set -eux
           df -h
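One way to probe the hypothesis above, sketched under the assumption that the ephemeral disk is /dev/sdb1 mounted on /mnt as on these runners (illustrative only, not part of the series):

```sh
#!/bin/sh
set -eux

# After unmounting the ephemeral filesystem, any jbd2/ext4lazyinit
# kernel thread still referencing the partition would explain the
# "(16) Device or resource busy" failure from ceph-osd --mkfs:
sudo umount /mnt || true
ps aux | grep -E '[j]bd2/sdb|[e]xt4lazyinit' || echo "no lingering kthreads"

# Formatting with lazy initialization disabled does the inode-table and
# journal work up front, so nothing should linger after a later umount:
sudo mkfs.ext4 -F -E lazy_itable_init=0,lazy_journal_init=0 /dev/sdb1
```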
From 3af5d5ce577d2dd57e6acea3317ffc507ea1e5df Mon Sep 17 00:00:00 2001
From: Simon Deziel
Date: Fri, 11 Oct 2024 17:02:01 -0400
Subject: [PATCH 14/19] tests/cpu-vm: remove special casing of 4.0 snap

LXD 4.0 doesn't have the cpu_hotplug API extension so this test is
skipped anyway.

Signed-off-by: Simon Deziel
---
 tests/cpu-vm | 12 +++---------
 1 file changed, 3 insertions(+), 9 deletions(-)

diff --git a/tests/cpu-vm b/tests/cpu-vm
index 145e2138..0d1b02b2 100755
--- a/tests/cpu-vm
+++ b/tests/cpu-vm
@@ -37,18 +37,12 @@ lxc storage create "${poolName}" "${poolDriver}"
 # still work
 lxc profile set default limits.kernel.nofile 50
 
-# 4.0 does not reject `limits.kernel.*` keys on VM instances
-if ! echo "${LXD_SNAP_CHANNEL}" | grep -qE "^4\.0/"; then
-  ! lxc init v0 --vm --empty -c limits.kernel.cpu=46 -s "${poolName}" || false
-fi
+! lxc init v0 --vm --empty -c limits.kernel.cpu=46 -s "${poolName}" || false
 
 lxc init v0 --vm --empty -s "${poolName}"
 
-# 4.0 does not reject `limits.kernel.*` keys on VM instances
-if ! echo "${LXD_SNAP_CHANNEL}" | grep -qE "^4\.0/"; then
-  # limits.kernel.* only applies to containers (shouldn't work)
-  ! lxc config set v0 limits.kernel.as=1GiB || false
-fi
+# limits.kernel.* only applies to containers (shouldn't work)
+! lxc config set v0 limits.kernel.as=1GiB || false
 
 lxc delete v0
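Worth noting for readers of the `! lxc ... || false` lines kept by this patch: the trailing `|| false` is not redundant under `set -e`. A minimal demonstration (illustrative only, not part of the series):

```sh
#!/bin/sh
set -eu

# A pipeline beginning with `!` is exempt from errexit, so a bare
# `! cmd` would NOT abort the script if cmd unexpectedly succeeded.
# Appending `|| false` restores abort-on-failure for that case.
! false || false   # false fails as expected; the whole list succeeds
echo "expected failure handled"

! true || false    # true unexpectedly succeeds -> `false` runs -> abort
echo "never reached"
```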
From 089e2cb78a90d69929c971b2ecdcb65a1127dae1 Mon Sep 17 00:00:00 2001
From: Simon Deziel
Date: Fri, 11 Oct 2024 17:26:12 -0400
Subject: [PATCH 15/19] tests/devlxd-vm: consistently use IMAGE var

Signed-off-by: Simon Deziel
---
 tests/devlxd-vm | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/devlxd-vm b/tests/devlxd-vm
index 71f9062d..3c036bb2 100755
--- a/tests/devlxd-vm
+++ b/tests/devlxd-vm
@@ -115,7 +115,7 @@ if hasNeededAPIExtension devlxd_images_vm; then
   monitorPID="${!}"
 fi
 
-lxc exec v1 -- /snap/bin/lxc launch "${TEST_IMG:-ubuntu-minimal-daily:24.04}" v1v1 --vm
+lxc exec v1 -- /snap/bin/lxc launch "${IMAGE}" v1v1 --vm
 sleep 30
 
 lxc exec v1 -- /snap/bin/lxc info v1v1 | grep -F RUNNING

From 17be3f0b95b7bd6bc09fa36fa78e3564d5b277a4 Mon Sep 17 00:00:00 2001
From: Simon Deziel
Date: Fri, 11 Oct 2024 17:37:04 -0400
Subject: [PATCH 16/19] tests/vm-migration: simplify member2 init

Signed-off-by: Simon Deziel
---
 tests/vm-migration | 9 +++------
 1 file changed, 3 insertions(+), 6 deletions(-)

diff --git a/tests/vm-migration b/tests/vm-migration
index adc56609..77f6383f 100644
--- a/tests/vm-migration
+++ b/tests/vm-migration
@@ -96,17 +96,14 @@
 # Create a preseed file for member2 to join member1.
 member2Address="$(lxc query /1.0/instances/member2?recursion=2 | jq -r ".state.network.enp5s0.addresses[0].address")"
-preseed="$(
-  cat <
Date: Fri, 11 Oct 2024 17:39:03 -0400
Subject: [PATCH 17/19] tests/vm-migration: use smaller vol1

Signed-off-by: Simon Deziel
---
 tests/vm-migration | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/vm-migration b/tests/vm-migration
index 77f6383f..97157e11 100644
--- a/tests/vm-migration
+++ b/tests/vm-migration
@@ -119,7 +119,7 @@ lxc exec member1 -- lxc storage create ceph ceph --target member2
 lxc exec member1 -- lxc storage create ceph ceph
 
 # Create a volume in the ceph pool to test that we can live-migrate a VM with this volume attached.
-lxc exec member1 -- lxc storage volume create ceph vol1 --type=block size=500MiB
+lxc exec member1 -- lxc storage volume create ceph vol1 --type=block size=64MiB
 
 # Create a VM in the cluster, on member1.
 lxc exec member1 -- lxc init "${TEST_IMG:-ubuntu-minimal-daily:24.04}" v1 --vm --storage ceph --target member1 -c migration.stateful=true -c limits.memory=512MiB

From b69f5b355b15ebc3192ebc027235990bb206ebb2 Mon Sep 17 00:00:00 2001
From: Simon Deziel
Date: Fri, 11 Oct 2024 17:41:42 -0400
Subject: [PATCH 18/19] tests/vm-migration: use smaller rootfs for v1

Signed-off-by: Simon Deziel
---
 tests/vm-migration | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/vm-migration b/tests/vm-migration
index 97157e11..f364e079 100644
--- a/tests/vm-migration
+++ b/tests/vm-migration
@@ -122,7 +122,7 @@ lxc exec member1 -- lxc storage create ceph ceph
 # Create a volume in the ceph pool to test that we can live-migrate a VM with this volume attached.
 lxc exec member1 -- lxc storage volume create ceph vol1 --type=block size=64MiB
 
 # Create a VM in the cluster, on member1.
-lxc exec member1 -- lxc init "${TEST_IMG:-ubuntu-minimal-daily:24.04}" v1 --vm --storage ceph --target member1 -c migration.stateful=true -c limits.memory=512MiB
+lxc exec member1 -- lxc init "${TEST_IMG:-ubuntu-minimal-daily:24.04}" v1 --vm --storage ceph --target member1 -c migration.stateful=true -c limits.memory=512MiB -d root,size=3584MiB
 
 # Add vol1 as a disk device to the VM.
 lxc exec member1 -- lxc config device add v1 vol1-disk disk pool=ceph source=vol1

From 5485dcf03784791422a7960037346af39532d30c Mon Sep 17 00:00:00 2001
From: Simon Deziel
Date: Tue, 15 Oct 2024 09:09:56 -0400
Subject: [PATCH 19/19] tests/efi-vars-editor-vm: explain why we wait after the first VM launch

Signed-off-by: Simon Deziel
---
 tests/efi-vars-editor-vm | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/tests/efi-vars-editor-vm b/tests/efi-vars-editor-vm
index 89d57352..965fea34 100755
--- a/tests/efi-vars-editor-vm
+++ b/tests/efi-vars-editor-vm
@@ -33,6 +33,8 @@ lxc storage create "${poolName}" "${poolDriver}"
 
 echo "==> Create VM and boot"
 lxc launch "${IMAGE}" v1 --vm -s "${poolName}"
+
+# Waiting for the instance to be ready ensures LXD is done initializing the NVRAM
 waitInstanceReady v1
 lxc info v1
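`waitInstanceReady` comes from the suite's shared helpers; a minimal stand-in with the same intent might look like the following (an assumption for illustration only — the real helper may poll different state):

```sh
#!/bin/sh
# Hypothetical stand-in for waitInstanceReady: poll until the lxd-agent
# inside the VM answers an exec, i.e. the guest has finished booting and
# LXD's instance setup (NVRAM included) is done.
waitInstanceReady() {
    instName="$1"
    i=0
    while [ "${i}" -lt 120 ]; do
        if lxc exec "${instName}" -- true 2>/dev/null; then
            return 0
        fi
        i=$((i + 1))
        sleep 1
    done
    echo "Instance ${instName} still not ready after 120s" >&2
    return 1
}

# Usage, mirroring the test above:
#   lxc launch "${IMAGE}" v1 --vm -s "${poolName}"
#   waitInstanceReady v1
```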