diff --git a/.github/workflows/upload-legacy-ami.yml b/.github/workflows/upload-legacy-ami.yml
index c5e585c..32df1e4 100644
--- a/.github/workflows/upload-legacy-ami.yml
+++ b/.github/workflows/upload-legacy-ami.yml
@@ -34,7 +34,8 @@ jobs:
       - name: Download AMI from Hydra
         id: download_ami
         run: |
-          out=$(curl --location --silent --header 'Accept: application/json' https://hydra.nixos.org/job/nixos/release-23.11/nixos.amazonImage.${{ matrix.system }}/latest-finished | jq --raw-output '.buildoutputs.out.path')
+          set -o pipefail
+          out=$(curl --location --silent --fail-with-body --header 'Accept: application/json' https://hydra.nixos.org/job/nixos/release-23.11/nixos.amazonImage.${{ matrix.system }}/latest-finished | jq --raw-output '.buildoutputs.out.path')
           nix-store --realise "$out" --add-root ./result
           echo "image_info=$out/nix-support/image-info.json" >> "$GITHUB_OUTPUT"
@@ -53,7 +54,7 @@
           images_bucket='${{ vars.IMAGES_BUCKET }}'
           image_ids=$(nix run .#upload-ami -- \
             --image-info "$image_info" \
-            --prefix "staging-legacy/" \
+            --prefix "smoketest/" \
             --s3-bucket "$images_bucket")
           echo "image_ids=$image_ids" >> "$GITHUB_OUTPUT"
@@ -86,7 +87,7 @@
           images_bucket='${{ vars.IMAGES_BUCKET }}'
           image_ids=$(nix run .#upload-ami -- \
             --image-info "$image_info" \
-            --prefix "legacy/" \
+            --prefix "nixos/" \
             --s3-bucket "$images_bucket" \
             --copy-to-regions \
             --public)
diff --git a/upload-ami/src/upload_ami/cli.py b/upload-ami/src/upload_ami/cli.py
index 4550876..84bef8e 100644
--- a/upload-ami/src/upload_ami/cli.py
+++ b/upload-ami/src/upload_ami/cli.py
@@ -188,15 +188,15 @@ def upload_ami(image_info, s3_bucket, copy_to_regions, prefix, run_id, public):
     s3 = boto3.client("s3")
     image_file = image_info["file"]
-    base_name = os.path.basename(os.path.dirname(image_file))
-    file_name = os.path.basename(image_file)
-    s3_key = os.path.join(base_name, file_name)
+    label = image_info["label"]
+    system = image_info["system"]
+    image_name = prefix + label + "-" + system + ("." + run_id if run_id else "")
+    s3_key = image_name
     upload_to_s3_if_not_exists(s3, s3_bucket, s3_key, image_file)
     image_format = image_info.get("format") or "VHD"
     snapshot_id = import_snapshot(ec2, s3_bucket, s3_key, image_format)
-    image_name = prefix + base_name + ("." + run_id if run_id else "")
     image_id = register_image_if_not_exists(
         ec2, image_name, image_info, snapshot_id, public)
@@ -211,6 +211,7 @@ def upload_ami(image_info, s3_bucket, copy_to_regions, prefix, run_id, public):
             copy_image_to_regions(image_id, image_name, ec2.meta.region_name, regions, public)
         )
+    return image_ids
diff --git a/upload-ami/src/upload_ami/smoke_test.py b/upload-ami/src/upload_ami/smoke_test.py
index 45ee306..a17e4b6 100644
--- a/upload-ami/src/upload_ami/smoke_test.py
+++ b/upload-ami/src/upload_ami/smoke_test.py
@@ -35,6 +35,8 @@ def smoke_test(image_id, region, run_id, cancel):
         # This basically waits for DHCP to have finished; as it uses ARP to check if the instance is healthy
         logging.info(f"Waiting for instance {instance_id} to be running")
         ec2.get_waiter("instance_running").wait(InstanceIds=[instance_id])
+        logging.info(f"Waiting for instance {instance_id} to be healthy")
+        ec2.get_waiter("instance_status_ok").wait(InstanceIds=[instance_id])
         tries = 5
         console_output = ec2.get_console_output(InstanceId=instance_id, Latest=True)
         output = console_output.get("Output")
@@ -46,7 +48,10 @@
             console_output = ec2.get_console_output(InstanceId=instance_id, Latest=True)
             output = console_output.get("Output")
             tries -= 1
-        print(output)
+        logging.info(f"Console output: {output}")
+    except Exception as e:
+        logging.error(f"Error: {e}")
+        raise
     finally:
         logging.info(f"Terminating instance {instance_id}")
         ec2.terminate_instances(InstanceIds=[instance_id])
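Note on the cli.py naming change: the S3 key and AMI name are now derived from the image-info metadata (label and system) rather than from the store path's base name, so the prefixes set in the workflow ("smoketest/", "nixos/") appear directly in the final name. A minimal sketch of the resulting scheme, with illustrative values (the helper name build_image_name is hypothetical, not part of the CLI):

    def build_image_name(prefix: str, label: str, system: str, run_id: str | None) -> str:
        # Mirrors the new cli.py logic: <prefix><label>-<system>[.<run_id>]
        return prefix + label + "-" + system + ("." + run_id if run_id else "")

    # Example (values are made up):
    #   build_image_name("nixos/", "23.11.5060", "x86_64-linux", None)
    #   -> "nixos/23.11.5060-x86_64-linux"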