Bikeshed name (#64)
* bikeshed about name

* Fix bug

* Bikeshed

* Fix #56

* Wait for status_ok

* log before finally
arianvp authored Feb 4, 2024
1 parent dd3dc2d commit b989f78
Showing 3 changed files with 15 additions and 8 deletions.
.github/workflows/upload-legacy-ami.yml: 4 additions & 3 deletions
@@ -34,7 +34,8 @@ jobs:
       - name: Download AMI from Hydra
         id: download_ami
         run: |
-          out=$(curl --location --silent --header 'Accept: application/json' https://hydra.nixos.org/job/nixos/release-23.11/nixos.amazonImage.${{ matrix.system }}/latest-finished | jq --raw-output '.buildoutputs.out.path')
+          set -o pipefail
+          out=$(curl --location --silent --fail-with-body --header 'Accept: application/json' https://hydra.nixos.org/job/nixos/release-23.11/nixos.amazonImage.${{ matrix.system }}/latest-finished | jq --raw-output '.buildoutputs.out.path')
           nix-store --realise "$out" --add-root ./result
           echo "image_info=$out/nix-support/image-info.json" >> "$GITHUB_OUTPUT"
@@ -53,7 +54,7 @@ jobs:
           images_bucket='${{ vars.IMAGES_BUCKET }}'
           image_ids=$(nix run .#upload-ami -- \
             --image-info "$image_info" \
-            --prefix "staging-legacy/" \
+            --prefix "smoketest/" \
             --s3-bucket "$images_bucket")
           echo "image_ids=$image_ids" >> "$GITHUB_OUTPUT"
@@ -86,7 +87,7 @@ jobs:
           images_bucket='${{ vars.IMAGES_BUCKET }}'
           image_ids=$(nix run .#upload-ami -- \
             --image-info "$image_info" \
-            --prefix "legacy/" \
+            --prefix "nixos/" \
             --s3-bucket "$images_bucket" \
             --copy-to-regions \
             --public)
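With set -o pipefail and curl's --fail-with-body, a failed Hydra request now fails the whole download step instead of handing jq an error page and silently producing an empty store path. A rough Python equivalent of this step, for illustration only; the function name and the use of the requests library are assumptions, not part of the workflow:

# Illustrative sketch of the "Download AMI from Hydra" step in Python.
# raise_for_status() plays the role of curl's --fail-with-body, and
# check=True plays the role of `set -o pipefail`.
import subprocess
import requests

def download_latest_image_info(system: str) -> str:
    url = (
        "https://hydra.nixos.org/job/nixos/release-23.11/"
        f"nixos.amazonImage.{system}/latest-finished"
    )
    response = requests.get(url, headers={"Accept": "application/json"})
    response.raise_for_status()  # fail loudly instead of feeding an error page to jq
    out = response.json()["buildoutputs"]["out"]["path"]
    # Realise the store path locally and pin it with a GC root, as the workflow does.
    subprocess.run(["nix-store", "--realise", out, "--add-root", "./result"], check=True)
    return f"{out}/nix-support/image-info.json"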
upload-ami/src/upload_ami/cli.py: 5 additions & 4 deletions
@@ -188,15 +188,15 @@ def upload_ami(image_info, s3_bucket, copy_to_regions, prefix, run_id, public):
     s3 = boto3.client("s3")

     image_file = image_info["file"]
-    base_name = os.path.basename(os.path.dirname(image_file))
-    file_name = os.path.basename(image_file)
-    s3_key = os.path.join(base_name, file_name)
+    label = image_info["label"]
+    system = image_info["system"]
+    image_name = prefix + label + "-" + system + ("." + run_id if run_id else "")
+    s3_key = image_name
     upload_to_s3_if_not_exists(s3, s3_bucket, s3_key, image_file)

     image_format = image_info.get("format") or "VHD"
     snapshot_id = import_snapshot(ec2, s3_bucket, s3_key, image_format)

-    image_name = prefix + base_name + ("." + run_id if run_id else "")
     image_id = register_image_if_not_exists(
         ec2, image_name, image_info, snapshot_id, public)

@@ -211,6 +211,7 @@ def upload_ami(image_info, s3_bucket, copy_to_regions, prefix, run_id, public):
         copy_image_to_regions(image_id, image_name,
                               ec2.meta.region_name, regions, public)
     )
+
     return image_ids

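The cli.py change derives the S3 key and the AMI name from the same string, built from the image's label and system plus the --prefix and optional run id, instead of from the basename of the Nix store path. A minimal sketch of the resulting name, with hypothetical example values:

# Sketch of the new naming scheme; all values below are made up for illustration.
prefix = "smoketest/"
label = "23.11.20240101.abcdef"   # image_info["label"] (hypothetical)
system = "x86_64-linux"           # image_info["system"]
run_id = "1234567890"             # optional CI run id

image_name = prefix + label + "-" + system + ("." + run_id if run_id else "")
s3_key = image_name
# => "smoketest/23.11.20240101.abcdef-x86_64-linux.1234567890"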
upload-ami/src/upload_ami/smoke_test.py: 6 additions & 1 deletion
@@ -35,6 +35,8 @@ def smoke_test(image_id, region, run_id, cancel):
         # This basically waits for DHCP to have finished; as it uses ARP to check if the instance is healthy
         logging.info(f"Waiting for instance {instance_id} to be running")
         ec2.get_waiter("instance_running").wait(InstanceIds=[instance_id])
+        logging.info(f"Waiting for instance {instance_id} to be healthy")
+        ec2.get_waiter("instance_status_ok").wait(InstanceIds=[instance_id])
         tries = 5
         console_output = ec2.get_console_output(InstanceId=instance_id, Latest=True)
         output = console_output.get("Output")
@@ -46,7 +48,10 @@ def smoke_test(image_id, region, run_id, cancel):
             console_output = ec2.get_console_output(InstanceId=instance_id, Latest=True)
             output = console_output.get("Output")
             tries -= 1
-        print(output)
+        logging.info(f"Console output: {output}")
+    except Exception as e:
+        logging.error(f"Error: {e}")
+        raise
     finally:
         logging.info(f"Terminating instance {instance_id}")
         ec2.terminate_instances(InstanceIds=[instance_id])
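The smoke test now also waits for the instance_status_ok waiter (EC2's status checks) before reading console output, logs the output instead of printing it, and logs and re-raises any exception before the finally block terminates the instance. A condensed sketch of that pattern, assuming boto3 and an already-launched instance; the helper name is made up:

# Minimal sketch of the wait-and-terminate pattern used by the smoke test.
import logging
import boto3

def wait_and_collect_console(instance_id: str, region: str) -> None:
    ec2 = boto3.client("ec2", region_name=region)
    try:
        # instance_running only means the instance state is "running";
        # instance_status_ok additionally waits for EC2's status checks to pass.
        ec2.get_waiter("instance_running").wait(InstanceIds=[instance_id])
        ec2.get_waiter("instance_status_ok").wait(InstanceIds=[instance_id])
        output = ec2.get_console_output(InstanceId=instance_id, Latest=True).get("Output")
        logging.info(f"Console output: {output}")
    except Exception as e:
        # Log before re-raising so the failure is visible even though the
        # finally block below still terminates the instance.
        logging.error(f"Error: {e}")
        raise
    finally:
        logging.info(f"Terminating instance {instance_id}")
        ec2.terminate_instances(InstanceIds=[instance_id])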
