---

# Format Ref: https://cirrus-ci.org/guide/writing-tasks/

# Main collection of env. vars to set for all tasks and scripts.
env:
    # Actual|intended branch for this run
    DEST_BRANCH: "main"
    # The default is 'sh' if unspecified
    CIRRUS_SHELL: "/bin/bash"
    # Location where source repo. will be cloned
    CIRRUS_WORKING_DIR: "/var/tmp/netavark"
    # Rust package cache also lives here
    CARGO_HOME: "/var/cache/cargo"
    # Rust compiler output lives here (see Makefile)
    CARGO_TARGET_DIR: "$CIRRUS_WORKING_DIR/targets"
    # Save a little typing (path relative to $CIRRUS_WORKING_DIR)
    SCRIPT_BASE: "./contrib/cirrus"
    IMAGE_SUFFIX: "c20251120t131229z-f42f41d14"
    FEDORA_NETAVARK_IMAGE: "fedora-netavark-${IMAGE_SUFFIX}"
    AARDVARK_DNS_BRANCH: "main"
    AARDVARK_DNS_URL: "https://api.cirrus-ci.com/v1/artifact/github/containers/aardvark-dns/success/binary.zip?branch=${AARDVARK_DNS_BRANCH}"
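    # With the default AARDVARK_DNS_BRANCH of "main", the URL above expands to
    #   https://api.cirrus-ci.com/v1/artifact/github/containers/aardvark-dns/success/binary.zip?branch=main
    # i.e. the pre-built aardvark-dns binaries from the latest successful
    # Cirrus run on that branch.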
    FEDORA_NETAVARK_AARCH64_AMI: "fedora-netavark-aws-arm64-${IMAGE_SUFFIX}"
    EC2_INST_TYPE: "t4g.xlarge"
    NETAVARK_UPSTREAM: "1"

gcp_credentials: ENCRYPTED[d6efdb7d6d4c61e3831df2193ca6348bb02f26cd931695f69d41930b1965f7dab72a838ca0902f6ed8cde66c7deddae2]
aws_credentials: ENCRYPTED[9f65dd26c4b7dc54c72ce2061d22e6719c8c39cc452456e4ca02001039ad8ceee93ad35ed1782d87d71e340af94efa79]

build_task:
    alias: "build"
    # Compiling is very CPU intensive, make it chooch quicker for this task only
    gce_instance: &standard_build_gce_x86_64
        image_project: "libpod-218412"
        zone: "us-central1-c"
        disk: 200  # GB, do not set <200 per gcloud warning re: I/O performance
        cpu: 8
        memory: "8Gb"
        image_name: "${FEDORA_NETAVARK_IMAGE}"
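    # The &standard_build_gce_x86_64 anchor above is reused by later tasks via
    # a YAML merge key, e.g. (from validate_task below):
    #
    #     gce_instance:
    #         <<: *standard_build_gce_x86_64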
    cargo_cache: &cargo_cache
        # Populating this cache depends on setup.sh and runner.sh having
        # built all of the release, debug, and unit-test targets.
        folder: "$CARGO_HOME"
        # Cirrus-CI will automatically store separate caches for branches vs PRs.
        # We use the branch-name here mainly to distinguish PR-level caches in
        # order to properly support backport-PRs to release branches. Otherwise
        # all PRs & branches would share caches with other PRs and branches
        # for a given $DEST_BRANCH and vX value. Adjust vX if the cache schema
        # changes.
        fingerprint_script: echo -e "cargo_v3_${DEST_BRANCH}_amd64\n---\n$(<Cargo.lock)\n---\n$(<Cargo.toml)"
        # Required to be set explicitly since fingerprint_script is also set
        reupload_on_changes: true
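        # For example, with DEST_BRANCH set to "main" the fingerprint begins
        # with "cargo_v3_main_amd64" and is followed by the full contents of
        # Cargo.lock and Cargo.toml, so any dependency change invalidates
        # the cache.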
    targets_cache: &targets_cache
        # Similar to cargo_cache, but holds the actual compiled dependency artifacts.
        # This should be scoped to a hash of the dependency-metadata lock file.
        # Cirrus-CI will automatically use separate caches for PRs and branches.
        folder: "$CARGO_TARGET_DIR"
        fingerprint_script: echo -e "targets_v3_${CIRRUS_TAG}${DEST_BRANCH}${CIRRUS_PR}_amd64\n---\n$(<Cargo.lock)\n---\n$(<Cargo.toml)"
        reupload_on_changes: true
    bin_cache: &bin_cache
        # This simply prevents rebuilding bin/netavark for every subsequent task.
        folder: "$CIRRUS_WORKING_DIR/bin"
        # Avoid binary pollution by scoping this to only this specific build.
        # Adjust vX if cache schema changes.
        fingerprint_key: "bin_v3_${CIRRUS_TAG}${DEST_BRANCH}${CIRRUS_PR}_amd64"  # Cache only within same tag, branch, or PR (branch will be 'pull/#')
        reupload_on_changes: true
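        # For example, in a PR (where $CIRRUS_TAG is empty and $CIRRUS_PR is
        # the PR number, say 123) this resolves to something like
        # "bin_v3_main123_amd64", so binaries never leak between unrelated builds.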
    setup_script: &setup "$SCRIPT_BASE/setup.sh"
    main_script: &main "$SCRIPT_BASE/runner.sh $CIRRUS_TASK_NAME"
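    # Note: runner.sh receives the Cirrus task name ("build", "validate",
    # "unit", "integration", ...) as its only argument; that is how the same
    # main_script anchor serves every task below.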
    postbuild_script: &postbuild hack/tree_status.sh
    cache_grooming_script: &groom bash "$SCRIPT_BASE/cache_groom.sh"
    upload_caches: [ "cargo", "targets", "bin" ]
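    # Cirrus runs upload_caches at this point in the task, so the state left
    # behind by cache_groom.sh above (rather than the pre-groom contents) is
    # what gets stored.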

build_aarch64_task:
    alias: "build_aarch64"
    ec2_instance: &standard_build_ec2_aarch64
        image: "${FEDORA_NETAVARK_AARCH64_AMI}"
        type: $EC2_INST_TYPE
        region: us-east-1
        architecture: arm64  # CAUTION: This has to be "arm64", not "aarch64"
    cargo_cache: &cargo_cache_aarch64
        folder: "$CARGO_HOME"
        # N/B: Should exactly match (except for arch) line from build_task (above).
        # (No, there isn't an easy way to not duplicate most of this :()
        fingerprint_script: echo -e "cargo_v3_${DEST_BRANCH}_aarch64\n---\n$(<Cargo.lock)\n---\n$(<Cargo.toml)"
        reupload_on_changes: true
    targets_cache: &targets_cache_aarch64
        folder: "$CARGO_TARGET_DIR"
        # N/B: Should exactly match (except for arch) line from build_task (above).
        # (No, there isn't an easy way to not duplicate most of this :()
        fingerprint_script: echo -e "targets_v3_${CIRRUS_TAG}${DEST_BRANCH}${CIRRUS_PR}_aarch64\n---\n$(<Cargo.lock)\n---\n$(<Cargo.toml)"
        reupload_on_changes: true
    bin_cache: &bin_cache_aarch64
        # This simply prevents rebuilding bin/netavark for every subsequent task.
        folder: "$CIRRUS_WORKING_DIR/bin"
        fingerprint_key: "bin_v3_${CIRRUS_TAG}${DEST_BRANCH}${CIRRUS_PR}_aarch64"  # Cache only within same tag, branch, or PR (branch will be 'pull/#')
        reupload_on_changes: true
    setup_script: *setup
    main_script: *main
    postbuild_script: *postbuild
    cache_grooming_script: *groom
    upload_caches: [ "cargo", "targets", "bin" ]
    # Downstream CI needs the aarch64 binaries from this CI system.
    # However, we don't want to confuse architectures.
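    # For example, these EC2 instances report `uname -m` == "aarch64", so the
    # loop below renames bin/netavark to bin/netavark.aarch64-unknown-linux-gnu,
    # matching the names expected by $EXP_BINS in success_task.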
    art_prep_script: |
        cd bin
        ls -la
        for filename in ./*; do
            mv "$filename" "${filename}.$(uname -m)-unknown-linux-gnu"
        done
    armbinary_artifacts:  # See success_task
        path: ./bin/netavark*

validate_task:
    alias: "validate"
    depends_on:
        - "build"
    gce_instance: &standard_gce_x86_64
        <<: *standard_build_gce_x86_64
        cpu: 8
        memory: "8Gb"
    # From this point forward, all caches become read-only - meaning
    # any changes made in this task aren't re-uploaded to the cache.
    # This avoids some flapping between tasks, along with the upload time.
    cargo_cache: &ro_cargo_cache
        <<: *cargo_cache
        reupload_on_changes: false
    targets_cache: &ro_targets_cache
        <<: *targets_cache
        reupload_on_changes: false
    bin_cache: &ro_bin_cache
        <<: *bin_cache
        reupload_on_changes: false
    setup_script: *setup
    main_script: *main

validate_aarch64_task:
    alias: "validate_aarch64"
    depends_on:
        - "build_aarch64"
    ec2_instance: *standard_build_ec2_aarch64
    cargo_cache: &ro_cargo_cache_aarch64
        <<: *cargo_cache_aarch64
        reupload_on_changes: false
    targets_cache: &ro_targets_cache_aarch64
        <<: *targets_cache_aarch64
        reupload_on_changes: false
    bin_cache: &ro_bin_cache_aarch64
        <<: *bin_cache_aarch64
        reupload_on_changes: false
    setup_script: *setup
    main_script: *main

unit_task:
    alias: "unit"
    depends_on:
        - "build"
    gce_instance: *standard_gce_x86_64
    cargo_cache: *ro_cargo_cache
    targets_cache: *ro_targets_cache
    bin_cache: *ro_bin_cache
    setup_script: *setup
    main_script: *main

unit_aarch64_task:
    alias: "unit_aarch64"
    depends_on:
        - "build_aarch64"
    ec2_instance: *standard_build_ec2_aarch64
    cargo_cache: *ro_cargo_cache_aarch64
    targets_cache: *ro_targets_cache_aarch64
    bin_cache: *ro_bin_cache_aarch64
    setup_script: *setup
    main_script: *main

integration_task:
    alias: "integration"
    depends_on:
        - "unit"
    gce_instance: *standard_gce_x86_64
    cargo_cache: *ro_cargo_cache
    targets_cache: *ro_targets_cache
    bin_cache: *ro_bin_cache
    setup_script: *setup
    main_script: *main

integration_aarch64_task:
    alias: "integration_aarch64"
    depends_on:
        - "unit_aarch64"
    ec2_instance: *standard_build_ec2_aarch64
    cargo_cache: *ro_cargo_cache_aarch64
    targets_cache: *ro_targets_cache_aarch64
    bin_cache: *ro_bin_cache_aarch64
    setup_script: *setup
    main_script: *main

# This task is critical. It updates the "last-used by" timestamp stored
# in metadata for all VM images. This mechanism functions in tandem with
# an out-of-band pruning operation to remove disused VM images.
meta_task:
    alias: meta
    name: "VM img. keepalive"
    container:
        cpu: 2
        memory: 2
        image: quay.io/libpod/imgts:latest
    env:
        # Space-separated list of images used by this repository state
        IMGNAMES: "${FEDORA_NETAVARK_IMAGE}"
        EC2IMGNAMES: "${FEDORA_NETAVARK_AARCH64_AMI}"
        BUILDID: "${CIRRUS_BUILD_ID}"
        REPOREF: "${CIRRUS_REPO_NAME}"
        AWSINI: ENCRYPTED[47c697527e37e5016df8f14473b160200324532e0f1af8d2deeb28f4f92810b69bc42e661df0dc9e614bbef399552ce8]
        GCPJSON: ENCRYPTED[e7e6e13b98eb34f480a12412a048e3fb78a02239c229659e136b7a27e2ab25a5bbb61ab6016e322cb6f777fa2c9f9520]
        GCPNAME: ENCRYPTED[f3fc6da8fe283ef506d7b18467a81153ea8e18b1d3cd76e79dcd6f566f20fdd3651522432d3d232f4d69eeb1502d1f6b]
        GCPPROJECT: libpod-218412
    clone_script: &noop mkdir -p $CIRRUS_WORKING_DIR  # source not needed
    script: /usr/local/bin/entrypoint.sh

msrv_build_task:
    alias: msrv_build
    depends_on:
        - "build"
    gce_instance: *standard_gce_x86_64
    container:
        cpu: 2  # Do not increase, will result in scheduling delays
        memory: "8Gb"
        # When bumping the image always remember to update the README MSRV as well.
        image: quay.io/libpod/nv-rust:1.86
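    # MSRV == Minimum Supported Rust Version. The nv-rust image above is
    # expected to ship exactly that toolchain, so if the build below fails
    # here, the README's MSRV claim no longer holds.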
    script:
        - make build

success_task:
    name: "Total success"
    alias: success
    depends_on:
        - "build"
        - "build_aarch64"
        - "validate"
        - "validate_aarch64"
        - "unit"
        - "unit_aarch64"
        - "integration"
        - "integration_aarch64"
        - "meta"
        - "msrv_build"
    gce_instance: *standard_gce_x86_64
    env:
        API_URL_BASE: "https://api.cirrus-ci.com/v1/artifact/build/${CIRRUS_BUILD_ID}"
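        # For example, the build_aarch64 artifacts are fetched below as
        #   ${API_URL_BASE}/build_aarch64/armbinary.zip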
        EXP_BINS: >-
            netavark
            netavark.info
            netavark.aarch64-unknown-linux-gnu
            netavark.info.aarch64-unknown-linux-gnu
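        # The first two files come from this build's x86_64 bin_cache; the
        # aarch64 pair was renamed by art_prep_script in build_aarch64_task
        # and is unpacked from armbinary.zip below.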
    clone_script: *noop
    bin_cache: *ro_bin_cache
    # The paths used for uploaded artifacts are relative here and in Cirrus
    script:
        - set -x
        - curl --fail --location -O --url ${API_URL_BASE}/build_aarch64/armbinary.zip
        - unzip armbinary.zip
        - rm -f armbinary.zip
        - mv bin/* ./
        - rm -rf bin
    artifacts_test_script:  # Other CI systems depend on all files being present
        - ls -la
        # If there's a missing file, show what it was in the output
        - for fn in $EXP_BINS; do [[ -r "$(echo $fn|tee /dev/stderr)" ]] || exit 1; done
    # Upload tested binary for consumption downstream
    # https://cirrus-ci.org/guide/writing-tasks/#artifacts-instruction
    binary_artifacts:
        path: ./*netavark*