
test: add integration test

Signed-off-by: Xiaofeng Wang <henrywangxf@me.com>
Author: Xiaofeng Wang
Date: 2024-02-24 17:25:48 +08:00
parent 5f58cb3147
commit cd7e62d5aa
16 changed files with 1233 additions and 0 deletions

1
.fmf/version Normal file

@@ -0,0 +1 @@
1

120
.github/workflows/integration.yml vendored Normal file

@@ -0,0 +1,120 @@
---
name: Integration Test

permissions:
  pull-requests: read
  contents: read
  statuses: write

# Running Testing Farm needs the TF_API_KEY secret available inside the forked repo,
# so the pull_request_target trigger has to be used in this case. To protect the
# secrets, this workflow checks the PR sender's permission in its first job: only
# collaborators with repo write or admin permission can run this workflow.
on:
  pull_request_target:
    types: [opened, synchronize, reopened]

env:
  AWS_REGION: us-east-1

jobs:
  pr-info:
    runs-on: ubuntu-latest
    steps:
      - name: Query author repository permissions
        uses: octokit/request-action@v2.x
        id: user_permission
        with:
          route: GET /repos/${{ github.repository }}/collaborators/${{ github.event.sender.login }}/permission
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

      # Restrict running of tests to users with admin or write permission for the repository.
      # See https://docs.github.com/en/rest/collaborators/collaborators?apiVersion=2022-11-28#get-repository-permissions-for-a-user
      - name: Check if user has the required permissions
        if: contains('admin write', fromJson(steps.user_permission.outputs.data).permission)
        id: check_user_perm
        run: |
          echo "User '${{ github.event.sender.login }}' has permission '${{ fromJson(steps.user_permission.outputs.data).permission }}'; allowed values: 'admin', 'write'"
          echo "allowed_user=true" >> $GITHUB_OUTPUT

      - name: Get information for pull request
        uses: octokit/request-action@v2.x
        id: pr-api
        with:
          route: GET /repos/${{ github.repository }}/pulls/${{ github.event.number }}
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

    outputs:
      allowed_user: ${{ steps.check_user_perm.outputs.allowed_user }}
      sha: ${{ fromJson(steps.pr-api.outputs.data).head.sha }}
      ref: ${{ fromJson(steps.pr-api.outputs.data).head.ref }}
      repo_url: ${{ fromJson(steps.pr-api.outputs.data).head.repo.html_url }}

  rhel94-integration:
    needs: pr-info
    if: ${{ needs.pr-info.outputs.allowed_user == 'true' }}
    continue-on-error: true
    strategy:
      matrix:
        arch: [x86_64, aarch64]
        platform: [aws]
    runs-on: ubuntu-latest

    steps:
      - name: Clone repository
        uses: actions/checkout@v4
        with:
          ref: ${{ needs.pr-info.outputs.sha }}
          fetch-depth: 0

      - name: Run the tests
        uses: sclorg/testing-farm-as-github-action@v1
        with:
          compose: CentOS-Stream-9
          api_key: ${{ secrets.TF_API_KEY }}
          git_url: ${{ needs.pr-info.outputs.repo_url }}
          git_ref: ${{ needs.pr-info.outputs.ref }}
          arch: ${{ matrix.arch }}
          tmt_context: "arch=${{ matrix.arch }}"
          update_pull_request_status: true
          pull_request_status_name: "Integration-rhel94-${{ matrix.arch }}-${{ matrix.platform }}"
          tmt_plan_regex: "${{ matrix.platform }}"
          tf_scope: private
          secrets: "QUAY_USERNAME=${{ secrets.QUAY_USERNAME }};QUAY_PASSWORD=${{ secrets.QUAY_PASSWORD }};QUAY_SECRET=${{ secrets.QUAY_SECRET }};RHEL_REGISTRY_URL=${{ secrets.RHEL_REGISTRY_URL }};DOWNLOAD_NODE=${{ secrets.DOWNLOAD_NODE }};AWS_ACCESS_KEY_ID=${{ secrets.AWS_ACCESS_KEY_ID }};AWS_SECRET_ACCESS_KEY=${{ secrets.AWS_SECRET_ACCESS_KEY }}"
          variables: "TEST_OS=rhel-9-4;PLATFORM=${{ matrix.platform }};ARCH=${{ matrix.arch }};AWS_REGION=${{ env.AWS_REGION }}"

  cs9-dev-integration:
    needs: pr-info
    if: ${{ needs.pr-info.outputs.allowed_user == 'true' }}
    continue-on-error: true
    strategy:
      matrix:
        arch: [x86_64, aarch64]
        platform: [aws]
    runs-on: ubuntu-latest

    steps:
      - name: Clone repository
        uses: actions/checkout@v4
        with:
          ref: ${{ needs.pr-info.outputs.sha }}
          fetch-depth: 0

      - name: Run the tests
        uses: sclorg/testing-farm-as-github-action@v1
        with:
          compose: CentOS-Stream-9
          api_key: ${{ secrets.TF_API_KEY }}
          git_url: ${{ needs.pr-info.outputs.repo_url }}
          git_ref: ${{ needs.pr-info.outputs.ref }}
          arch: ${{ matrix.arch }}
          tmt_context: "arch=${{ matrix.arch }}"
          update_pull_request_status: true
          pull_request_status_name: "Integration-cs9-dev-${{ matrix.arch }}-${{ matrix.platform }}"
          tmt_plan_regex: "${{ matrix.platform }}"
          tf_scope: private
          secrets: "QUAY_USERNAME=${{ secrets.QUAY_USERNAME }};QUAY_PASSWORD=${{ secrets.QUAY_PASSWORD }};QUAY_SECRET=${{ secrets.QUAY_SECRET }};AWS_ACCESS_KEY_ID=${{ secrets.AWS_ACCESS_KEY_ID }};AWS_SECRET_ACCESS_KEY=${{ secrets.AWS_SECRET_ACCESS_KEY }}"
          variables: "TEST_OS=centos-stream-9;PLATFORM=${{ matrix.platform }};ARCH=${{ matrix.arch }};AWS_REGION=${{ env.AWS_REGION }}"

35
plans/install-upgrade.fmf Normal file

@@ -0,0 +1,35 @@
discover:
  how: fmf
  test: install-upgrade

prepare:
  - how: install
    package:
      - ansible-core
      - gcc
      - podman
      - skopeo
      - jq
      - python3-devel
      - unzip

execute:
  how: tmt

/aws:
  summary: Run bootc install and upgrade test on aws
  tag: aws
  environment+:
    PLATFORM: aws
  discover+:
    test:
      - /rpm-build
      - /bootc-install-upgrade
  adjust+:
    - when: arch != x86_64 and arch != aarch64
      enabled: false
  prepare+:
    - how: shell
      script: |
        pip install boto3 botocore
        ansible-galaxy collection install amazon.aws community.general ansible.posix
    - how: shell
      script: curl "https://awscli.amazonaws.com/awscli-exe-linux-$(uname -m).zip" -o "awscliv2.zip" && unzip awscliv2.zip && sudo ./aws/install

70
tests/integration/README.md Normal file

@@ -0,0 +1,70 @@
## Integration Test

### Scenarios

The integration test covers two scenarios: `RPM build` and `bootc install/upgrade`.

1. The RPM build scenario builds the RPM for RHEL 9, CentOS Stream 9, and Fedora with mock.
2. The bootc install/upgrade scenario installs and upgrades the bootc image, then runs several system checks, such as verifying mount points and permissions, running podman as root and rootless, and checking persistent logging.

#### Run RPM Build Test

```shell
podman run --rm --privileged -v ./:/workdir:z -e TEST_OS=$TEST_OS -e ARCH=$ARCH -e RHEL_REGISTRY_URL=$RHEL_REGISTRY_URL -e DOWNLOAD_NODE=$DOWNLOAD_NODE --workdir /workdir quay.io/fedora/fedora:40 ./tests/integration/mockbuild.sh
```

#### Run Integration Test

The integration test runs on shared test infrastructure driven by the [`testing farm`](https://docs.testing-farm.io/Testing%20Farm/0.1/cli.html) tool, for example on AWS.
Run the `testing-farm` CLI from the `quay.io/testing-farm/cli` container, and don't forget to export `TESTING_FARM_API_TOKEN` in your environment. To run RHEL tests, the `Red Hat Ranch` has to be used.
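If you prefer not to install the CLI locally, one option is to wrap the container image in a shell alias. This is a minimal sketch, assuming `podman` is available and that the image's entrypoint is the `testing-farm` binary:

```shell
# Run the Testing Farm CLI from its container image; the API token is
# passed through from the host environment (export it first).
alias testing-farm='podman run --rm -it -e TESTING_FARM_API_TOKEN quay.io/testing-farm/cli'
testing-farm --help   # sanity check
```

With the alias in place, the `testing-farm request` invocation below works unchanged: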
```shell
export TESTING_FARM_API_TOKEN=<your-token>
testing-farm request \
--plan "aws" \
--environment PLATFORM=$PLATFORM \
--environment ARCH=$ARCH \
--environment TEST_OS=$TEST_OS \
--environment AWS_REGION=us-east-1 \
--secret DOWNLOAD_NODE=$DOWNLOAD_NODE \
--secret RHEL_REGISTRY_URL=$RHEL_REGISTRY_URL \
--secret CERT_URL=$CERT_URL \
--secret QUAY_USERNAME=$QUAY_USERNAME \
--secret QUAY_PASSWORD=$QUAY_PASSWORD \
--secret QUAY_SECRET=$QUAY_SECRET \
--secret AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID \
--secret AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY \
--git-url <PR URL> \
--git-ref <PR branch> \
--compose "CentOS-Stream-9" \
--arch $ARCH \
--context "arch=$ARCH" \
--timeout "120"
```
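The `request` command prints a URL for the submitted job. To follow progress from the terminal instead, the CLI's `watch` subcommand can be used (a sketch; the request id is the UUID printed by `testing-farm request`):

```shell
# Follow a previously submitted request until it finishes.
testing-farm watch --id <request-id>
```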
* For the AWS test, the environment variables `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, and `AWS_REGION=us-east-1` have to be configured.
### Required environment variables

An example export snippet follows this list.

    TEST_OS                 The OS to run the tests in. Currently supported values:
                                "rhel-9-4"
                                "centos-stream-9"
    ARCH                    Test architecture
                                "x86_64"
                                "aarch64"
    PLATFORM                Run test on:
                                "aws"
    QUAY_USERNAME           quay.io username
    QUAY_PASSWORD           quay.io password
    QUAY_SECRET             Saved into /etc/ostree/auth.json for the authenticated registry
    DOWNLOAD_NODE           RHEL nightly compose download URL
    RHEL_REGISTRY_URL       RHEL bootc image URL
    CERT_URL                CA certificate download URL
    AWS_ACCESS_KEY_ID       AWS access key id
    AWS_SECRET_ACCESS_KEY   AWS secret key
    AWS_REGION              AWS region
                                "us-east-1" (the RHEL AWS EC2 image is only available in this region)
    TESTING_FARM_API_TOKEN  Required by the Testing Farm API
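A typical environment setup before invoking `testing-farm` might look like the following sketch; all values are placeholders, and the RHEL-specific variables are only needed for `TEST_OS=rhel-9-4`:

```shell
# Scenario selection.
export TEST_OS=centos-stream-9        # or rhel-9-4
export ARCH=x86_64                    # or aarch64
export PLATFORM=aws

# Registry credentials for pushing/pulling the test image.
export QUAY_USERNAME=<quay-username>
export QUAY_PASSWORD=<quay-password>
export QUAY_SECRET=<quay-auth-token>

# AWS credentials; the RHEL EC2 image is only available in us-east-1.
export AWS_ACCESS_KEY_ID=<aws-access-key-id>
export AWS_SECRET_ACCESS_KEY=<aws-secret-access-key>
export AWS_REGION=us-east-1

# RHEL-only settings (nightly compose and internal registry).
export DOWNLOAD_NODE=<rhel-download-host>
export RHEL_REGISTRY_URL=<rhel-bootc-registry-url>
export CERT_URL=<ca-cert-url>

export TESTING_FARM_API_TOKEN=<your-token>
```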

7
tests/integration/files/auth.template Normal file

@@ -0,0 +1,7 @@
{
  "auths": {
    "quay.io": {
      "auth": "REPLACE_ME"
    }
  }
}

10
tests/integration/files/rhel-9.template Normal file

@@ -0,0 +1,10 @@
[rhel-9x-baseos]
baseurl=http://REPLACE_ME/rhel-9/nightly/RHEL-9/REPLACE_COMPOSE_ID/compose/BaseOS/$basearch/os/
enabled=1
gpgcheck=0
[rhel-9x-appstream]
baseurl=http://REPLACE_ME/rhel-9/nightly/RHEL-9/REPLACE_COMPOSE_ID/compose/AppStream/$basearch/os/
enabled=1
gpgcheck=0

9
tests/integration/main.fmf Normal file

@@ -0,0 +1,9 @@
/rpm-build:
    summary: bootc rpm build test
    test: podman run --rm --privileged -v ../../:/workdir:z -e TEST_OS=$TEST_OS -e ARCH=$ARCH -e RHEL_REGISTRY_URL=$RHEL_REGISTRY_URL -e DOWNLOAD_NODE=$DOWNLOAD_NODE --workdir /workdir quay.io/fedora/fedora:40 ./tests/integration/mockbuild.sh
    duration: 40m

/bootc-install-upgrade:
    summary: bootc install and upgrade test
    test: ./install-upgrade.sh
    duration: 40m

176
tests/integration/install-upgrade.sh Executable file

@@ -0,0 +1,176 @@
#!/bin/bash
set -exuo pipefail
# Colorful timestamped output.
function greenprint {
echo -e "\033[1;32m[$(date -Isecond)] ${1}\033[0m"
}
function redprint {
echo -e "\033[1;31m[$(date -Isecond)] ${1}\033[0m"
}
TEMPDIR=$(mktemp -d)
trap 'rm -rf -- "$TEMPDIR"' EXIT
# SSH configurations
SSH_KEY=${TEMPDIR}/id_rsa
ssh-keygen -f "${SSH_KEY}" -N "" -q -t rsa-sha2-256 -b 2048
SSH_KEY_PUB="${SSH_KEY}.pub"
INSTALL_CONTAINERFILE=${TEMPDIR}/Containerfile.install
UPGRADE_CONTAINERFILE=${TEMPDIR}/Containerfile.upgrade
QUAY_REPO_TAG="${QUAY_REPO_TAG:-$(tr -dc a-z0-9 < /dev/urandom | head -c 4 ; echo '')}"
INVENTORY_FILE="${TEMPDIR}/inventory"
REPLACE_CLOUD_USER=""
case "$TEST_OS" in
"rhel-9-4")
IMAGE_NAME="rhel9-rhel_bootc"
TIER1_IMAGE_URL="${RHEL_REGISTRY_URL}/${IMAGE_NAME}:rhel-9.4"
SSH_USER="cloud-user"
CURRENT_COMPOSE_RHEL94=$(skopeo inspect --tls-verify=false "docker://${TIER1_IMAGE_URL}" | jq -r '.Labels."redhat.compose-id"')
sed "s/REPLACE_ME/${DOWNLOAD_NODE}/; s/REPLACE_COMPOSE_ID/${CURRENT_COMPOSE_RHEL94}/" files/rhel-9.template | tee rhel-9.repo > /dev/null
ADD_REPO="COPY rhel-9.repo /etc/yum.repos.d/rhel-9.repo"
if [[ "$PLATFORM" == "aws" ]]; then
SSH_USER="ec2-user"
REPLACE_CLOUD_USER='RUN sed -i "s/name: cloud-user/name: ec2-user/g" /etc/cloud/cloud.cfg'
fi
greenprint "Prepare cloud-init file"
tee -a "playbooks/user-data" > /dev/null << EOF
#cloud-config
yum_repos:
rhel-9x-baseos:
name: rhel-9x-baseos
baseurl: http://${DOWNLOAD_NODE}/rhel-9/nightly/RHEL-9/${CURRENT_COMPOSE_RHEL94}/compose/BaseOS/${ARCH}/os/
enabled: true
gpgcheck: false
rhel-9x-appstream:
name: rhel-9x-appstream
baseurl: http://${DOWNLOAD_NODE}/rhel-9/nightly/RHEL-9/${CURRENT_COMPOSE_RHEL94}/compose/AppStream/${ARCH}/os/
enabled: true
gpgcheck: false
EOF
;;
"centos-stream-9")
IMAGE_NAME="centos-bootc-dev"
TIER1_IMAGE_URL="quay.io/centos-bootc/${IMAGE_NAME}:stream9"
SSH_USER="cloud-user"
ADD_REPO=""
if [[ "$PLATFORM" == "aws" ]]; then
SSH_USER="ec2-user"
REPLACE_CLOUD_USER='RUN sed -i "s/name: cloud-user/name: ec2-user/g" /etc/cloud/cloud.cfg'
fi
;;
*)
redprint "Variable TEST_OS has to be defined"
exit 1
;;
esac
TEST_IMAGE_NAME="${IMAGE_NAME}-test"
TEST_IMAGE_URL="quay.io/redhat_emp1/${TEST_IMAGE_NAME}:${QUAY_REPO_TAG}"
sed "s/REPLACE_ME/${QUAY_SECRET}/g" files/auth.template | tee auth.json > /dev/null
greenprint "Create $TEST_OS installation Containerfile"
tee "$INSTALL_CONTAINERFILE" > /dev/null << EOF
FROM "$TIER1_IMAGE_URL"
$ADD_REPO
COPY build/bootc-2*.${ARCH}.rpm .
RUN dnf -y update ./bootc-2*.${ARCH}.rpm && \
    rm -f ./bootc-2*.${ARCH}.rpm
RUN dnf -y install python3 cloud-init && \
    dnf -y clean all
COPY auth.json /etc/ostree/auth.json
$REPLACE_CLOUD_USER
EOF
greenprint "Check $TEST_OS installation Containerfile"
cat "$INSTALL_CONTAINERFILE"
greenprint "Login quay.io"
podman login -u "${QUAY_USERNAME}" -p "${QUAY_PASSWORD}" quay.io
greenprint "Build $TEST_OS installation container image"
podman build --tls-verify=false -t "${TEST_IMAGE_NAME}:${QUAY_REPO_TAG}" -f "$INSTALL_CONTAINERFILE" .
greenprint "Push $TEST_OS installation container image"
podman push "${TEST_IMAGE_NAME}:${QUAY_REPO_TAG}" "$TEST_IMAGE_URL"
greenprint "Prepare inventory file"
tee -a "$INVENTORY_FILE" > /dev/null << EOF
[cloud]
localhost
[guest]
[cloud:vars]
ansible_connection=local
[guest:vars]
ansible_user="$SSH_USER"
ansible_private_key_file="$SSH_KEY"
ansible_ssh_common_args="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null"
[all:vars]
ansible_python_interpreter=/usr/bin/python3
EOF
greenprint "Prepare ansible.cfg"
export ANSIBLE_CONFIG="playbooks/ansible.cfg"
greenprint "Deploy $PLATFORM instance"
ansible-playbook -v \
-i "$INVENTORY_FILE" \
-e ssh_key_pub="$SSH_KEY_PUB" \
-e inventory_file="$INVENTORY_FILE" \
"playbooks/deploy-${PLATFORM}.yaml"
greenprint "Install $TEST_OS bootc system"
ansible-playbook -v \
-i "$INVENTORY_FILE" \
-e test_image_url="$TEST_IMAGE_URL" \
playbooks/install.yaml
greenprint "Run ostree checking test on $PLATFORM instance"
ansible-playbook -v \
-i "$INVENTORY_FILE" \
-e bootc_image="$TEST_IMAGE_URL" \
playbooks/check-system.yaml
greenprint "Create upgrade Containerfile"
tee "$UPGRADE_CONTAINERFILE" > /dev/null << EOF
FROM "$TEST_IMAGE_URL"
RUN dnf -y install wget && \
dnf -y clean all
EOF
greenprint "Build $TEST_OS upgrade container image"
podman build --tls-verify=false -t "${TEST_IMAGE_NAME}:${QUAY_REPO_TAG}" -f "$UPGRADE_CONTAINERFILE" .
greenprint "Push $TEST_OS upgrade container image"
podman push "${TEST_IMAGE_NAME}:${QUAY_REPO_TAG}" "$TEST_IMAGE_URL"
greenprint "Upgrade $TEST_OS system"
ansible-playbook -v \
-i "$INVENTORY_FILE" \
playbooks/upgrade.yaml
greenprint "Run ostree checking test after upgrade on $PLATFORM instance"
ansible-playbook -v \
-i "$INVENTORY_FILE" \
-e bootc_image="$TEST_IMAGE_URL" \
-e upgrade="true" \
playbooks/check-system.yaml
greenprint "Remove $PLATFORM instance"
ansible-playbook -v \
-i "$INVENTORY_FILE" \
-e platform="$PLATFORM" \
playbooks/remove.yaml
greenprint "Clean up"
rm -rf auth.json rhel-9.repo
unset ANSIBLE_CONFIG
greenprint "🎉 All tests passed."
exit 0

83
tests/integration/mockbuild.sh Executable file

@@ -0,0 +1,83 @@
#!/bin/bash
set -exuo pipefail
# Colorful output.
function greenprint {
echo -e "\033[1;32m[$(date -Isecond)] ${1}\033[0m"
}
function redprint {
echo -e "\033[1;31m[$(date -Isecond)] ${1}\033[0m"
}
greenprint "📥 Install required packages"
sudo dnf install -y cargo zstd git openssl-devel ostree-devel rpm-build mock podman skopeo jq
cargo install cargo-vendor-filterer
greenprint "⛏ Build archive"
cargo xtask package-srpm
greenprint "📋 Get target tmp folder path"
shopt -s extglob
TARGET_FOLDER=(target/.tmp*)
case "$TEST_OS" in
"rhel-9-4")
TEMPLATE="rhel-9.tpl"
greenprint "📝 update mock rhel-9 template"
# disable subscription for nightlies
sudo sed -i "s/config_opts\['redhat_subscription_required'\] = True/config_opts['redhat_subscription_required'] = False/" /etc/mock/templates/"$TEMPLATE"
# delete default cdn compose and add nightly compose
IMAGE_NAME="rhel9-rhel_bootc"
TIER1_IMAGE_URL="${RHEL_REGISTRY_URL}/${IMAGE_NAME}:rhel-9.4"
CURRENT_COMPOSE_RHEL94=$(skopeo inspect --tls-verify=false "docker://${TIER1_IMAGE_URL}" | jq -r '.Labels."redhat.compose-id"')
sudo sed -i '/user_agent/q' /etc/mock/templates/"$TEMPLATE"
sudo tee -a /etc/mock/templates/"$TEMPLATE" > /dev/null << EOF
[BaseOS]
name=Red Hat Enterprise Linux - BaseOS
baseurl=http://${DOWNLOAD_NODE}/rhel-9/nightly/RHEL-9/${CURRENT_COMPOSE_RHEL94}/compose/BaseOS/\$basearch/os/
enabled=1
gpgcheck=0
[AppStream]
name=Red Hat Enterprise Linux - AppStream
baseurl=http://${DOWNLOAD_NODE}/rhel-9/nightly/RHEL-9/${CURRENT_COMPOSE_RHEL94}/compose/AppStream/\$basearch/os/
enabled=1
gpgcheck=0
[CRB]
name = Red Hat Enterprise Linux - CRB
baseurl = http://${DOWNLOAD_NODE}/rhel-9/nightly/RHEL-9/${CURRENT_COMPOSE_RHEL94}/compose/CRB/\$basearch/os/
enabled = 1
gpgcheck = 0
"""
EOF
MOCK_CONFIG="rhel-9-${ARCH}"
;;
"centos-stream-9")
MOCK_CONFIG="centos-stream-9-${ARCH}"
;;
*)
redprint "Variable TEST_OS has to be defined"
exit 1
;;
esac
greenprint "🧬 Using mock config: ${MOCK_CONFIG}"
greenprint "✏ Adding user to mock group"
sudo usermod -a -G mock "$(whoami)"
greenprint "🎁 Building SRPM"
mock -r "$MOCK_CONFIG" --buildsrpm \
--spec "${TARGET_FOLDER[0]}/bootc.spec" \
--config-opts=cleanup_on_failure=False \
--config-opts=cleanup_on_success=True \
--sources "${TARGET_FOLDER[0]}" \
--resultdir ./tests/integration/build
greenprint "🎁 Building RPMs"
mock -r "$MOCK_CONFIG" \
--config-opts=cleanup_on_failure=False \
--config-opts=cleanup_on_success=True \
--resultdir "./tests/integration/build" \
./tests/integration/build/*.src.rpm

8
tests/integration/playbooks/ansible.cfg Normal file

@@ -0,0 +1,8 @@
[defaults]
timeout = 30
# human-readable stdout/stderr results display
stdout_callback = yaml
[ssh_connection]
# scp_if_ssh=True
pipelining=True

366
tests/integration/playbooks/check-system.yaml Normal file

@@ -0,0 +1,366 @@
---
- hosts: guest
  become: false
  vars:
    bootc_image: ""
    upgrade: ""
    total_counter: "0"
    failed_counter: "0"

  tasks:
    # current target host's IP address
    - debug: var=ansible_all_ipv4_addresses
    - debug: var=ansible_facts['distribution_version']
    - debug: var=ansible_facts['distribution']
    - debug: var=ansible_facts['architecture']

    - name: check bios or uefi
      stat:
        path: /sys/firmware/efi

    - name: check secure boot status
      command: mokutil --sb-state
      ignore_errors: true

    - name: check tpm device
      stat:
        path: /dev/tpm0
      ignore_errors: true

    - name: check partition size
      command: df -Th
      ignore_errors: true
      become: true

    - name: check disk partition table
      command: fdisk -l
      ignore_errors: true
      become: true

    - name: check mount table
      command: findmnt
      ignore_errors: true

    - name: check rpm-ostree status
      command: rpm-ostree status
      ignore_errors: true

    - name: check bootc status
      command: bootc status
      ignore_errors: true
      become: true

    - name: check ostree finalize staged log
      command: journalctl -b -1 -u ostree-finalize-staged.service
      ignore_errors: true
      become: true

    # case: check installed container image
    - name: get installed container image
      shell: bootc status --json | jq -r '.status.booted.image.image.image'
      register: result_bootc_status
      become: true
    - set_fact:
        installed_image: "{{ result_bootc_status.stdout }}"

    - name: check commit deployed and built
      block:
        - assert:
            that:
              - installed_image == bootc_image
            fail_msg: "{{ bootc_image }} IS NOT installed"
            success_msg: "{{ bootc_image }} installed"
      always:
        - set_fact:
            total_counter: "{{ total_counter | int + 1 }}"
      rescue:
        - name: failed count + 1
          set_fact:
            failed_counter: "{{ failed_counter | int + 1 }}"

    # case: check ostree-remount service status
    - name: check ostree-remount service status
      command: systemctl is-active ostree-remount.service
      register: result_remount

    - name: ostree-remount should be started
      block:
        - assert:
            that:
              - result_remount.stdout == "active"
            fail_msg: "ostree-remount is not started by default"
            success_msg: "starting ostree-remount successful"
      always:
        - set_fact:
            total_counter: "{{ total_counter | int + 1 }}"
      rescue:
        - name: failed count + 1
          set_fact:
            failed_counter: "{{ failed_counter | int + 1 }}"

    - name: set mount point device name
      command: findmnt -r -o SOURCE -n /sysroot
      register: result_sysroot_source
    - set_fact:
        device_name: "{{ result_sysroot_source.stdout }}"

    - name: get ostree osname
      shell: rpm-ostree status --json | jq -r '.deployments[0].osname'
      register: result_osname
    - set_fact:
        osname: "{{ result_osname.stdout }}"

    - name: get ostree checksum
      shell: bootc status --json | jq -r '.status.booted.ostree.checksum'
      register: result_ostree_checksum
      become: true
    - set_fact:
        ostree_checksum: "{{ result_ostree_checksum.stdout }}"

    # case: check /sysroot mount status
    - name: check /sysroot mount status
      shell: findmnt -r -o OPTIONS -n /sysroot | awk -F "," '{print $1}'
      register: result_sysroot_mount_status

    - name: /sysroot should be mounted with rw permission
      block:
        - assert:
            that:
              - result_sysroot_mount_status.stdout == "rw"
            fail_msg: "/sysroot is not mounted with rw permission"
            success_msg: "/sysroot is mounted with rw permission"
      always:
        - set_fact:
            total_counter: "{{ total_counter | int + 1 }}"
      rescue:
        - name: failed count + 1
          set_fact:
            failed_counter: "{{ failed_counter | int + 1 }}"

    # case: check /var mount point
    - name: check /var mount point
      command: findmnt -r -o SOURCE -n /var
      register: result_var_mount_point

    - name: /var mount point checking
      vars:
        var_mount_path: "{{ device_name }}[/ostree/deploy/{{ osname }}/var]"
      block:
        - assert:
            that:
              - result_var_mount_point.stdout == var_mount_path
            fail_msg: "/var does not mount on {{ var_mount_path }}"
            success_msg: "/var mounts on {{ var_mount_path }}"
      always:
        - set_fact:
            total_counter: "{{ total_counter | int + 1 }}"
      rescue:
        - name: failed count + 1
          set_fact:
            failed_counter: "{{ failed_counter | int + 1 }}"

    # case: check /var mount status
    - name: check /var mount status
      shell: findmnt -r -o OPTIONS -n /var | awk -F "," '{print $1}'
      register: result_var_mount_status

    - name: /var should be mounted with rw permission
      block:
        - assert:
            that:
              - result_var_mount_status.stdout == "rw"
            fail_msg: "/var is not mounted with rw permission"
            success_msg: "/var is mounted with rw permission"
      always:
        - set_fact:
            total_counter: "{{ total_counter | int + 1 }}"
      rescue:
        - name: failed count + 1
          set_fact:
            failed_counter: "{{ failed_counter | int + 1 }}"

    # case: check / mount point
    - name: check / mount point
      command: findmnt -r -o SOURCE -n /
      register: result_root_mount_point

    - name: / mount point checking
      block:
        - assert:
            that:
              - result_root_mount_point.stdout == "overlay"
            fail_msg: "/ does not mount with overlay"
            success_msg: "/ mounts with overlay"
      always:
        - set_fact:
            total_counter: "{{ total_counter | int + 1 }}"
      rescue:
        - name: failed count + 1
          set_fact:
            failed_counter: "{{ failed_counter | int + 1 }}"

    - name: check installed package
      shell: rpm -qa | sort
      register: result_packages

    - name: upgrade checking
      when: upgrade == "true"
      block:
        # case: check booted ostree checksum != rollback ostree checksum
        - name: get rollback ostree checksum
          shell: bootc status --json | jq -r '.status.rollback.ostree.checksum'
          register: result_rollback_ostree_checksum
          become: true

        - name: check booted and rollback ostree checksums
          block:
            - assert:
                that:
                  - ostree_checksum != result_rollback_ostree_checksum.stdout
                fail_msg: "upgrade failed"
                success_msg: "upgrade passed"
          always:
            - set_fact:
                total_counter: "{{ total_counter | int + 1 }}"
          rescue:
            - name: failed count + 1
              set_fact:
                failed_counter: "{{ failed_counter | int + 1 }}"

        - set_fact:
            ostree_checksum: "{{ result_ostree_checksum.stdout }}"

        # case: check wget installed after upgrade
        - name: check wget installed
          block:
            - assert:
                that:
                  - "'wget' in result_packages.stdout"
                fail_msg: "wget not installed, ostree upgrade might have failed"
                success_msg: "wget installed in ostree upgrade"
          always:
            - set_fact:
                total_counter: "{{ total_counter | int + 1 }}"
          rescue:
            - name: failed count + 1
              set_fact:
                failed_counter: "{{ failed_counter | int + 1 }}"

    # case: check dmesg error and failed log
    - name: check dmesg output
      command: dmesg
      become: true

    - name: check dmesg error and fail log
      shell: dmesg --notime | grep -i "error\|fail" | grep -v "skipped" | grep -v "failover" | grep -v "ignition" | grep -v "Driver 'pcspkr'" || true
      register: result_dmesg_error
      become: true

    # case: check running container with podman in root
    - name: run CentOS Stream 9 image with podman in root
      command: podman run --rm quay.io/centos/centos:stream9 cat /etc/redhat-release
      register: podman_result
      become: true
      retries: 30
      delay: 2
      until: podman_result is success
      ignore_errors: true

    - name: run container test
      block:
        - assert:
            that:
              - podman_result is succeeded
              - "'CentOS Stream release 9' in podman_result.stdout"
            fail_msg: "failed to run container with podman (root)"
            success_msg: "running container with podman (root) succeeded"
      always:
        - set_fact:
            total_counter: "{{ total_counter | int + 1 }}"
      rescue:
        - name: failed count + 1
          set_fact:
            failed_counter: "{{ failed_counter | int + 1 }}"

    # case: check running container with podman in rootless
    - name: run CentOS Stream 9 image with podman in rootless
      command: podman run --rm quay.io/centos/centos:stream9 cat /etc/redhat-release
      register: podman_result
      retries: 30
      delay: 2
      until: podman_result is success
      ignore_errors: true

    - name: run container test
      block:
        - assert:
            that:
              - podman_result is succeeded
              - "'CentOS Stream release 9' in podman_result.stdout"
            fail_msg: "failed to run container with podman (non-root)"
            success_msg: "running container with podman (non-root) succeeded"
      always:
        - set_fact:
            total_counter: "{{ total_counter | int + 1 }}"
      rescue:
        - name: failed count + 1
          set_fact:
            failed_counter: "{{ failed_counter | int + 1 }}"

    # case: check system reboot
    - name: check system reboot
      block:
        - name: check system reboot
          reboot:
            post_reboot_delay: 60
            pre_reboot_delay: 60
            reboot_timeout: 180
          become: true
          ignore_errors: true

        - name: wait for connection to become reachable/usable
          wait_for_connection:
            delay: 30
      always:
        - set_fact:
            total_counter: "{{ total_counter | int + 1 }}"
      rescue:
        - name: failed count + 1
          set_fact:
            failed_counter: "{{ failed_counter | int + 1 }}"

    # case: check persistent log in system
    - name: check journald persistent logging
      block:
        - name: list boots
          shell: journalctl --list-boots -q
          register: result_list_boots
          become: true

        - assert:
            that:
              - result_list_boots.stdout_lines | length > 1
            fail_msg: "NO journald persistent logging configured"
            success_msg: "journald persistent logging configured"
      always:
        - set_fact:
            total_counter: "{{ total_counter | int + 1 }}"
      rescue:
        - name: failed count + 1
          set_fact:
            failed_counter: "{{ failed_counter | int + 1 }}"

    # case: check reboot times
    - name: check reboot times
      command: last reboot
      ignore_errors: true
      become: true

    - assert:
        that:
          - failed_counter == "0"
        fail_msg: "Ran {{ total_counter }} tests, but {{ failed_counter }} of them failed"
        success_msg: "All {{ total_counter }} tests passed"

184
tests/integration/playbooks/deploy-aws.yaml Normal file

@@ -0,0 +1,184 @@
---
- hosts: cloud
  gather_facts: false
  become: false
  vars:
    test_os: "{{ lookup('env', 'TEST_OS') | default('centos-stream-9', true) }}"
    arch: "{{ lookup('env', 'ARCH') | default('x86_64', true) }}"
    ssh_key_pub: ""
    inventory_file: ""
    download_node: "{{ lookup('env', 'DOWNLOAD_NODE') | default('', true) }}"
    ami_id: ""
    ami:
      x86_64:
        rhel-9-4: ami-047cbb4b91f923934
        centos-stream-9: ami-087dde0e824141404
      aarch64:
        rhel-9-4: ami-0bf2a1a5d9ee15e42
        centos-stream-9: ami-025af1eccc54ec6e6
    instance_type:
      x86_64:
        "0": t2.medium
        "1": t3.medium
        "2": m6a.large
      aarch64:
        "0": t4g.medium
        "1": c7g.medium
        "2": m6g.medium

  tasks:
    - set_fact:
        random_num: "{{ 9999 | random(start=1001) }}"
    - set_fact:
        instance_name: "bootc-aws-{{ test_os }}-{{ random_num }}"

    - name: random number for instance type
      set_fact:
        instance_type_index: "{{ 3 | random(start=0) }}"

    - name: set random instance type
      set_fact:
        random_instance_type: "{{ lookup('env', 'instance_type') | default(instance_type[arch][instance_type_index], true) }}"

    - name: "get available zone for instance {{ random_instance_type }}"
      shell: aws ec2 describe-instance-type-offerings --location-type availability-zone --filters="Name=instance-type,Values={{ random_instance_type }}" --query InstanceTypeOfferings | jq -r '.[0].Location'
      register: ec2_zone
      when: "'rhel' not in test_os"

    - name: get subnet
      amazon.aws.ec2_vpc_subnet_info:
        filters:
          "tag:Name": "kite-ci"
          "availabilityZone": "{{ ec2_zone.stdout }}"
      register: ec2_vpc_subnet
      when: "'rhel' not in test_os"
    - set_fact:
        subnet_id: "{{ ec2_vpc_subnet.subnets[0].subnet_id }}"
      when: "'rhel' not in test_os"

    - name: get virtqe subnet
      amazon.aws.ec2_vpc_subnet_info:
        filters:
          "tag:Name": "InternalA-virtqe"
      register: ec2_vpc_subnet
      when: "'rhel' in test_os"
    - set_fact:
        subnet_id: "{{ ec2_vpc_subnet.subnets[0].subnet_id }}"
      when: "'rhel' in test_os"

    - name: get security group
      amazon.aws.ec2_security_group_info:
        filters:
          "tag:Name": "kite-ci"
      register: ec2_security_group
      when: "'rhel' not in test_os"
    - set_fact:
        group_id: "{{ ec2_security_group.security_groups[0].group_id }}"
      when: "'rhel' not in test_os"

    - name: get virtqe security group
      amazon.aws.ec2_security_group_info:
        filters:
          "tag:Name": "bootc-test"
      register: ec2_security_group
      when: "'rhel' in test_os"
    - set_fact:
        group_id: "{{ ec2_security_group.security_groups[0].group_id }}"
      when: "'rhel' in test_os"

    - name: config ssh keypair used by test
      amazon.aws.ec2_key:
        name: "kp-bootc-{{ random_num }}"
        key_material: "{{ lookup('file', ssh_key_pub) }}"
        tags:
          name: "bootc-test"

    - name: generate ec2_run_instance script
      template:
        src: ec2_run_instance.j2
        dest: "{{ playbook_dir }}/ec2_run_instance.sh"
        mode: 0755

    - name: run ec2 instance with script
      command: "{{ playbook_dir }}/ec2_run_instance.sh"
      register: result_instance

    - name: convert run_instance output to json
      set_fact:
        instance_json: "{{ result_instance.stdout | from_json }}"

    - name: wait for instance running
      shell: aws ec2 describe-instances --instance-ids {{ instance_json.Instances[0].InstanceId }} --query 'Reservations[0].Instances[0].State.Name' --output text
      register: describe_result
      retries: 60
      delay: 5
      until: describe_result.stdout == "running"

    - name: get instance public ip
      shell: aws ec2 describe-instances --instance-ids {{ instance_json.Instances[0].InstanceId }} --query 'Reservations[*].Instances[*].PublicIpAddress' --output text
      register: ip_result
      when: "'rhel' not in test_os"
    - set_fact:
        instance_ip: "{{ ip_result.stdout }}"
      when: "'rhel' not in test_os"

    - name: get instance private ip
      shell: aws ec2 describe-instances --instance-ids {{ instance_json.Instances[0].InstanceId }} --query 'Reservations[*].Instances[*].PrivateIpAddress' --output text
      register: ip_result
      when: "'rhel' in test_os"
    - set_fact:
        instance_ip: "{{ ip_result.stdout }}"
      when: "'rhel' in test_os"

    - name: wait until instance is reachable
      wait_for:
        host: "{{ instance_ip }}"
        port: 22
        search_regex: OpenSSH
        delay: 10
      retries: 30
      register: result_ssh_check
      until: result_ssh_check is success

    - name: add instance ip into host group guest
      add_host:
        name: "{{ instance_ip }}"
        groups: guest

    - name: Write instance ip to inventory file
      community.general.ini_file:
        path: "{{ inventory_file }}"
        section: guest
        option: guest ansible_host
        value: "{{ instance_ip }}"
        no_extra_spaces: true

    - name: Write random number to inventory file
      community.general.ini_file:
        path: "{{ inventory_file }}"
        section: cloud:vars
        option: random_num
        value: "{{ random_num }}"
        no_extra_spaces: true

    - name: write AWS EC2 instance id to inventory file
      community.general.ini_file:
        path: "{{ inventory_file }}"
        section: cloud:vars
        option: instance_id
        value: "{{ instance_json.Instances[0].InstanceId }}"
        no_extra_spaces: true

    - name: write AWS ami id to inventory file
      community.general.ini_file:
        path: "{{ inventory_file }}"
        section: cloud:vars
        option: ami_id
        value: "{{ ami_id }}"
        no_extra_spaces: true

79
tests/integration/playbooks/install.yaml Normal file

@@ -0,0 +1,79 @@
---
- hosts: guest
  become: false
  vars:
    test_os: "{{ lookup('env', 'TEST_OS') | default('centos-stream-9', true) }}"
    platform: "{{ lookup('env', 'PLATFORM') | default('aws', true) }}"
    test_image_url: ""

  tasks:
    - name: check bios or uefi
      stat:
        path: /sys/firmware/efi

    - name: check partition size
      command: df -Th
      ignore_errors: true
      become: true

    - name: check disk partition table
      command: fdisk -l
      ignore_errors: true
      become: true

    - name: check mount table
      command: findmnt
      ignore_errors: true

    - name: Install podman
      dnf:
        name:
          - podman
        state: present
      become: true
      when: ('rhel' not in test_os) or (platform != 'aws')

    - name: Install podman from internal
      dnf:
        disablerepo: "*"
        enablerepo: "rhel-9x-*"
        name:
          - podman
        state: present
      become: true
      when:
        - "'rhel' in test_os"
        - platform == "aws"

    - name: Auth for RHEL private image
      command:
        podman login \
        -u "{{ lookup('env', 'QUAY_USERNAME') }}" \
        -p "{{ lookup('env', 'QUAY_PASSWORD') }}" \
        quay.io
      no_log: true
      become: true

    - name: Install image
      command:
        "podman run \
        --rm \
        --privileged \
        --pid=host \
        -v /:/target \
        -v /var/lib/containers:/var/lib/containers \
        --security-opt label=type:unconfined_t \
        {{ test_image_url }} \
        bootc install to-filesystem --replace=alongside /target"
      become: true

    - name: Reboot to deploy new system
      reboot:
        post_reboot_delay: 60
        reboot_timeout: 180
      become: true
      ignore_errors: true

    - name: Wait for connection to become reachable/usable
      wait_for_connection:
        delay: 30

36
tests/integration/playbooks/remove.yaml Normal file

@@ -0,0 +1,36 @@
---
- hosts: cloud
  gather_facts: false
  become: false

  tasks:
    - name: Remove AWS resources
      when: platform == "aws"
      block:
        - name: terminate instance
          amazon.aws.ec2_instance:
            instance_ids: "{{ instance_id }}"
            state: absent
            wait: true
          ignore_errors: true

        - name: wait until instance terminated
          amazon.aws.ec2_instance_info:
            instance_ids:
              - "{{ instance_id }}"
          register: result_instance_status
          retries: 30
          delay: 10
          until: result_instance_status.instances[0].state.name == "terminated"

        - name: remove ec2 key
          amazon.aws.ec2_key:
            name: "kp-bootc-{{ random_num }}"
            state: absent

        - name: Deregister AMI (delete associated snapshots too)
          amazon.aws.ec2_ami:
            image_id: "{{ ami_id }}"
            delete_snapshot: true
            state: absent
          when: ami_id != ""

30
tests/integration/playbooks/ec2_run_instance.j2 Normal file

@@ -0,0 +1,30 @@
#!/bin/bash
/usr/local/bin/aws ec2 run-instances \
    --associate-public-ip-address \
    --block-device-mappings DeviceName=/dev/xvda,Ebs=\{DeleteOnTermination=true,VolumeSize=12,VolumeType=gp2,Encrypted=false\} \
{% if random_instance_type.startswith('t3') or random_instance_type.startswith('t4g') %}
    --credit-specification CpuCredits=standard \
{% endif %}
{% if test_os.startswith('rhel') %}
    --user-data file://user-data \
{% endif %}
{% if ami_id == "" %}
    --image-id {{ ami[arch][test_os] }} \
{% else %}
    --image-id {{ ami_id }} \
{% endif %}
    --instance-market-options MarketType=spot,SpotOptions=\{MaxPrice=0.1,SpotInstanceType=one-time,InstanceInterruptionBehavior=terminate\} \
    --instance-type {{ random_instance_type }} \
    --key-name kp-bootc-{{ random_num }} \
    --security-group-ids {{ group_id }} \
    --subnet-id {{ subnet_id }} \
    --tag-specifications ResourceType=instance,Tags=[\{Key=bootc-test,Value='bootc-test.{{ test_os }}.{{ arch }}.{{ random_num }}'\},\{Key=Name,Value='bootc-test.{{ test_os }}.{{ arch }}.{{ random_num }}'\}]

return_code=$?
if [[ $return_code == 0 ]]; then
    exit 0
fi

# If the instance could not be launched, exit with a failure.
exit 1

19
tests/integration/playbooks/upgrade.yaml Normal file

@@ -0,0 +1,19 @@
---
- hosts: guest
  become: false

  tasks:
    - name: bootc upgrade
      command: bootc upgrade
      become: true

    - name: Reboot to deploy new system
      reboot:
        post_reboot_delay: 60
        reboot_timeout: 180
      become: true
      ignore_errors: true

    - name: Wait for connection to become reachable/usable
      wait_for_connection:
        delay: 30