Mirror of https://github.com/openshift/installer.git (synced 2026-02-05 06:46:36 +01:00)
Merge branch 'agent-installer'
@@ -158,3 +158,21 @@ aliases:
  - prestist
  - marmijo
  - RishabhSaini
  agent-reviewers:
  - andfasano
  - bfournie
  - celebdor
  - dhellmann
  - lranjbar
  - pawanpinjarkar
  - rwsu
  - zaneb
  agent-approvers:
  - andfasano
  - bfournie
  - celebdor
  - dhellmann
  - lranjbar
  - pawanpinjarkar
  - rwsu
  - zaneb
cmd/openshift-install/agent.go (Normal file, 91 lines)
@@ -0,0 +1,91 @@
package main

import (
    "github.com/spf13/cobra"

    "github.com/openshift/installer/cmd/openshift-install/agent"
    "github.com/openshift/installer/pkg/asset"
    "github.com/openshift/installer/pkg/asset/agent/agentconfig"
    "github.com/openshift/installer/pkg/asset/agent/image"
    "github.com/openshift/installer/pkg/asset/agent/manifests"
    "github.com/openshift/installer/pkg/asset/agent/mirror"
    "github.com/openshift/installer/pkg/asset/kubeconfig"
)

func newAgentCmd() *cobra.Command {
    agentCmd := &cobra.Command{
        Use:   "agent",
        Short: "Commands for supporting cluster installation using agent installer",
        RunE: func(cmd *cobra.Command, args []string) error {
            return cmd.Help()
        },
    }

    agentCmd.AddCommand(newAgentCreateCmd())
    agentCmd.AddCommand(agent.NewWaitForCmd())
    return agentCmd
}

var (
    agentConfigTarget = target{
        // TODO: remove template wording when interactive survey has been implemented
        name: "Agent Config Template",
        command: &cobra.Command{
            Use:   "agent-config-template",
            Short: "Generates a template of the agent config manifest used by the agent installer",
            Args:  cobra.ExactArgs(0),
        },
        assets: []asset.WritableAsset{
            &agentconfig.AgentConfig{},
        },
    }

    agentManifestsTarget = target{
        name: "Cluster Manifests",
        command: &cobra.Command{
            Use:   "cluster-manifests",
            Short: "Generates the cluster definition manifests used by the agent installer",
            Args:  cobra.ExactArgs(0),
        },
        assets: []asset.WritableAsset{
            &manifests.AgentManifests{},
            &mirror.RegistriesConf{},
            &mirror.CaBundle{},
        },
    }

    agentImageTarget = target{
        name: "Image",
        command: &cobra.Command{
            Use:   "image",
            Short: "Generates a bootable image containing all the information needed to deploy a cluster",
            Args:  cobra.ExactArgs(0),
        },
        assets: []asset.WritableAsset{
            &image.AgentImage{},
            &kubeconfig.AgentAdminClient{},
        },
    }

    agentTargets = []target{agentConfigTarget, agentManifestsTarget, agentImageTarget}
)

func newAgentCreateCmd() *cobra.Command {

    cmd := &cobra.Command{
        Use:   "create",
        Short: "Commands for generating agent installation artifacts",
        Args:  cobra.ExactArgs(0),
        RunE: func(cmd *cobra.Command, args []string) error {
            return cmd.Help()
        },
    }

    for _, t := range agentTargets {
        t.command.Args = cobra.ExactArgs(0)
        t.command.Run = runTargetCmd(t.assets...)
        cmd.AddCommand(t.command)
    }

    return cmd
}
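As a usage sketch (not part of the commit): the targets wired up above would be driven through the top-level binary. The --dir flag is the installer's existing root asset-directory flag that runTargetCmd reads, and ./ocp-agent is an invented directory name.

openshift-install agent create agent-config-template --dir ./ocp-agent   # writes the agent config template
openshift-install agent create cluster-manifests --dir ./ocp-agent       # writes the cluster definition manifests
openshift-install agent create image --dir ./ocp-agent                   # builds the bootable agent image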
cmd/openshift-install/agent/OWNERS (Normal file, 7 lines)
@@ -0,0 +1,7 @@
# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md
# This file just uses aliases defined in OWNERS_ALIASES.

approvers:
- agent-approvers
reviewers:
- agent-reviewers
cmd/openshift-install/agent/waitfor.go (Normal file, 89 lines)
@@ -0,0 +1,89 @@
package agent

import (
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
    "github.com/spf13/cobra"

    agentpkg "github.com/openshift/installer/pkg/agent"
)

const (
    exitCodeInstallConfigError = iota + 3
    exitCodeInfrastructureFailed
    exitCodeBootstrapFailed
    exitCodeInstallFailed
)

// NewWaitForCmd creates the commands for waiting on completion of the agent-based cluster installation.
func NewWaitForCmd() *cobra.Command {
    cmd := &cobra.Command{
        Use:   "wait-for",
        Short: "Wait for install-time events",
        Args:  cobra.ExactArgs(0),
        RunE: func(cmd *cobra.Command, args []string) error {
            return cmd.Help()
        },
    }

    cmd.AddCommand(newWaitForBootstrapCompleteCmd())
    cmd.AddCommand(newWaitForInstallCompleteCmd())
    return cmd
}

func newWaitForBootstrapCompleteCmd() *cobra.Command {
    return &cobra.Command{
        Use:   "bootstrap-complete",
        Short: "Wait until the cluster bootstrap is complete",
        Args:  cobra.ExactArgs(0),
        Run: func(cmd *cobra.Command, args []string) {
            assetDir := cmd.Flags().Lookup("dir").Value.String()
            logrus.Debugf("asset directory: %s", assetDir)
            if len(assetDir) == 0 {
                logrus.Fatal("No cluster installation directory found")
            }
            cluster, err := agentpkg.WaitForBootstrapComplete(assetDir)
            if err != nil {
                logrus.Debug("Printing the event list gathered from the Agent Rest API")
                cluster.PrintInfraEnvRestAPIEventList()
                err2 := cluster.API.OpenShift.LogClusterOperatorConditions()
                if err2 != nil {
                    logrus.Error("Attempted to gather ClusterOperator status after wait failure: ", err2)
                }
                logrus.Info("Use the following commands to gather logs from the cluster")
                logrus.Info("openshift-install gather bootstrap --help")
                logrus.Error(errors.Wrap(err, "Bootstrap failed to complete"))
                logrus.Exit(exitCodeBootstrapFailed)
            }
        },
    }
}

func newWaitForInstallCompleteCmd() *cobra.Command {
    return &cobra.Command{
        Use:   "install-complete",
        Short: "Wait until the cluster installation is complete",
        Args:  cobra.ExactArgs(0),
        Run: func(cmd *cobra.Command, args []string) {
            assetDir := cmd.Flags().Lookup("dir").Value.String()
            logrus.Debugf("asset directory: %s", assetDir)
            if len(assetDir) == 0 {
                logrus.Fatal("No cluster installation directory found")
            }
            cluster, err := agentpkg.WaitForInstallComplete(assetDir)
            if err != nil {
                logrus.Debug("Printing the event list gathered from the Agent Rest API")
                cluster.PrintInfraEnvRestAPIEventList()
                err2 := cluster.API.OpenShift.LogClusterOperatorConditions()
                if err2 != nil {
                    logrus.Error("Attempted to gather ClusterOperator status after wait failure: ", err2)
                }
                logrus.Error(`Cluster initialization failed because one or more operators are not functioning properly.
The cluster should be accessible for troubleshooting as detailed in the documentation linked below,
https://docs.openshift.com/container-platform/latest/support/troubleshooting/troubleshooting-installations.html`)
                logrus.Exit(exitCodeInstallFailed)
            }
            cluster.PrintInstallationComplete()
        },
    }
}
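A hedged usage sketch for the wait-for commands above (not in the diff): the directory name is illustrative, and from the const block the iota expression resolves exitCodeBootstrapFailed to 5 and exitCodeInstallFailed to 6, so a wrapper could branch on those values.

openshift-install agent wait-for bootstrap-complete --dir ./ocp-agent
rc=$?   # 0 on success; 5 would correspond to exitCodeBootstrapFailed
openshift-install agent wait-for install-complete --dir ./ocp-agent
rc=$?   # 0 on success; 6 would correspond to exitCodeInstallFailed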
@@ -240,6 +240,15 @@ func newCreateCmd() *cobra.Command {
    return cmd
}

func asFileWriter(a asset.WritableAsset) asset.FileWriter {
    switch v := a.(type) {
    case asset.FileWriter:
        return v
    default:
        return asset.NewDefaultFileWriter(a)
    }
}

func runTargetCmd(targets ...asset.WritableAsset) func(cmd *cobra.Command, args []string) {
    runner := func(directory string) error {
        assetStore, err := assetstore.NewStore(directory)
@@ -253,7 +262,8 @@ func runTargetCmd(targets ...asset.WritableAsset) func(cmd *cobra.Command, args
                err = errors.Wrapf(err, "failed to fetch %s", a.Name())
            }

            if err2 := asset.PersistToFile(a, directory); err2 != nil {
            err2 := asFileWriter(a).PersistToFile(directory)
            if err2 != nil {
                err2 = errors.Wrapf(err2, "failed to write asset (%s) to disk", a.Name())
                if err != nil {
                    logrus.Error(err2)
@@ -289,7 +299,9 @@ func runTargetCmd(targets ...asset.WritableAsset) func(cmd *cobra.Command, args
        }
        logrus.Fatal(err)
    }
    if cmd.Name() != "cluster" {
    switch cmd.Name() {
    case "cluster", "image":
    default:
        logrus.Infof(logging.LogCreatedFiles(cmd.Name(), rootOpts.dir, targets))
    }

@@ -53,6 +53,7 @@ func installerMain() {
        newCompletionCmd(),
        newMigrateCmd(),
        newExplainCmd(),
        newAgentCmd(),
    } {
        rootCmd.AddCommand(subCmd)
    }
data/data/agent/OWNERS (Normal file, 7 lines)
@@ -0,0 +1,7 @@
# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md
# This file just uses aliases defined in OWNERS_ALIASES.

approvers:
- agent-approvers
reviewers:
- agent-reviewers
data/data/agent/files/etc/containers/containers.conf (Normal file, 3 lines)
@@ -0,0 +1,3 @@
[engine]
# By default use the infra image built by podman
infra_image = ""
data/data/agent/files/etc/issue (Normal file, 2 lines)
@@ -0,0 +1,2 @@
\S
This image was built by the agent installer.
data/data/agent/files/etc/motd (Normal file, 10 lines)
@@ -0,0 +1,10 @@
** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** **
This is a host being installed by the OpenShift Assisted Installer.
It will be installed from scratch during the installation.

The primary service is agent.service. To watch its status, run:
sudo journalctl -u agent.service

To view the agent log, run:
sudo journalctl TAG=agent
** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** **
data/data/agent/files/etc/multipath.conf (Normal file, 10 lines)
@@ -0,0 +1,10 @@
defaults {
    user_friendly_names yes
    find_multipaths yes
    enable_foreign "^$"
}
blacklist_exceptions {
    property "(SCSI_IDENT_|ID_WWN)"
}
blacklist {
}
data/data/agent/files/root/.docker/config.json.template (Normal file, 1 line)
@@ -0,0 +1 @@
{{.PullSecret}}
data/data/agent/files/root/assisted.te (Normal file, 13 lines)
@@ -0,0 +1,13 @@
module assisted 1.0;
require {
    type chronyd_t;
    type container_file_t;
    type spc_t;
    class unix_dgram_socket sendto;
    class dir search;
    class sock_file write;
}
#============= chronyd_t ==============
allow chronyd_t container_file_t:dir search;
allow chronyd_t container_file_t:sock_file write;
allow chronyd_t spc_t:unix_dgram_socket sendto;
data/data/agent/files/usr/local/bin/common.sh.template (Normal file, 9 lines)
@@ -0,0 +1,9 @@
#!/bin/bash

wait_for_assisted_service() {
    echo "Waiting for assisted-service to be ready"
    until $(curl --output /dev/null --silent --fail {{.ServiceBaseURL}}/api/assisted-install/v2/infra-envs); do
        printf '.'
        sleep 5
    done
}
@@ -0,0 +1,13 @@
#!/bin/bash
set -euo pipefail

/usr/local/bin/release-image-download.sh

# shellcheck disable=SC1091
. /usr/local/bin/release-image.sh

IMAGE=$(image_for agent-installer-node-agent)

echo "Using agent image: ${IMAGE} to copy bin"

/usr/bin/podman run --privileged --rm -v /usr/local/bin:/hostbin ${IMAGE} cp /usr/bin/agent /hostbin
data/data/agent/files/usr/local/bin/get-container-images.sh (Normal file, 16 lines)
@@ -0,0 +1,16 @@
#!/usr/bin/env bash
set -euo pipefail

/usr/local/bin/release-image-download.sh

# shellcheck disable=SC1091
. /usr/local/bin/release-image.sh

# Store images in the environment file used by services and passed to assisted-service
# The agent image will also be retrieved when its script is run
cat <<EOF >/usr/local/share/assisted-service/agent-images.env
SERVICE_IMAGE=$(image_for agent-installer-api-server)
CONTROLLER_IMAGE=$(image_for agent-installer-csr-approver)
INSTALLER_IMAGE=$(image_for agent-installer-orchestrator)
AGENT_DOCKER_IMAGE=$(image_for agent-installer-node-agent)
EOF
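Purely for illustration (not in the diff), the generated /usr/local/share/assisted-service/agent-images.env would hold one pullspec per variable; both the registry and the digests below are placeholders, since the real values come from image_for resolving component names against the release image.

SERVICE_IMAGE=example.registry/ocp-release@sha256:aaaa...      # agent-installer-api-server (placeholder)
CONTROLLER_IMAGE=example.registry/ocp-release@sha256:bbbb...   # agent-installer-csr-approver (placeholder)
INSTALLER_IMAGE=example.registry/ocp-release@sha256:cccc...    # agent-installer-orchestrator (placeholder)
AGENT_DOCKER_IMAGE=example.registry/ocp-release@sha256:dddd... # agent-installer-node-agent (placeholder)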
data/data/agent/files/usr/local/bin/set-hostname.sh (Normal file, 29 lines)
@@ -0,0 +1,29 @@
#!/bin/bash
#
# The hostnames defined in agent-config.yaml are written out
# to files at /etc/assisted/hostnames/<MAC-address>.
#
# If a host has multiple interfaces, the host's first network
# interface's MAC address is used.
#
# This script compares the MAC addresses on the current host
# with the addresses in /etc/assisted/hostnames/.
#
# If a match is found, then the hostname in the file is set
# as this host's hostname.
#

HOSTNAMES_PATH=/etc/assisted/hostnames
FILES=$(ls $HOSTNAMES_PATH)
for filename in $FILES
do
    MATCHED_MAC_ADDRESS_WITH_HOST=$(ip address | grep $filename)
    if [ "$MATCHED_MAC_ADDRESS_WITH_HOST" != "" ]; then
        HOSTNAME=$(cat ${HOSTNAMES_PATH}/${filename})
        echo "Host has matching MAC address: $filename" 1>&2
        echo "Setting hostname to $HOSTNAME" 1>&2
        hostnamectl set-hostname $HOSTNAME
    else
        echo "MAC address, $filename, does not exist on this host" 1>&2
    fi
done
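To make the matching logic above concrete, a hypothetical /etc/assisted/hostnames layout might look like this; the MAC addresses and hostnames are invented for illustration.

ls /etc/assisted/hostnames
# 00:1a:2b:3c:4d:5e  00:1a:2b:3c:4d:5f
cat /etc/assisted/hostnames/00:1a:2b:3c:4d:5e
# master-0

A host whose `ip address` output contains 00:1a:2b:3c:4d:5e would then have its hostname set to master-0 via hostnamectl.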
@@ -0,0 +1,30 @@
#!/bin/bash

set -e

timeout=$((SECONDS + 30))

while [[ $SECONDS -lt $timeout ]]
do
    IS_NODE_ZERO=$(ip -j address | jq '[.[].addr_info] | flatten | map(.local=="{{.NodeZeroIP}}") | any')
    if [ "${IS_NODE_ZERO}" = "true" ]; then
        break
    fi
    sleep 5
done

if [ "${IS_NODE_ZERO}" = "true" ]; then
    echo "Node 0 IP {{.NodeZeroIP}} found on this host" 1>&2

    NODE0_PATH=/etc/assisted-service/node0
    mkdir -p "$(dirname "${NODE0_PATH}")"

    NODE_ZERO_MAC=$(ip -j address | jq -r '.[] | select(.addr_info | map(select(.local == "{{.NodeZeroIP}}")) | any).address')
    echo "MAC Address for Node 0: ${NODE_ZERO_MAC}"

    cat >"${NODE0_PATH}" <<EOF
BOOTSTRAP_HOST_MAC=${NODE_ZERO_MAC}
EOF

    echo "Created file ${NODE0_PATH}"
fi
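As a sketch of what the jq filters above operate on (the JSON shape follows the standard `ip -j address` output; the interface name, MAC, and IP are invented, and NodeZeroIP is assumed to render as 192.168.111.80):

SAMPLE='[{"ifname":"ens3","address":"52:54:00:aa:bb:cc","addr_info":[{"family":"inet","local":"192.168.111.80"}]}]'
echo "$SAMPLE" | jq '[.[].addr_info] | flatten | map(.local=="192.168.111.80") | any'
# -> true, so this host would be marked as node zero
echo "$SAMPLE" | jq -r '.[] | select(.addr_info | map(select(.local == "192.168.111.80")) | any).address'
# -> 52:54:00:aa:bb:cc, the value recorded as BOOTSTRAP_HOST_MAC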
data/data/agent/files/usr/local/bin/start-agent.sh.template (Normal file, 22 lines)
@@ -0,0 +1,22 @@
#!/bin/bash

source common.sh

>&2 echo "Waiting for infra-env-id to be available"
INFRA_ENV_ID=""
until [[ $INFRA_ENV_ID != "" && $INFRA_ENV_ID != "null" ]]; do
    sleep 5
    >&2 echo "Querying assisted-service for infra-env-id..."
    INFRA_ENV_ID=$(curl -s -S '{{.ServiceBaseURL}}/api/assisted-install/v2/infra-envs' | jq -r .[0].id)
done
echo "Fetched infra-env-id and found: $INFRA_ENV_ID"

# shellcheck disable=SC1091
. /usr/local/bin/release-image.sh

IMAGE=$(image_for agent-installer-node-agent)

echo "Using agent image: ${IMAGE} to start agent"

# use infra-env-id to have agent register this host with assisted-service
exec /usr/local/bin/agent --url '{{.ServiceBaseURL}}' --infra-env-id '{{.InfraEnvID}}' --agent-version ${IMAGE} --insecure=true
@@ -0,0 +1,73 @@
#!/bin/bash
set -e

source common.sh

wait_for_assisted_service

BASE_URL="{{.ServiceBaseURL}}api/assisted-install/v2"

cluster_id=""
while [[ "${cluster_id}" = "" ]]
do
    # Get cluster id
    cluster_id=$(curl -s -S "${BASE_URL}/clusters" | jq -r .[].id)
    if [[ "${cluster_id}" = "" ]]; then
        sleep 2
    fi
done

infra_env_id="{{.InfraEnvID}}"
echo -e "\nInfra env id is $infra_env_id" 1>&2

required_master_nodes={{.ControlPlaneAgents}}
required_worker_nodes={{.WorkerAgents}}
total_required_nodes=$(( ${required_master_nodes}+${required_worker_nodes} ))
echo "Number of required master nodes: ${required_master_nodes}" 1>&2
echo "Number of required worker nodes: ${required_worker_nodes}" 1>&2
echo "Total number of required nodes: ${total_required_nodes}" 1>&2


num_known_hosts() {
    local known_hosts=0
    host_status=$(curl -s -S "${BASE_URL}/infra-envs/${infra_env_id}/hosts" | jq -r .[].status)
    if [[ -n ${host_status} ]]; then
        for status in ${host_status}; do
            if [[ "${status}" == "known" ]]; then
                ((known_hosts+=1))
                echo "Hosts known and ready for cluster installation (${known_hosts}/${total_required_nodes})" 1>&2
            fi
        done
    fi
    echo "${known_hosts}"
}

while [[ "${total_required_nodes}" != $(num_known_hosts) ]]
do
    echo "Waiting for hosts to become ready for cluster installation..." 1>&2
    sleep 10
done

echo "All ${total_required_nodes} hosts are ready." 1>&2

if [[ "${APIVIP}" != "" ]]; then
    api_vip=$(curl -s -S "${BASE_URL}/clusters" | jq -r .[].api_vip)
    if [ "${api_vip}" == null ]; then
        echo "Setting api vip" 1>&2
        curl -s -S -X PATCH "${BASE_URL}/clusters/${cluster_id}" -H "Content-Type: application/json" -d '{"api_vip": "{{.APIVIP}}"}'
    fi
fi

while [[ "${cluster_status}" != "ready" ]]
do
    cluster_status=$(curl -s -S "${BASE_URL}/clusters" | jq -r .[].status)
    echo "Cluster status: ${cluster_status}" 1>&2
    sleep 5
    if [[ "${cluster_status}" == "ready" ]]; then
        echo "Starting cluster installation..." 1>&2
        curl -s -S -X POST "${BASE_URL}/clusters/${cluster_id}/actions/install" \
            -H 'accept: application/json' \
            -d ''
        echo "Cluster installation started" 1>&2
    fi
done
@@ -0,0 +1,6 @@
#!/bin/bash
set -e

source common.sh

wait_for_assisted_service
@@ -0,0 +1,3 @@
POSTGRESQL_DATABASE=installer
POSTGRESQL_PASSWORD=admin
POSTGRESQL_USER=admin
@@ -0,0 +1,21 @@
AUTH_TYPE=none
DB_HOST=127.0.0.1
DB_NAME=installer
DB_PASS=admin
DB_PORT=5432
DB_USER=admin
DEPLOY_TARGET=onprem
DISK_ENCRYPTION_SUPPORT=true
DUMMY_IGNITION=false
ENABLE_SINGLE_NODE_DNSMASQ=true
EPHEMERAL_INSTALLER_CLUSTER_TLS_CERTS_OVERRIDE_DIR=/opt/agent/tls
HW_VALIDATOR_REQUIREMENTS=[{"version":"default","master":{"cpu_cores":4,"ram_mib":16384,"disk_size_gb":120,"installation_disk_speed_threshold_ms":10,"network_latency_threshold_ms":100,"packet_loss_percentage":0},"worker":{"cpu_cores":2,"ram_mib":8192,"disk_size_gb":120,"installation_disk_speed_threshold_ms":10,"network_latency_threshold_ms":1000,"packet_loss_percentage":10},"sno":{"cpu_cores":8,"ram_mib":16384,"disk_size_gb":120,"installation_disk_speed_threshold_ms":10}}]
IMAGE_SERVICE_BASE_URL=http://{{.NodeZeroIP}}:8888
IPV6_SUPPORT=true
NTP_DEFAULT_SERVER=
PUBLIC_CONTAINER_REGISTRIES=quay.io
RELEASE_IMAGES={{.ReleaseImages}}
OPENSHIFT_INSTALL_RELEASE_IMAGE_MIRROR={{.ReleaseImageMirror}}
SERVICE_BASE_URL={{.ServiceBaseURL}}
STORAGE=filesystem
INFRA_ENV_ID={{.InfraEnvID}}
@@ -0,0 +1,3 @@
ASSISTED_SERVICE_HOST={{.AssistedServiceHost}}
ASSISTED_SERVICE_SCHEME={{.ServiceProtocol}}
OS_IMAGES=[{"openshift_version":"4.10","cpu_architecture":"x86_64","url":"https://mirror.openshift.com/pub/openshift-v4/dependencies/rhcos/pre-release/4.10.0-rc.1/rhcos-4.10.0-rc.1-x86_64-live.x86_64.iso","rootfs_url":"https://mirror.openshift.com/pub/openshift-v4/dependencies/rhcos/pre-release/4.10.0-rc.1/rhcos-4.10.0-rc.1-x86_64-live-rootfs.x86_64.img","version":"410.84.202201251210-0"},{"openshift_version":"4.11","cpu_architecture":"x86_64","url":"https://rhcos-redirector.apps.art.xq1c.p1.openshiftapps.com/art/storage/releases/rhcos-4.11/411.85.202203181601-0/x86_64/rhcos-411.85.202203181601-0-live.x86_64.iso","rootfs_url":"https://rhcos-redirector.apps.art.xq1c.p1.openshiftapps.com/art/storage/releases/rhcos-4.11/411.85.202203181601-0/x86_64/rhcos-411.85.202203181601-0-live-rootfs.x86_64.img","version":"411.85.202203181601-0"}]
data/data/agent/systemd/units/agent.service.template (Normal file, 24 lines)
@@ -0,0 +1,24 @@
[Service]
Type=simple
Restart=always
RestartSec=3
StartLimitInterval=0
Environment=HTTP_PROXY=
Environment=http_proxy=
Environment=HTTPS_PROXY=
Environment=https_proxy=
Environment=NO_PROXY=
Environment=no_proxy=
# TODO: If AUTH_TYPE != none, then PULL_SECRET_TOKEN needs to be updated
# https://github.com/openshift/assisted-service/blob/master/internal/ignition/ignition.go#L1381
Environment=PULL_SECRET_TOKEN={{.PullSecretToken}}
TimeoutStartSec=3000
ExecStartPre=/usr/local/bin/extract-agent.sh
ExecStart=/usr/local/bin/start-agent.sh

[Unit]
Wants=network-online.target set-hostname.service
After=network-online.target set-hostname.service

[Install]
WantedBy=multi-user.target
@@ -0,0 +1,26 @@
[Unit]
Description=Service that applies host-specific configuration
Wants=network-online.target
Requires=create-cluster-and-infraenv.service
PartOf=assisted-service-pod.service
After=network-online.target create-cluster-and-infraenv.service
ConditionPathExists=/etc/assisted-service/node0

[Service]
Environment=PODMAN_SYSTEMD_UNIT=%n
Environment=SERVICE_BASE_URL={{.ServiceBaseURL}}
Environment=INFRA_ENV_ID={{.InfraEnvID}}
EnvironmentFile=/usr/local/share/assisted-service/agent-images.env
ExecStartPre=/bin/rm -f %t/%n.ctr-id
ExecStartPre=/bin/mkdir -p %t/agent-installer /etc/assisted/hostconfig
ExecStartPre=/usr/local/bin/wait-for-assisted-service.sh
ExecStart=podman run --cidfile=%t/%n.ctr-id --cgroups=no-conmon --log-driver=journald --restart=on-failure:10 --pod-id-file=%t/assisted-service-pod.pod-id --replace --name=apply-host-config -v /etc/assisted/hostconfig:/etc/assisted/hostconfig -v %t/agent-installer:/var/run/agent-installer:z --env SERVICE_BASE_URL --env INFRA_ENV_ID $SERVICE_IMAGE /usr/local/bin/agent-installer-client configure
ExecStop=/usr/bin/podman stop --ignore --cidfile=%t/%n.ctr-id
ExecStopPost=/usr/bin/podman rm -f --ignore --cidfile=%t/%n.ctr-id

KillMode=none
Type=oneshot
RemainAfterExit=true

[Install]
WantedBy=multi-user.target
data/data/agent/systemd/units/assisted-service-db.service (Normal file, 21 lines)
@@ -0,0 +1,21 @@
[Unit]
Description=Assisted Service database
Wants=network.target
RequiresMountsFor=%t/containers
BindsTo=assisted-service-pod.service
After=network-online.target assisted-service-pod.service

[Service]
Environment=PODMAN_SYSTEMD_UNIT=%n
EnvironmentFile=/usr/local/share/assisted-service/agent-images.env
Restart=on-failure
TimeoutStopSec=300
ExecStartPre=/bin/rm -f %t/%n.ctr-id
ExecStart=/usr/bin/podman run --user=postgres --cidfile=%t/%n.ctr-id --cgroups=no-conmon --log-driver=journald --rm --pod-id-file=%t/assisted-service-pod.pod-id --sdnotify=conmon --replace -d --name=assisted-db --env-file=/usr/local/share/assisted-service/assisted-db.env $SERVICE_IMAGE /bin/bash start_db.sh
ExecStop=/usr/bin/podman stop --ignore --cidfile=%t/%n.ctr-id
ExecStopPost=/usr/bin/podman rm -f --ignore --cidfile=%t/%n.ctr-id
Type=notify
NotifyAccess=all

[Install]
WantedBy=multi-user.target
data/data/agent/systemd/units/assisted-service-pod.service (Normal file, 24 lines)
@@ -0,0 +1,24 @@
[Unit]
Description=Assisted Service pod
Wants=network.target node-zero.service
After=network-online.target node-zero.service
ConditionPathExists=/etc/assisted-service/node0
RequiresMountsFor=
Requires=assisted-service-db.service assisted-service.service
Before=assisted-service-db.service assisted-service.service

[Service]
Environment=PODMAN_SYSTEMD_UNIT=%n
Restart=on-failure
TimeoutStopSec=70
ExecStartPre=/bin/rm -f %t/%n.pid %t/%N.pod-id
ExecStartPre=/usr/bin/podman pod create --infra-conmon-pidfile %t/%n.pid --pod-id-file %t/%N.pod-id -n assisted-service --publish=8090:8090 --publish=8080:8080 --publish=8888:8888
ExecStartPre=/usr/local/bin/get-container-images.sh
ExecStart=/usr/bin/podman pod start --pod-id-file=%t/%N.pod-id
ExecStop=/usr/bin/podman pod stop --ignore --pod-id-file=%t/%N.pod-id -t 10
ExecStopPost=/usr/bin/podman pod rm --ignore -f --pod-id-file=%t/%N.pod-id
PIDFile=%t/%n.pid
Type=forking

[Install]
WantedBy=multi-user.target
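Not part of the commit, but as a debugging sketch for the service chain defined by these units on a booted agent host, something like the following would be a reasonable starting point (unit and pod names are taken from the files above):

sudo systemctl status node-zero.service assisted-service-pod.service \
    assisted-service-db.service assisted-service.service
sudo journalctl -u assisted-service.service -f   # follow the REST API container logs
sudo podman pod ps                               # the pod is created with the name "assisted-service"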
@@ -0,0 +1,22 @@
[Unit]
Description=Assisted Service container
Wants=network.target
RequiresMountsFor=%t/containers
Requires=assisted-service-db.service
BindsTo=assisted-service-pod.service
After=network-online.target assisted-service-pod.service

[Service]
Environment=PODMAN_SYSTEMD_UNIT=%n
EnvironmentFile=/usr/local/share/assisted-service/agent-images.env
Restart=on-failure
TimeoutStopSec=300
ExecStartPre=/bin/rm -f %t/%n.ctr-id
ExecStart=/usr/bin/podman run --cidfile=%t/%n.ctr-id --cgroups=no-conmon --log-driver=journald --rm --pod-id-file=%t/assisted-service-pod.pod-id --sdnotify=conmon --replace -d --name=service -v /opt/agent/tls:/opt/agent/tls:z {{ if .HaveMirrorConfig }}-v /etc/containers:/etc/containers{{ end }} -v /etc/pki/ca-trust:/etc/pki/ca-trust --env-file=/usr/local/share/assisted-service/assisted-service.env --env-file=/usr/local/share/assisted-service/images.env --env-file=/etc/assisted-service/node0 --env-file=/usr/local/share/assisted-service/agent-images.env $SERVICE_IMAGE
ExecStop=/usr/bin/podman stop --ignore --cidfile=%t/%n.ctr-id
ExecStopPost=/usr/bin/podman rm -f --ignore --cidfile=%t/%n.ctr-id
Type=notify
NotifyAccess=all

[Install]
WantedBy=multi-user.target
@@ -0,0 +1,25 @@
[Unit]
Description=Service that creates initial cluster and infraenv
Wants=network-online.target
Requires=assisted-service.service
PartOf=assisted-service-pod.service
After=network-online.target assisted-service.service
ConditionPathExists=/etc/assisted-service/node0

[Service]
Environment=PODMAN_SYSTEMD_UNIT=%n
Environment=SERVICE_BASE_URL={{.ServiceBaseURL}}
Environment=OPENSHIFT_INSTALL_RELEASE_IMAGE_MIRROR={{.ReleaseImageMirror}}
EnvironmentFile=/usr/local/share/assisted-service/agent-images.env
ExecStartPre=/bin/rm -f %t/%n.ctr-id
ExecStartPre=/usr/local/bin/wait-for-assisted-service.sh
ExecStart=podman run --cidfile=%t/%n.ctr-id --cgroups=no-conmon --log-driver=journald --rm --pod-id-file=%t/assisted-service-pod.pod-id --replace --name=create-cluster-and-infraenv -v /etc/assisted/manifests:/manifests -v /etc/assisted/extra-manifests:/extra-manifests -v /etc/pki/ca-trust:/etc/pki/ca-trust:z --env SERVICE_BASE_URL --env OPENSHIFT_INSTALL_RELEASE_IMAGE_MIRROR $SERVICE_IMAGE /usr/local/bin/agent-installer-client register
ExecStop=/usr/bin/podman stop --ignore --cidfile=%t/%n.ctr-id
ExecStopPost=/usr/bin/podman rm -f --ignore --cidfile=%t/%n.ctr-id

KillMode=none
Type=oneshot
RemainAfterExit=true

[Install]
WantedBy=multi-user.target
data/data/agent/systemd/units/node-zero.service (Normal file, 12 lines)
@@ -0,0 +1,12 @@
[Unit]
Description=Identify node zero to run OpenShift Assisted Installation Service on
Wants=network-online.target
After=network-online.target

[Service]
Type=oneshot
RemainAfterExit=True
ExecStart=/usr/local/bin/set-node-zero.sh

[Install]
WantedBy=multi-user.target
@@ -0,0 +1,18 @@
[Unit]
Description=Prepare network manager config content
Before=dracut-initqueue.service
After=dracut-cmdline.service
DefaultDependencies=no

[Service]
User=root
ExecStart=/usr/local/bin/pre-network-manager-config.sh

TimeoutSec=60
KillMode=none
Type=oneshot
PrivateTmp=true
RemainAfterExit=no

[Install]
WantedBy=multi-user.target
data/data/agent/systemd/units/selinux.service (Normal file, 8 lines)
@@ -0,0 +1,8 @@
[Service]
Type=oneshot
ExecStartPre=checkmodule -M -m -o /root/assisted.mod /root/assisted.te
ExecStartPre=semodule_package -o /root/assisted.pp -m /root/assisted.mod
ExecStart=semodule -i /root/assisted.pp

[Install]
WantedBy=multi-user.target
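As an aside (not in the diff), once this unit has run you could confirm the policy module is loaded with a standard semodule query; the module name comes from assisted.te above.

sudo semodule -l | grep assisted   # expect an entry for the "assisted" module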
data/data/agent/systemd/units/set-hostname.service (Normal file, 14 lines)
@@ -0,0 +1,14 @@
[Unit]
Description=Agent-based installer hostname update service
Wants=network-online.target
After=local-fs.target

[Service]
ExecStart=/usr/local/bin/set-hostname.sh

KillMode=none
Type=oneshot
RemainAfterExit=true

[Install]
WantedBy=multi-user.target
@@ -0,0 +1,17 @@
[Unit]
Description=Service that starts cluster installation
Wants=network-online.target
Requires=apply-host-config.service
PartOf=assisted-service-pod.service
After=network-online.target apply-host-config.service
ConditionPathExists=/etc/assisted-service/node0

[Service]
ExecStart=/usr/local/bin/start-cluster-installation.sh

KillMode=none
Type=oneshot
RemainAfterExit=true

[Install]
WantedBy=multi-user.target
go.mod (46 lines changed)
@@ -24,17 +24,18 @@ require (
    github.com/aws/aws-sdk-go v1.43.19
    github.com/clarketm/json v1.14.1
    github.com/containers/image v3.0.2+incompatible
    github.com/coreos/ignition/v2 v2.9.0
    github.com/coreos/ignition/v2 v2.13.0
    github.com/coreos/stream-metadata-go v0.1.8
    github.com/form3tech-oss/jwt-go v3.2.3+incompatible
    github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32
    github.com/go-openapi/strfmt v0.21.2
    github.com/go-playground/validator/v10 v10.2.0
    github.com/go-yaml/yaml v2.1.0+incompatible
    github.com/golang-jwt/jwt v3.2.2+incompatible
    github.com/golang/mock v1.6.0
    github.com/golang/protobuf v1.5.2
    github.com/google/go-cmp v0.5.8
    github.com/google/uuid v1.2.0
    github.com/google/uuid v1.3.0
    github.com/gophercloud/gophercloud v0.24.0
    github.com/gophercloud/utils v0.0.0-20220307143606-8e7800759d16
    github.com/h2non/filetype v1.0.12
@@ -45,17 +46,21 @@ require (
    github.com/metal3-io/baremetal-operator/apis v0.0.0
    github.com/metal3-io/baremetal-operator/pkg/hardwareutils v0.0.0
    github.com/nutanix-cloud-native/prism-go-client v0.2.0
    github.com/openshift/api v0.0.0-20220823143838-5768cc618ba0
    github.com/openshift/api v3.9.1-0.20191111211345-a27ff30ebf09+incompatible
    github.com/openshift/assisted-image-service v0.0.0-20220307202600-054a1afa8d28
    github.com/openshift/assisted-service v1.0.10-0.20220223093655-7ada9949bf1d
    github.com/openshift/client-go v0.0.0-20211209144617-7385dd6338e3
    github.com/openshift/cloud-credential-operator v0.0.0-20200316201045-d10080b52c9e
    github.com/openshift/cluster-api-provider-baremetal v0.0.0-20220408122422-7a548effc26e
    github.com/openshift/cluster-api-provider-ibmcloud v0.0.1-0.20220201105455-8014e5e894b0
    github.com/openshift/cluster-api-provider-libvirt v0.2.1-0.20191219173431-2336783d4603
    github.com/openshift/cluster-api-provider-ovirt v0.1.1-0.20220323121149-e3f2850dd519
    github.com/openshift/hive/apis v0.0.0-20210506000654-5c038fb05190
    github.com/openshift/library-go v0.0.0-20220121154930-b7889002d63e
    github.com/openshift/machine-config-operator v0.0.0
    github.com/ovirt/go-ovirt v0.0.0-20210308100159-ac0bcbc88d7c
    github.com/ovirt/go-ovirt v0.0.0-20210809163552-d4276e35d3db
    github.com/pborman/uuid v1.2.0
    github.com/pelletier/go-toml v1.9.3
    github.com/pkg/errors v0.9.1
    github.com/pkg/sftp v1.10.1
    github.com/prometheus/client_golang v1.12.1
@@ -64,8 +69,9 @@ require (
    github.com/sirupsen/logrus v1.8.1
    github.com/spf13/cobra v1.5.0
    github.com/stretchr/testify v1.7.2
    github.com/ulikunitz/xz v0.5.8
    github.com/vincent-petithory/dataurl v0.0.0-20191104211930-d1553a71de50
    github.com/thedevsaddam/retry v0.0.0-20200324223450-9769a859cc6d
    github.com/ulikunitz/xz v0.5.10
    github.com/vincent-petithory/dataurl v1.0.0
    github.com/vmware/govmomi v0.27.4
    golang.org/x/crypto v0.0.0-20220214200702-86341886e292
    golang.org/x/lint v0.0.0-20210508222113-6edffad5e616
@@ -74,7 +80,7 @@ require (
    google.golang.org/api v0.44.0
    google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368
    google.golang.org/grpc v1.46.0
    gopkg.in/ini.v1 v1.66.2
    gopkg.in/ini.v1 v1.66.4
    gopkg.in/yaml.v2 v2.4.0
    k8s.io/api v0.24.3
    k8s.io/apiextensions-apiserver v0.24.3
@@ -131,13 +137,15 @@ require (
    github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d // indirect
    github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f // indirect
    github.com/beorn7/perks v1.0.1 // indirect
    github.com/cavaliercoder/go-cpio v0.0.0-20180626203310-925f9528c45e // indirect
    github.com/cespare/xxhash/v2 v2.1.2 // indirect
    github.com/coreos/go-semver v0.3.0 // indirect
    github.com/coreos/go-systemd/v22 v22.3.2 // indirect
    github.com/coreos/vcontext v0.0.0-20201120045928-b0e13dab675c // indirect
    github.com/coreos/vcontext v0.0.0-20211021162308-f1dbbca7bef4 // indirect
    github.com/davecgh/go-spew v1.1.1 // indirect
    github.com/dimchansky/utfbom v1.1.0 // indirect
    github.com/emicklei/go-restful v2.10.0+incompatible // indirect
    github.com/diskfs/go-diskfs v1.2.1-0.20210727185522-a769efacd235 // indirect
    github.com/emicklei/go-restful v2.14.2+incompatible // indirect
    github.com/evanphx/json-patch v4.12.0+incompatible // indirect
    github.com/fatih/color v1.13.0 // indirect
    github.com/fsnotify/fsnotify v1.5.1 // indirect
@@ -149,7 +157,6 @@ require (
    github.com/go-openapi/loads v0.21.1 // indirect
    github.com/go-openapi/runtime v0.23.0 // indirect
    github.com/go-openapi/spec v0.20.4 // indirect
    github.com/go-openapi/strfmt v0.21.2 // indirect
    github.com/go-openapi/swag v0.21.1 // indirect
    github.com/go-openapi/validate v0.20.3 // indirect
    github.com/go-playground/locales v0.14.0 // indirect
@@ -161,7 +168,9 @@ require (
    github.com/google/gnostic v0.5.7-v3refs // indirect
    github.com/google/gofuzz v1.2.0 // indirect
    github.com/googleapis/gax-go/v2 v2.0.5 // indirect
    github.com/hashicorp/errwrap v1.0.0 // indirect
    github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
    github.com/hashicorp/go-multierror v1.1.1 // indirect
    github.com/hashicorp/go-retryablehttp v0.7.0 // indirect
    github.com/hashicorp/go-version v1.5.0 // indirect
    github.com/hashicorp/logutils v1.0.0 // indirect
@@ -169,6 +178,8 @@ require (
    github.com/hashicorp/terraform-plugin-sdk/v2 v2.17.0 // indirect
    github.com/imdario/mergo v0.3.12 // indirect
    github.com/inconshreveable/mousetrap v1.0.1 // indirect
    github.com/jinzhu/inflection v1.0.0 // indirect
    github.com/jinzhu/now v1.1.2 // indirect
    github.com/jmespath/go-jmespath v0.4.0 // indirect
    github.com/josharian/intern v1.0.0 // indirect
    github.com/json-iterator/go v1.1.12 // indirect
@@ -188,16 +199,21 @@ require (
    github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
    github.com/oklog/ulid v1.3.1 // indirect
    github.com/opencontainers/go-digest v1.0.0 // indirect
    github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6 // indirect
    github.com/opencontainers/image-spec v1.0.2 // indirect
    github.com/openshift/cluster-api v0.0.0-20190805113604-f8de78af80fc // indirect
    github.com/openshift/custom-resource-status v1.1.0 // indirect
    github.com/opentracing/opentracing-go v1.2.0 // indirect
    github.com/pierrec/lz4 v2.3.0+incompatible // indirect
    github.com/pkg/xattr v0.4.1 // indirect
    github.com/pmezard/go-difflib v1.0.0 // indirect
    github.com/prometheus/client_model v0.2.0 // indirect
    github.com/prometheus/procfs v0.7.3 // indirect
    github.com/satori/go.uuid v1.2.0 // indirect
    golang.org/x/net v0.0.0-20220812174116-3211cb980234 // indirect
    golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 // indirect
    gopkg.in/djherbis/times.v1 v1.2.0 // indirect
    gopkg.in/gcfg.v1 v1.2.3 // indirect
    gopkg.in/warnings.v0 v0.1.2 // indirect
    gorm.io/gorm v1.22.3 // indirect
)

// OpenShift Forks
@@ -207,6 +223,7 @@ replace (
    github.com/metal3-io/baremetal-operator/pkg/hardwareutils => github.com/openshift/baremetal-operator/pkg/hardwareutils v0.0.0-20220128094204-28771f489634
    k8s.io/cloud-provider-vsphere => github.com/openshift/cloud-provider-vsphere v1.19.1-0.20211222185833-7829863d0558
    sigs.k8s.io/cluster-api => sigs.k8s.io/cluster-api v0.4.5
    sigs.k8s.io/cluster-api-provider-aws => github.com/openshift/cluster-api-provider-aws v0.2.1-0.20200929152424-eab2e087f366 // Indirect dependency through MAO from cluster API providers
    sigs.k8s.io/cluster-api-provider-azure => github.com/openshift/cluster-api-provider-azure v0.1.0-alpha.3.0.20210626224711-5d94c794092f // Indirect dependency through MAO from cluster API providers
    sigs.k8s.io/cluster-api-provider-openstack => github.com/openshift/cluster-api-provider-openstack v0.0.0-20211111204942-611d320170af
)
@@ -220,3 +237,8 @@ replace k8s.io/client-go => k8s.io/client-go v0.24.0

// Needed so that the InstallConfig CRD can be created. Later versions of controller-gen balk at using IPNet as a field.
replace sigs.k8s.io/controller-tools => sigs.k8s.io/controller-tools v0.3.1-0.20200617211605-651903477185

// Override the OpenShift API version in hive
replace github.com/openshift/api => github.com/openshift/api v0.0.0-20220823143838-5768cc618ba0

replace github.com/terraform-providers/terraform-provider-nutanix => github.com/nutanix/terraform-provider-nutanix v1.5.0
pkg/agent/OWNERS (Normal file, 7 lines)
@@ -0,0 +1,7 @@
# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md
# This file just uses aliases defined in OWNERS_ALIASES.

approvers:
- agent-approvers
reviewers:
- agent-reviewers
pkg/agent/cluster.go (Normal file, 433 lines)
@@ -0,0 +1,433 @@
package agent

import (
    "context"
    "path/filepath"
    "time"

    "github.com/go-openapi/strfmt"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"

    "github.com/openshift/assisted-service/client/installer"
    "github.com/openshift/assisted-service/models"
)

// Cluster is a struct designed to help interact with the cluster that is
// currently being installed by agent installer.
type Cluster struct {
    Ctx                    context.Context
    API                    *clientSet
    assetDir               string
    clusterConsoleRouteURL string
    clusterID              *strfmt.UUID
    clusterInfraEnvID      *strfmt.UUID
    installHistory         *clusterInstallStatusHistory
}

type clientSet struct {
    Kube      *ClusterKubeAPIClient
    OpenShift *ClusterOpenShiftAPIClient
    Rest      *NodeZeroRestClient
}

type clusterInstallStatusHistory struct {
    RestAPISeen                                         bool
    RestAPIClusterStatusAddingHostsSeen                 bool
    RestAPIClusterStatusCancelledSeen                   bool
    RestAPIClusterStatusInstallingSeen                  bool
    RestAPIClusterStatusInstallingPendingUserActionSeen bool
    RestAPIClusterStatusInsufficientSeen                bool
    RestAPIClusterStatusFinalizingSeen                  bool
    RestAPIClusterStatusErrorSeen                       bool
    RestAPIClusterStatusPendingForInputSeen             bool
    RestAPIClusterStatusPreparingForInstallationSeen    bool
    RestAPIClusterStatusReadySeen                       bool
    RestAPIInfraEnvEventList                            models.EventList
    RestAPIPreviousClusterStatus                        string
    RestAPIPreviousEventMessage                         string
    RestAPIHostValidationsPassed                        bool
    ClusterKubeAPISeen                                  bool
    ClusterBootstrapComplete                            bool
    ClusterOperatorsInitialized                         bool
    ClusterConsoleRouteCreated                          bool
    ClusterConsoleRouteURLCreated                       bool
    ClusterInstallComplete                              bool
}

// NewCluster initializes a Cluster object
func NewCluster(ctx context.Context, assetDir string) (*Cluster, error) {

    czero := &Cluster{}
    capi := &clientSet{}

    restclient, err := NewNodeZeroRestClient(ctx, assetDir)
    if err != nil {
        logrus.Fatal(err)
    }
    kubeclient, err := NewClusterKubeAPIClient(ctx, assetDir)
    if err != nil {
        logrus.Fatal(err)
    }

    ocpclient, err := NewClusterOpenShiftAPIClient(ctx, assetDir)
    if err != nil {
        logrus.Fatal(err)
    }

    capi.Rest = restclient
    capi.Kube = kubeclient
    capi.OpenShift = ocpclient

    cinstallstatushistory := &clusterInstallStatusHistory{
        RestAPISeen:                                         false,
        RestAPIClusterStatusAddingHostsSeen:                 false,
        RestAPIClusterStatusCancelledSeen:                   false,
        RestAPIClusterStatusInstallingSeen:                  false,
        RestAPIClusterStatusInstallingPendingUserActionSeen: false,
        RestAPIClusterStatusInsufficientSeen:                false,
        RestAPIClusterStatusFinalizingSeen:                  false,
        RestAPIClusterStatusErrorSeen:                       false,
        RestAPIClusterStatusPendingForInputSeen:             false,
        RestAPIClusterStatusPreparingForInstallationSeen:    false,
        RestAPIClusterStatusReadySeen:                       false,
        RestAPIInfraEnvEventList:                            nil,
        RestAPIPreviousClusterStatus:                        "",
        RestAPIPreviousEventMessage:                         "",
        RestAPIHostValidationsPassed:                        false,
        ClusterKubeAPISeen:                                  false,
        ClusterBootstrapComplete:                            false,
        ClusterOperatorsInitialized:                         false,
        ClusterConsoleRouteCreated:                          false,
        ClusterConsoleRouteURLCreated:                       false,
        ClusterInstallComplete:                              false,
    }

    czero.Ctx = ctx
    czero.API = capi
    czero.clusterID = nil
    czero.clusterInfraEnvID = nil
    czero.assetDir = assetDir
    czero.clusterConsoleRouteURL = ""
    czero.installHistory = cinstallstatushistory
    return czero, nil
}

// IsBootstrapComplete determines if the cluster has completed the bootstrap process.
func (czero *Cluster) IsBootstrapComplete() (bool, error) {

    if czero.installHistory.ClusterBootstrapComplete {
        logrus.Info("Bootstrap is complete")
        return true, nil
    }

    clusterKubeAPILive, clusterKubeAPIErr := czero.API.Kube.IsKubeAPILive()
    if clusterKubeAPIErr != nil {
        logrus.Trace(errors.Wrap(clusterKubeAPIErr, "Cluster Kube API is not available"))
    }

    if clusterKubeAPILive {

        // First time we see the cluster Kube API
        if !czero.installHistory.ClusterKubeAPISeen {
            logrus.Info("Cluster Kube API Initialized")
            czero.installHistory.ClusterKubeAPISeen = true
        }

        configmap, err := czero.API.Kube.IsBootstrapConfigMapComplete()
        if configmap {
            logrus.Info("Bootstrap configMap status is complete")
            czero.installHistory.ClusterBootstrapComplete = true
            return true, nil
        }
        if err != nil {
            logrus.Debug(err)
        }
    }

    agentRestAPILive, agentRestAPIErr := czero.API.Rest.IsRestAPILive()
    if agentRestAPIErr != nil {
        logrus.Trace(errors.Wrap(agentRestAPIErr, "Agent Rest API is not available"))
    }

    if agentRestAPILive {

        // First time we see the agent Rest API
        if !czero.installHistory.RestAPISeen {
            logrus.Debug("Agent Rest API Initialized")
            czero.installHistory.RestAPISeen = true
        }

        // Lazy loading of the clusterID and clusterInfraEnvID
        if czero.clusterID == nil {
            clusterID, err := czero.API.Rest.getClusterID()
            if err != nil {
                return false, errors.Wrap(err, "Unable to retrieve clusterID from Agent Rest API")
            }
            czero.clusterID = clusterID
        }

        if czero.clusterInfraEnvID == nil {
            clusterInfraEnvID, err := czero.API.Rest.getClusterInfraEnvID()
            if err != nil {
                return false, errors.Wrap(err, "Unable to retrieve clusterInfraEnvID from Agent Rest API")
            }
            czero.clusterInfraEnvID = clusterInfraEnvID
        }

        logrus.Trace("Getting cluster metadata from Agent Rest API")
        clusterMetadata, err := czero.GetClusterRestAPIMetadata()
        if err != nil {
            return false, errors.Wrap(err, "Unable to retrieve cluster metadata from Agent Rest API")
        }

        if clusterMetadata == nil {
            return false, errors.New("cluster metadata returned nil from Agent Rest API")
        }

        if !checkHostsValidations(clusterMetadata, logrus.StandardLogger()) {
            return false, errors.New("cluster host validations failed")
        }

        czero.PrintInstallStatus(clusterMetadata)
        czero.installHistory.RestAPIPreviousClusterStatus = *clusterMetadata.Status

        // Update Install History object when we see these states
        czero.updateInstallHistoryClusterStatus(clusterMetadata)

        installing, _ := czero.IsInstalling(*clusterMetadata.Status)
        if !installing {
            logrus.Warn("Cluster has stopped installing... working to recover installation")
            errored, _ := czero.HasErrored(*clusterMetadata.Status)
            if errored {
                return false, errors.New("cluster installation has stopped due to errors")
            } else if *clusterMetadata.Status == models.ClusterStatusCancelled {
                return false, errors.New("cluster installation was cancelled")
            }
        }

        // Print most recent event associated with the clusterInfraEnvID
        eventList, err := czero.API.Rest.GetInfraEnvEvents(czero.clusterInfraEnvID)
        if err != nil {
            return false, errors.Wrap(err, "Unable to retrieve events about the cluster from the Agent Rest API")
        }
        if len(eventList) == 0 {
            logrus.Trace("No cluster events detected from the Agent Rest API")
        } else {
            mostRecentEvent := eventList[len(eventList)-1]
            // Don't print the same status message back to back
            if *mostRecentEvent.Message != czero.installHistory.RestAPIPreviousEventMessage {
                if *mostRecentEvent.Severity == models.EventSeverityInfo {
                    logrus.Info(*mostRecentEvent.Message)
                } else {
                    logrus.Warn(*mostRecentEvent.Message)
                }
            }
            czero.installHistory.RestAPIPreviousEventMessage = *mostRecentEvent.Message
            czero.installHistory.RestAPIInfraEnvEventList = eventList
        }

    }

    // neither API is available
    if !agentRestAPILive && !clusterKubeAPILive {
        logrus.Trace("Current API Status: Node Zero Agent API: down, Cluster Kube API: down")
        if !czero.installHistory.RestAPISeen && !czero.installHistory.ClusterKubeAPISeen {
            logrus.Debug("Node zero Agent Rest API never initialized. Cluster API never initialized")
            logrus.Info("Waiting for cluster install to initialize. Sleeping for 30 seconds")
            time.Sleep(30 * time.Second)
            return false, nil
        }

        if czero.installHistory.RestAPISeen && !czero.installHistory.ClusterKubeAPISeen {
            logrus.Debug("Cluster API never initialized")
            logrus.Debugf("Cluster install status from Agent Rest API last seen was: %s", czero.installHistory.RestAPIPreviousClusterStatus)
            return false, errors.New("cluster bootstrap did not complete")
        }
    }

    logrus.Trace("cluster bootstrap is not complete")
    return false, nil
}

// IsInstallComplete determines if the cluster has completed installation.
func (czero *Cluster) IsInstallComplete() (bool, error) {

    if czero.installHistory.ClusterInstallComplete {
        logrus.Info("Cluster installation is complete")
        return true, nil
    }

    if !czero.installHistory.ClusterOperatorsInitialized {
        initialized, err := czero.API.OpenShift.AreClusterOperatorsInitialized()
        if initialized && err == nil {
            czero.installHistory.ClusterOperatorsInitialized = true
        }
        if err != nil {
            return false, errors.Wrap(err, "Error while initializing cluster operators")
        }
    }

    if !czero.installHistory.ClusterConsoleRouteCreated {
        route, err := czero.API.OpenShift.IsConsoleRouteAvailable()
        if route && err == nil {
            czero.installHistory.ClusterConsoleRouteCreated = true
        }
        if err != nil {
            return false, errors.Wrap(err, "Error while waiting for console route")
        }
    }

    if !czero.installHistory.ClusterConsoleRouteURLCreated {
        available, url, err := czero.API.OpenShift.IsConsoleRouteURLAvailable()
        if available && url != "" && err == nil {
            czero.clusterConsoleRouteURL = url
            czero.installHistory.ClusterConsoleRouteURLCreated = true
        }
        if err != nil {
            return false, errors.Wrap(err, "Error while waiting for console route URL")
        }
    }

    if czero.installHistory.ClusterOperatorsInitialized &&
        czero.installHistory.ClusterConsoleRouteCreated &&
        czero.installHistory.ClusterConsoleRouteURLCreated {
        czero.installHistory.ClusterInstallComplete = true
        return true, nil
    }

    return false, nil
}

// GetClusterRestAPIMetadata retrieves the current cluster metadata from the Agent Rest API
func (czero *Cluster) GetClusterRestAPIMetadata() (*models.Cluster, error) {
    // GET /v2/clusters/{cluster_zero_id}
    if czero.clusterID != nil {
        getClusterParams := &installer.V2GetClusterParams{ClusterID: *czero.clusterID}
        result, err := czero.API.Rest.Client.Installer.V2GetCluster(czero.Ctx, getClusterParams)
        if err != nil {
            return nil, err
        }
        return result.Payload, nil
    }
    return nil, errors.New("no clusterID known for the cluster")
}

// HasErrored determines if the cluster installation has errored using the models from the Agent Rest API.
func (czero *Cluster) HasErrored(status string) (bool, string) {
    clusterErrorStates := map[string]bool{
        models.ClusterStatusAddingHosts:                 false,
        models.ClusterStatusCancelled:                   false,
        models.ClusterStatusInstalling:                  false,
        models.ClusterStatusInstallingPendingUserAction: true,
        models.ClusterStatusInsufficient:                true,
        models.ClusterStatusError:                       true,
        models.ClusterStatusFinalizing:                  false,
        models.ClusterStatusPendingForInput:             false,
        models.ClusterStatusPreparingForInstallation:    false,
        models.ClusterStatusReady:                       false,
    }
    return clusterErrorStates[status], status
}

// IsInstalling determines if the cluster is still installing using the models from the Agent Rest API.
func (czero *Cluster) IsInstalling(status string) (bool, string) {
    clusterInstallingStates := map[string]bool{
        models.ClusterStatusAddingHosts:                 true,
        models.ClusterStatusCancelled:                   false,
        models.ClusterStatusInstalling:                  true,
        models.ClusterStatusInstallingPendingUserAction: false,
        models.ClusterStatusInsufficient:                false,
        models.ClusterStatusError:                       false,
        models.ClusterStatusFinalizing:                  true,
        models.ClusterStatusPendingForInput:             true,
        models.ClusterStatusPreparingForInstallation:    true,
        models.ClusterStatusReady:                       true,
    }
    return clusterInstallingStates[status], status
}

// PrintInfraEnvRestAPIEventList prints the whole event list for debugging
func (czero *Cluster) PrintInfraEnvRestAPIEventList() {
    if czero.installHistory.RestAPIInfraEnvEventList != nil {
        for i := 0; i < len(czero.installHistory.RestAPIInfraEnvEventList); i++ {
            logrus.Debug(*czero.installHistory.RestAPIInfraEnvEventList[i].Message)
        }
    } else {
        logrus.Debug("No events logged from the Agent Rest API")
    }
}

// PrintInstallationComplete prints the installation complete information
func (czero *Cluster) PrintInstallationComplete() error {
    absDir, err := filepath.Abs(czero.assetDir)
    if err != nil {
        return err
    }
    kubeconfig := filepath.Join(absDir, "auth", "kubeconfig")
    logrus.Info("Install complete!")
    logrus.Infof("To access the cluster as the system:admin user when using 'oc', run\n export KUBECONFIG=%s", kubeconfig)
    logrus.Infof("Access the OpenShift web-console here: %s", czero.clusterConsoleRouteURL)
    // TODO: log kubeadmin password for the console
    return nil
}

// PrintInstallStatus prints a human-friendly message using the models from the Agent Rest API.
func (czero *Cluster) PrintInstallStatus(cluster *models.Cluster) error {

    friendlyStatus := humanFriendlyClusterInstallStatus(*cluster.Status)
    logrus.Trace(friendlyStatus)
    // Don't print the same status message back to back
    if *cluster.Status != czero.installHistory.RestAPIPreviousClusterStatus {
        logrus.Info(friendlyStatus)
    }

    return nil
}

// Human-friendly install status strings mapped to the Agent Rest API cluster statuses
func humanFriendlyClusterInstallStatus(status string) string {
    clusterStoppedInstallingStates := map[string]string{
        models.ClusterStatusAddingHosts:                 "Cluster is adding hosts",
        models.ClusterStatusCancelled:                   "Cluster installation cancelled",
        models.ClusterStatusError:                       "Cluster has hosts in error",
        models.ClusterStatusFinalizing:                  "Finalizing cluster installation",
        models.ClusterStatusInstalling:                  "Cluster installation in progress",
        models.ClusterStatusInstallingPendingUserAction: "Cluster has hosts requiring user input",
        models.ClusterStatusInsufficient:                "Cluster is not ready for install. Check host validations",
        models.ClusterStatusPendingForInput:             "User input is required to continue cluster installation",
        models.ClusterStatusPreparingForInstallation:    "Preparing cluster for installation",
        models.ClusterStatusReady:                       "Cluster is ready for install",
    }
    return clusterStoppedInstallingStates[status]
}

// Update the install history struct when we see the status from the Agent Rest API
func (czero *Cluster) updateInstallHistoryClusterStatus(cluster *models.Cluster) {
    switch *cluster.Status {
    case models.ClusterStatusAddingHosts:
        czero.installHistory.RestAPIClusterStatusAddingHostsSeen = true
    case models.ClusterStatusCancelled:
        czero.installHistory.RestAPIClusterStatusCancelledSeen = true
    case models.ClusterStatusError:
        czero.installHistory.RestAPIClusterStatusErrorSeen = true
    case models.ClusterStatusFinalizing:
        czero.installHistory.RestAPIClusterStatusFinalizingSeen = true
    case models.ClusterStatusInsufficient:
        czero.installHistory.RestAPIClusterStatusInsufficientSeen = true
    case models.ClusterStatusInstalling:
        czero.installHistory.RestAPIClusterStatusInstallingSeen = true
    case models.ClusterStatusInstallingPendingUserAction:
        czero.installHistory.RestAPIClusterStatusInstallingPendingUserActionSeen = true
    case models.ClusterStatusPendingForInput:
        czero.installHistory.RestAPIClusterStatusPendingForInputSeen = true
    case models.ClusterStatusPreparingForInstallation:
        czero.installHistory.RestAPIClusterStatusPreparingForInstallationSeen = true
    case models.ClusterStatusReady:
        czero.installHistory.RestAPIClusterStatusReadySeen = true
    }
}
90
pkg/agent/kube.go
Normal file
@@ -0,0 +1,90 @@
package agent

import (
	"context"
	"path/filepath"

	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
)

// ClusterKubeAPIClient is a kube client to interact with the cluster that agent installer is installing.
type ClusterKubeAPIClient struct {
	Client     *kubernetes.Clientset
	ctx        context.Context
	config     *rest.Config
	configPath string
}

// NewClusterKubeAPIClient Create a new kube client to interact with the cluster under install.
func NewClusterKubeAPIClient(ctx context.Context, assetDir string) (*ClusterKubeAPIClient, error) {

	kubeClient := &ClusterKubeAPIClient{}

	kubeconfigpath := filepath.Join(assetDir, "auth", "kubeconfig")
	kubeconfig, err := clientcmd.BuildConfigFromFlags("", kubeconfigpath)
	if err != nil {
		return nil, errors.Wrap(err, "error loading kubeconfig from assets")
	}

	kubeclient, err := kubernetes.NewForConfig(kubeconfig)
	if err != nil {
		return nil, errors.Wrap(err, "creating a Kubernetes client from assets failed")
	}

	kubeClient.Client = kubeclient
	kubeClient.ctx = ctx
	kubeClient.config = kubeconfig
	kubeClient.configPath = kubeconfigpath

	return kubeClient, nil
}

// IsKubeAPILive Determine if the cluster under install has initialized the Kubernetes API.
func (kube *ClusterKubeAPIClient) IsKubeAPILive() (bool, error) {

	discovery := kube.Client.Discovery()
	version, err := discovery.ServerVersion()
	if err != nil {
		return false, err
	}
	logrus.Debugf("cluster API is up and running %s", version)
	return true, nil
}

// DoesKubeConfigExist Determine if the kubeconfig for the cluster can be used without errors.
func (kube *ClusterKubeAPIClient) DoesKubeConfigExist() (bool, error) {

	_, err := clientcmd.LoadFromFile(kube.configPath)
	if err != nil {
		return false, errors.Wrap(err, "error loading kubeconfig from file")
	}
	return true, nil
}

// IsBootstrapConfigMapComplete Determine if the cluster's bootstrap configmap has the status complete.
func (kube *ClusterKubeAPIClient) IsBootstrapConfigMapComplete() (bool, error) {

	// Get latest version of bootstrap configmap
	bootstrap, err := kube.Client.CoreV1().ConfigMaps("kube-system").Get(kube.ctx, "bootstrap", v1.GetOptions{})

	if err != nil {
		return false, errors.Wrap(err, "bootstrap configmap not found")
	}
	// Found a bootstrap configmap, need to check its status
	if bootstrap != nil {
		status, ok := bootstrap.Data["status"]
		if !ok {
			logrus.Debug("no status found in bootstrap configmap")
			return false, nil
		}
		if status == "complete" {
			return true, nil
		}
	}
	return false, nil
}
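
// waitForBootstrapConfigMap is an illustrative sketch (not part of the original
// change) showing how ClusterKubeAPIClient might be used: build the client from
// the asset directory, confirm the cluster API is reachable, then check whether
// the bootstrap configmap reports complete. The function name is hypothetical.
func waitForBootstrapConfigMap(ctx context.Context, assetDir string) (bool, error) {
	kube, err := NewClusterKubeAPIClient(ctx, assetDir)
	if err != nil {
		return false, err
	}
	if live, err := kube.IsKubeAPILive(); !live {
		return false, err
	}
	return kube.IsBootstrapConfigMapComplete()
}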
152
pkg/agent/ocp.go
Normal file
@@ -0,0 +1,152 @@
|
||||
package agent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"path/filepath"
|
||||
|
||||
configv1 "github.com/openshift/api/config/v1"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
|
||||
configclient "github.com/openshift/client-go/config/clientset/versioned"
|
||||
routeclient "github.com/openshift/client-go/route/clientset/versioned"
|
||||
cov1helpers "github.com/openshift/library-go/pkg/config/clusteroperator/v1helpers"
|
||||
"github.com/openshift/library-go/pkg/route/routeapihelpers"
|
||||
)
|
||||
|
||||
// ClusterOpenShiftAPIClient Kube client using the OpenShift clientset instead of the Kubernetes clientset
|
||||
type ClusterOpenShiftAPIClient struct {
|
||||
ConfigClient *configclient.Clientset
|
||||
RouteClient *routeclient.Clientset
|
||||
ctx context.Context
|
||||
config *rest.Config
|
||||
configPath string
|
||||
}
|
||||
|
||||
const (
|
||||
// Need to keep these updated if they change
|
||||
consoleNamespace = "openshift-console"
|
||||
consoleRouteName = "console"
|
||||
)
|
||||
|
||||
// NewClusterOpenShiftAPIClient Create a kube client with OCP understanding
|
||||
func NewClusterOpenShiftAPIClient(ctx context.Context, assetDir string) (*ClusterOpenShiftAPIClient, error) {
|
||||
|
||||
ocpClient := &ClusterOpenShiftAPIClient{}
|
||||
|
||||
kubeconfigpath := filepath.Join(assetDir, "auth", "kubeconfig")
|
||||
kubeconfig, err := clientcmd.BuildConfigFromFlags("", kubeconfigpath)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "creating kubeconfig for ocp config client")
|
||||
}
|
||||
|
||||
configClient, err := configclient.NewForConfig(kubeconfig)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "creating an ocp config client")
|
||||
}
|
||||
|
||||
routeClient, err := routeclient.NewForConfig(kubeconfig)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "creating an ocp route client")
|
||||
}
|
||||
|
||||
ocpClient.ConfigClient = configClient
|
||||
ocpClient.RouteClient = routeClient
|
||||
ocpClient.ctx = ctx
|
||||
ocpClient.config = kubeconfig
|
||||
ocpClient.configPath = kubeconfigpath
|
||||
|
||||
return ocpClient, nil
|
||||
|
||||
}
|
||||
|
||||
// AreClusterOperatorsInitialized Waits for all Openshift cluster operators to initialize
|
||||
func (ocp *ClusterOpenShiftAPIClient) AreClusterOperatorsInitialized() (bool, error) {
|
||||
|
||||
var lastError string
|
||||
failing := configv1.ClusterStatusConditionType("Failing")
|
||||
|
||||
version, err := ocp.ConfigClient.ConfigV1().ClusterVersions().Get(ocp.ctx, "version", metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, errors.Wrap(err, "Getting ClusterVersion object")
|
||||
}
|
||||
|
||||
if cov1helpers.IsStatusConditionTrue(version.Status.Conditions, configv1.OperatorAvailable) &&
|
||||
cov1helpers.IsStatusConditionFalse(version.Status.Conditions, failing) &&
|
||||
cov1helpers.IsStatusConditionFalse(version.Status.Conditions, configv1.OperatorProgressing) {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
if cov1helpers.IsStatusConditionTrue(version.Status.Conditions, failing) {
|
||||
lastError = cov1helpers.FindStatusCondition(version.Status.Conditions, failing).Message
|
||||
} else if cov1helpers.IsStatusConditionTrue(version.Status.Conditions, configv1.OperatorProgressing) {
|
||||
lastError = cov1helpers.FindStatusCondition(version.Status.Conditions, configv1.OperatorProgressing).Message
|
||||
}
|
||||
logrus.Debugf("Still waiting for the cluster to initialize: %s", lastError)
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// IsConsoleRouteAvailable Check if the OCP console route is created
|
||||
func (ocp *ClusterOpenShiftAPIClient) IsConsoleRouteAvailable() (bool, error) {
|
||||
route, err := ocp.RouteClient.RouteV1().Routes(consoleNamespace).Get(ocp.ctx, consoleRouteName, metav1.GetOptions{})
|
||||
if err == nil {
|
||||
logrus.Debugf("Route found in openshift-console namespace: %s", consoleRouteName)
|
||||
if _, _, err2 := routeapihelpers.IngressURI(route, ""); err2 == nil {
|
||||
logrus.Debug("OpenShift console route is admitted")
|
||||
return true, nil
|
||||
} else {
|
||||
err = err2
|
||||
}
|
||||
}
|
||||
return false, errors.Wrap(err, "Waiting for openshift-console route")
|
||||
|
||||
}
|
||||
|
||||
// IsConsoleRouteURLAvailable Check if the console route URL is available
|
||||
func (ocp *ClusterOpenShiftAPIClient) IsConsoleRouteURLAvailable() (bool, string, error) {
|
||||
url := ""
|
||||
route, err := ocp.RouteClient.RouteV1().Routes(consoleNamespace).Get(ocp.ctx, consoleRouteName, metav1.GetOptions{})
|
||||
if err == nil {
|
||||
if uri, _, err2 := routeapihelpers.IngressURI(route, ""); err2 == nil {
|
||||
url = uri.String()
|
||||
} else {
|
||||
err = err2
|
||||
}
|
||||
}
|
||||
if url == "" {
|
||||
return false, url, errors.Wrap(err, "Waiting for openshift-console URL")
|
||||
}
|
||||
return true, url, nil
|
||||
}
|
||||
|
||||
// LogClusterOperatorConditions Log OCP cluster operator conditions
|
||||
func (ocp *ClusterOpenShiftAPIClient) LogClusterOperatorConditions() error {
|
||||
|
||||
operators, err := ocp.ConfigClient.ConfigV1().ClusterOperators().List(ocp.ctx, metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Listing ClusterOperator objects")
|
||||
}
|
||||
|
||||
for _, operator := range operators.Items {
|
||||
for _, condition := range operator.Status.Conditions {
|
||||
if condition.Type == configv1.OperatorUpgradeable {
|
||||
continue
|
||||
} else if condition.Type == configv1.OperatorAvailable && condition.Status == configv1.ConditionTrue {
|
||||
continue
|
||||
} else if (condition.Type == configv1.OperatorDegraded || condition.Type == configv1.OperatorProgressing) && condition.Status == configv1.ConditionFalse {
|
||||
continue
|
||||
}
|
||||
if condition.Type == configv1.OperatorDegraded {
|
||||
logrus.Errorf("Cluster operator %s %s is %s with %s: %s", operator.ObjectMeta.Name, condition.Type, condition.Status, condition.Reason, condition.Message)
|
||||
} else {
|
||||
logrus.Infof("Cluster operator %s %s is %s with %s: %s", operator.ObjectMeta.Name, condition.Type, condition.Status, condition.Reason, condition.Message)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
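
// consoleURLWhenReady is an illustrative sketch (not part of the original change)
// showing one way the OpenShift client above could be combined: wait until the
// cluster operators settle, then fetch the console route URL. The function name
// is hypothetical.
func consoleURLWhenReady(ctx context.Context, assetDir string) (string, error) {
	ocp, err := NewClusterOpenShiftAPIClient(ctx, assetDir)
	if err != nil {
		return "", err
	}
	initialized, err := ocp.AreClusterOperatorsInitialized()
	if err != nil {
		return "", err
	}
	if !initialized {
		return "", errors.New("cluster operators are not initialized yet")
	}
	available, url, err := ocp.IsConsoleRouteURLAvailable()
	if !available {
		return "", err
	}
	return url, nil
}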
|
||||
156
pkg/agent/rest.go
Normal file
@@ -0,0 +1,156 @@
|
||||
package agent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
"net/url"
|
||||
|
||||
"github.com/go-openapi/strfmt"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/openshift/assisted-service/api/v1beta1"
|
||||
"github.com/openshift/assisted-service/client"
|
||||
"github.com/openshift/assisted-service/client/events"
|
||||
"github.com/openshift/assisted-service/client/installer"
|
||||
"github.com/openshift/assisted-service/models"
|
||||
|
||||
"github.com/openshift/installer/pkg/asset/agent/agentconfig"
|
||||
"github.com/openshift/installer/pkg/asset/agent/image"
|
||||
"github.com/openshift/installer/pkg/asset/agent/manifests"
|
||||
assetstore "github.com/openshift/installer/pkg/asset/store"
|
||||
"github.com/openshift/installer/pkg/types/agent"
|
||||
)
|
||||
|
||||
// NodeZeroRestClient is a struct to interact with the Agent Rest API that is on node zero.
|
||||
type NodeZeroRestClient struct {
|
||||
Client *client.AssistedInstall
|
||||
ctx context.Context
|
||||
config client.Config
|
||||
NodeZeroIP string
|
||||
}
|
||||
|
||||
// NewNodeZeroRestClient Initialize a new rest client to interact with the Agent Rest API on node zero.
|
||||
func NewNodeZeroRestClient(ctx context.Context, assetDir string) (*NodeZeroRestClient, error) {
|
||||
restClient := &NodeZeroRestClient{}
|
||||
agentConfigAsset := &agentconfig.AgentConfig{}
|
||||
agentManifestsAsset := &manifests.AgentManifests{}
|
||||
|
||||
assetStore, err := assetstore.NewStore(assetDir)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to create asset store")
|
||||
}
|
||||
|
||||
agentConfig, agentConfigError := assetStore.Load(agentConfigAsset)
|
||||
agentManifests, manifestError := assetStore.Load(agentManifestsAsset)
|
||||
|
||||
if agentConfigError != nil {
|
||||
logrus.Debug(errors.Wrapf(agentConfigError, "failed to load %s", agentConfigAsset.Name()))
|
||||
}
|
||||
if manifestError != nil {
|
||||
logrus.Debug(errors.Wrapf(manifestError, "failed to load %s", agentManifestsAsset.Name()))
|
||||
}
|
||||
if agentConfigError != nil || manifestError != nil {
|
||||
return nil, errors.New("failed to load AgentConfig or NMStateConfig")
|
||||
}
|
||||
|
||||
var RendezvousIP string
|
||||
var rendezvousIPError error
|
||||
var emptyNMStateConfigs []*v1beta1.NMStateConfig
|
||||
|
||||
if agentConfig != nil && agentManifests != nil {
|
||||
RendezvousIP, rendezvousIPError = image.RetrieveRendezvousIP(agentConfig.(*agentconfig.AgentConfig).Config, agentManifests.(*manifests.AgentManifests).NMStateConfigs)
|
||||
} else if agentConfig == nil && agentManifests != nil {
|
||||
RendezvousIP, rendezvousIPError = image.RetrieveRendezvousIP(&agent.Config{}, agentManifests.(*manifests.AgentManifests).NMStateConfigs)
|
||||
} else if agentConfig != nil && agentManifests == nil {
|
||||
RendezvousIP, rendezvousIPError = image.RetrieveRendezvousIP(agentConfig.(*agentconfig.AgentConfig).Config, emptyNMStateConfigs)
|
||||
} else {
|
||||
return nil, errors.New("both AgentConfig and NMStateConfig are empty")
|
||||
}
|
||||
if rendezvousIPError != nil {
|
||||
return nil, rendezvousIPError
|
||||
}
|
||||
|
||||
config := client.Config{}
|
||||
config.URL = &url.URL{
|
||||
Scheme: "http",
|
||||
Host: net.JoinHostPort(RendezvousIP, "8090"),
|
||||
Path: client.DefaultBasePath,
|
||||
}
|
||||
client := client.New(config)
|
||||
|
||||
restClient.Client = client
|
||||
restClient.ctx = ctx
|
||||
restClient.config = config
|
||||
restClient.NodeZeroIP = RendezvousIP
|
||||
|
||||
return restClient, nil
|
||||
}
|
||||
|
||||
// IsRestAPILive Determine if the Agent Rest API on node zero has initialized
|
||||
func (rest *NodeZeroRestClient) IsRestAPILive() (bool, error) {
|
||||
// GET /v2/infraenvs
|
||||
listInfraEnvsParams := installer.NewListInfraEnvsParams()
|
||||
_, err := rest.Client.Installer.ListInfraEnvs(rest.ctx, listInfraEnvsParams)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// GetRestAPIServiceBaseURL Return the url of the Agent Rest API on node zero
|
||||
func (rest *NodeZeroRestClient) GetRestAPIServiceBaseURL() *url.URL {
|
||||
return rest.config.URL
|
||||
}
|
||||
|
||||
// GetInfraEnvEvents Return the event list for the provided infraEnvID from the Agent Rest API
|
||||
func (rest *NodeZeroRestClient) GetInfraEnvEvents(infraEnvID *strfmt.UUID) (models.EventList, error) {
|
||||
listEventsParams := &events.V2ListEventsParams{InfraEnvID: infraEnvID}
|
||||
clusterEventsResult, err := rest.Client.Events.V2ListEvents(rest.ctx, listEventsParams)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return clusterEventsResult.Payload, nil
|
||||
}
|
||||
|
||||
// getClusterID Return the cluster ID assigned by the Agent Rest API
|
||||
func (rest *NodeZeroRestClient) getClusterID() (*strfmt.UUID, error) {
|
||||
// GET /v2/clusters and return first result
|
||||
listClusterParams := installer.NewV2ListClustersParams()
|
||||
clusterResult, err := rest.Client.Installer.V2ListClusters(rest.ctx, listClusterParams)
|
||||
clusterList := clusterResult.Payload
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(clusterList) == 1 {
|
||||
clusterID := clusterList[0].ID
|
||||
return clusterID, nil
|
||||
} else if len(clusterList) == 0 {
|
||||
logrus.Debug("cluster is not registered in rest API")
|
||||
return nil, nil
|
||||
} else {
|
||||
logrus.Infof("found too many clusters. number of clusters found: %d", len(clusterList))
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
// getClusterInfraEnvID Return the infraEnv ID associated with the cluster in the Agent Rest API
|
||||
func (rest *NodeZeroRestClient) getClusterInfraEnvID() (*strfmt.UUID, error) {
|
||||
// GET /v2/infraenvs and return first result
|
||||
listInfraEnvParams := installer.NewListInfraEnvsParams()
|
||||
infraEnvResult, err := rest.Client.Installer.ListInfraEnvs(rest.ctx, listInfraEnvParams)
|
||||
infraEnvList := infraEnvResult.Payload
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(infraEnvList) == 1 {
|
||||
clusterInfraEnvID := infraEnvList[0].ID
|
||||
return clusterInfraEnvID, nil
|
||||
} else if len(infraEnvList) == 0 {
|
||||
logrus.Debug("infraenv is not registered in rest API")
|
||||
return nil, nil
|
||||
} else {
|
||||
logrus.Infof("found too many infraenvs. number of infraenvs found: %d", len(infraEnvList))
|
||||
return nil, nil
|
||||
}
|
||||
}
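
// dumpInfraEnvEvents is an illustrative sketch (not part of the original change)
// showing how the rest client above might be exercised: confirm the Agent Rest
// API is live, look up the infraenv registered on node zero, and log its events.
// The function name is hypothetical.
func dumpInfraEnvEvents(ctx context.Context, assetDir string) error {
	restClient, err := NewNodeZeroRestClient(ctx, assetDir)
	if err != nil {
		return err
	}
	live, err := restClient.IsRestAPILive()
	if err != nil {
		return errors.Wrap(err, "Agent Rest API is not reachable yet")
	}
	if !live {
		return errors.New("Agent Rest API is not live yet")
	}
	infraEnvID, err := restClient.getClusterInfraEnvID()
	if err != nil {
		return err
	}
	if infraEnvID == nil {
		return errors.New("infraenv is not registered in the Agent Rest API yet")
	}
	eventList, err := restClient.GetInfraEnvEvents(infraEnvID)
	if err != nil {
		return err
	}
	for _, event := range eventList {
		logrus.Debug(*event.Message)
	}
	return nil
}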
|
||||
139
pkg/agent/validations.go
Normal file
@@ -0,0 +1,139 @@
|
||||
package agent
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
|
||||
"github.com/openshift/assisted-service/api/common"
|
||||
"github.com/openshift/assisted-service/models"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const (
|
||||
validationFailure string = "failure"
|
||||
validationError string = "error"
|
||||
)
|
||||
|
||||
// Re-using Assisted UI host validation labels (see https://github.com/openshift-assisted/assisted-ui-lib)
|
||||
// for logging human-friendly messages in case of validation failures
|
||||
var hostValidationLabels = map[string]string{
|
||||
"odf-requirements-satisfied": "ODF requirements",
|
||||
"disk-encryption-requirements-satisfied": "Disk encryption requirements",
|
||||
"compatible-with-cluster-platform": "",
|
||||
"has-default-route": "Default route to host",
|
||||
"sufficient-network-latency-requirement-for-role": "Network latency",
|
||||
"sufficient-packet-loss-requirement-for-role": "Packet loss",
|
||||
"has-inventory": "Hardware information",
|
||||
"has-min-cpu-cores": "Minimum CPU cores",
|
||||
"has-min-memory": "Minimum Memory",
|
||||
"has-min-valid-disks": "Minimum disks of required size",
|
||||
"has-cpu-cores-for-role": "Minimum CPU cores for selected role",
|
||||
"has-memory-for-role": "Minimum memory for selected role",
|
||||
"hostname-unique": "Unique hostname",
|
||||
"hostname-valid": "Valid hostname",
|
||||
"connected": "Connected",
|
||||
"media-connected": "Media Connected",
|
||||
"machine-cidr-defined": "Machine CIDR",
|
||||
"belongs-to-machine-cidr": "Belongs to machine CIDR",
|
||||
"ignition-downloadable": "Ignition file downloadable",
|
||||
"belongs-to-majority-group": "Belongs to majority connected group",
|
||||
"valid-platform-network-settings": "Platform network settings",
|
||||
"ntp-synced": "NTP synchronization",
|
||||
"container-images-available": "Container images availability",
|
||||
"lso-requirements-satisfied": "LSO requirements",
|
||||
"ocs-requirements-satisfied": "OCS requirements",
|
||||
"sufficient-installation-disk-speed": "Installation disk speed",
|
||||
"cnv-requirements-satisfied": "CNV requirements",
|
||||
"api-domain-name-resolved-correctly": "API domain name resolution",
|
||||
"api-int-domain-name-resolved-correctly": "API internal domain name resolution",
|
||||
"apps-domain-name-resolved-correctly": "Application ingress domain name resolution",
|
||||
"dns-wildcard-not-configured": "DNS wildcard not configured",
|
||||
"non-overlapping-subnets": "Non overlapping subnets",
|
||||
"vsphere-disk-uuid-enabled": "Vsphere disk uuidenabled",
|
||||
}
|
||||
|
||||
type validationTrace struct {
|
||||
header string
|
||||
category string
|
||||
label string
|
||||
message string
|
||||
}
|
||||
|
||||
var previousValidations []validationTrace
|
||||
|
||||
func logValidationsStatus(errorMsg string, validations string, log *logrus.Logger) []validationTrace {
|
||||
|
||||
traces := []validationTrace{}
|
||||
if validations == "" {
|
||||
return traces
|
||||
}
|
||||
|
||||
validationsInfo := common.ValidationsStatus{}
|
||||
err := json.Unmarshal([]byte(validations), &validationsInfo)
|
||||
if err != nil {
|
||||
return []validationTrace{{header: errorMsg, message: "unable to verify validations"}}
|
||||
}
|
||||
|
||||
for category, validationResults := range validationsInfo {
|
||||
for _, r := range validationResults {
|
||||
switch r.Status {
|
||||
case validationFailure, validationError:
|
||||
label := r.ID
|
||||
if v, ok := hostValidationLabels[r.ID]; ok {
|
||||
label = v
|
||||
}
|
||||
|
||||
traces = append(traces, validationTrace{
|
||||
header: errorMsg,
|
||||
category: category,
|
||||
label: label,
|
||||
message: r.Message,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return traces
|
||||
}
|
||||
|
||||
func checkHostsValidations(cluster *models.Cluster, log *logrus.Logger) bool {
|
||||
|
||||
var currentValidations []validationTrace
|
||||
|
||||
currentValidations = append(currentValidations, logValidationsStatus("Validation failure found for cluster", cluster.ValidationsInfo, log)...)
|
||||
for _, h := range cluster.Hosts {
|
||||
currentValidations = append(currentValidations, logValidationsStatus(fmt.Sprintf("Validation failure found for %s", h.RequestedHostname), h.ValidationsInfo, log)...)
|
||||
}
|
||||
|
||||
sort.Slice(currentValidations, func(i, j int) bool {
|
||||
if currentValidations[i].header != currentValidations[j].header {
|
||||
return currentValidations[i].header < currentValidations[j].header
|
||||
}
|
||||
if currentValidations[i].category != currentValidations[j].category {
|
||||
return currentValidations[i].category < currentValidations[j].category
|
||||
}
|
||||
return currentValidations[i].label < currentValidations[j].label
|
||||
})
|
||||
|
||||
if !reflect.DeepEqual(currentValidations, previousValidations) {
|
||||
previousValidations = currentValidations
|
||||
|
||||
if len(previousValidations) == 0 {
|
||||
log.Info("Pre-installation validations are OK")
|
||||
return true
|
||||
}
|
||||
|
||||
log.Info("Checking for validation failures ----------------------------------------------")
|
||||
for _, v := range previousValidations {
|
||||
log.WithFields(logrus.Fields{
|
||||
"category": v.category,
|
||||
"label": v.label,
|
||||
"message": v.message,
|
||||
}).Error(v.header)
|
||||
}
|
||||
}
|
||||
|
||||
return len(previousValidations) == 0
|
||||
}
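
// reportValidations is an illustrative sketch (not part of the original change)
// showing how checkHostsValidations might be invoked by a caller in this package
// that has already fetched the cluster resource from the Agent Rest API. The
// function name is hypothetical; the standard logger is used purely for
// illustration and any configured *logrus.Logger could be passed instead.
func reportValidations(cluster *models.Cluster) bool {
	return checkHostsValidations(cluster, logrus.StandardLogger())
}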
|
||||
106
pkg/agent/validations_test.go
Normal file
@@ -0,0 +1,106 @@
|
||||
package agent
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
"testing"
|
||||
|
||||
"github.com/openshift/assisted-service/models"
|
||||
"github.com/sirupsen/logrus/hooks/test"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestCheckHostsValidation(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
hosts []*models.Host
|
||||
expectedResult bool
|
||||
expectedLogs []string
|
||||
}{
|
||||
{
|
||||
name: "no-validations",
|
||||
expectedResult: true,
|
||||
},
|
||||
{
|
||||
name: "no-failures",
|
||||
hosts: []*models.Host{
|
||||
{
|
||||
RequestedHostname: "master-0.ostest.test.metalkube.org",
|
||||
ValidationsInfo: "{\"hardware\":[{\"id\":\"has-inventory\",\"status\":\"success\",\"message\":\"Valid inventory exists for the host\"}]}",
|
||||
},
|
||||
},
|
||||
expectedResult: true,
|
||||
},
|
||||
{
|
||||
name: "single-host-failure",
|
||||
hosts: []*models.Host{
|
||||
{
|
||||
RequestedHostname: "master-0.ostest.test.metalkube.org",
|
||||
ValidationsInfo: `{"hardware":[{"id":"has-min-valid-disks","status":"failure","message":"No eligible disks were found, please check specific disks to see why they are not eligible"},{"id":"has-cpu-cores-for-role","status":"success","message":"Sufficient CPU cores for role master"},{"id":"has-memory-for-role","status":"success","message":"Sufficient RAM for role master"}]}`,
|
||||
},
|
||||
},
|
||||
expectedResult: false,
|
||||
expectedLogs: []string{
|
||||
`Checking for validation failures ----------------------------------------------`,
|
||||
`level=error msg="Validation failure found for master\-0.ostest.test.metalkube.org" category=hardware label="Minimum disks of required size" message="No eligible disks were found, please check specific disks to see why they are not eligible"`,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "multiple-hosts-failure",
|
||||
hosts: []*models.Host{
|
||||
{
|
||||
RequestedHostname: "master-0.ostest.test.metalkube.org",
|
||||
ValidationsInfo: `{"hardware":[{"id":"has-min-valid-disks","status":"failure","message":"No eligible disks were found, please check specific disks to see why they are not eligible"},{"id":"has-cpu-cores-for-role","status":"success","message":"Sufficient CPU cores for role master"},{"id":"has-memory-for-role","status":"success","message":"Sufficient RAM for role master"}]}`,
|
||||
},
|
||||
{
|
||||
RequestedHostname: "master-1.ostest.test.metalkube.org",
|
||||
ValidationsInfo: `{"hardware":[{"id":"has-min-valid-disks","status":"failure","message":"No eligible disks were found, please check specific disks to see why they are not eligible"},{"id":"has-cpu-cores-for-role","status":"success","message":"Sufficient CPU cores for role master"},{"id":"has-memory-for-role","status":"success","message":"Sufficient RAM for role master"}]}`,
|
||||
},
|
||||
},
|
||||
expectedResult: false,
|
||||
expectedLogs: []string{
|
||||
`Checking for validation failures ----------------------------------------------`,
|
||||
`level=error msg="Validation failure found for master\-0.ostest.test.metalkube.org" category=hardware label="Minimum disks of required size" message="No eligible disks were found, please check specific disks to see why they are not eligible"`,
|
||||
`level=error msg="Validation failure found for master\-1.ostest.test.metalkube.org" category=hardware label="Minimum disks of required size" message="No eligible disks were found, please check specific disks to see why they are not eligible"`,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "malformed-json",
|
||||
hosts: []*models.Host{
|
||||
{
|
||||
RequestedHostname: "master-0.ostest.test.metalkube.org",
|
||||
ValidationsInfo: `not a valid info`,
|
||||
},
|
||||
},
|
||||
expectedResult: false,
|
||||
expectedLogs: []string{
|
||||
`Checking for validation failures ----------------------------------------------"`,
|
||||
`Validation failure found for master-0.ostest.test.metalkube.org`,
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
|
||||
cluster := &models.Cluster{
|
||||
Hosts: tt.hosts,
|
||||
}
|
||||
|
||||
logger, hook := test.NewNullLogger()
|
||||
assert.Equal(t, tt.expectedResult, checkHostsValidations(cluster, logger))
|
||||
|
||||
assert.Equal(t, len(tt.expectedLogs), len(hook.Entries))
|
||||
for _, expectedMsg := range tt.expectedLogs {
|
||||
|
||||
matchFound := false
|
||||
for _, s := range hook.AllEntries() {
|
||||
logLine, err := s.String()
|
||||
assert.NoError(t, err)
|
||||
if regexp.MustCompile(expectedMsg).Match([]byte(logLine)) {
|
||||
matchFound = true
|
||||
}
|
||||
}
|
||||
assert.True(t, matchFound, "Unable to find log trace for `%s`", expectedMsg)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
88
pkg/agent/waitfor.go
Normal file
@@ -0,0 +1,88 @@
|
||||
package agent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
)
|
||||
|
||||
// WaitForBootstrapComplete Wait for the bootstrap process to complete on
|
||||
// cluster installations triggered by the agent installer.
|
||||
func WaitForBootstrapComplete(assetDir string) (*Cluster, error) {
|
||||
|
||||
ctx := context.Background()
|
||||
cluster, err := NewCluster(ctx, assetDir)
|
||||
if err != nil {
|
||||
logrus.Warn("unable to make cluster object to track installation")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
start := time.Now()
|
||||
previous := time.Now()
|
||||
timeout := 30 * time.Minute
|
||||
waitContext, cancel := context.WithTimeout(cluster.Ctx, timeout)
|
||||
defer cancel()
|
||||
|
||||
wait.Until(func() {
|
||||
bootstrap, err := cluster.IsBootstrapComplete()
|
||||
if bootstrap && err == nil {
|
||||
logrus.Info("cluster bootstrap is complete")
|
||||
cancel()
|
||||
}
|
||||
|
||||
current := time.Now()
|
||||
elapsed := current.Sub(previous)
|
||||
elapsedTotal := current.Sub(start)
|
||||
if elapsed >= 1*time.Minute {
|
||||
logrus.Tracef("elapsed: %s, elapsedTotal: %s", elapsed.String(), elapsedTotal.String())
|
||||
previous = current
|
||||
}
|
||||
|
||||
}, 2*time.Second, waitContext.Done())
|
||||
|
||||
waitErr := waitContext.Err()
|
||||
if waitErr != nil && waitErr != context.Canceled {
|
||||
if err != nil {
|
||||
return cluster, errors.Wrap(err, "bootstrap process returned error")
|
||||
}
|
||||
return cluster, errors.Wrap(waitErr, "bootstrap process timed out")
|
||||
}
|
||||
|
||||
return cluster, nil
|
||||
}
|
||||
|
||||
// WaitForInstallComplete Waits for the cluster installation triggered by the
|
||||
// agent installer to be complete.
|
||||
func WaitForInstallComplete(assetDir string) (*Cluster, error) {
|
||||
|
||||
cluster, err := WaitForBootstrapComplete(assetDir)
|
||||
|
||||
if err != nil {
|
||||
return cluster, errors.Wrap(err, "error occured during bootstrap process")
|
||||
}
|
||||
|
||||
timeout := 90 * time.Minute
|
||||
waitContext, cancel := context.WithTimeout(cluster.Ctx, timeout)
|
||||
defer cancel()
|
||||
|
||||
wait.Until(func() {
|
||||
installed, err := cluster.IsInstallComplete()
|
||||
if installed && err == nil {
|
||||
logrus.Info("Cluster is installed")
|
||||
cancel()
|
||||
}
|
||||
|
||||
}, 2*time.Second, waitContext.Done())
|
||||
|
||||
waitErr := waitContext.Err()
|
||||
if waitErr != nil && waitErr != context.Canceled {
|
||||
if err != nil {
|
||||
return cluster, errors.Wrap(err, "Error occurred during installation")
|
||||
}
|
||||
return cluster, errors.Wrap(waitErr, "Cluster installation timed out")
|
||||
}
|
||||
return cluster, nil
|
||||
}
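
// waitAndReport is an illustrative sketch (not part of the original change)
// showing how a wait-for style command might drive the two helpers above and
// surface the final console information, falling back to the Agent Rest API
// event list for debugging on failure. The function name is hypothetical.
func waitAndReport(assetDir string) error {
	cluster, err := WaitForInstallComplete(assetDir)
	if err != nil {
		if cluster != nil {
			cluster.PrintInfraEnvRestAPIEventList()
		}
		return err
	}
	return cluster.PrintInstallationComplete()
}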
|
||||
7
pkg/asset/agent/OWNERS
Normal file
@@ -0,0 +1,7 @@
# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md
# This file just uses aliases defined in OWNERS_ALIASES.

approvers:
- agent-approvers
reviewers:
- agent-reviewers
278
pkg/asset/agent/agentconfig/agent_config.go
Normal file
@@ -0,0 +1,278 @@
|
||||
package agentconfig
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"k8s.io/apimachinery/pkg/util/validation/field"
|
||||
"sigs.k8s.io/yaml"
|
||||
|
||||
"github.com/openshift/installer/pkg/asset"
|
||||
"github.com/openshift/installer/pkg/types/agent"
|
||||
"github.com/openshift/installer/pkg/types/agent/conversion"
|
||||
)
|
||||
|
||||
var (
|
||||
agentConfigFilename = "agent-config.yaml"
|
||||
)
|
||||
|
||||
// AgentConfig reads the agent-config.yaml file.
|
||||
type AgentConfig struct {
|
||||
File *asset.File
|
||||
Config *agent.Config
|
||||
Template string
|
||||
}
|
||||
|
||||
var _ asset.WritableAsset = (*AgentConfig)(nil)
|
||||
|
||||
// Name returns a human friendly name for the asset.
|
||||
func (*AgentConfig) Name() string {
|
||||
return "Agent Config"
|
||||
}
|
||||
|
||||
// Dependencies returns all of the dependencies directly needed to generate
|
||||
// the asset.
|
||||
func (*AgentConfig) Dependencies() []asset.Asset {
|
||||
return []asset.Asset{}
|
||||
}
|
||||
|
||||
// Generate generates the Agent Config manifest.
|
||||
func (a *AgentConfig) Generate(dependencies asset.Parents) error {
|
||||
|
||||
// TODO: We are temporarily generating a template of the agent-config.yaml
|
||||
// Change this when the interactive survey is implemented.
|
||||
agentConfigTemplate := `#
|
||||
# Note: This is a sample AgentConfig file showing
|
||||
# which fields are available to aid you in creating your
|
||||
# own agent-config.yaml file.
|
||||
#
|
||||
apiVersion: v1alpha1
|
||||
kind: AgentConfig
|
||||
metadata:
|
||||
name: example-agent-config
|
||||
namespace: cluster0
|
||||
# All fields are optional
|
||||
rendezvousIP: your-node0-ip
|
||||
hosts:
|
||||
# If a host is listed, then at least one interface
|
||||
# needs to be specified.
|
||||
- hostname: change-to-hostname
|
||||
role: master
|
||||
# For more information about rootDeviceHints:
|
||||
# https://docs.openshift.com/container-platform/4.10/installing/installing_bare_metal_ipi/ipi-install-installation-workflow.html#root-device-hints_ipi-install-installation-workflow
|
||||
rootDeviceHints:
|
||||
deviceName: /dev/sda
|
||||
# interfaces are used to identify the host to apply this configuration to
|
||||
interfaces:
|
||||
- macAddress: 00:00:00:00:00:00
|
||||
name: host-network-interface-name
|
||||
# networkConfig contains the network configuration for the host in NMState format.
|
||||
# See https://nmstate.io/examples.html for examples.
|
||||
networkConfig:
|
||||
interfaces:
|
||||
- name: eth0
|
||||
type: ethernet
|
||||
state: up
|
||||
mac-address: 00:00:00:00:00:00
|
||||
ipv4:
|
||||
enabled: true
|
||||
address:
|
||||
- ip: 192.168.122.2
|
||||
prefix-length: 23
|
||||
dhcp: false
|
||||
`
|
||||
|
||||
a.Template = agentConfigTemplate
|
||||
|
||||
// TODO: template is not validated
|
||||
return nil
|
||||
}
|
||||
|
||||
// PersistToFile writes the agent-config.yaml file to the assets folder
|
||||
func (a *AgentConfig) PersistToFile(directory string) error {
|
||||
templatePath := filepath.Join(directory, agentConfigFilename)
|
||||
templateByte := []byte(a.Template)
|
||||
|
||||
err := os.WriteFile(templatePath, templateByte, 0644)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Files returns the files generated by the asset.
|
||||
func (a *AgentConfig) Files() []*asset.File {
|
||||
if a.File != nil {
|
||||
return []*asset.File{a.File}
|
||||
}
|
||||
return []*asset.File{}
|
||||
}
|
||||
|
||||
// Load returns agent config asset from the disk.
|
||||
func (a *AgentConfig) Load(f asset.FileFetcher) (bool, error) {
|
||||
|
||||
file, err := f.FetchByName(agentConfigFilename)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return false, nil
|
||||
}
|
||||
return false, errors.Wrap(err, fmt.Sprintf("failed to load %s file", agentConfigFilename))
|
||||
}
|
||||
|
||||
config := &agent.Config{}
|
||||
if err := yaml.UnmarshalStrict(file.Data, config); err != nil {
|
||||
return false, errors.Wrapf(err, "failed to unmarshal %s", agentConfigFilename)
|
||||
}
|
||||
|
||||
// Upconvert any deprecated fields
|
||||
if err := conversion.ConvertAgentConfig(config); err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
a.File, a.Config = file, config
|
||||
if err = a.finish(); err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (a *AgentConfig) finish() error {
|
||||
if err := a.validateAgent().ToAggregate(); err != nil {
|
||||
return errors.Wrapf(err, "invalid Agent Config configuration")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *AgentConfig) validateAgent() field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
|
||||
if err := a.validateNodesHaveAtLeastOneMacAddressDefined(); err != nil {
|
||||
allErrs = append(allErrs, err...)
|
||||
}
|
||||
|
||||
if err := a.validateRootDeviceHints(); err != nil {
|
||||
allErrs = append(allErrs, err...)
|
||||
}
|
||||
|
||||
if err := a.validateRoles(); err != nil {
|
||||
allErrs = append(allErrs, err...)
|
||||
}
|
||||
|
||||
return allErrs
|
||||
}
|
||||
|
||||
func (a *AgentConfig) validateNodesHaveAtLeastOneMacAddressDefined() field.ErrorList {
|
||||
var allErrs field.ErrorList
|
||||
|
||||
if len(a.Config.Hosts) == 0 {
|
||||
return allErrs
|
||||
}
|
||||
|
||||
rootPath := field.NewPath("Hosts")
|
||||
|
||||
for i := range a.Config.Hosts {
|
||||
node := a.Config.Hosts[i]
|
||||
interfacePath := rootPath.Index(i).Child("Interfaces")
|
||||
if len(node.Interfaces) == 0 {
|
||||
allErrs = append(allErrs, field.Required(interfacePath, "at least one interface must be defined for each node"))
|
||||
}
|
||||
|
||||
for j := range node.Interfaces {
|
||||
if node.Interfaces[j].MacAddress == "" {
|
||||
macAddressPath := interfacePath.Index(j).Child("macAddress")
|
||||
allErrs = append(allErrs, field.Required(macAddressPath, "each interface must have a MAC address defined"))
|
||||
}
|
||||
}
|
||||
}
|
||||
return allErrs
|
||||
}
|
||||
|
||||
func (a *AgentConfig) validateRootDeviceHints() field.ErrorList {
|
||||
var allErrs field.ErrorList
|
||||
rootPath := field.NewPath("Hosts")
|
||||
|
||||
for i, host := range a.Config.Hosts {
|
||||
hostPath := rootPath.Index(i)
|
||||
if host.RootDeviceHints.WWNWithExtension != "" {
|
||||
allErrs = append(allErrs, field.Forbidden(
|
||||
hostPath.Child("RootDeviceHints", "WWNWithExtension"),
|
||||
"WWN extensions are not supported in root device hints"))
|
||||
}
|
||||
if host.RootDeviceHints.WWNVendorExtension != "" {
|
||||
allErrs = append(allErrs, field.Forbidden(
|
||||
hostPath.Child("RootDeviceHints", "WWNVendorExtension"),
|
||||
"WWN vendor extensions are not supported in root device hints"))
|
||||
}
|
||||
}
|
||||
|
||||
return allErrs
|
||||
}
|
||||
|
||||
func (a *AgentConfig) validateRoles() field.ErrorList {
|
||||
var allErrs field.ErrorList
|
||||
rootPath := field.NewPath("Hosts")
|
||||
|
||||
for i, host := range a.Config.Hosts {
|
||||
hostPath := rootPath.Index(i)
|
||||
if len(host.Role) > 0 && host.Role != "master" && host.Role != "worker" {
|
||||
allErrs = append(allErrs, field.Forbidden(
|
||||
hostPath.Child("Host"),
|
||||
"host role has incorrect value. Role must either be 'master' or 'worker'"))
|
||||
}
|
||||
}
|
||||
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// HostConfigFileMap is a map from a filepath ("<host>/<file>") to file content
|
||||
// for hostconfig files.
|
||||
type HostConfigFileMap map[string][]byte
|
||||
|
||||
// HostConfigFiles returns a map from filename to contents of the files used for
|
||||
// host-specific configuration by the agent installer client
|
||||
func (a *AgentConfig) HostConfigFiles() (HostConfigFileMap, error) {
|
||||
if a == nil || a.Config == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
files := HostConfigFileMap{}
|
||||
for i, host := range a.Config.Hosts {
|
||||
name := fmt.Sprintf("host-%d", i)
|
||||
if host.Hostname != "" {
|
||||
name = host.Hostname
|
||||
}
|
||||
|
||||
macs := []string{}
|
||||
for _, iface := range host.Interfaces {
|
||||
macs = append(macs, strings.ToLower(iface.MacAddress)+"\n")
|
||||
}
|
||||
|
||||
if len(macs) > 0 {
|
||||
files[filepath.Join(name, "mac_addresses")] = []byte(strings.Join(macs, ""))
|
||||
}
|
||||
|
||||
rdh, err := yaml.Marshal(host.RootDeviceHints)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(rdh) > 0 && string(rdh) != "{}\n" {
|
||||
files[filepath.Join(name, "root-device-hints.yaml")] = rdh
|
||||
}
|
||||
|
||||
if len(host.Role) > 0 {
|
||||
files[filepath.Join(name, "role")] = []byte(host.Role)
|
||||
}
|
||||
}
|
||||
return files, nil
|
||||
}
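
// writeHostConfigFiles is an illustrative sketch (not part of the original
// change) showing how the HostConfigFileMap returned by HostConfigFiles could
// be written out under a target directory, one file per "<host>/<file>" key.
// The function name and targetDir parameter are hypothetical.
func writeHostConfigFiles(files HostConfigFileMap, targetDir string) error {
	for path, content := range files {
		fullPath := filepath.Join(targetDir, path)
		if err := os.MkdirAll(filepath.Dir(fullPath), 0755); err != nil {
			return err
		}
		if err := os.WriteFile(fullPath, content, 0644); err != nil {
			return err
		}
	}
	return nil
}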
|
||||
|
||||
func unmarshalJSON(b []byte) []byte {
|
||||
output, _ := yaml.JSONToYAML(b)
|
||||
return output
|
||||
}
|
||||
442
pkg/asset/agent/agentconfig/agent_config_test.go
Normal file
@@ -0,0 +1,442 @@
|
||||
package agentconfig
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/golang/mock/gomock"
|
||||
aiv1beta1 "github.com/openshift/assisted-service/api/v1beta1"
|
||||
"github.com/openshift/installer/pkg/asset"
|
||||
"github.com/openshift/installer/pkg/asset/mock"
|
||||
"github.com/openshift/installer/pkg/types/agent"
|
||||
"github.com/openshift/installer/pkg/types/baremetal"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/stretchr/testify/assert"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
// func TestAgentConfig_Generate(t *testing.T) {
|
||||
|
||||
// cases := []struct {
|
||||
// name string
|
||||
// expectedError string
|
||||
// expectedConfig *agent.Config
|
||||
// }{
|
||||
// {
|
||||
// name: "generate-basic-template",
|
||||
// expectedConfig: &agent.Config{
|
||||
// TypeMeta: metav1.TypeMeta{
|
||||
// Kind: "AgentConfig",
|
||||
// APIVersion: agent.AgentConfigVersion,
|
||||
// },
|
||||
// ObjectMeta: metav1.ObjectMeta{
|
||||
// Name: "example-agent-config",
|
||||
// Namespace: "cluster0",
|
||||
// },
|
||||
// Spec: agent.Spec{
|
||||
// RendezvousIP: "your-node0-ip",
|
||||
// Hosts: []agent.Host{
|
||||
// {
|
||||
// Hostname: "change-to-hostname",
|
||||
// Role: "master",
|
||||
// RootDeviceHints: baremetal.RootDeviceHints{
|
||||
// DeviceName: "/dev/sda",
|
||||
// },
|
||||
// Interfaces: []*aiv1beta1.Interface{
|
||||
// {
|
||||
// Name: "your-network-interface-name",
|
||||
// MacAddress: "00:00:00:00:00",
|
||||
// },
|
||||
// },
|
||||
// },
|
||||
// },
|
||||
// },
|
||||
// },
|
||||
// },
|
||||
// }
|
||||
// for _, tc := range cases {
|
||||
// t.Run(tc.name, func(t *testing.T) {
|
||||
|
||||
// parents := asset.Parents{}
|
||||
// asset := &AgentConfig{}
|
||||
// err := asset.Generate(parents)
|
||||
|
||||
// if tc.expectedError != "" {
|
||||
// assert.Equal(t, tc.expectedError, err.Error())
|
||||
// } else {
|
||||
// assert.NoError(t, err)
|
||||
// assert.Equal(t, tc.expectedConfig, asset.Config)
|
||||
// assert.NotEmpty(t, asset.Files())
|
||||
|
||||
// configFile := asset.Files()[0]
|
||||
// assert.Equal(t, "agent-config.yaml", configFile.Filename)
|
||||
|
||||
// var actualConfig agent.Config
|
||||
// err = yaml.Unmarshal(configFile.Data, &actualConfig)
|
||||
// assert.NoError(t, err)
|
||||
// assert.Equal(t, *tc.expectedConfig, actualConfig)
|
||||
// }
|
||||
// })
|
||||
// }
|
||||
|
||||
// }
|
||||
|
||||
func TestAgentConfig_LoadedFromDisk(t *testing.T) {
|
||||
falseBool := false
|
||||
falsePtr := &falseBool
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
data string
|
||||
fetchError error
|
||||
expectedFound bool
|
||||
expectedError string
|
||||
expectedConfig *agent.Config
|
||||
}{
|
||||
{
|
||||
name: "valid-config-single-node",
|
||||
data: `
|
||||
apiVersion: v1alpha1
|
||||
metadata:
|
||||
name: agent-config-cluster0
|
||||
rendezvousIP: 192.168.111.80
|
||||
hosts:
|
||||
- hostname: control-0.example.org
|
||||
role: master
|
||||
rootDeviceHints:
|
||||
deviceName: "/dev/sda"
|
||||
hctl: "hctl-value"
|
||||
model: "model-value"
|
||||
vendor: "vendor-value"
|
||||
serialNumber: "serial-number-value"
|
||||
minSizeGigabytes: 20
|
||||
wwn: "wwn-value"
|
||||
rotational: false
|
||||
interfaces:
|
||||
- name: enp2s0
|
||||
macAddress: 98:af:65:a5:8d:01
|
||||
- name: enp3s1
|
||||
macAddress: 28:d2:44:d2:b2:1a
|
||||
networkConfig:
|
||||
interfaces:`,
|
||||
expectedFound: true,
|
||||
expectedConfig: &agent.Config{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: agent.AgentConfigVersion,
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "agent-config-cluster0",
|
||||
},
|
||||
RendezvousIP: "192.168.111.80",
|
||||
Hosts: []agent.Host{
|
||||
{
|
||||
Hostname: "control-0.example.org",
|
||||
Role: "master",
|
||||
RootDeviceHints: baremetal.RootDeviceHints{
|
||||
DeviceName: "/dev/sda",
|
||||
HCTL: "hctl-value",
|
||||
Model: "model-value",
|
||||
Vendor: "vendor-value",
|
||||
SerialNumber: "serial-number-value",
|
||||
MinSizeGigabytes: 20,
|
||||
WWN: "wwn-value",
|
||||
Rotational: falsePtr,
|
||||
},
|
||||
Interfaces: []*aiv1beta1.Interface{
|
||||
{
|
||||
Name: "enp2s0",
|
||||
MacAddress: "98:af:65:a5:8d:01",
|
||||
},
|
||||
{
|
||||
Name: "enp3s1",
|
||||
MacAddress: "28:d2:44:d2:b2:1a",
|
||||
},
|
||||
},
|
||||
NetworkConfig: aiv1beta1.NetConfig{
|
||||
Raw: unmarshalJSON([]byte("interfaces:")),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "valid-config-multiple-nodes",
|
||||
data: `
|
||||
apiVersion: v1alpha1
|
||||
metadata:
|
||||
name: agent-config-cluster0
|
||||
rendezvousIP: 192.168.111.80
|
||||
hosts:
|
||||
- hostname: control-0.example.org
|
||||
role: master
|
||||
rootDeviceHints:
|
||||
deviceName: "/dev/sda"
|
||||
hctl: "hctl-value"
|
||||
model: "model-value"
|
||||
vendor: "vendor-value"
|
||||
serialNumber: "serial-number-value"
|
||||
minSizeGigabytes: 20
|
||||
wwn: "wwn-value"
|
||||
rotational: false
|
||||
interfaces:
|
||||
- name: enp2s0
|
||||
macAddress: 98:af:65:a5:8d:01
|
||||
- name: enp3s1
|
||||
macAddress: 28:d2:44:d2:b2:1a
|
||||
networkConfig:
|
||||
interfaces:
|
||||
- hostname: control-1.example.org
|
||||
role: master
|
||||
interfaces:
|
||||
- name: enp2s0
|
||||
macAddress: 98:af:65:a5:8d:02
|
||||
- name: enp3s1
|
||||
macAddress: 28:d2:44:d2:b2:1b`,
|
||||
expectedFound: true,
|
||||
expectedConfig: &agent.Config{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: agent.AgentConfigVersion,
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "agent-config-cluster0",
|
||||
},
|
||||
RendezvousIP: "192.168.111.80",
|
||||
Hosts: []agent.Host{
|
||||
{
|
||||
Hostname: "control-0.example.org",
|
||||
Role: "master",
|
||||
RootDeviceHints: baremetal.RootDeviceHints{
|
||||
DeviceName: "/dev/sda",
|
||||
HCTL: "hctl-value",
|
||||
Model: "model-value",
|
||||
Vendor: "vendor-value",
|
||||
SerialNumber: "serial-number-value",
|
||||
MinSizeGigabytes: 20,
|
||||
WWN: "wwn-value",
|
||||
Rotational: falsePtr,
|
||||
},
|
||||
Interfaces: []*aiv1beta1.Interface{
|
||||
{
|
||||
Name: "enp2s0",
|
||||
MacAddress: "98:af:65:a5:8d:01",
|
||||
},
|
||||
{
|
||||
Name: "enp3s1",
|
||||
MacAddress: "28:d2:44:d2:b2:1a",
|
||||
},
|
||||
},
|
||||
NetworkConfig: aiv1beta1.NetConfig{
|
||||
Raw: unmarshalJSON([]byte("interfaces:")),
|
||||
},
|
||||
},
|
||||
{
|
||||
Hostname: "control-1.example.org",
|
||||
Role: "master",
|
||||
Interfaces: []*aiv1beta1.Interface{
|
||||
{
|
||||
Name: "enp2s0",
|
||||
MacAddress: "98:af:65:a5:8d:02",
|
||||
},
|
||||
{
|
||||
Name: "enp3s1",
|
||||
MacAddress: "28:d2:44:d2:b2:1b",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "not-yaml",
|
||||
data: `This is not a yaml file`,
|
||||
expectedError: "failed to unmarshal agent-config.yaml: error unmarshaling JSON: while decoding JSON: json: cannot unmarshal string into Go value of type agent.Config",
|
||||
},
|
||||
{
|
||||
name: "file-not-found",
|
||||
fetchError: &os.PathError{Err: os.ErrNotExist},
|
||||
},
|
||||
{
|
||||
name: "error-fetching-file",
|
||||
fetchError: errors.New("fetch failed"),
|
||||
expectedError: "failed to load agent-config.yaml file: fetch failed",
|
||||
},
|
||||
{
|
||||
name: "unknown-field",
|
||||
data: `
|
||||
apiVersion: v1alpha1
|
||||
metadata:
|
||||
name: agent-config-wrong
|
||||
wrongField: wrongValue`,
|
||||
expectedError: "failed to unmarshal agent-config.yaml: error unmarshaling JSON: while decoding JSON: json: unknown field \"wrongField\"",
|
||||
},
|
||||
{
|
||||
name: "interface-missing-mac-address-error",
|
||||
data: `
|
||||
apiVersion: v1alpha1
|
||||
metadata:
|
||||
name: agent-config-cluster0
|
||||
rendezvousIP: 192.168.111.80
|
||||
hosts:
|
||||
- hostname: control-0.example.org
|
||||
interfaces:
|
||||
- name: enp2s0
|
||||
- name: enp3s1
|
||||
macAddress: 28:d2:44:d2:b2:1a`,
|
||||
expectedError: "invalid Agent Config configuration: Hosts[0].Interfaces[0].macAddress: Required value: each interface must have a MAC address defined",
|
||||
},
|
||||
{
|
||||
name: "unsupported wwn extension root device hint",
|
||||
data: `
|
||||
apiVersion: v1alpha1
|
||||
metadata:
|
||||
name: agent-config-cluster0
|
||||
rendezvousIP: 192.168.111.80
|
||||
hosts:
|
||||
- hostname: control-0.example.org
|
||||
interfaces:
|
||||
- name: enp2s0
|
||||
macAddress: 98:af:65:a5:8d:01
|
||||
rootDeviceHints:
|
||||
wwnWithExtension: "wwn-with-extension-value"`,
|
||||
expectedError: "invalid Agent Config configuration: Hosts[0].RootDeviceHints.WWNWithExtension: Forbidden: WWN extensions are not supported in root device hints",
|
||||
},
|
||||
{
|
||||
name: "unsupported wwn vendor extension root device hint",
|
||||
data: `
|
||||
apiVersion: v1alpha1
|
||||
metadata:
|
||||
name: agent-config-cluster0
|
||||
rendezvousIP: 192.168.111.80
|
||||
hosts:
|
||||
- hostname: control-0.example.org
|
||||
interfaces:
|
||||
- name: enp2s0
|
||||
macAddress: 98:af:65:a5:8d:01
|
||||
rootDeviceHints:
|
||||
wwnVendorExtension: "wwn-with-vendor-extension-value"`,
|
||||
expectedError: "invalid Agent Config configuration: Hosts[0].RootDeviceHints.WWNVendorExtension: Forbidden: WWN vendor extensions are not supported in root device hints",
|
||||
},
|
||||
{
|
||||
name: "node-hostname-and-role-are-not-required",
|
||||
data: `
|
||||
apiVersion: v1alpha1
|
||||
metadata:
|
||||
name: agent-config-cluster0
|
||||
rendezvousIP: 192.168.111.80
|
||||
hosts:
|
||||
- interfaces:
|
||||
- name: enp3s1
|
||||
macAddress: 28:d2:44:d2:b2:1a`,
|
||||
expectedFound: true,
|
||||
expectedConfig: &agent.Config{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: agent.AgentConfigVersion,
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "agent-config-cluster0",
|
||||
},
|
||||
RendezvousIP: "192.168.111.80",
|
||||
Hosts: []agent.Host{
|
||||
{
|
||||
Interfaces: []*aiv1beta1.Interface{
|
||||
{
|
||||
Name: "enp3s1",
|
||||
MacAddress: "28:d2:44:d2:b2:1a",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "host-roles-have-correct-values",
|
||||
data: `
|
||||
apiVersion: v1alpha1
|
||||
metadata:
|
||||
name: agent-config-cluster0
|
||||
rendezvousIP: 192.168.111.80
|
||||
hosts:
|
||||
- role: master
|
||||
interfaces:
|
||||
- name: enp3s1
|
||||
macAddress: 28:d2:44:d2:b2:1a
|
||||
- role: worker
|
||||
interfaces:
|
||||
- name: enp3s1
|
||||
macAddress: 28:d2:44:d2:b2:1b`,
|
||||
expectedFound: true,
|
||||
expectedConfig: &agent.Config{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: agent.AgentConfigVersion,
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "agent-config-cluster0",
|
||||
},
|
||||
RendezvousIP: "192.168.111.80",
|
||||
Hosts: []agent.Host{
|
||||
{
|
||||
Role: "master",
|
||||
Interfaces: []*aiv1beta1.Interface{
|
||||
{
|
||||
Name: "enp3s1",
|
||||
MacAddress: "28:d2:44:d2:b2:1a",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Role: "worker",
|
||||
Interfaces: []*aiv1beta1.Interface{
|
||||
{
|
||||
Name: "enp3s1",
|
||||
MacAddress: "28:d2:44:d2:b2:1b",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "host-roles-have-incorrect-values",
|
||||
data: `
|
||||
apiVersion: v1alpha1
|
||||
metadata:
|
||||
name: agent-config-cluster0
|
||||
rendezvousIP: 192.168.111.80
|
||||
hosts:
|
||||
- role: invalid-role
|
||||
interfaces:
|
||||
- name: enp3s1
|
||||
macAddress: 28:d2:44:d2:b2:1a`,
|
||||
expectedError: "invalid Agent Config configuration: Hosts[0].Host: Forbidden: host role has incorrect value. Role must either be 'master' or 'worker'",
|
||||
},
|
||||
}
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
|
||||
mockCtrl := gomock.NewController(t)
|
||||
defer mockCtrl.Finish()
|
||||
|
||||
fileFetcher := mock.NewMockFileFetcher(mockCtrl)
|
||||
fileFetcher.EXPECT().FetchByName(agentConfigFilename).
|
||||
Return(
|
||||
&asset.File{
|
||||
Filename: agentConfigFilename,
|
||||
Data: []byte(tc.data)},
|
||||
tc.fetchError,
|
||||
)
|
||||
|
||||
asset := &AgentConfig{}
|
||||
found, err := asset.Load(fileFetcher)
|
||||
if tc.expectedError != "" {
|
||||
assert.Equal(t, tc.expectedError, err.Error())
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
assert.Equal(t, tc.expectedFound, found, "unexpected found value returned from Load")
|
||||
if tc.expectedFound {
|
||||
assert.Equal(t, tc.expectedConfig, asset.Config, "unexpected Config in AgentConfig")
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
}
|
||||
96
pkg/asset/agent/image/agentimage.go
Normal file
@@ -0,0 +1,96 @@
|
||||
package image
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/openshift/assisted-image-service/pkg/isoeditor"
|
||||
"github.com/openshift/installer/pkg/asset"
|
||||
)
|
||||
|
||||
const (
|
||||
// TODO: Make this relative to the directory passed as --dir rather than
|
||||
// the current working directory
|
||||
agentISOFilename = "agent.iso"
|
||||
)
|
||||
|
||||
// AgentImage is an asset that generates the bootable image used to install clusters.
|
||||
type AgentImage struct {
|
||||
imageReader isoeditor.ImageReader
|
||||
}
|
||||
|
||||
var _ asset.WritableAsset = (*AgentImage)(nil)
|
||||
|
||||
// Dependencies returns the assets on which the Bootstrap asset depends.
|
||||
func (a *AgentImage) Dependencies() []asset.Asset {
|
||||
return []asset.Asset{
|
||||
&Ignition{},
|
||||
&BaseIso{},
|
||||
}
|
||||
}
|
||||
|
||||
// Generate generates the image file for the ISO asset.
|
||||
func (a *AgentImage) Generate(dependencies asset.Parents) error {
|
||||
ignition := &Ignition{}
|
||||
dependencies.Get(ignition)
|
||||
|
||||
baseImage := &BaseIso{}
|
||||
dependencies.Get(baseImage)
|
||||
|
||||
ignitionByte, err := json.Marshal(ignition.Config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ignitionContent := &isoeditor.IgnitionContent{Config: ignitionByte}
|
||||
custom, err := isoeditor.NewRHCOSStreamReader(baseImage.File.Filename, ignitionContent, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
a.imageReader = custom
|
||||
return nil
|
||||
}
|
||||
|
||||
// PersistToFile writes the iso image in the assets folder
|
||||
func (a *AgentImage) PersistToFile(directory string) error {
|
||||
if a.imageReader == nil {
|
||||
return errors.New("image reader not available")
|
||||
}
|
||||
|
||||
defer a.imageReader.Close()
|
||||
agentIsoFile := filepath.Join(directory, agentISOFilename)
|
||||
|
||||
// Remove symlink if it exists
|
||||
os.Remove(agentIsoFile)
|
||||
|
||||
output, err := os.Create(agentIsoFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer output.Close()
|
||||
|
||||
_, err = io.Copy(output, a.imageReader)
|
||||
return err
|
||||
}
|
||||
|
||||
// Name returns the human-friendly name of the asset.
|
||||
func (a *AgentImage) Name() string {
|
||||
return "Agent Installer ISO"
|
||||
}
|
||||
|
||||
// Load returns the ISO from disk.
|
||||
func (a *AgentImage) Load(f asset.FileFetcher) (bool, error) {
|
||||
// The ISO will not be needed by another asset so load is noop.
|
||||
// This is implemented because it is required by WritableAsset
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Files returns the files generated by the asset.
|
||||
func (a *AgentImage) Files() []*asset.File {
|
||||
// Return empty array because File will never be loaded.
|
||||
return []*asset.File{}
|
||||
}
|
||||
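Note: the following is a minimal usage sketch, not part of the change, showing how the AgentImage asset above is driven. It assumes the Ignition and BaseIso parents have already been fully generated (in the real installer the asset store does this); the target directory is illustrative.

package main

import (
	"log"

	"github.com/openshift/installer/pkg/asset"
	"github.com/openshift/installer/pkg/asset/agent/image"
)

func main() {
	// In the real installer these parents are produced by the asset store;
	// here they are assumed to be already generated.
	ignition := &image.Ignition{}
	baseIso := &image.BaseIso{}

	parents := asset.Parents{}
	parents.Add(ignition, baseIso)

	agentImage := &image.AgentImage{}
	if err := agentImage.Generate(parents); err != nil {
		log.Fatal(err)
	}
	// Writes agent.iso into the chosen assets directory.
	if err := agentImage.PersistToFile("."); err != nil {
		log.Fatal(err)
	}
}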
181
pkg/asset/agent/image/baseiso.go
Normal file
@@ -0,0 +1,181 @@
package image

import (
	"context"
	"fmt"
	"os"
	"os/exec"
	"time"

	"github.com/pkg/errors"

	"github.com/openshift/assisted-service/pkg/executer"
	"github.com/openshift/installer/pkg/asset"
	"github.com/openshift/installer/pkg/asset/agent"
	"github.com/openshift/installer/pkg/asset/agent/manifests"
	"github.com/openshift/installer/pkg/asset/agent/mirror"
	"github.com/openshift/installer/pkg/rhcos"
	"github.com/sirupsen/logrus"
)

// BaseIso generates the base ISO file for the image
type BaseIso struct {
	File *asset.File
}

const (
	// TODO - add support for other architectures
	archName = "x86_64"
)

var (
	baseIsoFilename = ""
)

var _ asset.WritableAsset = (*BaseIso)(nil)

// Name returns the human-friendly name of the asset.
func (i *BaseIso) Name() string {
	return "BaseIso Image"
}

// getIsoFile is a pluggable function that gets the base ISO file
type getIsoFile func() (string, error)

type getIso struct {
	getter getIsoFile
}

func newGetIso(getter getIsoFile) *getIso {
	return &getIso{getter: getter}
}

// GetIsoPluggable defines the method used to get the baseIso file
var GetIsoPluggable = downloadIso

// Download the ISO using the URL in rhcos.json
func downloadIso() (string, error) {

	ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second)
	defer cancel()

	// Get the ISO to use from rhcos.json
	st, err := rhcos.FetchCoreOSBuild(ctx)
	if err != nil {
		return "", err
	}

	// Defaults to using the x86_64 baremetal ISO for all platforms
	// archName := arch.RpmArch(string(config.ControlPlane.Architecture))
	streamArch, err := st.GetArchitecture(archName)
	if err != nil {
		return "", err
	}
	if artifacts, ok := streamArch.Artifacts["metal"]; ok {
		if format, ok := artifacts.Formats["iso"]; ok {
			url := format.Disk.Location

			cachedImage, err := DownloadImageFile(url)
			if err != nil {
				return "", errors.Wrapf(err, "failed to download base ISO image %s", url)
			}
			return cachedImage, nil
		}
	} else {
		return "", errors.Wrap(err, "invalid artifact")
	}

	return "", fmt.Errorf("no ISO found to download for %s", archName)
}

func getIsoFromReleasePayload() (string, error) {

	// TODO
	return "", nil
}

// Dependencies returns dependencies used by the asset.
func (i *BaseIso) Dependencies() []asset.Asset {
	return []asset.Asset{
		&manifests.AgentManifests{},
		&agent.OptionalInstallConfig{},
		&mirror.RegistriesConf{},
	}
}

// Generate the baseIso
func (i *BaseIso) Generate(dependencies asset.Parents) error {

	log := logrus.New()
	// TODO - if image registry location is defined in InstallConfig,
	// ic := &agent.OptionalInstallConfig{}
	// p.Get(ic)

	// use the GetIso function to get the BaseIso from the release payload
	agentManifests := &manifests.AgentManifests{}
	dependencies.Get(agentManifests)

	var baseIsoFileName string
	var err error
	if agentManifests.ClusterImageSet != nil {
		releaseImage := agentManifests.ClusterImageSet.Spec.ReleaseImage
		pullSecret := agentManifests.GetPullSecretData()
		registriesConf := &mirror.RegistriesConf{}
		dependencies.Get(agentManifests, registriesConf)

		// If we have the image registry location and 'oc' command is available then get from release payload
		ocRelease := NewRelease(&executer.CommonExecuter{},
			Config{MaxTries: OcDefaultTries, RetryDelay: OcDefaultRetryDelay})

		log.Info("Extracting base ISO from release payload")
		baseIsoFileName, err = ocRelease.GetBaseIso(log, releaseImage, pullSecret, registriesConf.MirrorConfig, archName)
		if err == nil {
			log.Debugf("Extracted base ISO image %s from release payload", baseIsoFileName)
			i.File = &asset.File{Filename: baseIsoFileName}
			return nil
		}
		if !errors.Is(err, &exec.Error{}) { // Already warned about missing oc binary
			log.Warning("Failed to extract base ISO from release payload - check registry configuration")
		}
	}

	log.Info("Downloading base ISO")
	isoGetter := newGetIso(GetIsoPluggable)
	baseIsoFileName, err2 := isoGetter.getter()
	if err2 == nil {
		log.Debugf("Using base ISO image %s", baseIsoFileName)
		i.File = &asset.File{Filename: baseIsoFileName}
		return nil
	}
	log.Debugf("Failed to download base ISO: %s", err2)

	return errors.Wrap(err, "failed to get base ISO image")
}

// Files returns the files generated by the asset.
func (i *BaseIso) Files() []*asset.File {

	if i.File != nil {
		return []*asset.File{i.File}
	}
	return []*asset.File{}
}

// Load returns the cached baseIso
func (i *BaseIso) Load(f asset.FileFetcher) (bool, error) {

	if baseIsoFilename == "" {
		return false, nil
	}

	baseIso, err := f.FetchByName(baseIsoFilename)
	if err != nil {
		if os.IsNotExist(err) {
			return false, nil
		}
		return false, errors.Wrap(err, fmt.Sprintf("failed to load %s file", baseIsoFilename))
	}

	i.File = baseIso
	return true, nil
}
30
pkg/asset/agent/image/baseiso_test.go
Normal file
@@ -0,0 +1,30 @@
package image

import (
	"testing"

	"github.com/openshift/installer/pkg/asset"
	"github.com/openshift/installer/pkg/asset/agent"
	"github.com/openshift/installer/pkg/asset/agent/manifests"
	"github.com/stretchr/testify/assert"
)

func TestInfraBaseIso_Generate(t *testing.T) {

	GetIsoPluggable = func() (string, error) {
		return "some-openshift-release.iso", nil
	}

	parents := asset.Parents{}
	manifests := &manifests.AgentManifests{}
	installConfig := &agent.OptionalInstallConfig{}
	parents.Add(manifests, installConfig)

	asset := &BaseIso{}
	err := asset.Generate(parents)
	assert.NoError(t, err)

	assert.NotEmpty(t, asset.Files())
	baseIso := asset.Files()[0]
	assert.Equal(t, baseIso.Filename, "some-openshift-release.iso")
}
270
pkg/asset/agent/image/cache.go
Normal file
@@ -0,0 +1,270 @@
package image

import (
	"bytes"
	"compress/gzip"
	"crypto/sha256"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"os"
	"path/filepath"
	"strings"

	"github.com/h2non/filetype/matchers"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"github.com/ulikunitz/xz"

	"golang.org/x/sys/unix"
)

// Note this code resides in tfvars/internal so it can't be imported and was therefore
// copied here. Will move this to a common location where it can be used by all pkgs.

const (
	applicationName = "agent"
	imageDataType   = "image"
)

// GetFileFromCache returns path of the cached file if found, otherwise returns an empty string
// or error
func GetFileFromCache(fileName string, cacheDir string) (string, error) {

	filePath := filepath.Join(cacheDir, fileName)

	// If the file has already been cached, return its path
	_, err := os.Stat(filePath)
	if err == nil {
		logrus.Debugf("The file was found in cache: %v. Reusing...", filePath)
		return filePath, nil
	}
	if !os.IsNotExist(err) {
		return "", err
	}

	return "", nil
}

// GetCacheDir returns a local path of the cache, where the installer should put the data:
// <user_cache_dir>/agent/<dataType>_cache
// If the directory doesn't exist, it will be automatically created.
func GetCacheDir(dataType string) (string, error) {
	if dataType == "" {
		return "", errors.Errorf("data type can't be an empty string")
	}

	userCacheDir, err := os.UserCacheDir()
	if err != nil {
		return "", err
	}

	cacheDir := filepath.Join(userCacheDir, applicationName, dataType+"_cache")

	_, err = os.Stat(cacheDir)
	if err != nil {
		if os.IsNotExist(err) {
			err = os.MkdirAll(cacheDir, 0755)
			if err != nil {
				return "", err
			}
		} else {
			return "", err
		}
	}

	return cacheDir, nil
}

// cacheFile puts data in the cache
func cacheFile(reader io.Reader, filePath string, sha256Checksum string) (err error) {
	logrus.Debugf("Unpacking file into %q...", filePath)

	flockPath := fmt.Sprintf("%s.lock", filePath)
	flock, err := os.Create(flockPath)
	if err != nil {
		return err
	}
	defer flock.Close()
	defer func() {
		err2 := os.Remove(flockPath)
		if err == nil {
			err = err2
		}
	}()

	err = unix.Flock(int(flock.Fd()), unix.LOCK_EX)
	if err != nil {
		return err
	}
	defer func() {
		err2 := unix.Flock(int(flock.Fd()), unix.LOCK_UN)
		if err == nil {
			err = err2
		}
	}()

	_, err = os.Stat(filePath)
	if err != nil && !os.IsNotExist(err) {
		return nil // another cacheFile beat us to it
	}

	tempPath := fmt.Sprintf("%s.tmp", filePath)

	// Delete the temporary file that may have been left over from previous launches.
	err = os.Remove(tempPath)
	if err != nil {
		if !os.IsNotExist(err) {
			return errors.Errorf("failed to clean up %s: %v", tempPath, err)
		}
	} else {
		logrus.Debugf("Temporary file %v that remained after the previous launches was deleted", tempPath)
	}

	file, err := os.OpenFile(tempPath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0444)
	if err != nil {
		return err
	}
	closed := false
	defer func() {
		if !closed {
			file.Close()
		}
	}()

	// Detect whether we know how to decompress the file
	// See http://golang.org/pkg/net/http/#DetectContentType for why we use 512
	buf := make([]byte, 512)
	_, err = reader.Read(buf)
	if err != nil {
		return err
	}

	reader = io.MultiReader(bytes.NewReader(buf), reader)
	switch {
	case matchers.Gz(buf):
		logrus.Debug("decompressing the image archive as gz")
		uncompressor, err := gzip.NewReader(reader)
		if err != nil {
			return err
		}
		defer uncompressor.Close()
		reader = uncompressor
	case matchers.Xz(buf):
		logrus.Debug("decompressing the image archive as xz")
		uncompressor, err := xz.NewReader(reader)
		if err != nil {
			return err
		}
		reader = uncompressor
	default:
		// No need for an interposer otherwise
		logrus.Debug("no known archive format detected for image, assuming no decompression necessary")
	}

	// Wrap the reader in TeeReader to calculate sha256 checksum on the fly
	hasher := sha256.New()
	if sha256Checksum != "" {
		reader = io.TeeReader(reader, hasher)
	}

	_, err = io.Copy(file, reader)
	if err != nil {
		return err
	}

	err = file.Close()
	if err != nil {
		return err
	}
	closed = true

	// Validate sha256 checksum
	if sha256Checksum != "" {
		foundChecksum := fmt.Sprintf("%x", hasher.Sum(nil))
		if sha256Checksum != foundChecksum {
			logrus.Error("File sha256 checksum is invalid.")
			return errors.Errorf("Checksum mismatch for %s; expected=%s found=%s", filePath, sha256Checksum, foundChecksum)
		}

		logrus.Debug("Checksum validation is complete...")
	}

	return os.Rename(tempPath, filePath)
}

// urlWithIntegrity pairs a URL with an optional expected sha256 checksum (after decompression, if any)
// If the query string contains sha256 parameter (i.e. https://example.com/data.bin?sha256=098a5a...),
// then the downloaded data checksum will be compared with the provided value.
type urlWithIntegrity struct {
	location           url.URL
	uncompressedSHA256 string
}

func (u *urlWithIntegrity) uncompressedName() string {
	n := filepath.Base(u.location.Path)
	return strings.TrimSuffix(strings.TrimSuffix(n, ".gz"), ".xz")
}

// download obtains a file from a given URL, puts it in the cache folder, defined by dataType parameter,
// and returns the local file path.
func (u *urlWithIntegrity) download(dataType string) (string, error) {
	fileName := u.uncompressedName()

	cacheDir, err := GetCacheDir(dataType)
	if err != nil {
		return "", err
	}

	filePath, err := GetFileFromCache(fileName, cacheDir)
	if err != nil {
		return "", err
	}
	if filePath != "" {
		// Found cached file
		return filePath, nil
	}

	// Send a request to get the file
	resp, err := http.Get(u.location.String())
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()

	// Check server response
	if resp.StatusCode != http.StatusOK {
		return "", errors.Errorf("bad status: %s", resp.Status)
	}

	filePath = filepath.Join(cacheDir, fileName)
	err = cacheFile(resp.Body, filePath, u.uncompressedSHA256)
	if err != nil {
		return "", err
	}

	return filePath, nil
}

// DownloadImageFile is a helper function that obtains an image file from a given URL,
// puts it in the cache and returns the local file path. If the file is compressed
// by a known compressor, the file is uncompressed prior to being returned.
func DownloadImageFile(baseURL string) (string, error) {
	logrus.Debugf("Obtaining RHCOS image file from '%v'", baseURL)

	var u urlWithIntegrity
	parsedURL, err := url.ParseRequestURI(baseURL)
	if err != nil {
		return "", err
	}
	q := parsedURL.Query()
	if uncompressedSHA256, ok := q["sha256"]; ok {
		u.uncompressedSHA256 = uncompressedSHA256[0]
		q.Del("sha256")
		parsedURL.RawQuery = q.Encode()
	}
	u.location = *parsedURL

	return u.download(imageDataType)
}
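A short usage sketch of the cache helper above: DownloadImageFile accepts an optional sha256 query parameter, strips it from the request, and verifies the decompressed payload against it before the file is renamed into <user_cache_dir>/agent/image_cache. The URL and digest below are placeholders, not values from the change.

package main

import (
	"fmt"
	"log"

	"github.com/openshift/installer/pkg/asset/agent/image"
)

func main() {
	// Hypothetical URL; the sha256 value is validated after decompression.
	url := "https://example.com/rhcos-live.x86_64.iso.gz?sha256=0123abcd"

	path, err := image.DownloadImageFile(url)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("ISO cached at", path)
}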
401
pkg/asset/agent/image/ignition.go
Normal file
@@ -0,0 +1,401 @@
package image

import (
	"fmt"
	"net"
	"net/url"
	"path"
	"path/filepath"
	"regexp"
	"strings"

	"github.com/coreos/ignition/v2/config/util"
	igntypes "github.com/coreos/ignition/v2/config/v3_2/types"
	"github.com/google/uuid"
	"github.com/sirupsen/logrus"

	hiveext "github.com/openshift/assisted-service/api/hiveextension/v1beta1"
	"github.com/openshift/assisted-service/api/v1beta1"
	"github.com/openshift/assisted-service/models"
	"github.com/openshift/installer/pkg/asset"
	"github.com/openshift/installer/pkg/asset/agent/agentconfig"
	"github.com/openshift/installer/pkg/asset/agent/manifests"
	"github.com/openshift/installer/pkg/asset/agent/mirror"
	"github.com/openshift/installer/pkg/asset/ignition"
	"github.com/openshift/installer/pkg/asset/ignition/bootstrap"
	"github.com/openshift/installer/pkg/asset/tls"
	"github.com/openshift/installer/pkg/types/agent"
	"github.com/pkg/errors"
)

const manifestPath = "/etc/assisted/manifests"
const hostnamesPath = "/etc/assisted/hostnames"
const nmConnectionsPath = "/etc/assisted/network"
const extraManifestPath = "/etc/assisted/extra-manifests"

// Ignition is an asset that generates the agent installer ignition file.
type Ignition struct {
	Config *igntypes.Config
}

// agentTemplateData is the data used to replace values in agent template
// files.
type agentTemplateData struct {
	ServiceProtocol string
	ServiceBaseURL string
	PullSecret string
	// PullSecretToken is the token to use for authentication when AUTH_TYPE=rhsso
	// in assisted-service
	PullSecretToken string
	NodeZeroIP string
	AssistedServiceHost string
	APIVIP string
	ControlPlaneAgents int
	WorkerAgents int
	ReleaseImages string
	ReleaseImage string
	ReleaseImageMirror string
	HaveMirrorConfig bool
	InfraEnvID string
}

var (
	agentEnabledServices = []string{
		"agent.service",
		"assisted-service-db.service",
		"assisted-service-pod.service",
		"assisted-service.service",
		"create-cluster-and-infraenv.service",
		"node-zero.service",
		"multipathd.service",
		"pre-network-manager-config.service",
		"selinux.service",
		"set-hostname.service",
		"start-cluster-installation.service",
	}
)

// Name returns the human-friendly name of the asset.
func (a *Ignition) Name() string {
	return "Agent Installer Ignition"
}

// Dependencies returns the assets on which the Ignition asset depends.
func (a *Ignition) Dependencies() []asset.Asset {
	return []asset.Asset{
		&manifests.AgentManifests{},
		&manifests.ExtraManifests{},
		&tls.KubeAPIServerLBSignerCertKey{},
		&tls.KubeAPIServerLocalhostSignerCertKey{},
		&tls.KubeAPIServerServiceNetworkSignerCertKey{},
		&tls.AdminKubeConfigSignerCertKey{},
		&tls.AdminKubeConfigClientCertKey{},
		&agentconfig.AgentConfig{},
		&mirror.RegistriesConf{},
		&mirror.CaBundle{},
	}
}

// Generate generates the agent installer ignition.
func (a *Ignition) Generate(dependencies asset.Parents) error {
	agentManifests := &manifests.AgentManifests{}
	agentConfigAsset := &agentconfig.AgentConfig{}
	extraManifests := &manifests.ExtraManifests{}
	dependencies.Get(agentManifests, agentConfigAsset, extraManifests)

	infraEnv := agentManifests.InfraEnv

	config := igntypes.Config{
		Ignition: igntypes.Ignition{
			Version: igntypes.MaxVersion.String(),
		},
		Passwd: igntypes.Passwd{
			Users: []igntypes.PasswdUser{
				{
					Name: "core",
					SSHAuthorizedKeys: []igntypes.SSHAuthorizedKey{
						igntypes.SSHAuthorizedKey(infraEnv.Spec.SSHAuthorizedKey),
					},
				},
			},
		},
	}

	if len(agentManifests.NMStateConfigs) == 0 {
		return errors.New("at least one NMState configuration must be provided")
	}

	nodeZeroIP, err := RetrieveRendezvousIP(agentConfigAsset.Config, agentManifests.NMStateConfigs)
	if err != nil {
		return err
	}

	// TODO: don't hard-code target arch
	releaseImageList, err := releaseImageList(agentManifests.ClusterImageSet.Spec.ReleaseImage, "x86_64")
	if err != nil {
		return err
	}

	registriesConfig := &mirror.RegistriesConf{}
	registryCABundle := &mirror.CaBundle{}
	dependencies.Get(registriesConfig, registryCABundle)

	releaseImageMirror := getMirrorFromRelease(agentManifests.ClusterImageSet.Spec.ReleaseImage, registriesConfig)

	infraEnvID := uuid.New().String()
	logrus.Debug("Generated random infra-env id ", infraEnvID)

	agentTemplateData := getTemplateData(
		agentManifests.GetPullSecretData(),
		nodeZeroIP,
		releaseImageList,
		agentManifests.ClusterImageSet.Spec.ReleaseImage,
		releaseImageMirror,
		len(registriesConfig.MirrorConfig) > 0,
		agentManifests.AgentClusterInstall,
		infraEnvID)

	err = bootstrap.AddStorageFiles(&config, "/", "agent/files", agentTemplateData)
	if err != nil {
		return err
	}

	// Set up bootstrap service recording
	if err := bootstrap.AddStorageFiles(&config,
		"/usr/local/bin/bootstrap-service-record.sh",
		"bootstrap/files/usr/local/bin/bootstrap-service-record.sh",
		nil); err != nil {
		return err
	}

	// Use bootstrap script to get container images
	relImgData := struct{ ReleaseImage string }{
		ReleaseImage: agentManifests.ClusterImageSet.Spec.ReleaseImage,
	}
	for _, script := range []string{"release-image.sh", "release-image-download.sh"} {
		if err := bootstrap.AddStorageFiles(&config,
			"/usr/local/bin/"+script,
			"bootstrap/files/usr/local/bin/"+script+".template",
			relImgData); err != nil {
			return err
		}
	}

	// add ZTP manifests to manifestPath
	for _, file := range agentManifests.FileList {
		manifestFile := ignition.FileFromBytes(filepath.Join(manifestPath, filepath.Base(file.Filename)),
			"root", 0600, file.Data)
		config.Storage.Files = append(config.Storage.Files, manifestFile)
	}

	// add AgentConfig if provided
	if agentConfigAsset.Config != nil {
		agentConfigFile := ignition.FileFromBytes(filepath.Join(manifestPath, filepath.Base(agentConfigAsset.File.Filename)),
			"root", 0600, agentConfigAsset.File.Data)
		config.Storage.Files = append(config.Storage.Files, agentConfigFile)
	}

	addMacAddressToHostnameMappings(&config, agentConfigAsset)

	err = addStaticNetworkConfig(&config, agentManifests.StaticNetworkConfigs)
	if err != nil {
		return err
	}

	err = bootstrap.AddSystemdUnits(&config, "agent/systemd/units", agentTemplateData, agentEnabledServices)
	if err != nil {
		return err
	}

	addTLSData(&config, dependencies)

	addMirrorData(&config, registriesConfig, registryCABundle)

	addHostConfig(&config, agentConfigAsset)

	addExtraManifests(&config, extraManifests)

	a.Config = &config
	return nil
}

func getTemplateData(pullSecret, nodeZeroIP, releaseImageList, releaseImage,
	releaseImageMirror string, haveMirrorConfig bool,
	agentClusterInstall *hiveext.AgentClusterInstall,
	infraEnvID string) *agentTemplateData {
	serviceBaseURL := url.URL{
		Scheme: "http",
		Host: net.JoinHostPort(nodeZeroIP, "8090"),
		Path: "/",
	}

	return &agentTemplateData{
		ServiceProtocol: serviceBaseURL.Scheme,
		ServiceBaseURL: serviceBaseURL.String(),
		PullSecret: pullSecret,
		PullSecretToken: "",
		NodeZeroIP: serviceBaseURL.Hostname(),
		AssistedServiceHost: serviceBaseURL.Host,
		APIVIP: agentClusterInstall.Spec.APIVIP,
		ControlPlaneAgents: agentClusterInstall.Spec.ProvisionRequirements.ControlPlaneAgents,
		WorkerAgents: agentClusterInstall.Spec.ProvisionRequirements.WorkerAgents,
		ReleaseImages: releaseImageList,
		ReleaseImage: releaseImage,
		ReleaseImageMirror: releaseImageMirror,
		HaveMirrorConfig: haveMirrorConfig,
		InfraEnvID: infraEnvID,
	}
}

func addStaticNetworkConfig(config *igntypes.Config, staticNetworkConfig []*models.HostStaticNetworkConfig) (err error) {
	if len(staticNetworkConfig) == 0 {
		return nil
	}

	// Get the static network configuration from nmstate and generate NetworkManager ignition files
	filesList, err := manifests.GetNMIgnitionFiles(staticNetworkConfig)
	if err != nil {
		return err
	}

	for i := range filesList {
		nmFilePath := path.Join(nmConnectionsPath, filesList[i].FilePath)
		nmStateIgnFile := ignition.FileFromBytes(nmFilePath, "root", 0600, []byte(filesList[i].FileContents))
		config.Storage.Files = append(config.Storage.Files, nmStateIgnFile)
	}

	nmStateScriptFilePath := "/usr/local/bin/pre-network-manager-config.sh"
	// A local version of the assisted-service internal script is currently used
	nmStateScript := ignition.FileFromBytes(nmStateScriptFilePath, "root", 0755, []byte(manifests.PreNetworkConfigScript))
	config.Storage.Files = append(config.Storage.Files, nmStateScript)

	return nil
}

func addTLSData(config *igntypes.Config, dependencies asset.Parents) {
	certKeys := []asset.Asset{
		&tls.KubeAPIServerLBSignerCertKey{},
		&tls.KubeAPIServerLocalhostSignerCertKey{},
		&tls.KubeAPIServerServiceNetworkSignerCertKey{},
		&tls.AdminKubeConfigSignerCertKey{},
		&tls.AdminKubeConfigClientCertKey{},
	}
	dependencies.Get(certKeys...)

	for _, ck := range certKeys {
		for _, d := range ck.(asset.WritableAsset).Files() {
			f := ignition.FileFromBytes(path.Join("/opt/agent", d.Filename), "root", 0600, d.Data)
			config.Storage.Files = append(config.Storage.Files, f)
		}
	}
}

func addMirrorData(config *igntypes.Config, registriesConfig *mirror.RegistriesConf, registryCABundle *mirror.CaBundle) {

	// This is required for assisted-service to build the ICSP for openshift-install
	if registriesConfig.File != nil {
		registriesFile := ignition.FileFromBytes("/etc/containers/registries.conf",
			"root", 0600, registriesConfig.File.Data)
		config.Storage.Files = append(config.Storage.Files, registriesFile)
	}

	// This is required for the agent to run the podman commands to the mirror
	if registryCABundle.File != nil && len(registryCABundle.File.Data) > 0 {
		caFile := ignition.FileFromBytes("/etc/pki/ca-trust/source/anchors/domain.crt",
			"root", 0600, registryCABundle.File.Data)
		config.Storage.Files = append(config.Storage.Files, caFile)
	}
}

// Creates a file named with a host's MAC address. The desired hostname
// is the file's content. The files are read by a systemd service that
// sets the hostname using "hostnamectl set-hostname" when the ISO boots.
func addMacAddressToHostnameMappings(
	config *igntypes.Config,
	agentConfigAsset *agentconfig.AgentConfig) {
	if agentConfigAsset.Config == nil || len(agentConfigAsset.Config.Hosts) == 0 {
		return
	}
	for _, host := range agentConfigAsset.Config.Hosts {
		if host.Hostname != "" {
			file := ignition.FileFromBytes(filepath.Join(hostnamesPath,
				strings.ToLower(filepath.Base(host.Interfaces[0].MacAddress))),
				"root", 0600, []byte(host.Hostname))
			config.Storage.Files = append(config.Storage.Files, file)
		}
	}
}

func addHostConfig(config *igntypes.Config, agentConfig *agentconfig.AgentConfig) error {
	confs, err := agentConfig.HostConfigFiles()
	if err != nil {
		return err
	}

	for path, content := range confs {
		hostConfigFile := ignition.FileFromBytes(filepath.Join("/etc/assisted/hostconfig", path), "root", 0644, content)
		config.Storage.Files = append(config.Storage.Files, hostConfigFile)
	}
	return nil
}

func addExtraManifests(config *igntypes.Config, extraManifests *manifests.ExtraManifests) {

	user := "root"
	mode := 0644

	config.Storage.Directories = append(config.Storage.Directories, igntypes.Directory{
		Node: igntypes.Node{
			Path: extraManifestPath,
			User: igntypes.NodeUser{
				Name: &user,
			},
			Overwrite: util.BoolToPtr(true),
		},
		DirectoryEmbedded1: igntypes.DirectoryEmbedded1{
			Mode: &mode,
		},
	})

	for _, file := range extraManifests.FileList {
		extraFile := ignition.FileFromBytes(filepath.Join(extraManifestPath, filepath.Base(file.Filename)), user, mode, file.Data)
		config.Storage.Files = append(config.Storage.Files, extraFile)
	}
}

// RetrieveRendezvousIP returns the Rendezvous IP from either AgentConfig or NMStateConfig
func RetrieveRendezvousIP(agentConfig *agent.Config, nmStateConfigs []*v1beta1.NMStateConfig) (string, error) {
	var err error
	var rendezvousIP string

	if agentConfig != nil && agentConfig.RendezvousIP != "" {
		rendezvousIP = agentConfig.RendezvousIP
		logrus.Debug("RendezvousIP from the AgentConfig ", rendezvousIP)

	} else if len(nmStateConfigs) > 0 {
		rendezvousIP, err = manifests.GetNodeZeroIP(nmStateConfigs)
		logrus.Debug("RendezvousIP from the NMStateConfig ", rendezvousIP)
	} else {
		err = errors.New("missing rendezvousIP in agent-config or at least one NMStateConfig manifest")
	}
	return rendezvousIP, err
}

func getMirrorFromRelease(releaseImage string, registriesConfig *mirror.RegistriesConf) string {

	releaseImageMirror := ""
	source := regexp.MustCompile(`^(.+?)(@sha256)?:(.+)`).FindStringSubmatch(releaseImage)
	for _, config := range registriesConfig.MirrorConfig {
		if config.Location == source[1] {
			// include the tag with the build release image
			if len(source) == 4 {
				// Has Sha256
				releaseImageMirror = fmt.Sprintf("%s%s:%s", config.Mirror, source[2], source[3])
			} else if len(source) == 3 {
				releaseImageMirror = fmt.Sprintf("%s:%s", config.Mirror, source[2])
			}
		}
	}

	return releaseImageMirror
}
497
pkg/asset/agent/image/ignition_test.go
Normal file
@@ -0,0 +1,497 @@
package image

import (
	"encoding/base64"
	"fmt"
	"os"
	"path"
	"strings"
	"testing"

	igntypes "github.com/coreos/ignition/v2/config/v3_2/types"

	hiveext "github.com/openshift/assisted-service/api/hiveextension/v1beta1"
	"github.com/openshift/assisted-service/api/v1beta1"
	aiv1beta1 "github.com/openshift/assisted-service/api/v1beta1"
	"github.com/openshift/assisted-service/models"
	hivev1 "github.com/openshift/hive/apis/hive/v1"
	"github.com/openshift/installer/pkg/asset"
	"github.com/openshift/installer/pkg/asset/agent/agentconfig"
	"github.com/openshift/installer/pkg/asset/agent/manifests"
	"github.com/openshift/installer/pkg/asset/agent/mirror"
	"github.com/openshift/installer/pkg/asset/tls"
	"github.com/openshift/installer/pkg/types/agent"
	"github.com/stretchr/testify/assert"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Unable to test Generate because bootstrap.AddStorageFiles
// returns error in unit test:
// open data/agent/files: no such file or directory
// Unit test working directory is ./pkg/asset/agent/image
// While normal execution working directory is ./data
// func TestIgnition_Generate(t *testing.T) {}

func TestIgnition_getTemplateData(t *testing.T) {
	clusterImageSet := &hivev1.ClusterImageSet{
		ObjectMeta: metav1.ObjectMeta{
			Name: "openshift-v4.10.0",
		},
		Spec: hivev1.ClusterImageSetSpec{
			ReleaseImage: "quay.io:443/openshift-release-dev/ocp-release:4.10.0-rc.1-x86_64",
		},
	}
	pullSecret := "pull-secret"
	nodeZeroIP := "192.168.111.80"
	agentClusterInstall := &hiveext.AgentClusterInstall{
		ObjectMeta: metav1.ObjectMeta{
			Name: "test-agent-cluster-install",
			Namespace: "cluster0",
		},
		Spec: hiveext.AgentClusterInstallSpec{
			APIVIP: "192.168.111.2",
			SSHPublicKey: "ssh-rsa AAAAmyKey",
			ProvisionRequirements: hiveext.ProvisionRequirements{
				ControlPlaneAgents: 3,
				WorkerAgents: 5,
			},
		},
	}
	releaseImage := "quay.io:443/openshift-release-dev/ocp-release:4.10.0-rc.1-x86_64"
	releaseImageMirror := "virthost.ostest.test.metalkube.org:5000/localimages/local-release-image"
	infraEnvID := "random-infra-env-id"
	haveMirrorConfig := true

	releaseImageList, err := releaseImageList(clusterImageSet.Spec.ReleaseImage, "x86_64")
	assert.NoError(t, err)
	templateData := getTemplateData(pullSecret, nodeZeroIP, releaseImageList, releaseImage, releaseImageMirror, haveMirrorConfig, agentClusterInstall, infraEnvID)
	assert.Equal(t, "http", templateData.ServiceProtocol)
	assert.Equal(t, "http://"+nodeZeroIP+":8090/", templateData.ServiceBaseURL)
	assert.Equal(t, pullSecret, templateData.PullSecret)
	assert.Equal(t, "", templateData.PullSecretToken)
	assert.Equal(t, nodeZeroIP, templateData.NodeZeroIP)
	assert.Equal(t, nodeZeroIP+":8090", templateData.AssistedServiceHost)
	assert.Equal(t, agentClusterInstall.Spec.APIVIP, templateData.APIVIP)
	assert.Equal(t, agentClusterInstall.Spec.ProvisionRequirements.ControlPlaneAgents, templateData.ControlPlaneAgents)
	assert.Equal(t, agentClusterInstall.Spec.ProvisionRequirements.WorkerAgents, templateData.WorkerAgents)
	assert.Equal(t, releaseImageList, templateData.ReleaseImages)
	assert.Equal(t, releaseImage, templateData.ReleaseImage)
	assert.Equal(t, releaseImageMirror, templateData.ReleaseImageMirror)
	assert.Equal(t, haveMirrorConfig, templateData.HaveMirrorConfig)
	assert.Equal(t, infraEnvID, templateData.InfraEnvID)
}

func TestIgnition_addStaticNetworkConfig(t *testing.T) {
	cases := []struct {
		Name string
		staticNetworkConfig []*models.HostStaticNetworkConfig
		expectedError string
		expectedFileList []string
	}{
		{
			Name: "default",
			staticNetworkConfig: []*models.HostStaticNetworkConfig{
				{
					MacInterfaceMap: models.MacInterfaceMap{
						{LogicalNicName: "eth0", MacAddress: "52:54:01:aa:aa:a1"},
					},
					NetworkYaml: "interfaces:\n- ipv4:\n address:\n - ip: 192.168.122.21\n prefix-length: 24\n enabled: true\n mac-address: 52:54:01:aa:aa:a1\n name: eth0\n state: up\n type: ethernet\n",
				},
			},
			expectedError: "",
			expectedFileList: []string{
				"/etc/assisted/network/host0/eth0.nmconnection",
				"/etc/assisted/network/host0/mac_interface.ini",
				"/usr/local/bin/pre-network-manager-config.sh",
			},
		},
		{
			Name: "no-static-network-configs",
			staticNetworkConfig: []*models.HostStaticNetworkConfig{},
			expectedError: "",
			expectedFileList: nil,
		},
		{
			Name: "error-processing-config",
			staticNetworkConfig: []*models.HostStaticNetworkConfig{
				{
					MacInterfaceMap: models.MacInterfaceMap{
						{LogicalNicName: "eth0", MacAddress: "52:54:01:aa:aa:a1"},
					},
					NetworkYaml: "interfaces:\n- ipv4:\n address:\n - ip: bad-ip\n prefix-length: 24\n enabled: true\n mac-address: 52:54:01:aa:aa:a1\n name: eth0\n state: up\n type: ethernet\n",
				},
			},
			expectedError: "'bad-ip' does not appear to be an IPv4 or IPv6 address",
			expectedFileList: nil,
		},
	}

	for _, tc := range cases {
		t.Run(tc.Name, func(t *testing.T) {
			config := igntypes.Config{}
			err := addStaticNetworkConfig(&config, tc.staticNetworkConfig)

			if tc.expectedError != "" {
				assert.Regexp(t, tc.expectedError, err.Error())
			} else {
				assert.NoError(t, err)
			}

			var fileList []string
			for _, file := range config.Storage.Files {
				fileList = append(fileList, file.Node.Path)
			}
			assert.Equal(t, tc.expectedFileList, fileList)
		})
	}
}

func TestRetrieveRendezvousIP(t *testing.T) {
	rawConfig := `interfaces:
- ipv4:
address:
- ip: "192.168.122.21"`
	cases := []struct {
		Name string
		agentConfig *agent.Config
		nmStateConfigs []*v1beta1.NMStateConfig
		expectedRendezvousIP string
		expectedError string
	}{
		{
			Name: "valid-agent-config-provided-with-RendezvousIP",
			agentConfig: &agent.Config{
				RendezvousIP: "192.168.122.21",
				Hosts: []agent.Host{
					{
						Hostname: "control-0.example.org",
						Role: "master",
					},
				},
			},
			expectedRendezvousIP: "192.168.122.21",
		},
		{
			Name: "no-agent-config-provided-so-read-from-nmstateconfig",
			nmStateConfigs: []*v1beta1.NMStateConfig{
				{
					Spec: v1beta1.NMStateConfigSpec{
						NetConfig: v1beta1.NetConfig{
							Raw: []byte(rawConfig),
						},
					},
				},
			},
			expectedRendezvousIP: "192.168.122.21",
		},
		{
			Name: "neither-agent-config-was-provided-with-RendezvousIP-nor-nmstateconfig-manifest",
			agentConfig: &agent.Config{
				Hosts: []agent.Host{
					{
						Hostname: "control-0.example.org",
						Role: "master",
					},
				},
			},
			expectedError: "missing rendezvousIP in agent-config or at least one NMStateConfig manifest",
		},
	}
	for _, tc := range cases {
		t.Run(tc.Name, func(t *testing.T) {
			rendezvousIP, err := RetrieveRendezvousIP(tc.agentConfig, tc.nmStateConfigs)
			if tc.expectedError != "" {
				assert.Regexp(t, tc.expectedError, err.Error())
			} else {
				assert.NoError(t, err)
				assert.Equal(t, tc.expectedRendezvousIP, rendezvousIP)
			}
		})
	}

}

func TestAddHostConfig_Roles(t *testing.T) {
	cases := []struct {
		Name string
		agentConfig *agentconfig.AgentConfig
		expectedNumberOfHostConfigFiles int
	}{
		{
			Name: "one-host-role-defined",
			agentConfig: &agentconfig.AgentConfig{
				Config: &agent.Config{
					Hosts: []agent.Host{
						{
							Role: "master",
						},
					},
				},
			},
			expectedNumberOfHostConfigFiles: 1,
		},
		{
			Name: "multiple-host-roles-defined",
			agentConfig: &agentconfig.AgentConfig{
				Config: &agent.Config{
					Hosts: []agent.Host{
						{
							Role: "master",
						},
						{
							Role: "master",
						},
						{
							Role: "master",
						},
						{
							Role: "worker",
						},
						{
							Role: "worker",
						},
					},
				},
			},
			expectedNumberOfHostConfigFiles: 5,
		},
		{
			Name: "zero-host-roles-defined",
			expectedNumberOfHostConfigFiles: 0,
		},
	}
	for _, tc := range cases {
		t.Run(tc.Name, func(t *testing.T) {
			config := &igntypes.Config{}
			err := addHostConfig(config, tc.agentConfig)
			assert.NoError(t, err)
			assert.Equal(t, len(config.Storage.Files), tc.expectedNumberOfHostConfigFiles)
			for _, file := range config.Storage.Files {
				assert.Equal(t, true, strings.HasPrefix(file.Path, "/etc/assisted/hostconfig"))
				assert.Equal(t, true, strings.HasSuffix(file.Path, "role"))
			}
		})
	}

}

func TestIgnition_Generate(t *testing.T) {

	// This patch currently allows testing the Ignition asset using the embedded resources.
	// TODO: Replace it by mocking the filesystem in bootstrap.AddStorageFiles()
	workingDirectory, _ := os.Getwd()
	os.Chdir(path.Join(workingDirectory, "../../../../data"))

	cases := []struct {
		name string
		overrideDeps []asset.Asset
		expectedError string
		expectedFiles []string
	}{
		{
			name: "no-extra-manifests",
			expectedFiles: []string{},
		},
		{
			name: "default",
			overrideDeps: []asset.Asset{
				&manifests.ExtraManifests{
					FileList: []*asset.File{
						{
							Filename: "openshift/test-configmap.yaml",
						},
					},
				},
			},
			expectedFiles: []string{
				"/etc/assisted/extra-manifests/test-configmap.yaml",
			},
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {

			deps := buildIgnitionAssetDefaultDependencies()

			for _, od := range tc.overrideDeps {
				for i, d := range deps {
					if d.Name() == od.Name() {
						deps[i] = od
						break
					}
				}
			}

			parents := asset.Parents{}
			parents.Add(deps...)

			ignitionAsset := &Ignition{}
			err := ignitionAsset.Generate(parents)

			if tc.expectedError != "" {
				assert.Equal(t, tc.expectedError, err.Error())
			} else {
				assert.NoError(t, err)

				assert.Len(t, ignitionAsset.Config.Storage.Directories, 1)
				assert.Equal(t, "/etc/assisted/extra-manifests", ignitionAsset.Config.Storage.Directories[0].Node.Path)

				for _, f := range tc.expectedFiles {
					found := false
					for _, i := range ignitionAsset.Config.Storage.Files {
						if i.Node.Path == f {
							found = true
							break
						}
					}
					assert.True(t, found, fmt.Sprintf("Expected file %s not found", f))
				}
			}
		})
	}
}

// This test util creates the minimum valid set of dependencies for the
// Ignition asset
func buildIgnitionAssetDefaultDependencies() []asset.Asset {
	secretDataBytes, _ := base64.StdEncoding.DecodeString("super-secret")

	return []asset.Asset{
		&manifests.AgentManifests{
			InfraEnv: &v1beta1.InfraEnv{
				Spec: v1beta1.InfraEnvSpec{
					SSHAuthorizedKey: "my-ssh-key",
				},
			},
			ClusterImageSet: &hivev1.ClusterImageSet{
				Spec: hivev1.ClusterImageSetSpec{
					ReleaseImage: "registry.ci.openshift.org/origin/release:4.11",
				},
			},
			PullSecret: &v1.Secret{
				Data: map[string][]byte{
					".dockerconfigjson": secretDataBytes,
				},
			},
			AgentClusterInstall: &hiveext.AgentClusterInstall{
				Spec: hiveext.AgentClusterInstallSpec{
					APIVIP: "192.168.111.5",
					ProvisionRequirements: hiveext.ProvisionRequirements{
						ControlPlaneAgents: 3,
						WorkerAgents: 5,
					},
				},
			},
			NMStateConfigs: []*aiv1beta1.NMStateConfig{
				{
					Spec: aiv1beta1.NMStateConfigSpec{
						Interfaces: []*aiv1beta1.Interface{
							{
								Name: "eth0",
								MacAddress: "00:01:02:03:04:05",
							},
						},
					},
				},
			},
		},
		&agentconfig.AgentConfig{
			Config: &agent.Config{
				RendezvousIP: "192.168.111.80",
			},
			File: &asset.File{
				Filename: "/cluster-manifests/agent-config.yaml",
			},
		},
		&manifests.ExtraManifests{},
		&mirror.RegistriesConf{},
		&mirror.CaBundle{},
		&tls.KubeAPIServerLBSignerCertKey{},
		&tls.KubeAPIServerLocalhostSignerCertKey{},
		&tls.KubeAPIServerServiceNetworkSignerCertKey{},
		&tls.AdminKubeConfigSignerCertKey{},
		&tls.AdminKubeConfigClientCertKey{},
	}
}

func TestIgnition_getMirrorFromRelease(t *testing.T) {

	cases := []struct {
		name string
		release string
		registriesConf mirror.RegistriesConf
		expectedMirror string
	}{
		{
			name: "no-mirror",
			release: "registry.ci.openshift.org/ocp/release:latest",
			registriesConf: mirror.RegistriesConf{},
			expectedMirror: "",
		},
		{
			name: "mirror-no-match",
			release: "registry.ci.openshift.org/ocp/release:4.11.0-0.nightly-foo",
			registriesConf: mirror.RegistriesConf{
				File: &asset.File{
					Filename: "registries.conf",
					Data: []byte(""),
				},
				MirrorConfig: []mirror.RegistriesConfig{
					{
						Location: "some.registry.org/release",
						Mirror: "some.mirror.org",
					},
				},
			},
			expectedMirror: "",
		},
		{
			name: "mirror-match",
			release: "registry.ci.openshift.org/ocp/release:4.11.0-0.nightly-foo",
			registriesConf: mirror.RegistriesConf{
				File: &asset.File{
					Filename: "registries.conf",
					Data: []byte(""),
				},
				MirrorConfig: []mirror.RegistriesConfig{
					{
						Location: "registry.ci.openshift.org/ocp/release",
						Mirror: "virthost.ostest.test.metalkube.org:5000/localimages/local-release-image",
					},
				},
			},
			expectedMirror: "virthost.ostest.test.metalkube.org:5000/localimages/local-release-image:4.11.0-0.nightly-foo",
		},
		{
			name: "mirror-match-with-checksum",
			release: "quay.io/openshift-release-dev/ocp-release@sha256:300bce8246cf880e792e106607925de0a404484637627edf5f517375517d54a4",
			registriesConf: mirror.RegistriesConf{
				File: &asset.File{
					Filename: "registries.conf",
					Data: []byte(""),
				},
				MirrorConfig: []mirror.RegistriesConfig{
					{
						Location: "quay.io/openshift-release-dev/ocp-v4.0-art-dev",
						Mirror: "localhost:5000/openshift4/openshift/release",
					},
					{
						Location: "quay.io/openshift-release-dev/ocp-release",
						Mirror: "localhost:5000/openshift-release-dev/ocp-release",
					},
				},
			},
			expectedMirror: "localhost:5000/openshift-release-dev/ocp-release@sha256:300bce8246cf880e792e106607925de0a404484637627edf5f517375517d54a4",
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {

			mirror := getMirrorFromRelease(tc.release, &tc.registriesConf)

			assert.Equal(t, tc.expectedMirror, mirror)

		})
	}
}
234
pkg/asset/agent/image/oc.go
Normal file
@@ -0,0 +1,234 @@
package image

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"os"
	"os/exec"
	"path"
	"path/filepath"
	"strings"
	"time"

	operatorv1alpha1 "github.com/openshift/api/operator/v1alpha1"
	"github.com/openshift/assisted-service/pkg/executer"
	"github.com/openshift/installer/pkg/asset/agent/mirror"
	"github.com/sirupsen/logrus"
	"github.com/thedevsaddam/retry"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/yaml"
)

const (
	machineOsImageName = "machine-os-images"
	coreOsFileName = "/coreos/coreos-%s.iso"
	// OcDefaultTries is the number of times to execute the oc command on failures
	OcDefaultTries = 5
	// OcDefaultRetryDelay is the time between retries
	OcDefaultRetryDelay = time.Second * 5
)

// Config is used to set up the retries for extracting the base ISO
type Config struct {
	MaxTries uint
	RetryDelay time.Duration
}

// Release is the interface to use the oc command to get the image info
type Release interface {
	GetBaseIso(log logrus.FieldLogger, releaseImage string, pullSecret string, mirrorConfig []mirror.RegistriesConfig, architecture string) (string, error)
}

type release struct {
	executer executer.Executer
	config Config
}

// NewRelease is used to set up the executor to run oc commands
func NewRelease(executer executer.Executer, config Config) Release {
	return &release{executer: executer, config: config}
}

const (
	templateGetImage = "oc adm release info --image-for=%s --insecure=%t %s"
	templateImageExtract = "oc image extract --path %s:%s --confirm %s"
	templateImageExtractWithIcsp = "oc image extract --path %s:%s --confirm --icsp-file=%s %s"
)

// GetBaseIso gets the CoreOS ISO from the releaseImage
func (r *release) GetBaseIso(log logrus.FieldLogger, releaseImage string, pullSecret string, mirrorConfig []mirror.RegistriesConfig, architecture string) (string, error) {

	// Get the machine-os-images pullspec from the release and use that to get the CoreOS ISO
	image, err := r.getImageFromRelease(log, machineOsImageName, releaseImage, pullSecret, len(mirrorConfig) > 0)
	if err != nil {
		return "", err
	}

	cacheDir, err := GetCacheDir(imageDataType)
	if err != nil {
		return "", err
	}

	filename := fmt.Sprintf(coreOsFileName, architecture)
	// Check if file is already cached
	filePath, err := GetFileFromCache(path.Base(filename), cacheDir)
	if err != nil {
		return "", err
	}
	if filePath != "" {
		// Found cached file
		return filePath, nil
	}

	path, err := r.extractFileFromImage(log, image, filename, cacheDir, pullSecret, mirrorConfig)
	if err != nil {
		return "", err
	}
	return path, err
}

func (r *release) getImageFromRelease(log logrus.FieldLogger, imageName, releaseImage, pullSecret string, haveMirror bool) (string, error) {
	// This requires the 'oc' command so make sure it's available
	_, err := exec.LookPath("oc")
	if err != nil {
		if haveMirror {
			log.Warning("Unable to validate mirror config because \"oc\" command is not available")
		} else {
			log.Debug("Skipping ISO extraction; \"oc\" command is not available")
		}
		return "", err
	}

	cmd := fmt.Sprintf(templateGetImage, imageName, true, releaseImage)

	log.Debugf("Fetching image from OCP release (%s)", cmd)
	image, err := execute(log, r.executer, pullSecret, cmd)
	if err != nil {
		return "", err
	}

	return image, nil
}

func (r *release) extractFileFromImage(log logrus.FieldLogger, image, file, cacheDir, pullSecret string, mirrorConfig []mirror.RegistriesConfig) (string, error) {

	var cmd string
	if len(mirrorConfig) > 0 {
		log.Debugf("Using mirror configuration")
		icspFile, err := getIcspFileFromRegistriesConfig(log, mirrorConfig)
		if err != nil {
			return "", err
		}
		defer removeIcspFile(icspFile)
		cmd = fmt.Sprintf(templateImageExtractWithIcsp, file, cacheDir, icspFile, image)
	} else {
		cmd = fmt.Sprintf(templateImageExtract, file, cacheDir, image)
	}

	log.Debugf("extracting %s to %s, %s", file, cacheDir, cmd)
	_, err := retry.Do(r.config.MaxTries, r.config.RetryDelay, execute, log, r.executer, pullSecret, cmd)
	if err != nil {
		return "", err
	}
	// set path
	path := filepath.Join(cacheDir, path.Base(file))
	log.Info("Successfully extracted base ISO from the release")
	log.Debugf("Base ISO %s cached at %s", file, path)
	return path, nil
}

func execute(log logrus.FieldLogger, executer executer.Executer, pullSecret string, command string) (string, error) {

	ps, err := executer.TempFile("", "registry-config")
	if err != nil {
		return "", err
	}
	defer func() {
		ps.Close()
		os.Remove(ps.Name())
	}()
	_, err = ps.Write([]byte(pullSecret))
	if err != nil {
		return "", err
	}
	// flush the buffer to ensure the file can be read
	ps.Close()
	executeCommand := command[:] + " --registry-config=" + ps.Name()
	args := strings.Split(executeCommand, " ")

	stdout, stderr, exitCode := executer.Execute(args[0], args[1:]...)

	if exitCode == 0 {
		return strings.TrimSpace(stdout), nil
	}

	err = fmt.Errorf("command '%s' exited with non-zero exit code %d: %s\n%s", executeCommand, exitCode, stdout, stderr)
	log.Error(err)
	return "", err
}

// Create a temporary file containing the ImageContentPolicySources
func getIcspFileFromRegistriesConfig(log logrus.FieldLogger, mirrorConfig []mirror.RegistriesConfig) (string, error) {

	contents, err := getIcspContents(mirrorConfig)
	if err != nil {
		return "", err
	}
	if contents == nil {
		log.Debugf("No registry entries to build ICSP file")
		return "", nil
	}

	icspFile, err := ioutil.TempFile("", "icsp-file")
	if err != nil {
		return "", err
	}
	log.Debugf("Building ICSP file from registries.conf with contents %s", contents)
	if _, err := icspFile.Write(contents); err != nil {
		icspFile.Close()
		os.Remove(icspFile.Name())
		return "", err
	}
	icspFile.Close()

	return icspFile.Name(), nil
}

// Convert the data in registries.conf into ICSP format
func getIcspContents(mirrorConfig []mirror.RegistriesConfig) ([]byte, error) {

	icsp := operatorv1alpha1.ImageContentSourcePolicy{
		TypeMeta: metav1.TypeMeta{
			APIVersion: operatorv1alpha1.SchemeGroupVersion.String(),
			Kind: "ImageContentSourcePolicy",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name: "image-policy",
			// not namespaced
		},
	}

	icsp.Spec.RepositoryDigestMirrors = make([]operatorv1alpha1.RepositoryDigestMirrors, len(mirrorConfig))
	for i, mirrorRegistries := range mirrorConfig {
		icsp.Spec.RepositoryDigestMirrors[i] = operatorv1alpha1.RepositoryDigestMirrors{Source: mirrorRegistries.Location, Mirrors: []string{mirrorRegistries.Mirror}}
	}

	// Convert to json first so json tags are handled
	jsonData, err := json.Marshal(&icsp)
	if err != nil {
		return nil, err
	}
	contents, err := yaml.JSONToYAML(jsonData)
	if err != nil {
		return nil, err
	}

	return contents, nil
}

func removeIcspFile(filename string) {
	if filename != "" {
		os.Remove(filename)
	}
}
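For reference, a minimal sketch (assuming a valid release pullspec and pull secret, and that the oc binary is on PATH) of how callers such as BaseIso use the Release interface above to pull the CoreOS ISO out of the machine-os-images image with retries; the pullspec and pull secret below are placeholders.

package main

import (
	"github.com/openshift/assisted-service/pkg/executer"
	"github.com/sirupsen/logrus"

	"github.com/openshift/installer/pkg/asset/agent/image"
	"github.com/openshift/installer/pkg/asset/agent/mirror"
)

func main() {
	log := logrus.New()
	r := image.NewRelease(&executer.CommonExecuter{},
		image.Config{MaxTries: image.OcDefaultTries, RetryDelay: image.OcDefaultRetryDelay})

	// Placeholder pullspec and pull secret; no mirror configuration is used here.
	isoPath, err := r.GetBaseIso(log, "quay.io/openshift-release-dev/ocp-release:4.11.0-x86_64",
		`{"auths":{}}`, []mirror.RegistriesConfig{}, "x86_64")
	if err != nil {
		log.Fatal(err)
	}
	log.Infof("base ISO extracted to %s", isoPath)
}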
52
pkg/asset/agent/image/oc_test.go
Normal file
@@ -0,0 +1,52 @@
package image

import (
	"testing"

	"github.com/openshift/installer/pkg/asset/agent/mirror"
	"github.com/stretchr/testify/assert"
)

func TestGetIcspContents(t *testing.T) {

	cases := []struct {
		name string
		mirrorConfig []mirror.RegistriesConfig
		expectedError string
		expectedConfig string
	}{
		{
			name: "valid-config",
			mirrorConfig: []mirror.RegistriesConfig{
				{
					Location: "registry.ci.openshift.org/ocp/release",
					Mirror: "virthost.ostest.test.metalkube.org:5000/localimages/local-release-image",
				},
				{
					Location: "quay.io/openshift-release-dev/ocp-v4.0-art-dev",
					Mirror: "virthost.ostest.test.metalkube.org:5000/localimages/local-release-image",
				},
			},
			expectedConfig: "apiVersion: operator.openshift.io/v1alpha1\nkind: ImageContentSourcePolicy\nmetadata:\n creationTimestamp: null\n name: image-policy\nspec:\n repositoryDigestMirrors:\n - mirrors:\n - virthost.ostest.test.metalkube.org:5000/localimages/local-release-image\n source: registry.ci.openshift.org/ocp/release\n - mirrors:\n - virthost.ostest.test.metalkube.org:5000/localimages/local-release-image\n source: quay.io/openshift-release-dev/ocp-v4.0-art-dev\n",
			expectedError: "",
		},
		{
			name: "empty-config",
			mirrorConfig: []mirror.RegistriesConfig{},
			expectedConfig: "apiVersion: operator.openshift.io/v1alpha1\nkind: ImageContentSourcePolicy\nmetadata:\n creationTimestamp: null\n name: image-policy\nspec:\n repositoryDigestMirrors: []\n",
			expectedError: "",
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			contents, err := getIcspContents(tc.mirrorConfig)
			if tc.expectedError != "" {
				assert.Equal(t, tc.expectedError, err.Error())
			} else {
				assert.NoError(t, err)
			}

			assert.Equal(t, tc.expectedConfig, string(contents))
		})
	}
}
72
pkg/asset/agent/image/releaseimage.go
Normal file
@@ -0,0 +1,72 @@
package image

import (
	"encoding/json"
	"fmt"
	"regexp"
	"strings"

	"github.com/openshift/installer/pkg/version"
)

type releaseImage struct {
	ReleaseVersion string `json:"openshift_version"`
	Arch           string `json:"cpu_architecture"`
	PullSpec       string `json:"url"`
	Tag            string `json:"version"`
}

func isDigest(pullspec string) bool {
	return regexp.MustCompile(`.*sha256:[a-fA-F0-9]{64}$`).MatchString(pullspec)
}

func releaseImageFromPullSpec(pullSpec, arch string) (releaseImage, error) {

	// When the pull spec is a digest, use the current version
	// stored in the installer
	if isDigest(pullSpec) {
		versionString, err := version.Version()
		if err != nil {
			return releaseImage{}, err
		}

		return releaseImage{
			ReleaseVersion: versionString,
			Arch:           arch,
			PullSpec:       pullSpec,
			Tag:            versionString,
		}, nil
	}

	components := strings.Split(pullSpec, ":")
	if len(components) < 2 {
		return releaseImage{}, fmt.Errorf("invalid release image \"%s\"", pullSpec)
	}
	lastIndex := len(components) - 1
	tag := strings.TrimSuffix(components[lastIndex], fmt.Sprintf("-%s", arch))

	versionComponents := strings.Split(tag, ".")
	if len(versionComponents) < 2 {
		return releaseImage{}, fmt.Errorf("invalid release image version \"%s\"", tag)
	}
	relVersion := strings.Join(versionComponents[:2], ".")

	return releaseImage{
		ReleaseVersion: relVersion,
		Arch:           arch,
		PullSpec:       pullSpec,
		Tag:            tag,
	}, nil
}

func releaseImageList(pullSpec, arch string) (string, error) {

	relImage, err := releaseImageFromPullSpec(pullSpec, arch)
	if err != nil {
		return "", err
	}

	imageList := []interface{}{relImage}
	text, err := json.Marshal(imageList)
	return string(text), err
}
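As an illustrative aside (not part of this change), the two branches of releaseImageFromPullSpec above can be seen side by side; the sample pull specs are the ones exercised in the tests that follow.

// Sketch only: tagged pull specs are parsed into an OpenShift version and tag,
// while digest pull specs fall back to the installer's own version.Version().
func exampleReleaseImageParsing() {
	tagged, _ := releaseImageFromPullSpec(
		"quay.io/openshift-release-dev/ocp-release:4.10.0-rc.1-x86_64", "x86_64")
	// tagged.ReleaseVersion == "4.10", tagged.Tag == "4.10.0-rc.1"

	digest, _ := releaseImageFromPullSpec(
		"registry.build04.ci.openshift.org/ci-op-m7rfgytz/release@sha256:ebb203f24ee060d61bdb466696a9c20b3841f9929badf9b81fc99cbedc2a679e", "x86_64")
	// isDigest matches, so digest.ReleaseVersion and digest.Tag come from version.Version()

	fmt.Println(tagged.ReleaseVersion, digest.Tag)
}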
72
pkg/asset/agent/image/releaseimage_test.go
Normal file
@@ -0,0 +1,72 @@
package image

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestReleaseImageList(t *testing.T) {
	cases := []struct {
		name     string
		pullSpec string
		arch     string
		result   string
	}{
		{
			name:     "4.10rc",
			pullSpec: "quay.io/openshift-release-dev/ocp-release:4.10.0-rc.1-x86_64",
			arch:     "x86_64",
			result:   "[{\"openshift_version\":\"4.10\",\"cpu_architecture\":\"x86_64\",\"url\":\"quay.io/openshift-release-dev/ocp-release:4.10.0-rc.1-x86_64\",\"version\":\"4.10.0-rc.1\"}]",
		},
		{
			name:     "pull-spec-includes-port-number",
			pullSpec: "quay.io:433/openshift-release-dev/ocp-release:4.10.0-rc.1-x86_64",
			arch:     "x86_64",
			result:   "[{\"openshift_version\":\"4.10\",\"cpu_architecture\":\"x86_64\",\"url\":\"quay.io:433/openshift-release-dev/ocp-release:4.10.0-rc.1-x86_64\",\"version\":\"4.10.0-rc.1\"}]",
		},
		{
			name:     "arm",
			pullSpec: "quay.io/openshift-release-dev/ocp-release:4.10.0-rc.1-aarch64",
			arch:     "aarch64",
			result:   "[{\"openshift_version\":\"4.10\",\"cpu_architecture\":\"aarch64\",\"url\":\"quay.io/openshift-release-dev/ocp-release:4.10.0-rc.1-aarch64\",\"version\":\"4.10.0-rc.1\"}]",
		},
		{
			name:     "4.11ci",
			pullSpec: "registry.ci.openshift.org/ocp/release:4.11.0-0.ci-2022-05-16-202609",
			arch:     "x86_64",
			result:   "[{\"openshift_version\":\"4.11\",\"cpu_architecture\":\"x86_64\",\"url\":\"registry.ci.openshift.org/ocp/release:4.11.0-0.ci-2022-05-16-202609\",\"version\":\"4.11.0-0.ci-2022-05-16-202609\"}]",
		},
		{
			name:     "CI-ephemeral",
			pullSpec: "registry.build04.ci.openshift.org/ci-op-m7rfgytz/release@sha256:ebb203f24ee060d61bdb466696a9c20b3841f9929badf9b81fc99cbedc2a679e",
			arch:     "x86_64",
			result:   "[{\"openshift_version\":\"was not built correctly\",\"cpu_architecture\":\"x86_64\",\"url\":\"registry.build04.ci.openshift.org/ci-op-m7rfgytz/release@sha256:ebb203f24ee060d61bdb466696a9c20b3841f9929badf9b81fc99cbedc2a679e\",\"version\":\"was not built correctly\"}]",
		},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			output, err := releaseImageList(tc.pullSpec, tc.arch)
			assert.NoError(t, err)
			if err == nil {
				assert.Equal(t, tc.result, output)
			}
		})
	}
}

func TestReleaseImageListErrors(t *testing.T) {
	cases := []string{
		"",
		"quay.io/openshift-release-dev/ocp-release-4.10",
		"quay.io/openshift-release-dev/ocp-release:4",
	}

	for _, tc := range cases {
		t.Run(tc, func(t *testing.T) {
			_, err := releaseImageList(tc, "x86_64")
			assert.Error(t, err)
		})
	}
}
186
pkg/asset/agent/installconfig.go
Normal file
@@ -0,0 +1,186 @@
|
||||
package agent
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/openshift/installer/pkg/asset"
|
||||
"github.com/openshift/installer/pkg/asset/installconfig"
|
||||
"github.com/openshift/installer/pkg/types"
|
||||
"github.com/openshift/installer/pkg/types/baremetal"
|
||||
"github.com/openshift/installer/pkg/types/none"
|
||||
"github.com/openshift/installer/pkg/types/vsphere"
|
||||
"github.com/pkg/errors"
|
||||
"k8s.io/apimachinery/pkg/util/validation/field"
|
||||
"sigs.k8s.io/yaml"
|
||||
)
|
||||
|
||||
const (
|
||||
installConfigFilename = "install-config.yaml"
|
||||
)
|
||||
|
||||
// supportedPlatforms lists the supported platforms for agent installer
|
||||
var supportedPlatforms = []string{baremetal.Name, vsphere.Name, none.Name}
|
||||
|
||||
// OptionalInstallConfig is an InstallConfig where the default is empty, rather
|
||||
// than generated from running the survey.
|
||||
type OptionalInstallConfig struct {
|
||||
installconfig.InstallConfig
|
||||
Supplied bool
|
||||
}
|
||||
|
||||
// Dependencies returns all of the dependencies directly needed by an
|
||||
// InstallConfig asset.
|
||||
func (a *OptionalInstallConfig) Dependencies() []asset.Asset {
|
||||
// Return no dependencies for the Agent install config, because it is
|
||||
// optional. We don't need to run the survey if it doesn't exist, since the
|
||||
// user may have supplied cluster-manifests that fully define the cluster.
|
||||
return []asset.Asset{}
|
||||
}
|
||||
|
||||
// Generate generates the install-config.yaml file.
|
||||
func (a *OptionalInstallConfig) Generate(parents asset.Parents) error {
|
||||
// Just generate an empty install config, since we have no dependencies.
|
||||
return nil
|
||||
}
|
||||
|
||||
// Load returns the installconfig from disk.
|
||||
func (a *OptionalInstallConfig) Load(f asset.FileFetcher) (bool, error) {
|
||||
|
||||
var found bool
|
||||
|
||||
// First load the provided install config to early validate
|
||||
// as per agent installer specific requirements
|
||||
// Detailed generic validations of install config are
|
||||
// done by pkg/asset/installconfig/installconfig.go
|
||||
installConfig, err := a.loadEarly(f)
|
||||
if err != nil {
|
||||
return found, err
|
||||
}
|
||||
|
||||
if err := a.validateInstallConfig(installConfig).ToAggregate(); err != nil {
|
||||
return found, errors.Wrapf(err, "invalid install-config configuration")
|
||||
}
|
||||
|
||||
found, err = a.InstallConfig.Load(f)
|
||||
if found && err == nil {
|
||||
a.Supplied = true
|
||||
}
|
||||
return found, err
|
||||
}
|
||||
|
||||
// loadEarly loads the install config from the disk
|
||||
// to be able to validate early for agent installer
|
||||
func (a *OptionalInstallConfig) loadEarly(f asset.FileFetcher) (*types.InstallConfig, error) {
|
||||
|
||||
file, err := f.FetchByName(installConfigFilename)
|
||||
config := &types.InstallConfig{}
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return config, nil
|
||||
}
|
||||
return config, errors.Wrap(err, asset.InstallConfigError)
|
||||
}
|
||||
|
||||
if err := yaml.UnmarshalStrict(file.Data, config, yaml.DisallowUnknownFields); err != nil {
|
||||
if strings.Contains(err.Error(), "unknown field") {
|
||||
err = errors.Wrapf(err, "failed to parse first occurence of unknown field")
|
||||
}
|
||||
err = errors.Wrapf(err, "failed to unmarshal %s", installConfigFilename)
|
||||
return config, errors.Wrap(err, asset.InstallConfigError)
|
||||
}
|
||||
return config, nil
|
||||
}
|
||||
|
||||
func (a *OptionalInstallConfig) validateInstallConfig(installConfig *types.InstallConfig) field.ErrorList {
|
||||
var allErrs field.ErrorList
|
||||
|
||||
if err := a.validateSupportedPlatforms(installConfig); err != nil {
|
||||
allErrs = append(allErrs, err...)
|
||||
}
|
||||
|
||||
if err := a.validateVIPsAreSet(installConfig); err != nil {
|
||||
allErrs = append(allErrs, err...)
|
||||
}
|
||||
|
||||
if err := a.validateSNOConfiguration(installConfig); err != nil {
|
||||
allErrs = append(allErrs, err...)
|
||||
}
|
||||
|
||||
return allErrs
|
||||
}
|
||||
|
||||
func (a *OptionalInstallConfig) validateSupportedPlatforms(installConfig *types.InstallConfig) field.ErrorList {
|
||||
var allErrs field.ErrorList
|
||||
|
||||
fieldPath := field.NewPath("Platform")
|
||||
|
||||
if installConfig.Platform.Name() != "" && !a.contains(installConfig.Platform.Name(), supportedPlatforms) {
|
||||
allErrs = append(allErrs, field.NotSupported(fieldPath, installConfig.Platform.Name(), supportedPlatforms))
|
||||
}
|
||||
return allErrs
|
||||
}
|
||||
|
||||
func (a *OptionalInstallConfig) validateVIPsAreSet(installConfig *types.InstallConfig) field.ErrorList {
|
||||
var allErrs field.ErrorList
|
||||
var fieldPath *field.Path
|
||||
|
||||
if installConfig.Platform.Name() == baremetal.Name {
|
||||
if installConfig.Platform.BareMetal.APIVIP == "" {
|
||||
fieldPath = field.NewPath("Platform", "Baremetal", "ApiVip")
|
||||
allErrs = append(allErrs, field.Required(fieldPath, fmt.Sprintf("apiVip must be set for %s platform", baremetal.Name)))
|
||||
}
|
||||
if installConfig.Platform.BareMetal.IngressVIP == "" {
|
||||
fieldPath = field.NewPath("Platform", "Baremetal", "IngressVip")
|
||||
allErrs = append(allErrs, field.Required(fieldPath, fmt.Sprintf("ingressVip must be set for %s platform", baremetal.Name)))
|
||||
}
|
||||
}
|
||||
|
||||
if installConfig.Platform.Name() == vsphere.Name {
|
||||
if installConfig.Platform.VSphere.APIVIP == "" {
|
||||
fieldPath = field.NewPath("Platform", "VSphere", "ApiVip")
|
||||
allErrs = append(allErrs, field.Required(fieldPath, fmt.Sprintf("apiVip must be set for %s platform", vsphere.Name)))
|
||||
}
|
||||
if installConfig.Platform.VSphere.IngressVIP == "" {
|
||||
fieldPath = field.NewPath("Platform", "VSphere", "IngressVip")
|
||||
allErrs = append(allErrs, field.Required(fieldPath, fmt.Sprintf("ingressVip must be set for %s platform", vsphere.Name)))
|
||||
}
|
||||
}
|
||||
return allErrs
|
||||
}
|
||||
|
||||
func (a *OptionalInstallConfig) validateSNOConfiguration(installConfig *types.InstallConfig) field.ErrorList {
|
||||
var allErrs field.ErrorList
|
||||
var fieldPath *field.Path
|
||||
|
||||
// platform None always implies an SNO cluster
|
||||
if installConfig.Platform.Name() == none.Name {
|
||||
if *installConfig.ControlPlane.Replicas != 1 {
|
||||
fieldPath = field.NewPath("ControlPlane", "Replicas")
|
||||
allErrs = append(allErrs, field.Required(fieldPath, fmt.Sprintf("control plane replicas must be 1 for %s platform. Found %v", none.Name, *installConfig.ControlPlane.Replicas)))
|
||||
} else if len(installConfig.Compute) == 0 {
|
||||
fieldPath = field.NewPath("Compute", "Replicas")
|
||||
allErrs = append(allErrs, field.Required(fieldPath, "Installing a Single Node Openshift requires explicitly setting compute replicas to zero"))
|
||||
}
|
||||
|
||||
var workers int
|
||||
for _, worker := range installConfig.Compute {
|
||||
workers = workers + int(*worker.Replicas)
|
||||
}
|
||||
if workers != 0 {
|
||||
fieldPath = field.NewPath("Compute", "Replicas")
|
||||
allErrs = append(allErrs, field.Required(fieldPath, fmt.Sprintf("total number of worker replicas must be 0 for %s platform. Found %v", none.Name, workers)))
|
||||
}
|
||||
}
|
||||
return allErrs
|
||||
}
|
||||
|
||||
func (a *OptionalInstallConfig) contains(platform string, supportedPlatforms []string) bool {
|
||||
for _, p := range supportedPlatforms {
|
||||
if p == platform {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
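As an illustrative sketch (not part of this change), this is the shape of install config that satisfies the none-platform SNO rules enforced by validateSNOConfiguration above: exactly one control-plane replica and zero compute replicas in total. The pointer helper is assumed from k8s.io/utils/pointer, as used in the tests below.

// Sketch only: a none-platform config that passes the SNO validation above.
func exampleSNOInstallConfig() *types.InstallConfig {
	return &types.InstallConfig{
		Platform: types.Platform{None: &none.Platform{}},
		ControlPlane: &types.MachinePool{
			Name:     "master",
			Replicas: pointer.Int64Ptr(1),
		},
		Compute: []types.MachinePool{
			{Name: "worker", Replicas: pointer.Int64Ptr(0)},
		},
	}
}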
213
pkg/asset/agent/installconfig_test.go
Normal file
@@ -0,0 +1,213 @@
|
||||
package agent
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/golang/mock/gomock"
|
||||
"github.com/stretchr/testify/assert"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/utils/pointer"
|
||||
|
||||
"github.com/openshift/installer/pkg/asset"
|
||||
"github.com/openshift/installer/pkg/asset/mock"
|
||||
"github.com/openshift/installer/pkg/ipnet"
|
||||
"github.com/openshift/installer/pkg/types"
|
||||
"github.com/openshift/installer/pkg/types/none"
|
||||
)
|
||||
|
||||
func TestInstallConfigLoad(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
data string
|
||||
fetchError error
|
||||
expectedFound bool
|
||||
expectedError string
|
||||
expectedConfig *types.InstallConfig
|
||||
}{
|
||||
{
|
||||
name: "unsupported platform",
|
||||
data: `
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: test-cluster
|
||||
baseDomain: test-domain
|
||||
platform:
|
||||
aws:
|
||||
region: us-east-1
|
||||
pullSecret: "{\"auths\":{\"example.com\":{\"auth\":\"authorization value\"}}}"
|
||||
`,
|
||||
expectedFound: false,
|
||||
expectedError: `invalid install-config configuration: Platform: Unsupported value: "aws": supported values: "baremetal", "vsphere", "none"`,
|
||||
},
|
||||
{
|
||||
name: "apiVip not set for baremetal platform",
|
||||
data: `
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: test-cluster
|
||||
baseDomain: test-domain
|
||||
platform:
|
||||
baremetal:
|
||||
hosts:
|
||||
- name: host1
|
||||
bootMACAddress: 52:54:01:xx:zz:z1
|
||||
ingressVip: 192.168.122.11
|
||||
pullSecret: "{\"auths\":{\"example.com\":{\"auth\":\"authorization value\"}}}"
|
||||
`,
|
||||
expectedFound: false,
|
||||
expectedError: "invalid install-config configuration: Platform.Baremetal.ApiVip: Required value: apiVip must be set for baremetal platform",
|
||||
},
|
||||
{
|
||||
name: "ingressVip not set for vsphere platform",
|
||||
data: `
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: test-cluster
|
||||
baseDomain: test-domain
|
||||
platform:
|
||||
vsphere:
|
||||
apiVip: 192.168.122.10
|
||||
pullSecret: "{\"auths\":{\"example.com\":{\"auth\":\"authorization value\"}}}"
|
||||
`,
|
||||
expectedFound: false,
|
||||
expectedError: "invalid install-config configuration: Platform.VSphere.IngressVip: Required value: ingressVip must be set for vsphere platform",
|
||||
},
|
||||
{
|
||||
name: "invalid configuration for none platform for sno",
|
||||
data: `
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: test-cluster
|
||||
baseDomain: test-domain
|
||||
compute:
|
||||
- architecture: amd64
|
||||
hyperthreading: Enabled
|
||||
name: worker
|
||||
platform: {}
|
||||
replicas: 2
|
||||
controlPlane:
|
||||
architecture: amd64
|
||||
hyperthreading: Enabled
|
||||
name: master
|
||||
platform: {}
|
||||
replicas: 3
|
||||
platform:
|
||||
none : {}
|
||||
pullSecret: "{\"auths\":{\"example.com\":{\"auth\":\"authorization value\"}}}"
|
||||
`,
|
||||
expectedFound: false,
|
||||
expectedError: "invalid install-config configuration: [ControlPlane.Replicas: Required value: control plane replicas must be 1 for none platform. Found 3, Compute.Replicas: Required value: total number of worker replicas must be 0 for none platform. Found 2]",
|
||||
},
|
||||
{
|
||||
name: "no compute.replicas set for SNO",
|
||||
data: `
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: test-cluster
|
||||
baseDomain: test-domain
|
||||
controlPlane:
|
||||
architecture: amd64
|
||||
hyperthreading: Enabled
|
||||
name: master
|
||||
platform: {}
|
||||
replicas: 1
|
||||
platform:
|
||||
none : {}
|
||||
pullSecret: "{\"auths\":{\"example.com\":{\"auth\":\"authorization value\"}}}"
|
||||
`,
|
||||
expectedFound: false,
|
||||
expectedError: "invalid install-config configuration: Compute.Replicas: Required value: Installing a Single Node Openshift requires explicitly setting compute replicas to zero",
|
||||
},
|
||||
{
|
||||
name: "valid configuration for none platform for sno",
|
||||
data: `
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: test-cluster
|
||||
baseDomain: test-domain
|
||||
compute:
|
||||
- architecture: amd64
|
||||
hyperthreading: Enabled
|
||||
name: worker
|
||||
platform: {}
|
||||
replicas: 0
|
||||
controlPlane:
|
||||
architecture: amd64
|
||||
hyperthreading: Enabled
|
||||
name: master
|
||||
platform: {}
|
||||
replicas: 1
|
||||
platform:
|
||||
none : {}
|
||||
pullSecret: "{\"auths\":{\"example.com\":{\"auth\":\"authorization value\"}}}"
|
||||
`,
|
||||
expectedFound: true,
|
||||
expectedConfig: &types.InstallConfig{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: types.InstallConfigVersion,
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-cluster",
|
||||
},
|
||||
BaseDomain: "test-domain",
|
||||
Networking: &types.Networking{
|
||||
MachineNetwork: []types.MachineNetworkEntry{
|
||||
{CIDR: *ipnet.MustParseCIDR("10.0.0.0/16")},
|
||||
},
|
||||
NetworkType: "OpenShiftSDN",
|
||||
ServiceNetwork: []ipnet.IPNet{*ipnet.MustParseCIDR("172.30.0.0/16")},
|
||||
ClusterNetwork: []types.ClusterNetworkEntry{
|
||||
{
|
||||
CIDR: *ipnet.MustParseCIDR("10.128.0.0/14"),
|
||||
HostPrefix: 23,
|
||||
},
|
||||
},
|
||||
},
|
||||
ControlPlane: &types.MachinePool{
|
||||
Name: "master",
|
||||
Replicas: pointer.Int64Ptr(1),
|
||||
Hyperthreading: types.HyperthreadingEnabled,
|
||||
Architecture: types.ArchitectureAMD64,
|
||||
},
|
||||
Compute: []types.MachinePool{
|
||||
{
|
||||
Name: "worker",
|
||||
Replicas: pointer.Int64Ptr(0),
|
||||
Hyperthreading: types.HyperthreadingEnabled,
|
||||
Architecture: types.ArchitectureAMD64,
|
||||
},
|
||||
},
|
||||
Platform: types.Platform{None: &none.Platform{}},
|
||||
PullSecret: `{"auths":{"example.com":{"auth":"authorization value"}}}`,
|
||||
Publish: types.ExternalPublishingStrategy,
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
mockCtrl := gomock.NewController(t)
|
||||
defer mockCtrl.Finish()
|
||||
|
||||
fileFetcher := mock.NewMockFileFetcher(mockCtrl)
|
||||
fileFetcher.EXPECT().FetchByName(installConfigFilename).
|
||||
Return(
|
||||
&asset.File{
|
||||
Filename: installConfigFilename,
|
||||
Data: []byte(tc.data)},
|
||||
tc.fetchError,
|
||||
).MaxTimes(2)
|
||||
|
||||
asset := &OptionalInstallConfig{}
|
||||
found, err := asset.Load(fileFetcher)
|
||||
assert.Equal(t, tc.expectedFound, found, "unexpected found value returned from Load")
|
||||
if tc.expectedError != "" {
|
||||
assert.Equal(t, tc.expectedError, err.Error())
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
if tc.expectedFound {
|
||||
assert.Equal(t, tc.expectedConfig, asset.Config, "unexpected Config in InstallConfig")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
142
pkg/asset/agent/manifests/agent.go
Normal file
@@ -0,0 +1,142 @@
|
||||
package manifests
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
hiveext "github.com/openshift/assisted-service/api/hiveextension/v1beta1"
|
||||
aiv1beta1 "github.com/openshift/assisted-service/api/v1beta1"
|
||||
"github.com/openshift/assisted-service/models"
|
||||
hivev1 "github.com/openshift/hive/apis/hive/v1"
|
||||
"github.com/openshift/installer/pkg/asset"
|
||||
"github.com/pkg/errors"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/util/validation/field"
|
||||
)
|
||||
|
||||
const (
|
||||
// This could be changed to "cluster-manifests" once all the agent code has been migrated to using
// assets (and no longer reads from the hard-coded "manifests" relative path)
|
||||
clusterManifestDir = "cluster-manifests"
|
||||
)
|
||||
|
||||
var (
|
||||
_ asset.WritableAsset = (*AgentManifests)(nil)
|
||||
)
|
||||
|
||||
// AgentManifests generates all the required manifests by the agent installer.
|
||||
type AgentManifests struct {
|
||||
FileList []*asset.File
|
||||
|
||||
PullSecret *corev1.Secret
|
||||
InfraEnv *aiv1beta1.InfraEnv
|
||||
StaticNetworkConfigs []*models.HostStaticNetworkConfig
|
||||
NMStateConfigs []*aiv1beta1.NMStateConfig
|
||||
AgentClusterInstall *hiveext.AgentClusterInstall
|
||||
ClusterDeployment *hivev1.ClusterDeployment
|
||||
ClusterImageSet *hivev1.ClusterImageSet
|
||||
}
|
||||
|
||||
// Name returns a human friendly name.
|
||||
func (m *AgentManifests) Name() string {
|
||||
return "Agent Manifests"
|
||||
}
|
||||
|
||||
// Dependencies returns all of the dependencies directly needed the asset.
|
||||
func (m *AgentManifests) Dependencies() []asset.Asset {
|
||||
return []asset.Asset{
|
||||
&AgentPullSecret{},
|
||||
&InfraEnv{},
|
||||
&NMStateConfig{},
|
||||
&AgentClusterInstall{},
|
||||
&ClusterDeployment{},
|
||||
&ClusterImageSet{},
|
||||
}
|
||||
}
|
||||
|
||||
// Generate generates the respective manifest files.
|
||||
func (m *AgentManifests) Generate(dependencies asset.Parents) error {
|
||||
for _, a := range []asset.WritableAsset{
|
||||
&AgentPullSecret{},
|
||||
&InfraEnv{},
|
||||
&NMStateConfig{},
|
||||
&AgentClusterInstall{},
|
||||
&ClusterDeployment{},
|
||||
&ClusterImageSet{},
|
||||
} {
|
||||
dependencies.Get(a)
|
||||
m.FileList = append(m.FileList, a.Files()...)
|
||||
|
||||
switch v := a.(type) {
|
||||
case *AgentPullSecret:
|
||||
m.PullSecret = v.Config
|
||||
case *InfraEnv:
|
||||
m.InfraEnv = v.Config
|
||||
case *NMStateConfig:
|
||||
m.StaticNetworkConfigs = append(m.StaticNetworkConfigs, v.StaticNetworkConfig...)
|
||||
m.NMStateConfigs = append(m.NMStateConfigs, v.Config...)
|
||||
case *AgentClusterInstall:
|
||||
m.AgentClusterInstall = v.Config
|
||||
case *ClusterDeployment:
|
||||
m.ClusterDeployment = v.Config
|
||||
case *ClusterImageSet:
|
||||
m.ClusterImageSet = v.Config
|
||||
}
|
||||
}
|
||||
|
||||
asset.SortFiles(m.FileList)
|
||||
|
||||
return m.finish()
|
||||
}
|
||||
|
||||
// Files returns the files generated by the asset.
|
||||
func (m *AgentManifests) Files() []*asset.File {
|
||||
return m.FileList
|
||||
}
|
||||
|
||||
// Load currently does nothing
|
||||
func (m *AgentManifests) Load(f asset.FileFetcher) (bool, error) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// GetPullSecretData returns the content of the pull secret
|
||||
func (m *AgentManifests) GetPullSecretData() string {
|
||||
return m.PullSecret.StringData[".dockerconfigjson"]
|
||||
}
|
||||
|
||||
func (m *AgentManifests) finish() error {
|
||||
if err := m.validateAgentManifests().ToAggregate(); err != nil {
|
||||
return errors.Wrapf(err, "invalid agent configuration")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *AgentManifests) validateAgentManifests() field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
|
||||
if err := m.validateNMStateLabelSelector(); err != nil {
|
||||
allErrs = append(allErrs, err...)
|
||||
}
|
||||
|
||||
return allErrs
|
||||
}
|
||||
|
||||
func (m *AgentManifests) validateNMStateLabelSelector() field.ErrorList {
|
||||
|
||||
var allErrs field.ErrorList
|
||||
|
||||
fieldPath := field.NewPath("Spec", "NMStateConfigLabelSelector", "MatchLabels")
|
||||
|
||||
for _, networkConfig := range m.NMStateConfigs {
|
||||
if !reflect.DeepEqual(m.InfraEnv.Spec.NMStateConfigLabelSelector.MatchLabels, networkConfig.ObjectMeta.Labels) {
|
||||
allErrs = append(allErrs, field.Required(fieldPath, fmt.Sprintf("infraEnv and %s.NMStateConfig labels do not match. Expected: %s Found: %s",
|
||||
networkConfig.Name,
|
||||
m.InfraEnv.Spec.NMStateConfigLabelSelector.MatchLabels,
|
||||
networkConfig.ObjectMeta.Labels)))
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return allErrs
|
||||
}
|
||||
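For context, a minimal sketch (not part of this change) of how AgentManifests above is typically driven: the parent assets are supplied through asset.Parents, as the test below does, and the aggregated pull secret is read back with GetPullSecretData.

// Sketch only: generate AgentManifests from already-populated parent assets.
func exampleGenerateAgentManifests(parents asset.Parents) (string, error) {
	m := &AgentManifests{}
	if err := m.Generate(parents); err != nil {
		return "", err
	}
	// ".dockerconfigjson" is the key populated by the AgentPullSecret parent.
	return m.GetPullSecretData(), nil
}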
123
pkg/asset/agent/manifests/agent_test.go
Normal file
@@ -0,0 +1,123 @@
|
||||
package manifests
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
hiveext "github.com/openshift/assisted-service/api/hiveextension/v1beta1"
|
||||
aiv1beta1 "github.com/openshift/assisted-service/api/v1beta1"
|
||||
"github.com/openshift/assisted-service/models"
|
||||
hivev1 "github.com/openshift/hive/apis/hive/v1"
|
||||
"github.com/stretchr/testify/assert"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/openshift/installer/pkg/asset"
|
||||
)
|
||||
|
||||
func TestAgentManifests_Generate(t *testing.T) {
|
||||
|
||||
fakeSecret := &corev1.Secret{
|
||||
ObjectMeta: v1.ObjectMeta{Name: "fake-secret"},
|
||||
}
|
||||
fakeInfraEnv := &aiv1beta1.InfraEnv{
|
||||
ObjectMeta: v1.ObjectMeta{Name: "fake-infraEnv"},
|
||||
}
|
||||
fakeStaticNetworkConfig := []*models.HostStaticNetworkConfig{{NetworkYaml: "some-yaml"}}
|
||||
fakeNMStatConfig := []*aiv1beta1.NMStateConfig{{ObjectMeta: v1.ObjectMeta{Name: "fake-nmState"}}}
|
||||
fakeAgentClusterInstall := &hiveext.AgentClusterInstall{ObjectMeta: v1.ObjectMeta{Name: "fake-agentClusterInstall"}}
|
||||
fakeClusterDeployment := &hivev1.ClusterDeployment{ObjectMeta: v1.ObjectMeta{Name: "fake-clusterDeployment"}}
|
||||
fakeClusterImageSet := &hivev1.ClusterImageSet{ObjectMeta: v1.ObjectMeta{Name: "fake-clusterImageSet"}}
|
||||
|
||||
tests := []struct {
|
||||
Name string
|
||||
Assets []asset.WritableAsset
|
||||
ExpectedPullSecret *corev1.Secret
|
||||
ExpectedInfraEnv *aiv1beta1.InfraEnv
|
||||
ExpectedStaticNetworkConfig []*models.HostStaticNetworkConfig
|
||||
ExpectedNMStateConfig []*aiv1beta1.NMStateConfig
|
||||
ExpectedAgentClusterInstall *hiveext.AgentClusterInstall
|
||||
ExpectedClusterDeployment *hivev1.ClusterDeployment
|
||||
ExpectedClusterImageSet *hivev1.ClusterImageSet
|
||||
ExpectedError string
|
||||
}{
|
||||
{
|
||||
Name: "default",
|
||||
Assets: []asset.WritableAsset{
|
||||
&AgentPullSecret{Config: fakeSecret},
|
||||
&InfraEnv{Config: fakeInfraEnv},
|
||||
&NMStateConfig{
|
||||
StaticNetworkConfig: fakeStaticNetworkConfig,
|
||||
Config: fakeNMStatConfig,
|
||||
},
|
||||
&AgentClusterInstall{Config: fakeAgentClusterInstall},
|
||||
&ClusterDeployment{Config: fakeClusterDeployment},
|
||||
&ClusterImageSet{Config: fakeClusterImageSet},
|
||||
},
|
||||
ExpectedPullSecret: fakeSecret,
|
||||
ExpectedInfraEnv: fakeInfraEnv,
|
||||
ExpectedStaticNetworkConfig: fakeStaticNetworkConfig,
|
||||
ExpectedNMStateConfig: fakeNMStatConfig,
|
||||
ExpectedAgentClusterInstall: fakeAgentClusterInstall,
|
||||
ExpectedClusterDeployment: fakeClusterDeployment,
|
||||
ExpectedClusterImageSet: fakeClusterImageSet,
|
||||
},
|
||||
{
|
||||
Name: "invalid-NMStateLabelSelector",
|
||||
Assets: []asset.WritableAsset{
|
||||
&AgentPullSecret{},
|
||||
&InfraEnv{Config: &aiv1beta1.InfraEnv{
|
||||
Spec: aiv1beta1.InfraEnvSpec{
|
||||
NMStateConfigLabelSelector: v1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"missing-label": "missing-label",
|
||||
},
|
||||
},
|
||||
},
|
||||
}},
|
||||
&NMStateConfig{
|
||||
StaticNetworkConfig: fakeStaticNetworkConfig,
|
||||
Config: fakeNMStatConfig,
|
||||
},
|
||||
&AgentClusterInstall{},
|
||||
&ClusterDeployment{},
|
||||
&ClusterImageSet{},
|
||||
},
|
||||
ExpectedError: "invalid agent configuration: Spec.NMStateConfigLabelSelector.MatchLabels: Required value: infraEnv and fake-nmState.NMStateConfig labels do not match. Expected: map[missing-label:missing-label] Found: map[]",
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.Name, func(t *testing.T) {
|
||||
m := &AgentManifests{}
|
||||
|
||||
fakeParent := asset.Parents{}
|
||||
for _, a := range tt.Assets {
|
||||
fakeParent.Add(a)
|
||||
}
|
||||
|
||||
err := m.Generate(fakeParent)
|
||||
if tt.ExpectedError != "" {
|
||||
assert.Equal(t, tt.ExpectedError, err.Error())
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
if tt.ExpectedPullSecret != nil {
|
||||
assert.Equal(t, tt.ExpectedPullSecret, m.PullSecret)
|
||||
}
|
||||
if tt.ExpectedInfraEnv != nil {
|
||||
assert.Equal(t, tt.ExpectedInfraEnv, m.InfraEnv)
|
||||
}
|
||||
if tt.ExpectedStaticNetworkConfig != nil {
|
||||
assert.Equal(t, tt.ExpectedStaticNetworkConfig, m.StaticNetworkConfigs)
|
||||
}
|
||||
if tt.ExpectedNMStateConfig != nil {
|
||||
assert.Equal(t, tt.ExpectedNMStateConfig, m.NMStateConfigs)
|
||||
}
|
||||
if tt.ExpectedClusterDeployment != nil {
|
||||
assert.Equal(t, tt.ExpectedClusterDeployment, m.ClusterDeployment)
|
||||
}
|
||||
if tt.ExpectedClusterImageSet != nil {
|
||||
assert.Equal(t, tt.ExpectedClusterImageSet, m.ClusterImageSet)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
174
pkg/asset/agent/manifests/agentclusterinstall.go
Normal file
@@ -0,0 +1,174 @@
|
||||
package manifests
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
hiveext "github.com/openshift/assisted-service/api/hiveextension/v1beta1"
|
||||
hivev1 "github.com/openshift/hive/apis/hive/v1"
|
||||
"github.com/openshift/installer/pkg/asset"
|
||||
"github.com/openshift/installer/pkg/asset/agent"
|
||||
"github.com/openshift/installer/pkg/ipnet"
|
||||
"github.com/openshift/installer/pkg/validate"
|
||||
"github.com/pkg/errors"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"sigs.k8s.io/yaml"
|
||||
)
|
||||
|
||||
var (
|
||||
agentClusterInstallFilename = filepath.Join(clusterManifestDir, "agent-cluster-install.yaml")
|
||||
)
|
||||
|
||||
// AgentClusterInstall generates the agent-cluster-install.yaml file.
|
||||
type AgentClusterInstall struct {
|
||||
File *asset.File
|
||||
Config *hiveext.AgentClusterInstall
|
||||
}
|
||||
|
||||
var _ asset.WritableAsset = (*AgentClusterInstall)(nil)
|
||||
|
||||
// Name returns a human friendly name for the asset.
|
||||
func (*AgentClusterInstall) Name() string {
|
||||
return "AgentClusterInstall Config"
|
||||
}
|
||||
|
||||
// Dependencies returns all of the dependencies directly needed to generate
|
||||
// the asset.
|
||||
func (*AgentClusterInstall) Dependencies() []asset.Asset {
|
||||
return []asset.Asset{
|
||||
&agent.OptionalInstallConfig{},
|
||||
}
|
||||
}
|
||||
|
||||
// Generate generates the AgentClusterInstall manifest.
|
||||
func (a *AgentClusterInstall) Generate(dependencies asset.Parents) error {
|
||||
installConfig := &agent.OptionalInstallConfig{}
|
||||
dependencies.Get(installConfig)
|
||||
|
||||
if installConfig.Config != nil {
|
||||
var numberOfWorkers int = 0
|
||||
for _, compute := range installConfig.Config.Compute {
|
||||
numberOfWorkers = numberOfWorkers + int(*compute.Replicas)
|
||||
}
|
||||
|
||||
clusterNetwork := []hiveext.ClusterNetworkEntry{}
|
||||
for _, cn := range installConfig.Config.Networking.ClusterNetwork {
|
||||
_, cidr, err := net.ParseCIDR(cn.CIDR.String())
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to parse ClusterNetwork CIDR")
|
||||
}
|
||||
err = validate.SubnetCIDR(cidr)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to validate ClusterNetwork CIDR")
|
||||
}
|
||||
|
||||
entry := hiveext.ClusterNetworkEntry{
|
||||
CIDR: cidr.String(),
|
||||
HostPrefix: cn.HostPrefix,
|
||||
}
|
||||
clusterNetwork = append(clusterNetwork, entry)
|
||||
}
|
||||
|
||||
serviceNetwork := []string{}
|
||||
for _, sn := range installConfig.Config.Networking.ServiceNetwork {
|
||||
cidr, err := ipnet.ParseCIDR(sn.String())
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to parse ServiceNetwork CIDR")
|
||||
}
|
||||
serviceNetwork = append(serviceNetwork, cidr.String())
|
||||
}
|
||||
|
||||
agentClusterInstall := &hiveext.AgentClusterInstall{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: getAgentClusterInstallName(installConfig),
|
||||
Namespace: getObjectMetaNamespace(installConfig),
|
||||
},
|
||||
Spec: hiveext.AgentClusterInstallSpec{
|
||||
ImageSetRef: &hivev1.ClusterImageSetReference{
|
||||
Name: getClusterImageSetReferenceName(),
|
||||
},
|
||||
ClusterDeploymentRef: corev1.LocalObjectReference{
|
||||
Name: getClusterDeploymentName(installConfig),
|
||||
},
|
||||
Networking: hiveext.Networking{
|
||||
ClusterNetwork: clusterNetwork,
|
||||
ServiceNetwork: serviceNetwork,
|
||||
},
|
||||
SSHPublicKey: strings.Trim(installConfig.Config.SSHKey, "|\n\t"),
|
||||
ProvisionRequirements: hiveext.ProvisionRequirements{
|
||||
ControlPlaneAgents: int(*installConfig.Config.ControlPlane.Replicas),
|
||||
WorkerAgents: numberOfWorkers,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
apiVIP, ingressVIP := getVIPs(&installConfig.Config.Platform)
|
||||
|
||||
// Set APIVIP and IngressVIP only for a non-SNO cluster on the baremetal and vSphere platforms.
// An SNO cluster is determined by the number of ControlPlaneAgents, which should be 1.
|
||||
if int(*installConfig.Config.ControlPlane.Replicas) > 1 && apiVIP != "" && ingressVIP != "" {
|
||||
agentClusterInstall.Spec.APIVIP = apiVIP
|
||||
agentClusterInstall.Spec.IngressVIP = ingressVIP
|
||||
}
|
||||
|
||||
a.Config = agentClusterInstall
|
||||
|
||||
agentClusterInstallData, err := yaml.Marshal(agentClusterInstall)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to marshal agent installer AgentClusterInstall")
|
||||
}
|
||||
|
||||
a.File = &asset.File{
|
||||
Filename: agentClusterInstallFilename,
|
||||
Data: agentClusterInstallData,
|
||||
}
|
||||
}
|
||||
return a.finish()
|
||||
}
|
||||
|
||||
// Files returns the files generated by the asset.
|
||||
func (a *AgentClusterInstall) Files() []*asset.File {
|
||||
if a.File != nil {
|
||||
return []*asset.File{a.File}
|
||||
}
|
||||
return []*asset.File{}
|
||||
}
|
||||
|
||||
// Load returns agentclusterinstall asset from the disk.
|
||||
func (a *AgentClusterInstall) Load(f asset.FileFetcher) (bool, error) {
|
||||
|
||||
agentClusterInstallFile, err := f.FetchByName(agentClusterInstallFilename)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return false, nil
|
||||
}
|
||||
return false, errors.Wrap(err, fmt.Sprintf("failed to load %s file", agentClusterInstallFilename))
|
||||
}
|
||||
|
||||
a.File = agentClusterInstallFile
|
||||
|
||||
agentClusterInstall := &hiveext.AgentClusterInstall{}
|
||||
if err := yaml.UnmarshalStrict(agentClusterInstallFile.Data, agentClusterInstall); err != nil {
|
||||
err = errors.Wrapf(err, "failed to unmarshal %s", agentClusterInstallFilename)
|
||||
return false, err
|
||||
}
|
||||
a.Config = agentClusterInstall
|
||||
|
||||
if err = a.finish(); err != nil {
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (a *AgentClusterInstall) finish() error {
|
||||
|
||||
if a.Config == nil {
|
||||
return errors.New("missing configuration or manifest file")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
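A small illustrative sketch (not part of this change) of the VIP rule applied in Generate above: the VIPs are only propagated for multi-node clusters where both values are present.

// Sketch only: the condition guarding the APIVIP/IngressVIP assignment above.
func shouldSetVIPs(controlPlaneReplicas int64, apiVIP, ingressVIP string) bool {
	return controlPlaneReplicas > 1 && apiVIP != "" && ingressVIP != ""
}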
282
pkg/asset/agent/manifests/agentclusterinstall_test.go
Normal file
@@ -0,0 +1,282 @@
|
||||
package manifests
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/golang/mock/gomock"
|
||||
hiveext "github.com/openshift/assisted-service/api/hiveextension/v1beta1"
|
||||
hivev1 "github.com/openshift/hive/apis/hive/v1"
|
||||
"github.com/openshift/installer/pkg/asset"
|
||||
|
||||
"github.com/openshift/installer/pkg/asset/agent"
|
||||
"github.com/openshift/installer/pkg/asset/mock"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/stretchr/testify/assert"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/yaml"
|
||||
)
|
||||
|
||||
func TestAgentClusterInstall_Generate(t *testing.T) {
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
dependencies []asset.Asset
|
||||
expectedError string
|
||||
expectedConfig *hiveext.AgentClusterInstall
|
||||
}{
|
||||
{
|
||||
name: "missing install config",
|
||||
dependencies: []asset.Asset{
|
||||
&agent.OptionalInstallConfig{},
|
||||
},
|
||||
expectedError: "missing configuration or manifest file",
|
||||
},
|
||||
{
|
||||
name: "valid configuration",
|
||||
dependencies: []asset.Asset{
|
||||
getValidOptionalInstallConfig(),
|
||||
},
|
||||
expectedConfig: &hiveext.AgentClusterInstall{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: getAgentClusterInstallName(getValidOptionalInstallConfig()),
|
||||
Namespace: getObjectMetaNamespace(getValidOptionalInstallConfig()),
|
||||
},
|
||||
Spec: hiveext.AgentClusterInstallSpec{
|
||||
ImageSetRef: &hivev1.ClusterImageSetReference{
|
||||
Name: getClusterImageSetReferenceName(),
|
||||
},
|
||||
ClusterDeploymentRef: corev1.LocalObjectReference{
|
||||
Name: getClusterDeploymentName(getValidOptionalInstallConfig()),
|
||||
},
|
||||
Networking: hiveext.Networking{
|
||||
ClusterNetwork: []hiveext.ClusterNetworkEntry{
|
||||
{
|
||||
CIDR: "192.168.111.0/24",
|
||||
HostPrefix: 23,
|
||||
},
|
||||
},
|
||||
ServiceNetwork: []string{"172.30.0.0/16"},
|
||||
},
|
||||
SSHPublicKey: strings.Trim(TestSSHKey, "|\n\t"),
|
||||
ProvisionRequirements: hiveext.ProvisionRequirements{
|
||||
ControlPlaneAgents: 3,
|
||||
WorkerAgents: 5,
|
||||
},
|
||||
APIVIP: "192.168.122.10",
|
||||
IngressVIP: "192.168.122.11",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
|
||||
parents := asset.Parents{}
|
||||
parents.Add(tc.dependencies...)
|
||||
|
||||
asset := &AgentClusterInstall{}
|
||||
err := asset.Generate(parents)
|
||||
|
||||
if tc.expectedError != "" {
|
||||
assert.Equal(t, tc.expectedError, err.Error())
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, tc.expectedConfig, asset.Config)
|
||||
assert.NotEmpty(t, asset.Files())
|
||||
|
||||
configFile := asset.Files()[0]
|
||||
assert.Equal(t, "cluster-manifests/agent-cluster-install.yaml", configFile.Filename)
|
||||
|
||||
var actualConfig hiveext.AgentClusterInstall
|
||||
err = yaml.Unmarshal(configFile.Data, &actualConfig)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, *tc.expectedConfig, actualConfig)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// func TestAgentClusterInstall_Generate(t *testing.T) {
|
||||
|
||||
// installConfig := &agent.OptionalInstallConfig{
|
||||
// Config: &types.InstallConfig{
|
||||
// ObjectMeta: v1.ObjectMeta{
|
||||
// Name: "cluster0-name",
|
||||
// Namespace: "cluster0-namespace",
|
||||
// },
|
||||
// SSHKey: "ssh-key",
|
||||
// ControlPlane: &types.MachinePool{
|
||||
// Name: "master",
|
||||
// Replicas: pointer.Int64Ptr(3),
|
||||
// Platform: types.MachinePoolPlatform{},
|
||||
// },
|
||||
// Compute: []types.MachinePool{
|
||||
// {
|
||||
// Name: "worker-machine-pool-1",
|
||||
// Replicas: pointer.Int64Ptr(2),
|
||||
// },
|
||||
// {
|
||||
// Name: "worker-machine-pool-2",
|
||||
// Replicas: pointer.Int64Ptr(3),
|
||||
// },
|
||||
// },
|
||||
// },
|
||||
// }
|
||||
|
||||
// parents := asset.Parents{}
|
||||
// parents.Add(installConfig)
|
||||
|
||||
// asset := &AgentClusterInstall{}
|
||||
// err := asset.Generate(parents)
|
||||
// assert.NoError(t, err)
|
||||
|
||||
// assert.NotEmpty(t, asset.Files())
|
||||
// aciFile := asset.Files()[0]
|
||||
// assert.Equal(t, "cluster-manifests/agent-cluster-install.yaml", aciFile.Filename)
|
||||
|
||||
// aci := &hiveext.AgentClusterInstall{}
|
||||
// err = yaml.Unmarshal(aciFile.Data, &aci)
|
||||
// assert.NoError(t, err)
|
||||
|
||||
// assert.Equal(t, "agent-cluster-install", aci.Name)
|
||||
// assert.Equal(t, "cluster0-namespace", aci.Namespace)
|
||||
// assert.Equal(t, "cluster0-name", aci.Spec.ClusterDeploymentRef.Name)
|
||||
// assert.Equal(t, 3, aci.Spec.ProvisionRequirements.ControlPlaneAgents)
|
||||
|
||||
// assert.Equal(t, 5, aci.Spec.ProvisionRequirements.WorkerAgents)
|
||||
// assert.Equal(t, "ssh-key", aci.Spec.SSHPublicKey)
|
||||
// }
|
||||
|
||||
func TestAgentClusterInstall_LoadedFromDisk(t *testing.T) {
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
data string
|
||||
fetchError error
|
||||
expectedFound bool
|
||||
expectedError bool
|
||||
expectedConfig *hiveext.AgentClusterInstall
|
||||
}{
|
||||
{
|
||||
name: "valid-config-file",
|
||||
data: `
|
||||
metadata:
|
||||
name: test-agent-cluster-install
|
||||
namespace: cluster0
|
||||
spec:
|
||||
apiVIP: 192.168.111.5
|
||||
ingressVIP: 192.168.111.4
|
||||
clusterDeploymentRef:
|
||||
name: ostest
|
||||
imageSetRef:
|
||||
name: openshift-v4.10.0
|
||||
networking:
|
||||
clusterNetwork:
|
||||
- cidr: 10.128.0.0/14
|
||||
hostPrefix: 23
|
||||
serviceNetwork:
|
||||
- 172.30.0.0/16
|
||||
provisionRequirements:
|
||||
controlPlaneAgents: 3
|
||||
workerAgents: 2
|
||||
sshPublicKey: |
|
||||
ssh-rsa AAAAmyKey`,
|
||||
expectedFound: true,
|
||||
expectedConfig: &hiveext.AgentClusterInstall{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-agent-cluster-install",
|
||||
Namespace: "cluster0",
|
||||
},
|
||||
Spec: hiveext.AgentClusterInstallSpec{
|
||||
APIVIP: "192.168.111.5",
|
||||
IngressVIP: "192.168.111.4",
|
||||
ClusterDeploymentRef: corev1.LocalObjectReference{
|
||||
Name: "ostest",
|
||||
},
|
||||
ImageSetRef: &hivev1.ClusterImageSetReference{
|
||||
Name: "openshift-v4.10.0",
|
||||
},
|
||||
Networking: hiveext.Networking{
|
||||
ClusterNetwork: []hiveext.ClusterNetworkEntry{
|
||||
{
|
||||
CIDR: "10.128.0.0/14",
|
||||
HostPrefix: 23,
|
||||
},
|
||||
},
|
||||
ServiceNetwork: []string{
|
||||
"172.30.0.0/16",
|
||||
},
|
||||
},
|
||||
ProvisionRequirements: hiveext.ProvisionRequirements{
|
||||
ControlPlaneAgents: 3,
|
||||
WorkerAgents: 2,
|
||||
},
|
||||
SSHPublicKey: "ssh-rsa AAAAmyKey",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "not-yaml",
|
||||
data: `This is not a yaml file`,
|
||||
expectedError: true,
|
||||
},
|
||||
{
|
||||
name: "empty",
|
||||
data: "",
|
||||
expectedFound: true,
|
||||
expectedConfig: &hiveext.AgentClusterInstall{},
|
||||
expectedError: false,
|
||||
},
|
||||
{
|
||||
name: "file-not-found",
|
||||
fetchError: &os.PathError{Err: os.ErrNotExist},
|
||||
},
|
||||
{
|
||||
name: "error-fetching-file",
|
||||
fetchError: errors.New("fetch failed"),
|
||||
expectedError: true,
|
||||
},
|
||||
{
|
||||
name: "unknown-field",
|
||||
data: `
|
||||
metadata:
|
||||
name: test-agent-cluster-install
|
||||
namespace: cluster0
|
||||
spec:
|
||||
wrongField: wrongValue`,
|
||||
expectedError: true,
|
||||
},
|
||||
}
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
|
||||
mockCtrl := gomock.NewController(t)
|
||||
defer mockCtrl.Finish()
|
||||
|
||||
fileFetcher := mock.NewMockFileFetcher(mockCtrl)
|
||||
fileFetcher.EXPECT().FetchByName(agentClusterInstallFilename).
|
||||
Return(
|
||||
&asset.File{
|
||||
Filename: agentClusterInstallFilename,
|
||||
Data: []byte(tc.data)},
|
||||
tc.fetchError,
|
||||
)
|
||||
|
||||
asset := &AgentClusterInstall{}
|
||||
found, err := asset.Load(fileFetcher)
|
||||
assert.Equal(t, tc.expectedFound, found, "unexpected found value returned from Load")
|
||||
if tc.expectedError {
|
||||
assert.Error(t, err, "expected error from Load")
|
||||
} else {
|
||||
assert.NoError(t, err, "unexpected error from Load")
|
||||
}
|
||||
if tc.expectedFound {
|
||||
assert.Equal(t, tc.expectedConfig, asset.Config, "unexpected Config in AgentClusterInstall")
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
}
|
||||
157
pkg/asset/agent/manifests/agentpullsecret.go
Normal file
@@ -0,0 +1,157 @@
|
||||
package manifests
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/validation/field"
|
||||
"sigs.k8s.io/yaml"
|
||||
|
||||
"github.com/openshift/installer/pkg/asset"
|
||||
"github.com/openshift/installer/pkg/asset/agent"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var (
|
||||
agentPullSecretName = "pull-secret"
|
||||
agentPullSecretFilename = filepath.Join(clusterManifestDir, fmt.Sprintf("%s.yaml", agentPullSecretName))
|
||||
)
|
||||
|
||||
// AgentPullSecret generates the pull-secret file used by the agent installer.
|
||||
type AgentPullSecret struct {
|
||||
File *asset.File
|
||||
Config *corev1.Secret
|
||||
}
|
||||
|
||||
var _ asset.WritableAsset = (*AgentPullSecret)(nil)
|
||||
|
||||
// Name returns a human friendly name for the asset.
|
||||
func (*AgentPullSecret) Name() string {
|
||||
return "Agent PullSecret"
|
||||
}
|
||||
|
||||
// Dependencies returns all of the dependencies directly needed to generate
|
||||
// the asset.
|
||||
func (*AgentPullSecret) Dependencies() []asset.Asset {
|
||||
return []asset.Asset{
|
||||
&agent.OptionalInstallConfig{},
|
||||
}
|
||||
}
|
||||
|
||||
// Generate generates the AgentPullSecret manifest.
|
||||
func (a *AgentPullSecret) Generate(dependencies asset.Parents) error {
|
||||
|
||||
installConfig := &agent.OptionalInstallConfig{}
|
||||
dependencies.Get(installConfig)
|
||||
|
||||
if installConfig.Config != nil {
|
||||
secret := &corev1.Secret{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: "v1",
|
||||
Kind: "Secret",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: getPullSecretName(installConfig),
|
||||
Namespace: getObjectMetaNamespace(installConfig),
|
||||
},
|
||||
StringData: map[string]string{
|
||||
".dockerconfigjson": installConfig.Config.PullSecret,
|
||||
},
|
||||
}
|
||||
a.Config = secret
|
||||
|
||||
secretData, err := yaml.Marshal(secret)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to marshal agent secret")
|
||||
}
|
||||
|
||||
a.File = &asset.File{
|
||||
Filename: agentPullSecretFilename,
|
||||
Data: secretData,
|
||||
}
|
||||
}
|
||||
|
||||
return a.finish()
|
||||
}
|
||||
|
||||
// Files returns the files generated by the asset.
|
||||
func (a *AgentPullSecret) Files() []*asset.File {
|
||||
if a.File != nil {
|
||||
return []*asset.File{a.File}
|
||||
}
|
||||
return []*asset.File{}
|
||||
}
|
||||
|
||||
// Load returns the asset from disk.
|
||||
func (a *AgentPullSecret) Load(f asset.FileFetcher) (bool, error) {
|
||||
file, err := f.FetchByName(agentPullSecretFilename)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return false, nil
|
||||
}
|
||||
return false, errors.Wrap(err, fmt.Sprintf("failed to load %s file", agentPullSecretFilename))
|
||||
}
|
||||
|
||||
config := &corev1.Secret{}
|
||||
if err := yaml.UnmarshalStrict(file.Data, config); err != nil {
|
||||
return false, errors.Wrapf(err, "failed to unmarshal %s", agentPullSecretFilename)
|
||||
}
|
||||
|
||||
a.File, a.Config = file, config
|
||||
if err = a.finish(); err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (a *AgentPullSecret) finish() error {
|
||||
|
||||
if a.Config == nil {
|
||||
return errors.New("missing configuration or manifest file")
|
||||
}
|
||||
|
||||
if err := a.validatePullSecret().ToAggregate(); err != nil {
|
||||
return errors.Wrapf(err, "invalid PullSecret configuration")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *AgentPullSecret) validatePullSecret() field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
|
||||
if err := a.validateSecretIsNotEmpty(); err != nil {
|
||||
allErrs = append(allErrs, err...)
|
||||
}
|
||||
|
||||
return allErrs
|
||||
}
|
||||
|
||||
func (a *AgentPullSecret) validateSecretIsNotEmpty() field.ErrorList {
|
||||
|
||||
var allErrs field.ErrorList
|
||||
|
||||
fieldPath := field.NewPath("StringData")
|
||||
|
||||
if len(a.Config.StringData) == 0 {
|
||||
allErrs = append(allErrs, field.Required(fieldPath, "the pull secret is empty"))
|
||||
return allErrs
|
||||
}
|
||||
|
||||
pullSecret, ok := a.Config.StringData[".dockerconfigjson"]
|
||||
if !ok {
|
||||
allErrs = append(allErrs, field.Required(fieldPath, "the pull secret key '.dockerconfigjson' is not defined"))
|
||||
return allErrs
|
||||
}
|
||||
|
||||
if pullSecret == "" {
|
||||
allErrs = append(allErrs, field.Required(fieldPath, "the pull secret does not contain any data"))
|
||||
return allErrs
|
||||
}
|
||||
|
||||
return allErrs
|
||||
}
|
||||
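For reference, an illustrative sketch (not part of this change) of the minimal Secret shape that the validation above accepts: StringData must carry a non-empty ".dockerconfigjson" entry. The example auth value is taken from the install-config tests earlier in this change.

// Sketch only: the smallest Secret that passes validatePullSecret above.
func exampleValidPullSecret() *corev1.Secret {
	return &corev1.Secret{
		StringData: map[string]string{
			".dockerconfigjson": `{"auths":{"example.com":{"auth":"authorization value"}}}`,
		},
	}
}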
197
pkg/asset/agent/manifests/agentpullsecret_test.go
Normal file
@@ -0,0 +1,197 @@
|
||||
package manifests
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/golang/mock/gomock"
|
||||
"github.com/openshift/installer/pkg/asset"
|
||||
"github.com/openshift/installer/pkg/asset/agent"
|
||||
"github.com/openshift/installer/pkg/asset/mock"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/stretchr/testify/assert"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/yaml"
|
||||
)
|
||||
|
||||
func TestAgentPullSecret_Generate(t *testing.T) {
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
dependencies []asset.Asset
|
||||
expectedError string
|
||||
expectedConfig *corev1.Secret
|
||||
}{
|
||||
{
|
||||
name: "missing install config",
|
||||
dependencies: []asset.Asset{
|
||||
&agent.OptionalInstallConfig{},
|
||||
},
|
||||
expectedError: "missing configuration or manifest file",
|
||||
},
|
||||
{
|
||||
name: "valid configuration",
|
||||
dependencies: []asset.Asset{
|
||||
getValidOptionalInstallConfig(),
|
||||
},
|
||||
expectedConfig: &corev1.Secret{
|
||||
TypeMeta: v1.TypeMeta{
|
||||
Kind: "Secret",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: getPullSecretName(getValidOptionalInstallConfig()),
|
||||
Namespace: getObjectMetaNamespace(getValidOptionalInstallConfig()),
|
||||
},
|
||||
StringData: map[string]string{
|
||||
".dockerconfigjson": TestSecret,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
|
||||
parents := asset.Parents{}
|
||||
parents.Add(tc.dependencies...)
|
||||
|
||||
asset := &AgentPullSecret{}
|
||||
err := asset.Generate(parents)
|
||||
|
||||
if tc.expectedError != "" {
|
||||
assert.Equal(t, tc.expectedError, err.Error())
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, tc.expectedConfig, asset.Config)
|
||||
assert.NotEmpty(t, asset.Files())
|
||||
|
||||
configFile := asset.Files()[0]
|
||||
assert.Equal(t, "cluster-manifests/pull-secret.yaml", configFile.Filename)
|
||||
|
||||
var actualConfig corev1.Secret
|
||||
err = yaml.Unmarshal(configFile.Data, &actualConfig)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, *tc.expectedConfig, actualConfig)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestAgentPullSecret_LoadedFromDisk(t *testing.T) {
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
data string
|
||||
fetchError error
|
||||
expectedFound bool
|
||||
expectedError string
|
||||
expectedConfig *corev1.Secret
|
||||
}{
|
||||
{
|
||||
name: "valid-config-file",
|
||||
data: `
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: pull-secret
|
||||
namespace: cluster-0
|
||||
stringData:
|
||||
.dockerconfigjson: c3VwZXItc2VjcmV0Cg==`,
|
||||
expectedFound: true,
|
||||
expectedConfig: &corev1.Secret{
|
||||
TypeMeta: v1.TypeMeta{
|
||||
Kind: "Secret",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "pull-secret",
|
||||
Namespace: "cluster-0",
|
||||
},
|
||||
StringData: map[string]string{
|
||||
".dockerconfigjson": "c3VwZXItc2VjcmV0Cg==",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "not-yaml",
|
||||
data: `This is not a yaml file`,
|
||||
expectedError: "failed to unmarshal cluster-manifests/pull-secret.yaml: error unmarshaling JSON: while decoding JSON: json: cannot unmarshal string into Go value of type v1.Secret",
|
||||
},
|
||||
{
|
||||
name: "empty",
|
||||
data: "",
|
||||
expectedError: "invalid PullSecret configuration: StringData: Required value: the pull secret is empty",
|
||||
},
|
||||
{
|
||||
name: "missing-string-data",
|
||||
data: `
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: pull-secret
|
||||
namespace: cluster-0`,
|
||||
expectedError: "invalid PullSecret configuration: StringData: Required value: the pull secret is empty",
|
||||
},
|
||||
{
|
||||
name: "missing-secret-key",
|
||||
data: `
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: pull-secret
|
||||
namespace: cluster-0
|
||||
stringData:
|
||||
.dockerconfigjson:`,
|
||||
expectedError: "invalid PullSecret configuration: StringData: Required value: the pull secret does not contain any data",
|
||||
},
|
||||
{
|
||||
name: "file-not-found",
|
||||
fetchError: &os.PathError{Err: os.ErrNotExist},
|
||||
},
|
||||
{
|
||||
name: "error-fetching-file",
|
||||
fetchError: errors.New("fetch failed"),
|
||||
expectedError: "failed to load cluster-manifests/pull-secret.yaml file: fetch failed",
|
||||
},
|
||||
{
|
||||
name: "unknown-field",
|
||||
data: `
|
||||
metadata:
|
||||
name: pull-secret
|
||||
namespace: cluster0
|
||||
spec:
|
||||
wrongField: wrongValue`,
|
||||
expectedError: "failed to unmarshal cluster-manifests/pull-secret.yaml: error converting YAML to JSON: yaml: line 2: found character that cannot start any token",
|
||||
},
|
||||
}
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
|
||||
mockCtrl := gomock.NewController(t)
|
||||
defer mockCtrl.Finish()
|
||||
|
||||
fileFetcher := mock.NewMockFileFetcher(mockCtrl)
|
||||
fileFetcher.EXPECT().FetchByName(agentPullSecretFilename).
|
||||
Return(
|
||||
&asset.File{
|
||||
Filename: agentPullSecretFilename,
|
||||
Data: []byte(tc.data)},
|
||||
tc.fetchError,
|
||||
)
|
||||
|
||||
asset := &AgentPullSecret{}
|
||||
found, err := asset.Load(fileFetcher)
|
||||
assert.Equal(t, tc.expectedFound, found, "unexpected found value returned from Load")
|
||||
if tc.expectedError != "" {
|
||||
assert.Equal(t, tc.expectedError, err.Error())
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
if tc.expectedFound {
|
||||
assert.Equal(t, tc.expectedConfig, asset.Config, "unexpected Config in AgentPullSecret")
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
}
|
||||
128
pkg/asset/agent/manifests/clusterdeployment.go
Normal file
@@ -0,0 +1,128 @@
|
||||
package manifests
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
hivev1 "github.com/openshift/hive/apis/hive/v1"
|
||||
"github.com/pkg/errors"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"sigs.k8s.io/yaml"
|
||||
|
||||
"github.com/openshift/installer/pkg/asset"
|
||||
"github.com/openshift/installer/pkg/asset/agent"
|
||||
)
|
||||
|
||||
var (
|
||||
clusterDeploymentFilename = filepath.Join(clusterManifestDir, "cluster-deployment.yaml")
|
||||
)
|
||||
|
||||
// ClusterDeployment generates the cluster-deployment.yaml file.
|
||||
type ClusterDeployment struct {
|
||||
File *asset.File
|
||||
Config *hivev1.ClusterDeployment
|
||||
}
|
||||
|
||||
var _ asset.WritableAsset = (*ClusterDeployment)(nil)
|
||||
|
||||
// Name returns a human friendly name for the asset.
|
||||
func (*ClusterDeployment) Name() string {
|
||||
return "ClusterDeployment Config"
|
||||
}
|
||||
|
||||
// Dependencies returns all of the dependencies directly needed to generate
|
||||
// the asset.
|
||||
func (*ClusterDeployment) Dependencies() []asset.Asset {
|
||||
return []asset.Asset{
|
||||
&agent.OptionalInstallConfig{},
|
||||
}
|
||||
}
|
||||
|
||||
// Generate generates the ClusterDeployment manifest.
|
||||
func (cd *ClusterDeployment) Generate(dependencies asset.Parents) error {
|
||||
installConfig := &agent.OptionalInstallConfig{}
|
||||
dependencies.Get(installConfig)
|
||||
|
||||
if installConfig.Config != nil {
|
||||
clusterDeployment := &hivev1.ClusterDeployment{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "ClusterDeployment",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: getClusterDeploymentName(installConfig),
|
||||
Namespace: getObjectMetaNamespace(installConfig),
|
||||
},
|
||||
Spec: hivev1.ClusterDeploymentSpec{
|
||||
ClusterName: getClusterDeploymentName(installConfig),
|
||||
BaseDomain: installConfig.Config.BaseDomain,
|
||||
PullSecretRef: &corev1.LocalObjectReference{
|
||||
Name: getPullSecretName(installConfig),
|
||||
},
|
||||
ClusterInstallRef: &hivev1.ClusterInstallLocalReference{
|
||||
Group: "extensions.hive.openshift.io",
|
||||
Version: "v1beta1",
|
||||
Kind: "AgentClusterInstall",
|
||||
Name: getAgentClusterInstallName(installConfig),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
cd.Config = clusterDeployment
|
||||
clusterDeploymentData, err := yaml.Marshal(clusterDeployment)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to marshal agent installer ClusterDeployment")
|
||||
}
|
||||
|
||||
cd.File = &asset.File{
|
||||
Filename: clusterDeploymentFilename,
|
||||
Data: clusterDeploymentData,
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return cd.finish()
|
||||
}
|
||||
|
||||
// Files returns the files generated by the asset.
|
||||
func (cd *ClusterDeployment) Files() []*asset.File {
|
||||
if cd.File != nil {
|
||||
return []*asset.File{cd.File}
|
||||
}
|
||||
return []*asset.File{}
|
||||
}
|
||||
|
||||
// Load returns ClusterDeployment asset from the disk.
|
||||
func (cd *ClusterDeployment) Load(f asset.FileFetcher) (bool, error) {
|
||||
|
||||
file, err := f.FetchByName(clusterDeploymentFilename)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return false, nil
|
||||
}
|
||||
return false, errors.Wrap(err, fmt.Sprintf("failed to load %s file", clusterDeploymentFilename))
|
||||
}
|
||||
|
||||
config := &hivev1.ClusterDeployment{}
|
||||
if err := yaml.UnmarshalStrict(file.Data, config); err != nil {
|
||||
return false, errors.Wrapf(err, "failed to unmarshal %s", clusterDeploymentFilename)
|
||||
}
|
||||
|
||||
cd.File, cd.Config = file, config
|
||||
if err = cd.finish(); err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (cd *ClusterDeployment) finish() error {
|
||||
|
||||
if cd.Config == nil {
|
||||
return errors.New("missing configuration or manifest file")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
219
pkg/asset/agent/manifests/clusterdeployment_test.go
Normal file
@@ -0,0 +1,219 @@
package manifests

import (
	"os"
	"testing"

	"github.com/golang/mock/gomock"
	hivev1 "github.com/openshift/hive/apis/hive/v1"
	hivev1agent "github.com/openshift/hive/apis/hive/v1/agent"
	"github.com/openshift/installer/pkg/asset"
	"github.com/openshift/installer/pkg/asset/agent"
	"github.com/openshift/installer/pkg/asset/mock"
	"github.com/pkg/errors"
	"github.com/stretchr/testify/assert"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/yaml"
)

func TestClusterDeployment_Generate(t *testing.T) {

	cases := []struct {
		name string
		dependencies []asset.Asset
		expectedError string
		expectedConfig *hivev1.ClusterDeployment
	}{
		{
			name: "missing config",
			dependencies: []asset.Asset{
				&agent.OptionalInstallConfig{},
			},
			expectedError: "missing configuration or manifest file",
		},
		{
			name: "valid configurations",
			dependencies: []asset.Asset{
				getValidOptionalInstallConfig(),
			},
			expectedConfig: &hivev1.ClusterDeployment{
				TypeMeta: metav1.TypeMeta{
					Kind: "ClusterDeployment",
					APIVersion: "v1",
				},
				ObjectMeta: metav1.ObjectMeta{
					Name: getClusterDeploymentName(getValidOptionalInstallConfig()),
					Namespace: getObjectMetaNamespace(getValidOptionalInstallConfig()),
				},
				Spec: hivev1.ClusterDeploymentSpec{
					ClusterName: getClusterDeploymentName(getValidOptionalInstallConfig()),
					BaseDomain: "testing.com",
					PullSecretRef: &corev1.LocalObjectReference{
						Name: getPullSecretName(getValidOptionalInstallConfig()),
					},
					ClusterInstallRef: &hivev1.ClusterInstallLocalReference{
						Group: "extensions.hive.openshift.io",
						Version: "v1beta1",
						Kind: "AgentClusterInstall",
						Name: getAgentClusterInstallName(getValidOptionalInstallConfig()),
					},
				},
			},
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {

			parents := asset.Parents{}
			parents.Add(tc.dependencies...)

			asset := &ClusterDeployment{}
			err := asset.Generate(parents)

			if tc.expectedError != "" {
				assert.Equal(t, tc.expectedError, err.Error())
			} else {
				assert.NoError(t, err)
				assert.Equal(t, tc.expectedConfig, asset.Config)
				assert.NotEmpty(t, asset.Files())

				configFile := asset.Files()[0]
				assert.Equal(t, "cluster-manifests/cluster-deployment.yaml", configFile.Filename)

				var actualConfig hivev1.ClusterDeployment
				err = yaml.Unmarshal(configFile.Data, &actualConfig)
				assert.NoError(t, err)
				assert.Equal(t, *tc.expectedConfig, actualConfig)
			}
		})
	}

}

func TestClusterDeployment_LoadedFromDisk(t *testing.T) {

	cases := []struct {
		name string
		data string
		fetchError error
		expectedFound bool
		expectedError bool
		expectedConfig *hivev1.ClusterDeployment
	}{
		{
			name: "valid-config-file",
			data: `
metadata:
  name: compact-cluster
  namespace: cluster0
spec:
  baseDomain: agent.example.com
  clusterInstallRef:
    group: extensions.hive.openshift.io
    kind: AgentClusterInstall
    name: test-agent-cluster-install
    version: v1beta1
  clusterName: compact-cluster
  controlPlaneConfig:
    servingCertificates: {}
  platform:
    agentBareMetal:
      agentSelector:
        matchLabels:
          bla: aaa
  pullSecretRef:
    name: pull-secret`,
			expectedFound: true,
			expectedConfig: &hivev1.ClusterDeployment{
				ObjectMeta: metav1.ObjectMeta{
					Name: "compact-cluster",
					Namespace: "cluster0",
				},
				Spec: hivev1.ClusterDeploymentSpec{
					BaseDomain: "agent.example.com",
					ClusterInstallRef: &hivev1.ClusterInstallLocalReference{
						Group: "extensions.hive.openshift.io",
						Kind: "AgentClusterInstall",
						Name: "test-agent-cluster-install",
						Version: "v1beta1",
					},
					ClusterName: "compact-cluster",
					ControlPlaneConfig: hivev1.ControlPlaneConfigSpec{},
					Platform: hivev1.Platform{
						AgentBareMetal: &hivev1agent.BareMetalPlatform{
							AgentSelector: metav1.LabelSelector{
								MatchLabels: map[string]string{
									"bla": "aaa",
								},
							},
						},
					},
					PullSecretRef: &corev1.LocalObjectReference{
						Name: "pull-secret",
					},
				},
			},
		},
		{
			name: "not-yaml",
			data: `This is not a yaml file`,
			expectedError: true,
		},
		{
			name: "empty",
			data: "",
			expectedFound: true,
			expectedConfig: &hivev1.ClusterDeployment{},
			expectedError: false,
		},
		{
			name: "file-not-found",
			fetchError: &os.PathError{Err: os.ErrNotExist},
		},
		{
			name: "error-fetching-file",
			fetchError: errors.New("fetch failed"),
			expectedError: true,
		},
		{
			name: "unknown-field",
			data: `
metadata:
  name: cluster-deployment-bad
  namespace: cluster0
spec:
  wrongField: wrongValue`,
			expectedError: true,
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {

			mockCtrl := gomock.NewController(t)
			defer mockCtrl.Finish()

			fileFetcher := mock.NewMockFileFetcher(mockCtrl)
			fileFetcher.EXPECT().FetchByName(clusterDeploymentFilename).
				Return(
					&asset.File{
						Filename: clusterDeploymentFilename,
						Data: []byte(tc.data)},
					tc.fetchError,
				)

			asset := &ClusterDeployment{}
			found, err := asset.Load(fileFetcher)
			assert.Equal(t, tc.expectedFound, found, "unexpected found value returned from Load")
			if tc.expectedError {
				assert.Error(t, err, "expected error from Load")
			} else {
				assert.NoError(t, err, "unexpected error from Load")
			}
			if tc.expectedFound {
				assert.Equal(t, tc.expectedConfig, asset.Config, "unexpected Config in ClusterDeployment")
			}
		})
	}

}
154
pkg/asset/agent/manifests/clusterimageset.go
Normal file
@@ -0,0 +1,154 @@
package manifests

import (
	"fmt"
	"os"
	"path/filepath"

	hivev1 "github.com/openshift/hive/apis/hive/v1"
	"github.com/openshift/installer/pkg/asset"
	"github.com/openshift/installer/pkg/asset/agent"
	"github.com/openshift/installer/pkg/asset/releaseimage"
	"github.com/openshift/installer/pkg/version"
	"github.com/pkg/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/validation/field"
	"sigs.k8s.io/yaml"
)

var (
	clusterImageSetFilename = filepath.Join(clusterManifestDir, "cluster-image-set.yaml")
)

// ClusterImageSet generates the cluster-image-set.yaml file.
type ClusterImageSet struct {
	File *asset.File
	Config *hivev1.ClusterImageSet
}

var _ asset.WritableAsset = (*ClusterImageSet)(nil)

// Name returns a human friendly name for the asset.
func (*ClusterImageSet) Name() string {
	return "ClusterImageSet Config"
}

// Dependencies returns all of the dependencies directly needed to generate
// the asset.
func (*ClusterImageSet) Dependencies() []asset.Asset {
	return []asset.Asset{
		&releaseimage.Image{},
		&agent.OptionalInstallConfig{},
	}
}

// Generate generates the ClusterImageSet manifest.
func (a *ClusterImageSet) Generate(dependencies asset.Parents) error {

	releaseImage := &releaseimage.Image{}
	installConfig := &agent.OptionalInstallConfig{}
	dependencies.Get(releaseImage, installConfig)

	currentVersion, err := version.Version()
	if err != nil {
		return err
	}

	if installConfig.Config != nil {
		clusterImageSet := &hivev1.ClusterImageSet{
			ObjectMeta: metav1.ObjectMeta{
				Name: fmt.Sprintf("openshift-%s", currentVersion),
				Namespace: getObjectMetaNamespace(installConfig),
			},
			Spec: hivev1.ClusterImageSetSpec{
				ReleaseImage: releaseImage.PullSpec,
			},
		}
		a.Config = clusterImageSet

		configData, err := yaml.Marshal(clusterImageSet)
		if err != nil {
			return errors.Wrap(err, "failed to marshal agent cluster image set")
		}

		a.File = &asset.File{
			Filename: clusterImageSetFilename,
			Data: configData,
		}
	}

	return a.finish()
}

// Files returns the files generated by the asset.
func (a *ClusterImageSet) Files() []*asset.File {
	if a.File != nil {
		return []*asset.File{a.File}
	}
	return []*asset.File{}
}

// Load returns ClusterImageSet asset from the disk.
func (a *ClusterImageSet) Load(f asset.FileFetcher) (bool, error) {

	clusterImageSetFile, err := f.FetchByName(clusterImageSetFilename)
	if err != nil {
		if os.IsNotExist(err) {
			return false, nil
		}
		return false, errors.Wrap(err, fmt.Sprintf("failed to load %s file", clusterImageSetFilename))
	}

	a.File = clusterImageSetFile

	clusterImageSet := &hivev1.ClusterImageSet{}
	if err := yaml.UnmarshalStrict(clusterImageSetFile.Data, clusterImageSet); err != nil {
		err = errors.Wrapf(err, "failed to unmarshal %s", clusterImageSetFilename)
		return false, err
	}
	a.Config = clusterImageSet

	if err = a.finish(); err != nil {
		return false, err
	}
	return true, nil
}

func (a *ClusterImageSet) finish() error {

	if a.Config == nil {
		return errors.New("missing configuration or manifest file")
	}

	if err := a.validateClusterImageSet().ToAggregate(); err != nil {
		return errors.Wrapf(err, "invalid ClusterImageSet configuration")
	}

	return nil
}

func (a *ClusterImageSet) validateClusterImageSet() field.ErrorList {
	allErrs := field.ErrorList{}

	if err := a.validateReleaseVersion(); err != nil {
		allErrs = append(allErrs, err...)
	}

	return allErrs
}

func (a *ClusterImageSet) validateReleaseVersion() field.ErrorList {

	var allErrs field.ErrorList

	fieldPath := field.NewPath("Spec", "ReleaseImage")

	releaseImage := &releaseimage.Image{}
	releaseImage.Generate(asset.Parents{})

	if a.Config.Spec.ReleaseImage != releaseImage.PullSpec {
		allErrs = append(allErrs, field.Forbidden(fieldPath, fmt.Sprintf("value must be equal to %s", releaseImage.PullSpec)))
	}

	return allErrs
}
190
pkg/asset/agent/manifests/clusterimageset_test.go
Normal file
@@ -0,0 +1,190 @@
package manifests

import (
	"fmt"
	"os"
	"testing"

	"github.com/golang/mock/gomock"
	hivev1 "github.com/openshift/hive/apis/hive/v1"
	"github.com/openshift/installer/pkg/asset"
	"github.com/openshift/installer/pkg/asset/agent"
	"github.com/openshift/installer/pkg/asset/mock"
	"github.com/openshift/installer/pkg/asset/releaseimage"
	"github.com/pkg/errors"
	"github.com/stretchr/testify/assert"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/yaml"
)

func TestClusterImageSet_Generate(t *testing.T) {
	cases := []struct {
		name string
		dependencies []asset.Asset
		expectedError string
		expectedConfig *hivev1.ClusterImageSet
	}{
		{
			name: "missing install config",
			dependencies: []asset.Asset{
				&agent.OptionalInstallConfig{},
				&releaseimage.Image{},
			},
			expectedError: "missing configuration or manifest file",
		},
		{
			name: "invalid ClusterImageSet configuration",
			dependencies: []asset.Asset{
				getValidOptionalInstallConfig(),
				&releaseimage.Image{},
			},
			expectedError: "invalid ClusterImageSet configuration: Spec.ReleaseImage: Forbidden: value must be equal to " + TestReleaseImage,
		},
		{
			name: "valid configuration",
			dependencies: []asset.Asset{
				getValidOptionalInstallConfig(),
				&releaseimage.Image{
					PullSpec: TestReleaseImage,
				},
			},
			expectedConfig: &hivev1.ClusterImageSet{
				ObjectMeta: metav1.ObjectMeta{
					Name: "openshift-was not built correctly",
					Namespace: getObjectMetaNamespace(getValidOptionalInstallConfig()),
				},
				Spec: hivev1.ClusterImageSetSpec{
					ReleaseImage: TestReleaseImage,
				},
			},
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {

			parents := asset.Parents{}
			parents.Add(tc.dependencies...)

			asset := &ClusterImageSet{}
			err := asset.Generate(parents)

			if tc.expectedError != "" {
				assert.Equal(t, tc.expectedError, err.Error())
			} else {
				assert.NoError(t, err)
				assert.Equal(t, tc.expectedConfig, asset.Config)
				assert.NotEmpty(t, asset.Files())

				configFile := asset.Files()[0]
				assert.Equal(t, "cluster-manifests/cluster-image-set.yaml", configFile.Filename)

				var actualConfig hivev1.ClusterImageSet
				err = yaml.Unmarshal(configFile.Data, &actualConfig)
				assert.NoError(t, err)
				assert.Equal(t, *tc.expectedConfig, actualConfig)
			}

		})
	}

}

func TestClusterImageSet_LoadedFromDisk(t *testing.T) {

	currentRelease, err := releaseimage.Default()
	assert.NoError(t, err)

	cases := []struct {
		name string
		data string
		fetchError error
		expectedFound bool
		expectedError string
		expectedConfig *hivev1.ClusterImageSet
	}{
		{
			name: "valid-config-file",
			data: `
metadata:
  name: openshift-v4.10.0
spec:
  releaseImage: ` + currentRelease,
			expectedFound: true,
			expectedConfig: &hivev1.ClusterImageSet{
				ObjectMeta: metav1.ObjectMeta{
					Name: "openshift-v4.10.0",
				},
				Spec: hivev1.ClusterImageSetSpec{
					ReleaseImage: currentRelease,
				},
			},
		},
		{
			name: "different-version-not-supported",
			data: `
metadata:
  name: openshift-v4.10.0
spec:
  releaseImage: 99.999`,
			expectedError: fmt.Sprintf("invalid ClusterImageSet configuration: Spec.ReleaseImage: Forbidden: value must be equal to %s", currentRelease),
		},
		{
			name: "not-yaml",
			data: `This is not a yaml file`,
			expectedError: "failed to unmarshal cluster-manifests/cluster-image-set.yaml: error unmarshaling JSON: while decoding JSON: json: cannot unmarshal string into Go value of type v1.ClusterImageSet",
		},
		{
			name: "empty",
			data: "",
			expectedError: fmt.Sprintf("invalid ClusterImageSet configuration: Spec.ReleaseImage: Forbidden: value must be equal to %s", currentRelease),
		},
		{
			name: "file-not-found",
			fetchError: &os.PathError{Err: os.ErrNotExist},
		},
		{
			name: "error-fetching-file",
			fetchError: errors.New("fetch failed"),
			expectedError: "failed to load cluster-manifests/cluster-image-set.yaml file: fetch failed",
		},
		{
			name: "unknown-field",
			data: `
metadata:
  name: test-cluster-image-set
  namespace: cluster0
spec:
  wrongField: wrongValue`,
			expectedError: "failed to unmarshal cluster-manifests/cluster-image-set.yaml: error unmarshaling JSON: while decoding JSON: json: unknown field \"wrongField\"",
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {

			mockCtrl := gomock.NewController(t)
			defer mockCtrl.Finish()

			fileFetcher := mock.NewMockFileFetcher(mockCtrl)
			fileFetcher.EXPECT().FetchByName(clusterImageSetFilename).
				Return(
					&asset.File{
						Filename: clusterImageSetFilename,
						Data: []byte(tc.data)},
					tc.fetchError,
				)

			asset := &ClusterImageSet{}
			found, err := asset.Load(fileFetcher)
			assert.Equal(t, tc.expectedFound, found, "unexpected found value returned from Load")
			if tc.expectedError != "" {
				assert.Equal(t, tc.expectedError, err.Error())
			} else {
				assert.NoError(t, err)
			}
			if tc.expectedFound {
				assert.Equal(t, tc.expectedConfig, asset.Config, "unexpected Config in ClusterImageSet")
			}
		})
	}

}
68
pkg/asset/agent/manifests/common.go
Normal file
@@ -0,0 +1,68 @@
package manifests

import (
	"github.com/openshift/installer/pkg/asset/agent"
	"github.com/openshift/installer/pkg/asset/agent/agentconfig"
	"github.com/openshift/installer/pkg/types"
	"github.com/openshift/installer/pkg/version"
)

func getAgentClusterInstallName(ic *agent.OptionalInstallConfig) string {
	return ic.Config.ObjectMeta.Name
}

func getClusterDeploymentName(ic *agent.OptionalInstallConfig) string {
	return ic.Config.ObjectMeta.Name
}

func getInfraEnvName(ic *agent.OptionalInstallConfig) string {
	return ic.Config.ObjectMeta.Name
}

func getPullSecretName(ic *agent.OptionalInstallConfig) string {
	return ic.Config.ObjectMeta.Name + "-pull-secret"
}

func getObjectMetaNamespace(ic *agent.OptionalInstallConfig) string {
	return ic.Config.Namespace
}

func getNMStateConfigName(a *agentconfig.AgentConfig) string {
	return a.Config.ObjectMeta.Name
}

func getNMStateConfigNamespace(a *agentconfig.AgentConfig) string {
	return a.Config.Namespace
}

func getNMStateConfigLabelsFromOptionalInstallConfig(ic *agent.OptionalInstallConfig) map[string]string {
	return map[string]string{
		"infraenvs.agent-install.openshift.io": getInfraEnvName(ic),
	}
}

func getNMStateConfigLabelsFromAgentConfig(a *agentconfig.AgentConfig) map[string]string {
	return map[string]string{
		"infraenvs.agent-install.openshift.io": getNMStateConfigName(a),
	}
}

func getClusterImageSetReferenceName() string {
	versionString, _ := version.Version()
	return "openshift-" + versionString
}

// getVIPs returns a string representation of the platform's API VIP and Ingress VIP.
// It returns empty strings if the platform does not configure a VIP.
func getVIPs(p *types.Platform) (string, string) {
	switch {
	case p == nil:
		return "", ""
	case p.BareMetal != nil:
		return p.BareMetal.APIVIP, p.BareMetal.IngressVIP
	case p.VSphere != nil:
		return p.VSphere.APIVIP, p.VSphere.IngressVIP
	default:
		return "", ""
	}
}
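The getVIPs helper above only knows about the baremetal and vSphere platforms; any other platform falls through to empty strings. A minimal illustrative sketch of a caller, not part of this commit (the VIP values are made up, and the baremetal platform struct is assumed from the fields referenced above):

	// Hypothetical usage of getVIPs from within this package.
	platform := &types.Platform{
		BareMetal: &baremetal.Platform{
			APIVIP:     "192.168.111.5",
			IngressVIP: "192.168.111.4",
		},
	}
	apiVIP, ingressVIP := getVIPs(platform)
	// apiVIP == "192.168.111.5", ingressVIP == "192.168.111.4"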
60
pkg/asset/agent/manifests/extramanifests.go
Normal file
@@ -0,0 +1,60 @@
package manifests

import (
	"path/filepath"

	"github.com/openshift/installer/pkg/asset"
	"github.com/pkg/errors"
)

const (
	openshiftManifestDir = "openshift"
)

// ExtraManifests manages the additional manifests for cluster customization
type ExtraManifests struct {
	FileList []*asset.File
}

var (
	_ asset.WritableAsset = (*ExtraManifests)(nil)
)

// Name returns a human friendly name for the asset
func (em *ExtraManifests) Name() string {
	return "Extra Manifests"
}

// Dependencies returns all of the dependencies directly needed by the
// ExtraManifests asset
func (em *ExtraManifests) Dependencies() []asset.Asset {
	return []asset.Asset{}
}

// Generate is not required for ExtraManifests.
func (em *ExtraManifests) Generate(dependencies asset.Parents) error {
	return nil
}

// Files returns the files generated by the asset.
func (em *ExtraManifests) Files() []*asset.File {
	return em.FileList
}

// Load reads the asset files from disk.
func (em *ExtraManifests) Load(f asset.FileFetcher) (found bool, err error) {
	yamlFileList, err := f.FetchByPattern(filepath.Join(openshiftManifestDir, "*.yaml"))
	if err != nil {
		return false, errors.Wrap(err, "failed to load *.yaml files")
	}
	ymlFileList, err := f.FetchByPattern(filepath.Join(openshiftManifestDir, "*.yml"))
	if err != nil {
		return false, errors.Wrap(err, "failed to load *.yml files")
	}

	em.FileList = append(em.FileList, yamlFileList...)
	em.FileList = append(em.FileList, ymlFileList...)
	asset.SortFiles(em.FileList)

	return len(em.FileList) > 0, nil
}
125
pkg/asset/agent/manifests/extramanifests_test.go
Normal file
@@ -0,0 +1,125 @@
package manifests

import (
	"fmt"
	"os"
	"path/filepath"
	"testing"

	"github.com/golang/mock/gomock"
	"github.com/openshift/installer/pkg/asset"
	"github.com/openshift/installer/pkg/asset/mock"
	"github.com/stretchr/testify/assert"
)

func TestExtraManifests_Load(t *testing.T) {
	cases := []struct {
		name string
		files []string
		fetchError error

		expectedFound bool
		expectedFiles []string
		expectedError string
	}{
		{
			name: "no-extras",
			files: []string{},

			expectedFound: false,
			expectedFiles: []string{},
		},
		{
			name: "just-yaml",
			files: []string{"/openshift/test-configmap.yaml"},

			expectedFound: true,
			expectedFiles: []string{"/openshift/test-configmap.yaml"},
		},
		{
			name: "just-yml",
			files: []string{"/openshift/another-test-configmap.yml"},

			expectedFound: true,
			expectedFiles: []string{"/openshift/another-test-configmap.yml"},
		},
		{
			name: "mixed",
			files: []string{
				"/openshift/test-configmap.yaml",
				"/openshift/another-test-configmap.yml",
			},

			expectedFound: true,
			expectedFiles: []string{
				"/openshift/test-configmap.yaml",
				"/openshift/another-test-configmap.yml",
			},
		},
		{
			name: "error",
			fetchError: os.ErrNotExist,

			expectedError: "failed to load *.yaml files: file does not exist",
		},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {

			yamlFiles := []*asset.File{}
			ymlFiles := []*asset.File{}
			for _, f := range tc.files {
				assetFile := &asset.File{
					Filename: f,
					Data: []byte(f),
				}

				switch filepath.Ext(f) {
				case ".yaml":
					yamlFiles = append(yamlFiles, assetFile)
				case ".yml":
					ymlFiles = append(ymlFiles, assetFile)
				default:
					t.Error("extension not valid")
				}
			}

			mockCtrl := gomock.NewController(t)
			defer mockCtrl.Finish()

			fileFetcher := mock.NewMockFileFetcher(mockCtrl)
			fileFetcher.EXPECT().FetchByPattern("openshift/*.yaml").Return(
				yamlFiles,
				tc.fetchError,
			)
			if tc.fetchError == nil {
				fileFetcher.EXPECT().FetchByPattern("openshift/*.yml").Return(
					ymlFiles,
					nil,
				)
			}

			extraManifestsAsset := &ExtraManifests{}
			found, err := extraManifestsAsset.Load(fileFetcher)

			assert.Equal(t, tc.expectedFound, found)
			if tc.expectedError != "" {
				assert.Equal(t, tc.expectedError, err.Error())
			} else {
				assert.NoError(t, err)
				assert.Equal(t, len(tc.expectedFiles), len(extraManifestsAsset.FileList))
				for _, f := range tc.expectedFiles {
					found := false
					for _, a := range extraManifestsAsset.FileList {
						if a.Filename == f {
							found = true
							break
						}
					}
					assert.True(t, found, fmt.Sprintf("Expected file %s not found", f))
				}
			}
		})
	}
}
125
pkg/asset/agent/manifests/infraenv.go
Normal file
@@ -0,0 +1,125 @@
package manifests

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"

	aiv1beta1 "github.com/openshift/assisted-service/api/v1beta1"
	"github.com/pkg/errors"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/yaml"

	"github.com/openshift/installer/pkg/asset"
	"github.com/openshift/installer/pkg/asset/agent"
)

var (
	infraEnvFilename = filepath.Join(clusterManifestDir, "infraenv.yaml")
)

// InfraEnv generates the infraenv.yaml file.
type InfraEnv struct {
	File *asset.File
	Config *aiv1beta1.InfraEnv
}

var _ asset.WritableAsset = (*InfraEnv)(nil)

// Name returns a human friendly name for the asset.
func (*InfraEnv) Name() string {
	return "InfraEnv Config"
}

// Dependencies returns all of the dependencies directly needed to generate
// the asset.
func (*InfraEnv) Dependencies() []asset.Asset {
	return []asset.Asset{
		&agent.OptionalInstallConfig{},
	}
}

// Generate generates the InfraEnv manifest.
func (i *InfraEnv) Generate(dependencies asset.Parents) error {

	installConfig := &agent.OptionalInstallConfig{}
	dependencies.Get(installConfig)

	if installConfig.Config != nil {
		infraEnv := &aiv1beta1.InfraEnv{
			ObjectMeta: metav1.ObjectMeta{
				Name: getInfraEnvName(installConfig),
				Namespace: getObjectMetaNamespace(installConfig),
			},
			Spec: aiv1beta1.InfraEnvSpec{
				ClusterRef: &aiv1beta1.ClusterReference{
					Name: getClusterDeploymentName(installConfig),
					Namespace: getObjectMetaNamespace(installConfig),
				},
				SSHAuthorizedKey: strings.Trim(installConfig.Config.SSHKey, "|\n\t"),
				PullSecretRef: &corev1.LocalObjectReference{
					Name: getPullSecretName(installConfig),
				},
				NMStateConfigLabelSelector: metav1.LabelSelector{
					MatchLabels: getNMStateConfigLabelsFromOptionalInstallConfig(installConfig),
				},
			},
		}
		i.Config = infraEnv

		infraEnvData, err := yaml.Marshal(infraEnv)
		if err != nil {
			return errors.Wrap(err, "failed to marshal agent installer infraEnv")
		}

		i.File = &asset.File{
			Filename: infraEnvFilename,
			Data: infraEnvData,
		}
	}

	return i.finish()
}

// Files returns the files generated by the asset.
func (i *InfraEnv) Files() []*asset.File {
	if i.File != nil {
		return []*asset.File{i.File}
	}
	return []*asset.File{}
}

// Load returns infraenv asset from the disk.
func (i *InfraEnv) Load(f asset.FileFetcher) (bool, error) {

	file, err := f.FetchByName(infraEnvFilename)
	if err != nil {
		if os.IsNotExist(err) {
			return false, nil
		}
		return false, errors.Wrap(err, fmt.Sprintf("failed to load %s file", infraEnvFilename))
	}

	config := &aiv1beta1.InfraEnv{}
	if err := yaml.UnmarshalStrict(file.Data, config); err != nil {
		return false, errors.Wrapf(err, "failed to unmarshal %s", infraEnvFilename)
	}

	i.File, i.Config = file, config
	if err = i.finish(); err != nil {
		return false, err
	}

	return true, nil
}

func (i *InfraEnv) finish() error {

	if i.Config == nil {
		return errors.New("missing configuration or manifest file")
	}

	return nil
}
194
pkg/asset/agent/manifests/infraenv_test.go
Normal file
@@ -0,0 +1,194 @@
package manifests

import (
	"errors"
	"os"
	"strings"
	"testing"

	"github.com/golang/mock/gomock"
	aiv1beta1 "github.com/openshift/assisted-service/api/v1beta1"
	"github.com/stretchr/testify/assert"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/yaml"

	"github.com/openshift/installer/pkg/asset"
	"github.com/openshift/installer/pkg/asset/agent"
	"github.com/openshift/installer/pkg/asset/mock"
)

func TestInfraEnv_Generate(t *testing.T) {

	cases := []struct {
		name string
		dependencies []asset.Asset
		expectedError string
		expectedConfig *aiv1beta1.InfraEnv
	}{
		{
			name: "missing-config",
			dependencies: []asset.Asset{
				&agent.OptionalInstallConfig{},
			},
			expectedError: "missing configuration or manifest file",
		},
		{
			name: "valid configuration",
			dependencies: []asset.Asset{
				getValidOptionalInstallConfig(),
			},
			expectedConfig: &aiv1beta1.InfraEnv{
				ObjectMeta: metav1.ObjectMeta{
					Name: getInfraEnvName(getValidOptionalInstallConfig()),
					Namespace: getObjectMetaNamespace(getValidOptionalInstallConfig()),
				},
				Spec: aiv1beta1.InfraEnvSpec{
					ClusterRef: &aiv1beta1.ClusterReference{
						Name: getClusterDeploymentName(getValidOptionalInstallConfig()),
						Namespace: getObjectMetaNamespace(getValidOptionalInstallConfig()),
					},
					SSHAuthorizedKey: strings.Trim(TestSSHKey, "|\n\t"),
					PullSecretRef: &corev1.LocalObjectReference{
						Name: getPullSecretName(getValidOptionalInstallConfig()),
					},
					NMStateConfigLabelSelector: metav1.LabelSelector{
						MatchLabels: getNMStateConfigLabelsFromOptionalInstallConfig(getValidOptionalInstallConfig()),
					},
				},
			},
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {

			parents := asset.Parents{}
			parents.Add(tc.dependencies...)

			asset := &InfraEnv{}
			err := asset.Generate(parents)

			if tc.expectedError != "" {
				assert.Equal(t, tc.expectedError, err.Error())
			} else {
				assert.NoError(t, err)
				assert.Equal(t, tc.expectedConfig, asset.Config)
				assert.NotEmpty(t, asset.Files())

				configFile := asset.Files()[0]
				assert.Equal(t, "cluster-manifests/infraenv.yaml", configFile.Filename)

				var actualConfig aiv1beta1.InfraEnv
				err = yaml.Unmarshal(configFile.Data, &actualConfig)
				assert.NoError(t, err)
				assert.Equal(t, *tc.expectedConfig, actualConfig)
			}
		})
	}
}

func TestInfraEnv_LoadedFromDisk(t *testing.T) {

	cases := []struct {
		name string
		data string
		fetchError error
		expectedFound bool
		expectedError string
		expectedConfig *aiv1beta1.InfraEnv
	}{
		{
			name: "valid-config-file",
			data: `
metadata:
  name: infraEnv
  namespace: cluster0
spec:
  clusterRef:
    name: ocp-edge-cluster-0
    namespace: cluster0
  nmStateConfigLabelSelector:
    matchLabels:
      cluster0-nmstate-label-name: cluster0-nmstate-label-value
  pullSecretRef:
    name: pull-secret
  sshAuthorizedKey: |
    ssh-rsa AAAAmyKey`,
			expectedFound: true,
			expectedConfig: &aiv1beta1.InfraEnv{
				ObjectMeta: metav1.ObjectMeta{
					Name: "infraEnv",
					Namespace: "cluster0",
				},
				Spec: aiv1beta1.InfraEnvSpec{
					ClusterRef: &aiv1beta1.ClusterReference{
						Name: "ocp-edge-cluster-0",
						Namespace: "cluster0",
					},
					NMStateConfigLabelSelector: metav1.LabelSelector{
						MatchLabels: map[string]string{
							"cluster0-nmstate-label-name": "cluster0-nmstate-label-value",
						},
					},
					PullSecretRef: &corev1.LocalObjectReference{
						Name: "pull-secret",
					},
					SSHAuthorizedKey: "ssh-rsa AAAAmyKey",
				},
			},
		},
		{
			name: "not-yaml",
			data: `This is not a yaml file`,
			expectedError: "failed to unmarshal cluster-manifests/infraenv.yaml: error unmarshaling JSON: while decoding JSON: json: cannot unmarshal string into Go value of type v1beta1.InfraEnv",
		},
		{
			name: "file-not-found",
			fetchError: &os.PathError{Err: os.ErrNotExist},
		},
		{
			name: "error-fetching-file",
			fetchError: errors.New("fetch failed"),
			expectedError: "failed to load cluster-manifests/infraenv.yaml file: fetch failed",
		},
		{
			name: "unknown-field",
			data: `
metadata:
name: infraEnv
namespace: cluster0
spec:
wrongField: wrongValue`,
			expectedError: "failed to unmarshal cluster-manifests/infraenv.yaml: error converting YAML to JSON: yaml: line 2: found character that cannot start any token",
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {

			mockCtrl := gomock.NewController(t)
			defer mockCtrl.Finish()

			fileFetcher := mock.NewMockFileFetcher(mockCtrl)
			fileFetcher.EXPECT().FetchByName(infraEnvFilename).
				Return(
					&asset.File{
						Filename: infraEnvFilename,
						Data: []byte(tc.data)},
					tc.fetchError,
				)

			asset := &InfraEnv{}
			found, err := asset.Load(fileFetcher)
			if tc.expectedError != "" {
				assert.Equal(t, tc.expectedError, err.Error())
			} else {
				assert.NoError(t, err)
			}
			assert.Equal(t, tc.expectedFound, found, "unexpected found value returned from Load")
			if tc.expectedFound {
				assert.Equal(t, tc.expectedConfig, asset.Config, "unexpected Config in InfraEnv")
			}
		})
	}

}
173
pkg/asset/agent/manifests/network-scripts.go
Normal file
@@ -0,0 +1,173 @@
package manifests

// This file is copied from https://github.com/openshift/assisted-service/blob/master/internal/constants/scripts.go
// as it's in the internal directory, so it can't be imported

// PreNetworkConfigScript script runs on hosts before the network manager service starts in order to apply
// the user's provided network configuration on the host.
// If the user provides static network configuration, the network config files will be stored in directory
// /etc/assisted/network in the following structure:
// /etc/assisted/network/
//   +-- host1
//   |     +--- *.nmconnection
//   |     +--- mac_interface.ini
//   +-- host2
//         +--- *.nmconnection
//         +--- mac_interface.ini
// 1. *.nmconnection - files generated by nmstate based on yaml files provided by the user
// 2. mac_interface.ini - the file contains mapping of mac-address to logical interface name.
//    There are two usages for the file:
//    1. Map logical interface name to MAC Address of the host. The logical interface name is a
//       name provided by the user for the interface. It will be replaced by the script with the
//       actual network interface name.
//    2. Identify the host directory which belongs to the current host by matching a MAC Address
//       from the mapping file with host network interfaces.
//
// Applying the network configuration of each host will be done by:
// 1. Associate the current host with its matching hostX directory. The association will be done by
//    matching host's mac addresses with those in mac_interface.ini.
// 2. Replace logical interface name in nmconnection files with the interface name as set on the host
// 3. Rename nmconnection files to start with the interface name (instead of the logical interface name)
// 4. Copy the nmconnection files to /NetworkManager/system-connections/
const PreNetworkConfigScript = `#!/bin/bash

# The directory that contains nmconnection files of all nodes
NMCONNECTIONS_DIR=/etc/assisted/network
MAC_NIC_MAPPING_FILE=mac_interface.ini

if [ ! -d "$NMCONNECTIONS_DIR" ]
then
  exit 0
fi

# A map of host mac addresses to interface names
declare -A host_macs_to_hw_iface

# The directory that contains nmconnection files for the current host
host_dir=""

# The mapping file of the current host
mapping_file=""

# A nic-to-mac map created from the mapping file associated with the host
declare -A logical_nic_mac_map

# Find destination directory based on ISO mode
if [[ -f /etc/initrd-release ]]; then
  ETC_NETWORK_MANAGER="/run/NetworkManager/system-connections"
else
  ETC_NETWORK_MANAGER="/etc/NetworkManager/system-connections"
fi

# remove default connection file create by NM(nm-initrd-generator). This is a WA until
# NM is back to supporting priority between nmconnections
rm -f ${ETC_NETWORK_MANAGER}/*

# Create a map of host mac addresses to their network interfaces
function map_host_macs_to_interfaces() {
  SYS_CLASS_NET_DIR='/sys/class/net'
  for nic in $( ls $SYS_CLASS_NET_DIR )
  do
    mac=$(cat $SYS_CLASS_NET_DIR/$nic/address | tr '[:lower:]' '[:upper:]')
    host_macs_to_hw_iface[$mac]=$nic
  done
}

function find_host_directory_by_mac_address() {
  for d in $(ls -d ${NMCONNECTIONS_DIR}/host*)
  do
    mapping_file="${d}/${MAC_NIC_MAPPING_FILE}"
    if [ ! -f $mapping_file ]
    then
      echo "Mapping file '$mapping_file' is missing. Skipping on directory '$d'"
      continue
    fi

    # check if mapping file contains mac-address that exists on the current host
    for mac_address in $(cat $mapping_file | cut -d= -f1 | tr '[:lower:]' '[:upper:]')
    do
      if [[ ! -z "${host_macs_to_hw_iface[${mac_address}]:-}" ]]
      then
        host_dir=$(mktemp -d)
        cp ${d}/* $host_dir
        return
      fi
    done
  done

  if [ -z "$host_dir" ]
  then
    echo "None of host directories are a match for the current host"
    exit 0
  fi
}

function set_logical_nic_mac_mapping() {
  # initialize logical_nic_mac_map with mapping file entries
  readarray -t lines < "${mapping_file}"
  for line in "${lines[@]}"
  do
    mac=${line%%=*}
    nic=${line#*=}
    logical_nic_mac_map[$nic]=${mac^^}
  done
}

# Replace logical interface name in nmconnection files with the interface name from the mapping file
# of host's directory. Replacement is done based on mac-address matching
function update_interface_names_by_mapping_file() {

  # iterate over host's nmconnection files and replace logical interface name with host's nic name
  for nmconn_file in $(ls -1 ${host_dir}/*.nmconnection)
  do
    # iterate over mapping to find nmconnection files with logical interface name
    for nic in "${!logical_nic_mac_map[@]}"
    do
      mac=${logical_nic_mac_map[$nic]}

      # the pattern should match '=eth0' (interface name) or '=eth0.' (for vlan devices)
      if grep -q -e "=$nic$" -e "=$nic\." "$nmconn_file"
      then
        # get host interface name
        host_iface=${host_macs_to_hw_iface[$mac]}
        if [ -z "$host_iface" ]
        then
          echo "Mapping file contains MAC Address '$mac' (for logical interface name '$nic') that doesn't exist on the host"
          continue
        fi

        # replace logical interface name with host interface name
        sed -i -e "s/=$nic$/=$host_iface/g" -e "s/=$nic\./=$host_iface\./g" $nmconn_file
      fi
    done
  done
}

function copy_nmconnection_files_to_nm_config_dir() {
  for nmconn_file in $(ls -1 ${host_dir}/*.nmconnection)
  do
    # rename nmconnection files based on the actual interface name
    filename=$(basename $nmconn_file)
    prefix="${filename%%.*}"
    extension="${filename#*.}"
    if [ ! -z "${logical_nic_mac_map[$prefix]}" ]
    then
      dir_path=$(dirname $nmconn_file)
      mac_address=${logical_nic_mac_map[$prefix]}
      host_iface=${host_macs_to_hw_iface[$mac_address]}
      if [ ! -z "$host_iface" ]
      then
        mv $nmconn_file "${dir_path}/${host_iface}.${extension}"
      fi
    fi
  done

  cp ${host_dir}/*.nmconnection ${ETC_NETWORK_MANAGER}/
}

map_host_macs_to_interfaces
find_host_directory_by_mac_address
set_logical_nic_mac_mapping
update_interface_names_by_mapping_file
copy_nmconnection_files_to_nm_config_dir
`
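To make the mapping described in the comment above concrete, an illustrative host directory might look like the following (the MAC address and interface names are examples only, not taken from this commit):

  /etc/assisted/network/host1/
    +-- eth0.nmconnection      # generated by nmstate from the user's YAML; uses the logical name "eth0"
    +-- mac_interface.ini      # contains the single line: 52:54:00:aa:bb:01=eth0

On a host whose NIC with MAC 52:54:00:aa:bb:01 is enumerated as ens3, the script rewrites the "=eth0" entries inside eth0.nmconnection to "=ens3", renames the file to ens3.nmconnection, and copies it into the NetworkManager system-connections directory chosen at the top of the script.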
330
pkg/asset/agent/manifests/nmstateconfig.go
Normal file
330
pkg/asset/agent/manifests/nmstateconfig.go
Normal file
@@ -0,0 +1,330 @@
|
||||
package manifests
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
aiv1beta1 "github.com/openshift/assisted-service/api/v1beta1"
|
||||
"github.com/openshift/assisted-service/models"
|
||||
"github.com/openshift/assisted-service/pkg/staticnetworkconfig"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/validation/field"
|
||||
"k8s.io/apimachinery/pkg/util/yaml"
|
||||
|
||||
"github.com/openshift/installer/pkg/asset"
|
||||
"github.com/openshift/installer/pkg/asset/agent/agentconfig"
|
||||
k8syaml "sigs.k8s.io/yaml"
|
||||
)
|
||||
|
||||
var (
|
||||
nmStateConfigFilename = filepath.Join(clusterManifestDir, "nmstateconfig.yaml")
|
||||
)
|
||||
|
||||
// NMStateConfig generates the nmstateconfig.yaml file.
|
||||
type NMStateConfig struct {
|
||||
File *asset.File
|
||||
StaticNetworkConfig []*models.HostStaticNetworkConfig
|
||||
Config []*aiv1beta1.NMStateConfig
|
||||
}
|
||||
|
||||
type nmStateConfig struct {
|
||||
Interfaces []struct {
|
||||
IPV4 struct {
|
||||
Address []struct {
|
||||
IP string `yaml:"ip,omitempty"`
|
||||
} `yaml:"address,omitempty"`
|
||||
} `yaml:"ipv4,omitempty"`
|
||||
IPV6 struct {
|
||||
Address []struct {
|
||||
IP string `yaml:"ip,omitempty"`
|
||||
} `yaml:"address,omitempty"`
|
||||
} `yaml:"ipv6,omitempty"`
|
||||
} `yaml:"interfaces,omitempty"`
|
||||
}
|
||||
|
||||
var _ asset.WritableAsset = (*NMStateConfig)(nil)
|
||||
|
||||
// Name returns a human friendly name for the asset.
|
||||
func (*NMStateConfig) Name() string {
|
||||
return "NMState Config"
|
||||
}
|
||||
|
||||
// Dependencies returns all of the dependencies directly needed to generate
|
||||
// the asset.
|
||||
func (*NMStateConfig) Dependencies() []asset.Asset {
|
||||
return []asset.Asset{
|
||||
&agentconfig.AgentConfig{},
|
||||
}
|
||||
}
|
||||
|
||||
// Generate generates the NMStateConfig manifest.
|
||||
func (n *NMStateConfig) Generate(dependencies asset.Parents) error {
|
||||
|
||||
agentConfig := &agentconfig.AgentConfig{}
|
||||
dependencies.Get(agentConfig)
|
||||
|
||||
staticNetworkConfig := []*models.HostStaticNetworkConfig{}
|
||||
nmStateConfigs := []*aiv1beta1.NMStateConfig{}
|
||||
var data string
|
||||
|
||||
if agentConfig.Config != nil {
|
||||
for i, host := range agentConfig.Config.Hosts {
|
||||
nmStateConfig := aiv1beta1.NMStateConfig{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "NMStateConfig",
|
||||
APIVersion: "agent-install.openshift.io/v1beta1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf(getNMStateConfigName(agentConfig)+"-%d", i),
|
||||
Namespace: getNMStateConfigNamespace(agentConfig),
|
||||
Labels: getNMStateConfigLabelsFromAgentConfig(agentConfig),
|
||||
},
|
||||
Spec: aiv1beta1.NMStateConfigSpec{
|
||||
NetConfig: aiv1beta1.NetConfig{
|
||||
Raw: []byte(host.NetworkConfig.Raw),
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, hostInterface := range host.Interfaces {
|
||||
intrfc := aiv1beta1.Interface{
|
||||
Name: hostInterface.Name,
|
||||
MacAddress: hostInterface.MacAddress,
|
||||
}
|
||||
nmStateConfig.Spec.Interfaces = append(nmStateConfig.Spec.Interfaces, &intrfc)
|
||||
|
||||
}
|
||||
nmStateConfigs = append(nmStateConfigs, &nmStateConfig)
|
||||
|
||||
staticNetworkConfig = append(staticNetworkConfig, &models.HostStaticNetworkConfig{
|
||||
MacInterfaceMap: buildMacInterfaceMap(nmStateConfig),
|
||||
NetworkYaml: string(nmStateConfig.Spec.NetConfig.Raw),
|
||||
})
|
||||
|
||||
// Marshal the nmStateConfig one at a time
|
||||
// and add a yaml seperator with new line
|
||||
// so as not to marshal the nmStateConfigs
|
||||
// as a yaml list in the generated nmstateconfig.yaml
|
||||
nmStateConfigData, err := k8syaml.Marshal(nmStateConfig)
|
||||
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to marshal agent installer NMStateConfig")
|
||||
}
|
||||
data = fmt.Sprint(data, fmt.Sprint(string(nmStateConfigData), "---\n"))
|
||||
}
|
||||
|
||||
n.Config = nmStateConfigs
|
||||
n.StaticNetworkConfig = staticNetworkConfig
|
||||
|
||||
n.File = &asset.File{
|
||||
Filename: nmStateConfigFilename,
|
||||
Data: []byte(data),
|
||||
}
|
||||
}
|
||||
|
||||
return n.finish()
|
||||
}
|
||||
|
||||
// Files returns the files generated by the asset.
|
||||
func (n *NMStateConfig) Files() []*asset.File {
|
||||
if n.File != nil {
|
||||
return []*asset.File{n.File}
|
||||
}
|
||||
return []*asset.File{}
|
||||
}
|
||||
|
||||
// Load returns the NMStateConfig asset from the disk.
|
||||
func (n *NMStateConfig) Load(f asset.FileFetcher) (bool, error) {
|
||||
|
||||
file, err := f.FetchByName(nmStateConfigFilename)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return false, nil
|
||||
}
|
||||
return false, errors.Wrapf(err, "failed to load file %s", nmStateConfigFilename)
|
||||
}
|
||||
|
||||
// Split up the file into multiple YAMLs if it contains NMStateConfig for more than one node
|
||||
var decoder nmStateConfigYamlDecoder
|
||||
yamlList, err := getMultipleYamls(file.Data, &decoder)
|
||||
if err != nil {
|
||||
return false, errors.Wrapf(err, "could not decode YAML for %s", nmStateConfigFilename)
|
||||
}
|
||||
|
||||
var staticNetworkConfig []*models.HostStaticNetworkConfig
|
||||
var nmStateConfigList []*aiv1beta1.NMStateConfig
|
||||
|
||||
for i := range yamlList {
|
||||
nmStateConfig := yamlList[i].(*aiv1beta1.NMStateConfig)
|
||||
staticNetworkConfig = append(staticNetworkConfig, &models.HostStaticNetworkConfig{
|
||||
MacInterfaceMap: buildMacInterfaceMap(*nmStateConfig),
|
||||
NetworkYaml: string(nmStateConfig.Spec.NetConfig.Raw),
|
||||
})
|
||||
nmStateConfigList = append(nmStateConfigList, nmStateConfig)
|
||||
}
|
||||
|
||||
log := logrus.New()
|
||||
log.Level = logrus.WarnLevel
|
||||
staticNetworkConfigGenerator := staticnetworkconfig.New(log.WithField("pkg", "manifests"), staticnetworkconfig.Config{MaxConcurrentGenerations: 2})
|
||||
|
||||
// Validate the network config using nmstatectl
|
||||
if err = staticNetworkConfigGenerator.ValidateStaticConfigParams(context.Background(), staticNetworkConfig); err != nil {
|
||||
return false, errors.Wrapf(err, "staticNetwork configuration is not valid")
|
||||
}
|
||||
|
||||
n.File, n.StaticNetworkConfig, n.Config = file, staticNetworkConfig, nmStateConfigList
|
||||
if err = n.finish(); err != nil {
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (n *NMStateConfig) finish() error {
|
||||
|
||||
if n.Config == nil {
|
||||
return errors.New("missing configuration or manifest file")
|
||||
}
|
||||
|
||||
if err := n.validateNMStateConfig().ToAggregate(); err != nil {
|
||||
return errors.Wrapf(err, "invalid NMStateConfig configuration")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *NMStateConfig) validateNMStateConfig() field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
|
||||
if err := n.validateNMStateLabels(); err != nil {
|
||||
allErrs = append(allErrs, err...)
|
||||
}
|
||||
|
||||
return allErrs
|
||||
}
|
||||
|
||||
func (n *NMStateConfig) validateNMStateLabels() field.ErrorList {
|
||||
|
||||
var allErrs field.ErrorList
|
||||
|
||||
fieldPath := field.NewPath("ObjectMeta", "Labels")
|
||||
|
||||
for _, nmStateConfig := range n.Config {
|
||||
if len(nmStateConfig.ObjectMeta.Labels) == 0 {
|
||||
allErrs = append(allErrs, field.Required(fieldPath, fmt.Sprintf("%s does not have any label set", nmStateConfig.Name)))
|
||||
}
|
||||
}
|
||||
|
||||
return allErrs
|
||||
}
|
||||
|
||||
func getFirstIP(nmStateConfig *nmStateConfig) string {
|
||||
for _, intf := range nmStateConfig.Interfaces {
|
||||
for _, addr4 := range intf.IPV4.Address {
|
||||
if addr4.IP != "" {
|
||||
return addr4.IP
|
||||
}
|
||||
}
|
||||
for _, addr6 := range intf.IPV6.Address {
|
||||
if addr6.IP != "" {
|
||||
return addr6.IP
|
||||
}
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// GetNodeZeroIP retrieves the first IP from the user provided NMStateConfigs to set as the node0 IP
|
||||
func GetNodeZeroIP(nmStateConfigs []*aiv1beta1.NMStateConfig) (string, error) {
|
||||
for i := range nmStateConfigs {
|
||||
var nmStateConfig nmStateConfig
|
||||
err := yaml.Unmarshal(nmStateConfigs[i].Spec.NetConfig.Raw, &nmStateConfig)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error unmarshalling NMStateConfig: %v", err)
|
||||
}
|
||||
if nodeZeroIP := getFirstIP(&nmStateConfig); nodeZeroIP != "" {
|
||||
if net.ParseIP(nodeZeroIP) == nil {
|
||||
return "", fmt.Errorf("could not parse static IP: %s", nodeZeroIP)
|
||||
}
|
||||
|
||||
return nodeZeroIP, nil
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return "", fmt.Errorf("invalid NMStateConfig yaml, no interface IPs set")
|
||||
}
|
||||
|
||||
// GetNMIgnitionFiles returns the list of NetworkManager configuration files
|
||||
func GetNMIgnitionFiles(staticNetworkConfig []*models.HostStaticNetworkConfig) ([]staticnetworkconfig.StaticNetworkConfigData, error) {
|
||||
log := logrus.New()
|
||||
staticNetworkConfigGenerator := staticnetworkconfig.New(log.WithField("pkg", "manifests"), staticnetworkconfig.Config{MaxConcurrentGenerations: 2})
|
||||
|
||||
networkConfigStr, err := staticNetworkConfigGenerator.FormatStaticNetworkConfigForDB(staticNetworkConfig)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("error marshalling StaticNetwork configuration: %w", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
filesList, err := staticNetworkConfigGenerator.GenerateStaticNetworkConfigData(context.Background(), networkConfigStr)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("failed to create StaticNetwork config data: %w", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return filesList, err
|
||||
}

type nmStateConfigYamlDecoder int

type decodeFormat interface {
	NewDecodedYaml(decoder *yaml.YAMLToJSONDecoder) (interface{}, error)
}

func (d *nmStateConfigYamlDecoder) NewDecodedYaml(yamlDecoder *yaml.YAMLToJSONDecoder) (interface{}, error) {
	decodedData := new(aiv1beta1.NMStateConfig)
	err := yamlDecoder.Decode(&decodedData)

	return decodedData, err
}

// getMultipleYamls reads a YAML file containing multiple YAML definitions of the same format.
// Each specific format must implement the decodeFormat interface.
func getMultipleYamls(contents []byte, decoder decodeFormat) ([]interface{}, error) {

	r := bytes.NewReader(contents)
	dec := yaml.NewYAMLToJSONDecoder(r)

	var outputList []interface{}
	for {
		decodedData, err := decoder.NewDecodedYaml(dec)
		if errors.Is(err, io.EOF) {
			break
		}
		if err != nil {
			return nil, errors.Wrapf(err, "Error reading multiple YAMLs")
		}

		outputList = append(outputList, decodedData)
	}

	return outputList, nil
}
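
// Illustrative sketch (not part of this commit): splitting a multi-document
// nmstateconfig.yaml into individual NMStateConfig objects, mirroring the
// usage in the generate test below. fileData stands for the raw file contents.
//
//	var decoder nmStateConfigYamlDecoder
//	yamlList, err := getMultipleYamls(fileData, &decoder)
//	// each yamlList entry is a *aiv1beta1.NMStateConfig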

func buildMacInterfaceMap(nmStateConfig aiv1beta1.NMStateConfig) models.MacInterfaceMap {

	// TODO - this eventually will move to another asset so the interface definition can be shared with Butane
	macInterfaceMap := make(models.MacInterfaceMap, 0, len(nmStateConfig.Spec.Interfaces))
	for _, cfg := range nmStateConfig.Spec.Interfaces {
		logrus.Debug("adding MAC interface map to host static network config - Name: ", cfg.Name, " MacAddress:", cfg.MacAddress)
		macInterfaceMap = append(macInterfaceMap, &models.MacInterfaceMapItems0{
			MacAddress:     cfg.MacAddress,
			LogicalNicName: cfg.Name,
		})
	}
	return macInterfaceMap
}
577
pkg/asset/agent/manifests/nmstateconfig_test.go
Normal file
@@ -0,0 +1,577 @@
|
||||
package manifests
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"testing"
|
||||
|
||||
"github.com/golang/mock/gomock"
|
||||
"github.com/stretchr/testify/assert"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
aiv1beta1 "github.com/openshift/assisted-service/api/v1beta1"
|
||||
"github.com/openshift/assisted-service/models"
|
||||
"github.com/openshift/installer/pkg/asset"
|
||||
"github.com/openshift/installer/pkg/asset/agent/agentconfig"
|
||||
"github.com/openshift/installer/pkg/asset/mock"
|
||||
)
|
||||
|
||||
func TestNMStateConfig_Generate(t *testing.T) {
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
dependencies []asset.Asset
|
||||
expectedError string
|
||||
expectedConfig []*aiv1beta1.NMStateConfig
|
||||
}{
|
||||
{
|
||||
name: "missing-config",
|
||||
dependencies: []asset.Asset{
|
||||
&agentconfig.AgentConfig{},
|
||||
},
|
||||
expectedError: "missing configuration or manifest file",
|
||||
},
|
||||
{
|
||||
name: "valid config",
|
||||
dependencies: []asset.Asset{
|
||||
getValidAgentConfig(),
|
||||
},
|
||||
expectedConfig: []*aiv1beta1.NMStateConfig{
|
||||
{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "NMStateConfig",
|
||||
APIVersion: "agent-install.openshift.io/v1beta1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprint(getNMStateConfigName(getValidAgentConfig()), "-0"),
|
||||
Namespace: getNMStateConfigNamespace(getValidAgentConfig()),
|
||||
Labels: getNMStateConfigLabelsFromAgentConfig(getValidAgentConfig()),
|
||||
},
|
||||
Spec: aiv1beta1.NMStateConfigSpec{
|
||||
Interfaces: []*aiv1beta1.Interface{
|
||||
{
|
||||
Name: "enp2s0",
|
||||
MacAddress: "98:af:65:a5:8d:01",
|
||||
},
|
||||
{
|
||||
Name: "enp3s1",
|
||||
MacAddress: "28:d2:44:d2:b2:1a",
|
||||
},
|
||||
},
|
||||
NetConfig: aiv1beta1.NetConfig{
|
||||
Raw: unmarshalJSON([]byte("interfaces:")),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "NMStateConfig",
|
||||
APIVersion: "agent-install.openshift.io/v1beta1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprint(getNMStateConfigName(getValidAgentConfig()), "-1"),
|
||||
Namespace: getNMStateConfigNamespace(getValidAgentConfig()),
|
||||
Labels: getNMStateConfigLabelsFromAgentConfig(getValidAgentConfig()),
|
||||
},
|
||||
Spec: aiv1beta1.NMStateConfigSpec{
|
||||
Interfaces: []*aiv1beta1.Interface{
|
||||
{
|
||||
Name: "enp2t0",
|
||||
MacAddress: "98:af:65:a5:8d:02",
|
||||
},
|
||||
},
|
||||
NetConfig: aiv1beta1.NetConfig{
|
||||
Raw: unmarshalJSON([]byte("interfaces:")),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "NMStateConfig",
|
||||
APIVersion: "agent-install.openshift.io/v1beta1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprint(getNMStateConfigName(getValidAgentConfig()), "-2"),
|
||||
Namespace: getNMStateConfigNamespace(getValidAgentConfig()),
|
||||
Labels: getNMStateConfigLabelsFromAgentConfig(getValidAgentConfig()),
|
||||
},
|
||||
Spec: aiv1beta1.NMStateConfigSpec{
|
||||
Interfaces: []*aiv1beta1.Interface{
|
||||
{
|
||||
Name: "enp2u0",
|
||||
MacAddress: "98:af:65:a5:8d:03",
|
||||
},
|
||||
},
|
||||
NetConfig: aiv1beta1.NetConfig{
|
||||
Raw: unmarshalJSON([]byte("interfaces:")),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
|
||||
parents := asset.Parents{}
|
||||
parents.Add(tc.dependencies...)
|
||||
|
||||
asset := &NMStateConfig{}
|
||||
err := asset.Generate(parents)
|
||||
|
||||
if tc.expectedError != "" {
|
||||
assert.Equal(t, tc.expectedError, err.Error())
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, tc.expectedConfig, asset.Config)
|
||||
assert.NotEmpty(t, asset.Files())
|
||||
|
||||
configFile := asset.Files()[0]
|
||||
assert.Equal(t, "cluster-manifests/nmstateconfig.yaml", configFile.Filename)
|
||||
|
||||
// Split up the file into multiple YAMLs if it contains NMStateConfig for more than one node
|
||||
var decoder nmStateConfigYamlDecoder
|
||||
yamlList, err := getMultipleYamls(configFile.Data, &decoder)
|
||||
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, len(tc.expectedConfig), len(yamlList))
|
||||
|
||||
for i := range tc.expectedConfig {
|
||||
assert.Equal(t, tc.expectedConfig[i], yamlList[i])
|
||||
|
||||
}
|
||||
|
||||
assert.Equal(t, len(tc.expectedConfig), len(asset.StaticNetworkConfig))
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestNMStateConfig_LoadedFromDisk(t *testing.T) {
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
data string
|
||||
fetchError error
|
||||
expectedFound bool
|
||||
expectedError string
|
||||
requiresNmstatectl bool
|
||||
expectedConfig []*models.HostStaticNetworkConfig
|
||||
}{
|
||||
{
|
||||
name: "valid-config-file",
|
||||
data: `
|
||||
metadata:
|
||||
name: mynmstateconfig
|
||||
namespace: spoke-cluster
|
||||
labels:
|
||||
cluster0-nmstate-label-name: cluster0-nmstate-label-value
|
||||
spec:
|
||||
config:
|
||||
interfaces:
|
||||
- name: eth0
|
||||
type: ethernet
|
||||
state: up
|
||||
mac-address: 52:54:01:aa:aa:a1
|
||||
ipv4:
|
||||
enabled: true
|
||||
address:
|
||||
- ip: 192.168.122.21
|
||||
prefix-length: 24
|
||||
dhcp: false
|
||||
dns-resolver:
|
||||
config:
|
||||
server:
|
||||
- 192.168.122.1
|
||||
routes:
|
||||
config:
|
||||
- destination: 0.0.0.0/0
|
||||
next-hop-address: 192.168.122.1
|
||||
next-hop-interface: eth0
|
||||
table-id: 254
|
||||
interfaces:
|
||||
- name: "eth0"
|
||||
macAddress: "52:54:01:aa:aa:a1"
|
||||
- name: "eth1"
|
||||
macAddress: "52:54:01:bb:bb:b1"`,
|
||||
requiresNmstatectl: true,
|
||||
expectedFound: true,
|
||||
expectedConfig: []*models.HostStaticNetworkConfig{
|
||||
{
|
||||
MacInterfaceMap: models.MacInterfaceMap{
|
||||
{LogicalNicName: "eth0", MacAddress: "52:54:01:aa:aa:a1"},
|
||||
{LogicalNicName: "eth1", MacAddress: "52:54:01:bb:bb:b1"},
|
||||
},
|
||||
NetworkYaml: "dns-resolver:\n config:\n server:\n - 192.168.122.1\ninterfaces:\n- ipv4:\n address:\n - ip: 192.168.122.21\n prefix-length: 24\n dhcp: false\n enabled: true\n mac-address: 52:54:01:aa:aa:a1\n name: eth0\n state: up\n type: ethernet\nroutes:\n config:\n - destination: 0.0.0.0/0\n next-hop-address: 192.168.122.1\n next-hop-interface: eth0\n table-id: 254\n",
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
name: "valid-config-multiple-yamls",
|
||||
data: `
|
||||
metadata:
|
||||
name: mynmstateconfig
|
||||
namespace: spoke-cluster
|
||||
labels:
|
||||
cluster0-nmstate-label-name: cluster0-nmstate-label-value
|
||||
spec:
|
||||
config:
|
||||
interfaces:
|
||||
- name: eth0
|
||||
type: ethernet
|
||||
state: up
|
||||
mac-address: 52:54:01:aa:aa:a1
|
||||
ipv4:
|
||||
enabled: true
|
||||
address:
|
||||
- ip: 192.168.122.21
|
||||
prefix-length: 24
|
||||
interfaces:
|
||||
- name: "eth0"
|
||||
macAddress: "52:54:01:aa:aa:a1"
|
||||
---
|
||||
metadata:
|
||||
name: mynmstateconfig-2
|
||||
namespace: spoke-cluster
|
||||
labels:
|
||||
cluster0-nmstate-label-name: cluster0-nmstate-label-value
|
||||
spec:
|
||||
config:
|
||||
interfaces:
|
||||
- name: eth0
|
||||
type: ethernet
|
||||
state: up
|
||||
mac-address: 52:54:01:cc:cc:c1
|
||||
ipv4:
|
||||
enabled: true
|
||||
address:
|
||||
- ip: 192.168.122.22
|
||||
prefix-length: 24
|
||||
interfaces:
|
||||
- name: "eth0"
|
||||
macAddress: "52:54:01:cc:cc:c1"`,
|
||||
requiresNmstatectl: true,
|
||||
expectedFound: true,
|
||||
expectedConfig: []*models.HostStaticNetworkConfig{
|
||||
{
|
||||
MacInterfaceMap: models.MacInterfaceMap{
|
||||
{LogicalNicName: "eth0", MacAddress: "52:54:01:aa:aa:a1"},
|
||||
},
|
||||
NetworkYaml: "interfaces:\n- ipv4:\n address:\n - ip: 192.168.122.21\n prefix-length: 24\n enabled: true\n mac-address: 52:54:01:aa:aa:a1\n name: eth0\n state: up\n type: ethernet\n",
|
||||
},
|
||||
{
|
||||
MacInterfaceMap: models.MacInterfaceMap{
|
||||
{LogicalNicName: "eth0", MacAddress: "52:54:01:cc:cc:c1"},
|
||||
},
|
||||
NetworkYaml: "interfaces:\n- ipv4:\n address:\n - ip: 192.168.122.22\n prefix-length: 24\n enabled: true\n mac-address: 52:54:01:cc:cc:c1\n name: eth0\n state: up\n type: ethernet\n",
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
name: "invalid-interfaces",
|
||||
data: `
|
||||
metadata:
|
||||
name: mynmstateconfig
|
||||
namespace: spoke-cluster
|
||||
labels:
|
||||
cluster0-nmstate-label-name: cluster0-nmstate-label-value
|
||||
spec:
|
||||
interfaces:
|
||||
- name: "eth0"
|
||||
macAddress: "52:54:01:aa:aa:a1"
|
||||
- name: "eth0"
|
||||
macAddress: "52:54:01:bb:bb:b1"`,
|
||||
requiresNmstatectl: true,
|
||||
expectedError: "staticNetwork configuration is not valid",
|
||||
},
|
||||
|
||||
{
|
||||
name: "invalid-address-for-type",
|
||||
data: `
|
||||
metadata:
|
||||
name: mynmstateconfig
|
||||
namespace: spoke-cluster
|
||||
labels:
|
||||
cluster0-nmstate-label-name: cluster0-nmstate-label-value
|
||||
spec:
|
||||
config:
|
||||
interfaces:
|
||||
- name: eth0
|
||||
type: ethernet
|
||||
state: up
|
||||
mac-address: 52:54:01:aa:aa:a1
|
||||
ipv6:
|
||||
enabled: true
|
||||
address:
|
||||
- ip: 192.168.122.21
|
||||
prefix-length: 24
|
||||
interfaces:
|
||||
- name: "eth0"
|
||||
macAddress: "52:54:01:aa:aa:a1"`,
|
||||
requiresNmstatectl: true,
|
||||
expectedError: "staticNetwork configuration is not valid",
|
||||
},
|
||||
|
||||
{
|
||||
name: "missing-label",
|
||||
data: `
|
||||
metadata:
|
||||
name: mynmstateconfig
|
||||
namespace: spoke-cluster
|
||||
spec:
|
||||
config:
|
||||
interfaces:
|
||||
- name: eth0
|
||||
type: ethernet
|
||||
state: up
|
||||
mac-address: 52:54:01:aa:aa:a1
|
||||
ipv4:
|
||||
enabled: true
|
||||
address:
|
||||
- ip: 192.168.122.21
|
||||
prefix-length: 24
|
||||
interfaces:
|
||||
- name: "eth0"
|
||||
macAddress: "52:54:01:aa:aa:a1"`,
|
||||
expectedError: "invalid NMStateConfig configuration: ObjectMeta.Labels: Required value: mynmstateconfig does not have any label set",
|
||||
},
|
||||
|
||||
{
|
||||
name: "not-yaml",
|
||||
data: `This is not a yaml file`,
|
||||
expectedError: "could not decode YAML for cluster-manifests/nmstateconfig.yaml: Error reading multiple YAMLs: error unmarshaling JSON: while decoding JSON: json: cannot unmarshal string into Go value of type v1beta1.NMStateConfig",
|
||||
},
|
||||
{
|
||||
name: "file-not-found",
|
||||
fetchError: &os.PathError{Err: os.ErrNotExist},
|
||||
},
|
||||
{
|
||||
name: "error-fetching-file",
|
||||
fetchError: errors.New("fetch failed"),
|
||||
expectedError: "failed to load file cluster-manifests/nmstateconfig.yaml: fetch failed",
|
||||
},
|
||||
}
|
||||
for _, tc := range cases {
|
||||
// nmstate may not be installed yet in CI so skip this test if not
|
||||
if tc.requiresNmstatectl {
|
||||
_, execErr := exec.LookPath("nmstatectl")
|
||||
if execErr != nil {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
|
||||
mockCtrl := gomock.NewController(t)
|
||||
defer mockCtrl.Finish()
|
||||
|
||||
fileFetcher := mock.NewMockFileFetcher(mockCtrl)
|
||||
fileFetcher.EXPECT().FetchByName(nmStateConfigFilename).
|
||||
Return(
|
||||
&asset.File{
|
||||
Filename: nmStateConfigFilename,
|
||||
Data: []byte(tc.data)},
|
||||
tc.fetchError,
|
||||
)
|
||||
|
||||
asset := &NMStateConfig{}
|
||||
found, err := asset.Load(fileFetcher)
|
||||
assert.Equal(t, tc.expectedFound, found, "unexpected found value returned from Load")
|
||||
if tc.expectedError != "" {
|
||||
assert.ErrorContains(t, err, tc.expectedError)
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
if tc.expectedFound {
|
||||
assert.Equal(t, tc.expectedConfig, asset.StaticNetworkConfig, "unexpected Config in NMStateConfig")
|
||||
assert.Equal(t, len(tc.expectedConfig), len(asset.Config))
|
||||
for i := 0; i < len(tc.expectedConfig); i++ {
|
||||
|
||||
staticNetworkConfig := asset.StaticNetworkConfig[i]
|
||||
nmStateConfig := asset.Config[i]
|
||||
|
||||
for n := 0; n < len(staticNetworkConfig.MacInterfaceMap); n++ {
|
||||
macInterfaceMap := staticNetworkConfig.MacInterfaceMap[n]
|
||||
iface := nmStateConfig.Spec.Interfaces[n]
|
||||
|
||||
assert.Equal(t, macInterfaceMap.LogicalNicName, iface.Name)
|
||||
assert.Equal(t, macInterfaceMap.MacAddress, iface.MacAddress)
|
||||
}
|
||||
assert.YAMLEq(t, staticNetworkConfig.NetworkYaml, string(nmStateConfig.Spec.NetConfig.Raw))
|
||||
}
|
||||
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetNodeZeroIP(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
expectedIP string
|
||||
expectedError string
|
||||
configs []string
|
||||
}{
|
||||
{
|
||||
name: "no interfaces",
|
||||
expectedError: "no interface IPs set",
|
||||
},
|
||||
{
|
||||
name: "first interface",
|
||||
expectedIP: "192.168.122.21",
|
||||
configs: []string{
|
||||
`
|
||||
interfaces:
|
||||
- name: eth0
|
||||
type: ethernet
|
||||
ipv4:
|
||||
address:
|
||||
- ip: 192.168.122.21
|
||||
- name: eth1
|
||||
type: ethernet
|
||||
ipv4:
|
||||
address:
|
||||
- ip: 192.168.122.22
|
||||
`,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "second interface",
|
||||
expectedIP: "192.168.122.22",
|
||||
configs: []string{
|
||||
`
|
||||
interfaces:
|
||||
- name: eth0
|
||||
type: ethernet
|
||||
- name: eth1
|
||||
type: ethernet
|
||||
ipv4:
|
||||
address:
|
||||
- ip: 192.168.122.22
|
||||
`,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "second host",
|
||||
expectedIP: "192.168.122.22",
|
||||
configs: []string{
|
||||
`
|
||||
interfaces:
|
||||
- name: eth0
|
||||
type: ethernet
|
||||
- name: eth1
|
||||
type: ethernet
|
||||
`,
|
||||
`
|
||||
interfaces:
|
||||
- name: eth0
|
||||
type: ethernet
|
||||
- name: eth1
|
||||
type: ethernet
|
||||
ipv4:
|
||||
address:
|
||||
- ip: 192.168.122.22
|
||||
`,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "ipv4 first",
|
||||
expectedIP: "192.168.122.22",
|
||||
configs: []string{
|
||||
`
|
||||
interfaces:
|
||||
- name: eth0
|
||||
type: ethernet
|
||||
ipv6:
|
||||
address:
|
||||
- ip: "2001:0db8::0001"
|
||||
ipv4:
|
||||
address:
|
||||
- ip: 192.168.122.22
|
||||
`,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "ipv6 host first",
|
||||
expectedIP: "2001:0db8::0001",
|
||||
configs: []string{
|
||||
`
|
||||
interfaces:
|
||||
- name: eth0
|
||||
type: ethernet
|
||||
ipv6:
|
||||
address:
|
||||
- ip: "2001:0db8::0001"
|
||||
`,
|
||||
`
|
||||
interfaces:
|
||||
- name: eth0
|
||||
type: ethernet
|
||||
ipv4:
|
||||
address:
|
||||
- ip: 192.168.122.31
|
||||
`,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "ipv6 first",
|
||||
expectedIP: "2001:0db8::0001",
|
||||
configs: []string{
|
||||
`
|
||||
interfaces:
|
||||
- name: eth0
|
||||
type: ethernet
|
||||
ipv6:
|
||||
address:
|
||||
- ip: "2001:0db8::0001"
|
||||
- name: eth1
|
||||
type: ethernet
|
||||
ipv4:
|
||||
address:
|
||||
- ip: 192.168.122.22
|
||||
`,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "ipv6",
|
||||
expectedIP: "2001:0db8::0001",
|
||||
configs: []string{
|
||||
`
|
||||
interfaces:
|
||||
- name: eth0
|
||||
type: ethernet
|
||||
ipv6:
|
||||
address:
|
||||
- ip: "2001:0db8::0001"
|
||||
`,
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
var configs []*aiv1beta1.NMStateConfig
|
||||
for _, hostRaw := range tc.configs {
|
||||
configs = append(configs, &aiv1beta1.NMStateConfig{
|
||||
Spec: aiv1beta1.NMStateConfigSpec{
|
||||
NetConfig: aiv1beta1.NetConfig{
|
||||
Raw: aiv1beta1.RawNetConfig(hostRaw),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
ip, err := GetNodeZeroIP(configs)
|
||||
if tc.expectedError == "" {
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, tc.expectedIP, ip)
|
||||
} else {
|
||||
assert.ErrorContains(t, err, tc.expectedError)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
177
pkg/asset/agent/manifests/util_test.go
Normal file
@@ -0,0 +1,177 @@
|
||||
package manifests
|
||||
|
||||
import (
|
||||
"net"
|
||||
|
||||
"github.com/openshift/assisted-service/api/v1beta1"
|
||||
"github.com/openshift/installer/pkg/asset/agent"
|
||||
"github.com/openshift/installer/pkg/asset/agent/agentconfig"
|
||||
"github.com/openshift/installer/pkg/asset/installconfig"
|
||||
"github.com/openshift/installer/pkg/ipnet"
|
||||
"github.com/openshift/installer/pkg/types"
|
||||
agenttypes "github.com/openshift/installer/pkg/types/agent"
|
||||
"github.com/openshift/installer/pkg/types/baremetal"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/utils/pointer"
|
||||
"sigs.k8s.io/yaml"
|
||||
)
|
||||
|
||||
var (
|
||||
// TestSSHKey provides an SSH key for unit tests
|
||||
TestSSHKey = `|
|
||||
ssh-rsa AAAAB3NzaC1y1LJe3zew1ghc= root@localhost.localdomain`
|
||||
// TestSecret provides a pull secret for unit tests
|
||||
TestSecret = `'{"auths":{"cloud.openshift.com":{"auth":"b3BlUTA=","email":"test@redhat.com"}}}`
|
||||
// TestReleaseImage provides a release image url for unit tests
|
||||
TestReleaseImage = "registry.ci.openshift.org/origin/release:4.11"
|
||||
)
|
||||
|
||||
// getValidOptionalInstallConfig returns a valid optional install config
|
||||
func getValidOptionalInstallConfig() *agent.OptionalInstallConfig {
|
||||
_, newCidr, _ := net.ParseCIDR("192.168.111.0/24")
|
||||
|
||||
return &agent.OptionalInstallConfig{
|
||||
InstallConfig: installconfig.InstallConfig{
|
||||
Config: &types.InstallConfig{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "ocp-edge-cluster-0",
|
||||
Namespace: "cluster-0",
|
||||
},
|
||||
BaseDomain: "testing.com",
|
||||
PullSecret: TestSecret,
|
||||
SSHKey: TestSSHKey,
|
||||
ControlPlane: &types.MachinePool{
|
||||
Name: "master",
|
||||
Replicas: pointer.Int64Ptr(3),
|
||||
Platform: types.MachinePoolPlatform{},
|
||||
},
|
||||
Compute: []types.MachinePool{
|
||||
{
|
||||
Name: "worker-machine-pool-1",
|
||||
Replicas: pointer.Int64Ptr(2),
|
||||
},
|
||||
{
|
||||
Name: "worker-machine-pool-2",
|
||||
Replicas: pointer.Int64Ptr(3),
|
||||
},
|
||||
},
|
||||
Networking: &types.Networking{
|
||||
ClusterNetwork: []types.ClusterNetworkEntry{
|
||||
{
|
||||
CIDR: ipnet.IPNet{IPNet: *newCidr},
|
||||
HostPrefix: 23,
|
||||
},
|
||||
},
|
||||
ServiceNetwork: []ipnet.IPNet{
|
||||
*ipnet.MustParseCIDR("172.30.0.0/16"),
|
||||
},
|
||||
},
|
||||
Platform: types.Platform{
|
||||
BareMetal: &baremetal.Platform{
|
||||
APIVIP: "192.168.122.10",
|
||||
IngressVIP: "192.168.122.11",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Supplied: true,
|
||||
}
|
||||
}
|
||||
|
||||
func getValidAgentConfig() *agentconfig.AgentConfig {
|
||||
return &agentconfig.AgentConfig{
|
||||
Config: &agenttypes.Config{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "ocp-edge-cluster-0",
|
||||
Namespace: "cluster-0",
|
||||
},
|
||||
RendezvousIP: "192.168.122.2",
|
||||
Hosts: []agenttypes.Host{
|
||||
{
|
||||
Hostname: "control-0.example.org",
|
||||
Role: "master",
|
||||
RootDeviceHints: baremetal.RootDeviceHints{
|
||||
DeviceName: "/dev/sda",
|
||||
HCTL: "hctl-value",
|
||||
Model: "model-value",
|
||||
Vendor: "vendor-value",
|
||||
SerialNumber: "serial-number-value",
|
||||
MinSizeGigabytes: 20,
|
||||
WWN: "wwn-value",
|
||||
WWNWithExtension: "wwn-with-extension-value",
|
||||
WWNVendorExtension: "wwn-vendor-extension-value",
|
||||
Rotational: new(bool),
|
||||
},
|
||||
Interfaces: []*v1beta1.Interface{
|
||||
{
|
||||
Name: "enp2s0",
|
||||
MacAddress: "98:af:65:a5:8d:01",
|
||||
},
|
||||
{
|
||||
Name: "enp3s1",
|
||||
MacAddress: "28:d2:44:d2:b2:1a",
|
||||
},
|
||||
},
|
||||
NetworkConfig: v1beta1.NetConfig{
|
||||
Raw: unmarshalJSON([]byte("interfaces:")),
|
||||
},
|
||||
},
|
||||
{
|
||||
Hostname: "control-1.example.org",
|
||||
Role: "master",
|
||||
RootDeviceHints: baremetal.RootDeviceHints{
|
||||
DeviceName: "/dev/sdb",
|
||||
HCTL: "hctl-value",
|
||||
Model: "model-value",
|
||||
Vendor: "vendor-value",
|
||||
SerialNumber: "serial-number-value",
|
||||
MinSizeGigabytes: 40,
|
||||
WWN: "wwn-value",
|
||||
WWNWithExtension: "wwn-with-extension-value",
|
||||
WWNVendorExtension: "wwn-vendor-extension-value",
|
||||
Rotational: new(bool),
|
||||
},
|
||||
Interfaces: []*v1beta1.Interface{
|
||||
{
|
||||
Name: "enp2t0",
|
||||
MacAddress: "98:af:65:a5:8d:02",
|
||||
},
|
||||
},
|
||||
NetworkConfig: v1beta1.NetConfig{
|
||||
Raw: unmarshalJSON([]byte("interfaces:")),
|
||||
},
|
||||
},
|
||||
{
|
||||
Hostname: "control-2.example.org",
|
||||
Role: "master",
|
||||
RootDeviceHints: baremetal.RootDeviceHints{
|
||||
DeviceName: "/dev/sdc",
|
||||
HCTL: "hctl-value",
|
||||
Model: "model-value",
|
||||
Vendor: "vendor-value",
|
||||
SerialNumber: "serial-number-value",
|
||||
MinSizeGigabytes: 60,
|
||||
WWN: "wwn-value",
|
||||
WWNWithExtension: "wwn-with-extension-value",
|
||||
WWNVendorExtension: "wwn-vendor-extension-value",
|
||||
Rotational: new(bool),
|
||||
},
|
||||
Interfaces: []*v1beta1.Interface{
|
||||
{
|
||||
Name: "enp2u0",
|
||||
MacAddress: "98:af:65:a5:8d:03",
|
||||
},
|
||||
},
|
||||
NetworkConfig: v1beta1.NetConfig{
|
||||
Raw: unmarshalJSON([]byte("interfaces:")),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func unmarshalJSON(b []byte) []byte {
|
||||
output, _ := yaml.JSONToYAML(b)
|
||||
return output
|
||||
}
|
||||
102
pkg/asset/agent/mirror/cabundle.go
Normal file
@@ -0,0 +1,102 @@
|
||||
package mirror
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/openshift/installer/pkg/asset"
|
||||
"github.com/openshift/installer/pkg/asset/agent"
|
||||
"github.com/openshift/installer/pkg/asset/manifests"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var (
|
||||
// CaBundleFilename defines the name of the file on disk
|
||||
CaBundleFilename = filepath.Join(mirrorConfigDir, "ca-bundle.crt")
|
||||
)
|
||||
|
||||
// CaBundle generates the certificate file for disconnected mirrors.
|
||||
type CaBundle struct {
|
||||
File *asset.File
|
||||
}
|
||||
|
||||
var _ asset.WritableAsset = (*CaBundle)(nil)
|
||||
|
||||
// Name returns a human friendly name for the asset.
|
||||
func (*CaBundle) Name() string {
|
||||
return "Mirror Registries Certificate File"
|
||||
}
|
||||
|
||||
// Dependencies returns all of the dependencies directly needed to generate
|
||||
// the asset.
|
||||
func (*CaBundle) Dependencies() []asset.Asset {
|
||||
return []asset.Asset{
|
||||
&agent.OptionalInstallConfig{},
|
||||
}
|
||||
}
|
||||
|
||||
// Generate generates the Mirror Registries certificate file from install-config.
|
||||
func (i *CaBundle) Generate(dependencies asset.Parents) error {
|
||||
installConfig := &agent.OptionalInstallConfig{}
|
||||
dependencies.Get(installConfig)
|
||||
if !installConfig.Supplied {
|
||||
return nil
|
||||
}
|
||||
|
||||
if installConfig.Config.AdditionalTrustBundle == "" {
|
||||
i.File = &asset.File{
|
||||
Filename: CaBundleFilename,
|
||||
Data: []byte{},
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
return i.parseCertificates(installConfig.Config.AdditionalTrustBundle)
|
||||
}
|
||||
|
||||
func (i *CaBundle) parseCertificates(certs string) error {
|
||||
if len(certs) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
data, err := manifests.ParseCertificates(certs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for filename, content := range data {
|
||||
if filepath.Base(CaBundleFilename) == filename {
|
||||
i.File = &asset.File{
|
||||
Filename: CaBundleFilename,
|
||||
Data: []byte(content),
|
||||
}
|
||||
} else {
|
||||
return fmt.Errorf("unexpected CA Bundle filename %s", filename)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Files returns the files generated by the asset.
|
||||
func (i *CaBundle) Files() []*asset.File {
|
||||
if i.File != nil {
|
||||
return []*asset.File{i.File}
|
||||
}
|
||||
return []*asset.File{}
|
||||
}
|
||||
|
||||
// Load returns the Mirror Registries certificate file from the disk.
|
||||
func (i *CaBundle) Load(f asset.FileFetcher) (bool, error) {
|
||||
|
||||
file, err := f.FetchByName(CaBundleFilename)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return false, nil
|
||||
}
|
||||
return false, errors.Wrap(err, fmt.Sprintf("failed to load %s file", CaBundleFilename))
|
||||
}
|
||||
|
||||
return true, i.parseCertificates(string(file.Data))
|
||||
}
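
// Illustrative sketch (not part of this commit): driving the CaBundle asset
// from its parents, mirroring the generate test below. installConfig stands
// for an *agent.OptionalInstallConfig with AdditionalTrustBundle set.
//
//	parents := asset.Parents{}
//	parents.Add(installConfig)
//	caBundle := &CaBundle{}
//	if err := caBundle.Generate(parents); err == nil {
//		files := caBundle.Files() // mirror/ca-bundle.crt when a bundle was supplied
//		_ = files
//	}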
|
||||
230
pkg/asset/agent/mirror/cabundle_test.go
Normal file
@@ -0,0 +1,230 @@
|
||||
package mirror
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/golang/mock/gomock"
|
||||
"github.com/stretchr/testify/assert"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/openshift/installer/pkg/asset"
|
||||
"github.com/openshift/installer/pkg/asset/agent"
|
||||
"github.com/openshift/installer/pkg/asset/installconfig"
|
||||
"github.com/openshift/installer/pkg/asset/mock"
|
||||
"github.com/openshift/installer/pkg/types"
|
||||
)
|
||||
|
||||
func TestCaBundle_Generate(t *testing.T) {
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
dependencies []asset.Asset
|
||||
expectedError string
|
||||
expectedConfig string
|
||||
}{
|
||||
{
|
||||
name: "missing-config",
|
||||
dependencies: []asset.Asset{
|
||||
&agent.OptionalInstallConfig{},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "default",
|
||||
dependencies: []asset.Asset{
|
||||
&agent.OptionalInstallConfig{
|
||||
Supplied: true,
|
||||
InstallConfig: installconfig.InstallConfig{
|
||||
Config: &types.InstallConfig{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Namespace: "cluster-0",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "additional-trust-bundle",
|
||||
dependencies: []asset.Asset{
|
||||
&agent.OptionalInstallConfig{
|
||||
Supplied: true,
|
||||
InstallConfig: installconfig.InstallConfig{
|
||||
Config: &types.InstallConfig{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Namespace: "cluster-0",
|
||||
},
|
||||
AdditionalTrustBundle: `
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIDZTCCAk2gAwIBAgIURbA8lR+5xlJZUoOXK66AHFWd3uswDQYJKoZIhvcNAQEL
|
||||
BQAwQjELMAkGA1UEBhMCWFgxFTATBgNVBAcMDERlZmF1bHQgQ2l0eTEcMBoGA1UE
|
||||
CgwTRGVmYXVsdCBDb21wYW55IEx0ZDAeFw0yMjA3MDgxOTUzMTVaFw0yMjA4MDcx
|
||||
OTUzMTVaMEIxCzAJBgNVBAYTAlhYMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkxHDAa
|
||||
BgNVBAoME0RlZmF1bHQgQ29tcGFueSBMdGQwggEiMA0GCSqGSIb3DQEBAQUAA4IB
|
||||
DwAwggEKAoIBAQCroH9c2PLWI0O/nBrmKtS2IuReyWaR0DOMJY7C/vc12l9zlH0D
|
||||
xTOUfEtdqRktjVsUn1vIIiFakxd0QLIPcMyKplmbavIBUQp+MZr0pNVX+lwcctbA
|
||||
7FVHEnbWYNVepoV7kZkTVvMXAqFylMXU4gDmuZzIxhVMMxjialJNED+3ngqvX4w3
|
||||
4q4KSk1ytaHGwjREIErwPJjv5PK48KVJL2nlCuA+tbxu1r8eVkOUvZlxAuNNXk/U
|
||||
mf3QX5EiUlTtsmRAct6fIUT3jkrsHSS/tZ66EYJ9Q0OBoX2lL/Msmi27OQvA7uYn
|
||||
uqYlwJzU43tCsiip9E9z/UrLcMYyXx3oPJyPAgMBAAGjUzBRMB0GA1UdDgQWBBTI
|
||||
ahE8DDT4T1vta6cXVVaRjnel0zAfBgNVHSMEGDAWgBTIahE8DDT4T1vta6cXVVaR
|
||||
jnel0zAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQCQbsMtPFkq
|
||||
PxwOAIds3IoupuyIKmsF32ECEH/OlS+7Sj7MUJnGTQrwgjrsVS5sl8AmnGx4hPdL
|
||||
VX98nEcKMNkph3Hkvh4EvgjSfmYGUXuJBcYU5jqNQrlrGv37rEf5FnvdHV1F3MG8
|
||||
A0Mj0TLtcTdtaJFoOrnQuD/k0/1d+cMiYGTSaT5XK/unARqGEMd4BlWPh5P3SflV
|
||||
/Vy2hHlMpv7OcZ8yaAI3htENZLus+L5kjHWKu6dxlPHKu6ef5k64su2LTNE07Vr9
|
||||
S655uiFW5AX2wDVUcQEDCOiEn6SI9DTt5oQjWPMxPf+rEyfQ2f1QwVez7cyr6Qc5
|
||||
OIUk31HnM/Fj
|
||||
-----END CERTIFICATE-----
|
||||
`,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedConfig: `-----BEGIN CERTIFICATE-----
|
||||
MIIDZTCCAk2gAwIBAgIURbA8lR+5xlJZUoOXK66AHFWd3uswDQYJKoZIhvcNAQEL
|
||||
BQAwQjELMAkGA1UEBhMCWFgxFTATBgNVBAcMDERlZmF1bHQgQ2l0eTEcMBoGA1UE
|
||||
CgwTRGVmYXVsdCBDb21wYW55IEx0ZDAeFw0yMjA3MDgxOTUzMTVaFw0yMjA4MDcx
|
||||
OTUzMTVaMEIxCzAJBgNVBAYTAlhYMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkxHDAa
|
||||
BgNVBAoME0RlZmF1bHQgQ29tcGFueSBMdGQwggEiMA0GCSqGSIb3DQEBAQUAA4IB
|
||||
DwAwggEKAoIBAQCroH9c2PLWI0O/nBrmKtS2IuReyWaR0DOMJY7C/vc12l9zlH0D
|
||||
xTOUfEtdqRktjVsUn1vIIiFakxd0QLIPcMyKplmbavIBUQp+MZr0pNVX+lwcctbA
|
||||
7FVHEnbWYNVepoV7kZkTVvMXAqFylMXU4gDmuZzIxhVMMxjialJNED+3ngqvX4w3
|
||||
4q4KSk1ytaHGwjREIErwPJjv5PK48KVJL2nlCuA+tbxu1r8eVkOUvZlxAuNNXk/U
|
||||
mf3QX5EiUlTtsmRAct6fIUT3jkrsHSS/tZ66EYJ9Q0OBoX2lL/Msmi27OQvA7uYn
|
||||
uqYlwJzU43tCsiip9E9z/UrLcMYyXx3oPJyPAgMBAAGjUzBRMB0GA1UdDgQWBBTI
|
||||
ahE8DDT4T1vta6cXVVaRjnel0zAfBgNVHSMEGDAWgBTIahE8DDT4T1vta6cXVVaR
|
||||
jnel0zAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQCQbsMtPFkq
|
||||
PxwOAIds3IoupuyIKmsF32ECEH/OlS+7Sj7MUJnGTQrwgjrsVS5sl8AmnGx4hPdL
|
||||
VX98nEcKMNkph3Hkvh4EvgjSfmYGUXuJBcYU5jqNQrlrGv37rEf5FnvdHV1F3MG8
|
||||
A0Mj0TLtcTdtaJFoOrnQuD/k0/1d+cMiYGTSaT5XK/unARqGEMd4BlWPh5P3SflV
|
||||
/Vy2hHlMpv7OcZ8yaAI3htENZLus+L5kjHWKu6dxlPHKu6ef5k64su2LTNE07Vr9
|
||||
S655uiFW5AX2wDVUcQEDCOiEn6SI9DTt5oQjWPMxPf+rEyfQ2f1QwVez7cyr6Qc5
|
||||
OIUk31HnM/Fj
|
||||
-----END CERTIFICATE-----
|
||||
`,
|
||||
},
|
||||
}
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
|
||||
parents := asset.Parents{}
|
||||
parents.Add(tc.dependencies...)
|
||||
|
||||
asset := &CaBundle{}
|
||||
err := asset.Generate(parents)
|
||||
|
||||
if tc.expectedError != "" {
|
||||
assert.EqualError(t, err, tc.expectedError)
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
|
||||
files := asset.Files()
|
||||
if tc.expectedConfig != "" {
|
||||
assert.Len(t, files, 1)
|
||||
assert.Equal(t, CaBundleFilename, files[0].Filename)
|
||||
assert.Equal(t, tc.expectedConfig, string(files[0].Data))
|
||||
} else {
|
||||
if len(files) == 1 {
|
||||
assert.Equal(t, CaBundleFilename, files[0].Filename)
|
||||
assert.Equal(t, []byte{}, files[0].Data)
|
||||
} else {
|
||||
assert.Empty(t, files)
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCaBundle_LoadedFromDisk(t *testing.T) {
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
data string
|
||||
fetchError error
|
||||
expectedFound bool
|
||||
expectedError string
|
||||
}{
|
||||
{
|
||||
name: "valid-config-file",
|
||||
data: `
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIDZTCCAk2gAwIBAgIURbA8lR+5xlJZUoOXK66AHFWd3uswDQYJKoZIhvcNAQEL
|
||||
BQAwQjELMAkGA1UEBhMCWFgxFTATBgNVBAcMDERlZmF1bHQgQ2l0eTEcMBoGA1UE
|
||||
CgwTRGVmYXVsdCBDb21wYW55IEx0ZDAeFw0yMjA3MDgxOTUzMTVaFw0yMjA4MDcx
|
||||
OTUzMTVaMEIxCzAJBgNVBAYTAlhYMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkxHDAa
|
||||
BgNVBAoME0RlZmF1bHQgQ29tcGFueSBMdGQwggEiMA0GCSqGSIb3DQEBAQUAA4IB
|
||||
DwAwggEKAoIBAQCroH9c2PLWI0O/nBrmKtS2IuReyWaR0DOMJY7C/vc12l9zlH0D
|
||||
xTOUfEtdqRktjVsUn1vIIiFakxd0QLIPcMyKplmbavIBUQp+MZr0pNVX+lwcctbA
|
||||
7FVHEnbWYNVepoV7kZkTVvMXAqFylMXU4gDmuZzIxhVMMxjialJNED+3ngqvX4w3
|
||||
4q4KSk1ytaHGwjREIErwPJjv5PK48KVJL2nlCuA+tbxu1r8eVkOUvZlxAuNNXk/U
|
||||
mf3QX5EiUlTtsmRAct6fIUT3jkrsHSS/tZ66EYJ9Q0OBoX2lL/Msmi27OQvA7uYn
|
||||
uqYlwJzU43tCsiip9E9z/UrLcMYyXx3oPJyPAgMBAAGjUzBRMB0GA1UdDgQWBBTI
|
||||
ahE8DDT4T1vta6cXVVaRjnel0zAfBgNVHSMEGDAWgBTIahE8DDT4T1vta6cXVVaR
|
||||
jnel0zAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQCQbsMtPFkq
|
||||
PxwOAIds3IoupuyIKmsF32ECEH/OlS+7Sj7MUJnGTQrwgjrsVS5sl8AmnGx4hPdL
|
||||
VX98nEcKMNkph3Hkvh4EvgjSfmYGUXuJBcYU5jqNQrlrGv37rEf5FnvdHV1F3MG8
|
||||
A0Mj0TLtcTdtaJFoOrnQuD/k0/1d+cMiYGTSaT5XK/unARqGEMd4BlWPh5P3SflV
|
||||
/Vy2hHlMpv7OcZ8yaAI3htENZLus+L5kjHWKu6dxlPHKu6ef5k64su2LTNE07Vr9
|
||||
S655uiFW5AX2wDVUcQEDCOiEn6SI9DTt5oQjWPMxPf+rEyfQ2f1QwVez7cyr6Qc5
|
||||
OIUk31HnM/Fj
|
||||
-----END CERTIFICATE-----
|
||||
`,
|
||||
expectedFound: true,
|
||||
expectedError: "",
|
||||
},
|
||||
{
|
||||
name: "invalid-config-file",
|
||||
data: `
|
||||
-----BEGIN CERTIFICATE-----
|
||||
nope
|
||||
-----END CERTIFICATE-----
|
||||
`,
|
||||
expectedFound: true,
|
||||
expectedError: "x509: malformed certificate",
|
||||
},
|
||||
{
|
||||
name: "empty",
|
||||
data: "",
|
||||
expectedFound: true,
|
||||
expectedError: "",
|
||||
},
|
||||
{
|
||||
name: "file-not-found",
|
||||
fetchError: &os.PathError{Err: os.ErrNotExist},
|
||||
},
|
||||
{
|
||||
name: "error-fetching-file",
|
||||
fetchError: errors.New("fetch failed"),
|
||||
expectedError: "failed to load mirror/ca-bundle.crt file: fetch failed",
|
||||
},
|
||||
}
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
|
||||
mockCtrl := gomock.NewController(t)
|
||||
defer mockCtrl.Finish()
|
||||
|
||||
fileFetcher := mock.NewMockFileFetcher(mockCtrl)
|
||||
fileFetcher.EXPECT().FetchByName(CaBundleFilename).
|
||||
Return(
|
||||
&asset.File{
|
||||
Filename: CaBundleFilename,
|
||||
Data: []byte(tc.data)},
|
||||
tc.fetchError,
|
||||
)
|
||||
|
||||
asset := &CaBundle{}
|
||||
found, err := asset.Load(fileFetcher)
|
||||
assert.Equal(t, tc.expectedFound, found, "unexpected found value returned from Load")
|
||||
if tc.expectedError != "" {
|
||||
assert.Equal(t, tc.expectedError, err.Error())
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
}
|
||||
3
pkg/asset/agent/mirror/mirror.go
Normal file
@@ -0,0 +1,3 @@
|
||||
package mirror
|
||||
|
||||
const mirrorConfigDir = "mirror"
|
||||
228
pkg/asset/agent/mirror/registriesconf.go
Normal file
@@ -0,0 +1,228 @@
|
||||
package mirror
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/containers/image/pkg/sysregistriesv2"
|
||||
"github.com/openshift/installer/pkg/asset"
|
||||
"github.com/openshift/installer/pkg/asset/agent"
|
||||
"github.com/openshift/installer/pkg/asset/ignition/bootstrap"
|
||||
"github.com/pelletier/go-toml"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var (
|
||||
// RegistriesConfFilename defines the name of the file on disk
|
||||
RegistriesConfFilename = filepath.Join(mirrorConfigDir, "registries.conf")
|
||||
)
|
||||
|
||||
// The default registries.conf file is the podman default as it appears in
|
||||
// CoreOS, with no unqualified-search-registries.
|
||||
const defaultRegistriesConf = `
|
||||
# NOTE: RISK OF USING UNQUALIFIED IMAGE NAMES
|
||||
# We recommend always using fully qualified image names including the registry
|
||||
# server (full dns name), namespace, image name, and tag
|
||||
# (e.g., registry.redhat.io/ubi8/ubi:latest). Pulling by digest (i.e.,
|
||||
# quay.io/repository/name@digest) further eliminates the ambiguity of tags.
|
||||
# When using short names, there is always an inherent risk that the image being
|
||||
# pulled could be spoofed. For example, a user wants to pull an image named
|
||||
# 'foobar' from a registry and expects it to come from myregistry.com. If
|
||||
# myregistry.com is not first in the search list, an attacker could place a
|
||||
# different 'foobar' image at a registry earlier in the search list. The user
|
||||
# would accidentally pull and run the attacker's image and code rather than the
|
||||
# intended content. We recommend only adding registries which are completely
|
||||
# trusted (i.e., registries which don't allow unknown or anonymous users to
|
||||
# create accounts with arbitrary names). This will prevent an image from being
|
||||
# spoofed, squatted or otherwise made insecure. If it is necessary to use one
|
||||
# of these registries, it should be added at the end of the list.
|
||||
#
|
||||
# # An array of host[:port] registries to try when pulling an unqualified image, in order.
|
||||
|
||||
unqualified-search-registries = []
|
||||
|
||||
# [[registry]]
|
||||
# # The "prefix" field is used to choose the relevant [[registry]] TOML table;
|
||||
# # (only) the TOML table with the longest match for the input image name
|
||||
# # (taking into account namespace/repo/tag/digest separators) is used.
|
||||
# #
|
||||
# # The prefix can also be of the form: *.example.com for wildcard subdomain
|
||||
# # matching.
|
||||
# #
|
||||
# # If the prefix field is missing, it defaults to be the same as the "location" field.
|
||||
# prefix = "example.com/foo"
|
||||
#
|
||||
# # If true, unencrypted HTTP as well as TLS connections with untrusted
|
||||
# # certificates are allowed.
|
||||
# insecure = false
|
||||
#
|
||||
# # If true, pulling images with matching names is forbidden.
|
||||
# blocked = false
|
||||
#
|
||||
# # The physical location of the "prefix"-rooted namespace.
|
||||
# #
|
||||
# # By default, this is equal to "prefix" (in which case "prefix" can be omitted
|
||||
# # and the [[registry]] TOML table can only specify "location").
|
||||
# #
|
||||
# # Example: Given
|
||||
# # prefix = "example.com/foo"
|
||||
# # location = "internal-registry-for-example.net/bar"
|
||||
# # requests for the image example.com/foo/myimage:latest will actually work with the
|
||||
# # internal-registry-for-example.net/bar/myimage:latest image.
|
||||
#
|
||||
# # The location can be empty iff prefix is in a
|
||||
# # wildcarded format: "*.example.com". In this case, the input reference will
|
||||
# # be used as-is without any rewrite.
|
||||
# location = internal-registry-for-example.com/bar"
|
||||
#
|
||||
# # (Possibly-partial) mirrors for the "prefix"-rooted namespace.
|
||||
# #
|
||||
# # The mirrors are attempted in the specified order; the first one that can be
|
||||
# # contacted and contains the image will be used (and if none of the mirrors contains the image,
|
||||
# # the primary location specified by the "registry.location" field, or using the unmodified
|
||||
# # user-specified reference, is tried last).
|
||||
# #
|
||||
# # Each TOML table in the "mirror" array can contain the following fields, with the same semantics
|
||||
# # as if specified in the [[registry]] TOML table directly:
|
||||
# # - location
|
||||
# # - insecure
|
||||
# [[registry.mirror]]
|
||||
# location = "example-mirror-0.local/mirror-for-foo"
|
||||
# [[registry.mirror]]
|
||||
# location = "example-mirror-1.local/mirrors/foo"
|
||||
# insecure = true
|
||||
# # Given the above, a pull of example.com/foo/image:latest will try:
|
||||
# # 1. example-mirror-0.local/mirror-for-foo/image:latest
|
||||
# # 2. example-mirror-1.local/mirrors/foo/image:latest
|
||||
# # 3. internal-registry-for-example.net/bar/image:latest
|
||||
# # in order, and use the first one that exists.
|
||||
`
|
||||
|
||||
// RegistriesConf generates the registries.conf file.
|
||||
type RegistriesConf struct {
|
||||
File *asset.File
|
||||
MirrorConfig []RegistriesConfig
|
||||
}
|
||||
|
||||
// RegistriesConfig holds the data extracted from registries.conf
|
||||
type RegistriesConfig struct {
|
||||
Location string
|
||||
Mirror string
|
||||
}
|
||||
|
||||
var _ asset.WritableAsset = (*RegistriesConf)(nil)
|
||||
|
||||
// Name returns a human friendly name for the asset.
|
||||
func (*RegistriesConf) Name() string {
|
||||
return "Mirror Registries Config"
|
||||
}
|
||||
|
||||
// Dependencies returns all of the dependencies directly needed to generate
|
||||
// the asset.
|
||||
func (*RegistriesConf) Dependencies() []asset.Asset {
|
||||
return []asset.Asset{
|
||||
&agent.OptionalInstallConfig{},
|
||||
}
|
||||
}
|
||||
|
||||
// Generate generates the registries.conf file from install-config.
|
||||
func (i *RegistriesConf) Generate(dependencies asset.Parents) error {
|
||||
installConfig := &agent.OptionalInstallConfig{}
|
||||
dependencies.Get(installConfig)
|
||||
if !installConfig.Supplied || len(installConfig.Config.ImageContentSources) == 0 {
|
||||
i.File = &asset.File{
|
||||
Filename: RegistriesConfFilename,
|
||||
Data: []byte(defaultRegistriesConf),
|
||||
}
|
||||
return i.finish()
|
||||
}
|
||||
|
||||
registries := sysregistriesv2.V2RegistriesConf{
|
||||
Registries: []sysregistriesv2.Registry{},
|
||||
}
|
||||
for _, group := range bootstrap.MergedMirrorSets(installConfig.Config.ImageContentSources) {
|
||||
if len(group.Mirrors) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
registry := sysregistriesv2.Registry{}
|
||||
registry.Endpoint.Location = group.Source
|
||||
registry.MirrorByDigestOnly = true
|
||||
for _, mirror := range group.Mirrors {
|
||||
registry.Mirrors = append(registry.Mirrors, sysregistriesv2.Endpoint{Location: mirror})
|
||||
}
|
||||
registries.Registries = append(registries.Registries, registry)
|
||||
}
|
||||
|
||||
data, err := toml.Marshal(registries)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
i.File = &asset.File{
|
||||
Filename: RegistriesConfFilename,
|
||||
Data: data,
|
||||
}
|
||||
|
||||
return i.finish()
|
||||
}
|
||||
|
||||
// Files returns the files generated by the asset.
|
||||
func (i *RegistriesConf) Files() []*asset.File {
|
||||
if i.File != nil {
|
||||
return []*asset.File{i.File}
|
||||
}
|
||||
return []*asset.File{}
|
||||
}
|
||||
|
||||
// Load returns RegistriesConf asset from the disk.
|
||||
func (i *RegistriesConf) Load(f asset.FileFetcher) (bool, error) {
|
||||
|
||||
file, err := f.FetchByName(RegistriesConfFilename)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return false, nil
|
||||
}
|
||||
return false, errors.Wrap(err, fmt.Sprintf("failed to load %s file", RegistriesConfFilename))
|
||||
}
|
||||
|
||||
i.File = file
|
||||
|
||||
if err = i.finish(); err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (i *RegistriesConf) finish() error {
|
||||
|
||||
config, err := extractLocationMirrorDataFromRegistries(i.File.Data)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, fmt.Sprintf("failed to parse mirrors in %s", RegistriesConfFilename))
|
||||
}
|
||||
|
||||
i.MirrorConfig = config
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// From assisted-service pkg/mirrorregistries/generator.go
|
||||
func extractLocationMirrorDataFromRegistries(registriesConfToml []byte) ([]RegistriesConfig, error) {
|
||||
registries := sysregistriesv2.V2RegistriesConf{}
|
||||
err := toml.Unmarshal(registriesConfToml, ®istries)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
registriesConfList := make([]RegistriesConfig, len(registries.Registries))
|
||||
for i, reg := range registries.Registries {
|
||||
registriesConfList[i] = RegistriesConfig{
|
||||
Location: reg.Location,
|
||||
Mirror: reg.Mirrors[0].Location,
|
||||
}
|
||||
}
|
||||
|
||||
return registriesConfList, nil
|
||||
}
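
// Illustrative sketch (not part of this commit): the mirror pairs surfaced in
// MirrorConfig after loading a registries.conf like the one in the test data
// below. fileFetcher stands for any asset.FileFetcher serving the file.
//
//	conf := &RegistriesConf{}
//	if found, err := conf.Load(fileFetcher); found && err == nil {
//		// conf.MirrorConfig[0].Location == "registry.ci.openshift.org/ocp/release"
//		// conf.MirrorConfig[0].Mirror   == "virthost.ostest.test.metalkube.org:5000/localimages/local-release-image"
//	}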
|
||||
184
pkg/asset/agent/mirror/registriesconf_test.go
Normal file
@@ -0,0 +1,184 @@
|
||||
package mirror
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/golang/mock/gomock"
|
||||
"github.com/stretchr/testify/assert"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/openshift/installer/pkg/asset"
|
||||
"github.com/openshift/installer/pkg/asset/agent"
|
||||
"github.com/openshift/installer/pkg/asset/installconfig"
|
||||
"github.com/openshift/installer/pkg/asset/mock"
|
||||
"github.com/openshift/installer/pkg/types"
|
||||
)
|
||||
|
||||
func TestRegistriesConf_Generate(t *testing.T) {
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
dependencies []asset.Asset
|
||||
expectedError string
|
||||
expectedConfig string
|
||||
}{
|
||||
{
|
||||
name: "missing-config",
|
||||
dependencies: []asset.Asset{
|
||||
&agent.OptionalInstallConfig{},
|
||||
},
|
||||
expectedConfig: defaultRegistriesConf,
|
||||
},
|
||||
{
|
||||
name: "default",
|
||||
dependencies: []asset.Asset{
|
||||
&agent.OptionalInstallConfig{
|
||||
Supplied: true,
|
||||
InstallConfig: installconfig.InstallConfig{
|
||||
Config: &types.InstallConfig{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Namespace: "cluster-0",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedConfig: defaultRegistriesConf,
|
||||
},
|
||||
{
|
||||
name: "image-content-sources",
|
||||
dependencies: []asset.Asset{
|
||||
&agent.OptionalInstallConfig{
|
||||
Supplied: true,
|
||||
InstallConfig: installconfig.InstallConfig{
|
||||
Config: &types.InstallConfig{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Namespace: "cluster-0",
|
||||
},
|
||||
ImageContentSources: []types.ImageContentSource{
|
||||
{
|
||||
Source: "registry.ci.openshift.org/ocp/release",
|
||||
Mirrors: []string{
|
||||
"virthost.ostest.test.metalkube.org:5000/localimages/local-release-image",
|
||||
},
|
||||
},
|
||||
{
|
||||
Source: "quay.io/openshift-release-dev/ocp-v4.0-art-dev",
|
||||
Mirrors: []string{
|
||||
"virthost.ostest.test.metalkube.org:5000/localimages/local-release-image",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedConfig: `unqualified-search-registries = []
|
||||
|
||||
[[registry]]
|
||||
location = "registry.ci.openshift.org/ocp/release"
|
||||
mirror-by-digest-only = true
|
||||
prefix = ""
|
||||
|
||||
[[registry.mirror]]
|
||||
location = "virthost.ostest.test.metalkube.org:5000/localimages/local-release-image"
|
||||
|
||||
[[registry]]
|
||||
location = "quay.io/openshift-release-dev/ocp-v4.0-art-dev"
|
||||
mirror-by-digest-only = true
|
||||
prefix = ""
|
||||
|
||||
[[registry.mirror]]
|
||||
location = "virthost.ostest.test.metalkube.org:5000/localimages/local-release-image"
|
||||
`,
|
||||
},
|
||||
}
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
|
||||
parents := asset.Parents{}
|
||||
parents.Add(tc.dependencies...)
|
||||
|
||||
asset := &RegistriesConf{}
|
||||
err := asset.Generate(parents)
|
||||
|
||||
if tc.expectedError != "" {
|
||||
assert.EqualError(t, err, tc.expectedError)
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
|
||||
files := asset.Files()
|
||||
assert.Len(t, files, 1)
|
||||
assert.Equal(t, tc.expectedConfig, string(files[0].Data))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestRegistries_LoadedFromDisk(t *testing.T) {
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
data string
|
||||
fetchError error
|
||||
expectedFound bool
|
||||
expectedError string
|
||||
}{
|
||||
{
|
||||
name: "valid-config-file",
|
||||
data: `
|
||||
[[registry]]
|
||||
location = "registry.ci.openshift.org/ocp/release"
|
||||
mirror-by-digest-only = false
|
||||
|
||||
[[registry.mirror]]
|
||||
location = "virthost.ostest.test.metalkube.org:5000/localimages/local-release-image"
|
||||
|
||||
[[registry]]
|
||||
location = "quay.io/openshift-release-dev/ocp-v4.0-art-dev"
|
||||
mirror-by-digest-only = false
|
||||
|
||||
[[registry.mirror]]
|
||||
location = "virthost.ostest.test.metalkube.org:5000/localimages/local-release-image"`,
|
||||
expectedFound: true,
|
||||
expectedError: "",
|
||||
},
|
||||
{
|
||||
name: "file-not-found",
|
||||
fetchError: &os.PathError{Err: os.ErrNotExist},
|
||||
},
|
||||
{
|
||||
name: "error-fetching-file",
|
||||
fetchError: errors.New("fetch failed"),
|
||||
expectedError: "failed to load mirror/registries.conf file: fetch failed",
|
||||
},
|
||||
}
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
|
||||
mockCtrl := gomock.NewController(t)
|
||||
defer mockCtrl.Finish()
|
||||
|
||||
fileFetcher := mock.NewMockFileFetcher(mockCtrl)
|
||||
fileFetcher.EXPECT().FetchByName(RegistriesConfFilename).
|
||||
Return(
|
||||
&asset.File{
|
||||
Filename: RegistriesConfFilename,
|
||||
Data: []byte(tc.data)},
|
||||
tc.fetchError,
|
||||
)
|
||||
|
||||
asset := &RegistriesConf{}
|
||||
found, err := asset.Load(fileFetcher)
|
||||
assert.Equal(t, tc.expectedFound, found, "unexpected found value returned from Load")
|
||||
if tc.expectedError != "" {
|
||||
assert.Equal(t, tc.expectedError, err.Error())
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
}
|
||||
20
pkg/asset/filewriter.go
Normal file
@@ -0,0 +1,20 @@
|
||||
package asset

// FileWriter is the interface used to write all the files in the specified location.
type FileWriter interface {
	PersistToFile(directory string) error
}

// NewDefaultFileWriter creates a new adapter to expose the default implementation as a FileWriter.
func NewDefaultFileWriter(a WritableAsset) FileWriter {
	return &fileWriterAdapter{a: a}
}

type fileWriterAdapter struct {
	a WritableAsset
}

// PersistToFile wraps the default implementation.
func (fwa *fileWriterAdapter) PersistToFile(directory string) error {
	return PersistToFile(fwa.a, directory)
}
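
// Illustrative sketch (not part of this commit): persisting a writable asset
// through the adapter. The asset value and target directory are placeholders.
//
//	var a WritableAsset // e.g. one of the agent manifest assets
//	if err := NewDefaultFileWriter(a).PersistToFile("output-dir"); err != nil {
//		// handle error
//	}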
|
||||
@@ -41,10 +41,10 @@ func (a *SingleNodeBootstrapInPlace) Generate(dependencies asset.Parents) error
|
||||
if err := a.generateConfig(dependencies, templateData); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := a.addStorageFiles("/", "bootstrap/bootstrap-in-place/files", templateData); err != nil {
|
||||
if err := AddStorageFiles(a.Config, "/", "bootstrap/bootstrap-in-place/files", templateData); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := a.addSystemdUnits("bootstrap/bootstrap-in-place/systemd/units", templateData, bootstrapInPlaceEnabledServices); err != nil {
|
||||
if err := AddSystemdUnits(a.Config, "bootstrap/bootstrap-in-place/systemd/units", templateData, bootstrapInPlaceEnabledServices); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := a.Common.generateFile(singleNodeBootstrapInPlaceIgnFilename); err != nil {
|
||||
|
||||
@@ -164,10 +164,10 @@ func (a *Common) generateConfig(dependencies asset.Parents, templateData *bootst
|
||||
},
|
||||
}
|
||||
|
||||
if err := a.addStorageFiles("/", "bootstrap/files", templateData); err != nil {
|
||||
if err := AddStorageFiles(a.Config, "/", "bootstrap/files", templateData); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := a.addSystemdUnits("bootstrap/systemd/units", templateData, commonEnabledServices); err != nil {
|
||||
if err := AddSystemdUnits(a.Config, "bootstrap/systemd/units", templateData, commonEnabledServices); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -177,7 +177,7 @@ func (a *Common) generateConfig(dependencies asset.Parents, templateData *bootst
|
||||
directory, err := data.Assets.Open(platformFilePath)
|
||||
if err == nil {
|
||||
directory.Close()
|
||||
err = a.addStorageFiles("/", platformFilePath, templateData)
|
||||
err = AddStorageFiles(a.Config, "/", platformFilePath, templateData)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -187,7 +187,7 @@ func (a *Common) generateConfig(dependencies asset.Parents, templateData *bootst
|
||||
directory, err = data.Assets.Open(platformUnitPath)
|
||||
if err == nil {
|
||||
directory.Close()
|
||||
if err = a.addSystemdUnits(platformUnitPath, templateData, commonEnabledServices); err != nil {
|
||||
if err = AddSystemdUnits(a.Config, platformUnitPath, templateData, commonEnabledServices); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@@ -242,7 +242,7 @@ func (a *Common) getTemplateData(dependencies asset.Parents, bootstrapInPlace bo
|
||||
}
|
||||
|
||||
registries := []sysregistriesv2.Registry{}
|
||||
for _, group := range mergedMirrorSets(installConfig.Config.ImageContentSources) {
|
||||
for _, group := range MergedMirrorSets(installConfig.Config.ImageContentSources) {
|
||||
if len(group.Mirrors) == 0 {
|
||||
continue
|
||||
}
|
||||
@@ -304,7 +304,13 @@ func (a *Common) getTemplateData(dependencies asset.Parents, bootstrapInPlace bo
|
||||
}
|
||||
}
|
||||
|
||||
func (a *Common) addStorageFiles(base string, uri string, templateData *bootstrapTemplateData) (err error) {
|
||||
// AddStorageFiles adds files to an Ignition config.
|
||||
// Parameters:
|
||||
// config - the ignition config to be modified
|
||||
// base - path where the files are written to in the config
|
||||
// uri - path under data/data specifying the files to be included
|
||||
// templateData - struct used to render templates
|
||||
func AddStorageFiles(config *igntypes.Config, base string, uri string, templateData interface{}) (err error) {
|
||||
file, err := data.Assets.Open(uri)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -327,7 +333,7 @@ func (a *Common) addStorageFiles(base string, uri string, templateData *bootstra
|
||||
|
||||
for _, childInfo := range children {
|
||||
name := childInfo.Name()
|
||||
err = a.addStorageFiles(path.Join(base, name), path.Join(uri, name), templateData)
|
||||
err = AddStorageFiles(config, path.Join(base, name), path.Join(uri, name), templateData)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -348,7 +354,7 @@ func (a *Common) addStorageFiles(base string, uri string, templateData *bootstra
|
||||
appendToFile := false
|
||||
if parentDir == "bin" || parentDir == "dispatcher.d" {
|
||||
mode = 0555
|
||||
} else if filename == "motd" {
|
||||
} else if filename == "motd" || filename == "containers.conf" {
|
||||
mode = 0644
|
||||
appendToFile = true
|
||||
} else {
|
||||
@@ -360,12 +366,18 @@ func (a *Common) addStorageFiles(base string, uri string, templateData *bootstra
|
||||
}
|
||||
|
||||
// Replace files that already exist in the slice with ones added later, otherwise append them
|
||||
a.Config.Storage.Files = replaceOrAppend(a.Config.Storage.Files, ign)
|
||||
config.Storage.Files = replaceOrAppend(config.Storage.Files, ign)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *Common) addSystemdUnits(uri string, templateData *bootstrapTemplateData, enabledServices []string) (err error) {
|
||||
// AddSystemdUnits adds systemd units to an Ignition config.
|
||||
// Parameters:
|
||||
// config - the ignition config to be modified
|
||||
// uri - path under data/data specifying the systemd units files to be included
|
||||
// templateData - struct used to render templates
|
||||
// enabledServices - a list of systemd units to be enabled by default
|
||||
func AddSystemdUnits(config *igntypes.Config, uri string, templateData interface{}, enabledServices []string) (err error) {
|
||||
enabled := make(map[string]struct{}, len(enabledServices))
|
||||
for _, s := range enabledServices {
|
||||
enabled[s] = struct{}{}
|
||||
@@ -436,7 +448,7 @@ func (a *Common) addSystemdUnits(uri string, templateData *bootstrapTemplateData
|
||||
if _, ok := enabled[name]; ok {
|
||||
unit.Enabled = ignutil.BoolToPtr(true)
|
||||
}
|
||||
a.Config.Systemd.Units = append(a.Config.Systemd.Units, unit)
|
||||
config.Systemd.Units = append(config.Systemd.Units, unit)
|
||||
} else {
|
||||
name, contents, err := readFile(childInfo.Name(), file, templateData)
|
||||
if err != nil {
|
||||
@@ -450,7 +462,7 @@ func (a *Common) addSystemdUnits(uri string, templateData *bootstrapTemplateData
|
||||
if _, ok := enabled[name]; ok {
|
||||
unit.Enabled = ignutil.BoolToPtr(true)
|
||||
}
|
||||
a.Config.Systemd.Units = append(a.Config.Systemd.Units, unit)
|
||||
config.Systemd.Units = append(config.Systemd.Units, unit)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -6,7 +6,9 @@ import (
	"github.com/openshift/installer/pkg/types"
)

-func mergedMirrorSets(sources []types.ImageContentSource) []types.ImageContentSource {
+// MergedMirrorSets consolidates a list of ImageContentSources so that each
+// source appears only once.
+func MergedMirrorSets(sources []types.ImageContentSource) []types.ImageContentSource {
	sourceSet := make(map[string][]string)
	mirrorSet := make(map[string]sets.String)
	orderedSources := []string{}

@@ -120,7 +120,7 @@ func TestMergedMirrorSets(t *testing.T) {
	}}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
-			assert.Equal(t, test.expected, mergedMirrorSets(test.input))
+			assert.Equal(t, test.expected, MergedMirrorSets(test.input))
		})
	}
}

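To make the consolidation concrete, here is a hedged sketch of calling the exported function with two entries that share a source; the hunk above does not show which package this file belongs to, so the `mirror` import path and the registry names are invented for illustration.

```go
// Hedged sketch: the import path for MergedMirrorSets is a guess, and the
// registry names are placeholders.
package example

import (
	"fmt"

	"github.com/openshift/installer/pkg/asset/agent/mirror"
	"github.com/openshift/installer/pkg/types"
)

func demoMergedMirrorSets() {
	in := []types.ImageContentSource{
		{Source: "quay.io/openshift-release-dev/ocp-release", Mirrors: []string{"registry.example.com/release"}},
		{Source: "quay.io/openshift-release-dev/ocp-release", Mirrors: []string{"backup.example.com/release"}},
	}
	// Per the new doc comment, the shared source should appear once in the
	// result, carrying both mirrors.
	fmt.Println(mirror.MergedMirrorSets(in))
}
```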
46
pkg/asset/kubeconfig/agent.go
Normal file
@@ -0,0 +1,46 @@
package kubeconfig

import (
	"fmt"
	"strings"

	"github.com/openshift/installer/pkg/asset"
	agentmanifests "github.com/openshift/installer/pkg/asset/agent/manifests"
	"github.com/openshift/installer/pkg/asset/tls"
)

// AgentAdminClient is the asset for the agent admin kubeconfig.
type AgentAdminClient struct {
	AdminClient
}

// Dependencies returns the dependency of the kubeconfig.
func (k *AgentAdminClient) Dependencies() []asset.Asset {
	return []asset.Asset{
		&tls.AdminKubeConfigClientCertKey{},
		&tls.KubeAPIServerCompleteCABundle{},
		&agentmanifests.ClusterDeployment{},
	}
}

// Generate generates the kubeconfig.
func (k *AgentAdminClient) Generate(parents asset.Parents) error {
	ca := &tls.KubeAPIServerCompleteCABundle{}
	clientCertKey := &tls.AdminKubeConfigClientCertKey{}
	parents.Get(ca, clientCertKey)

	clusterDeployment := &agentmanifests.ClusterDeployment{}
	parents.Get(clusterDeployment)

	clusterName := clusterDeployment.Config.Spec.ClusterName
	extAPIServerURL := fmt.Sprintf("https://api.%s.%s:6443", clusterName, strings.TrimSuffix(clusterDeployment.Config.Spec.BaseDomain, "."))

	return k.kubeconfig.generate(
		ca,
		clientCertKey,
		extAPIServerURL,
		clusterName,
		"admin",
		kubeconfigAdminPath,
	)
}

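The external API server URL is derived from the `ClusterDeployment` fields; a worked example with made-up cluster values:

```go
// Worked example of the URL construction in Generate, with invented values.
package example

import (
	"fmt"
	"strings"
)

func main() {
	clusterName := "ostest"
	baseDomain := "example.com." // a trailing dot is stripped, as in Generate
	url := fmt.Sprintf("https://api.%s.%s:6443", clusterName, strings.TrimSuffix(baseDomain, "."))
	fmt.Println(url) // https://api.ostest.example.com:6443
}
```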
@@ -56,7 +56,7 @@ func (atbc *AdditionalTrustBundleConfig) Generate(dependencies asset.Parents) er
	if installConfig.Config.AdditionalTrustBundle == "" {
		return nil
	}
-	data, err := parseCertificates(installConfig.Config.AdditionalTrustBundle)
+	data, err := ParseCertificates(installConfig.Config.AdditionalTrustBundle)

	if err != nil {
		return err
@@ -99,7 +99,8 @@ func (atbc *AdditionalTrustBundleConfig) Load(f asset.FileFetcher) (bool, error)
	return false, nil
}

-func parseCertificates(certificates string) (map[string]string, error) {
+// ParseCertificates parses and verifies a PEM certificate bundle
+func ParseCertificates(certificates string) (map[string]string, error) {
	rest := []byte(certificates)
	var sb strings.Builder
	for {

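Exporting `ParseCertificates` lets other assets validate a PEM bundle before embedding it. A minimal sketch follows, assuming the asset package path below (the diff does not show the file's location) and not relying on the key semantics of the returned map.

```go
// Minimal sketch: the manifests import path is an assumption, and the PEM
// file path is a placeholder.
package example

import (
	"fmt"
	"log"
	"os"

	"github.com/openshift/installer/pkg/asset/manifests"
)

func validateBundle(path string) {
	pem, err := os.ReadFile(path)
	if err != nil {
		log.Fatal(err)
	}
	certs, err := manifests.ParseCertificates(string(pem))
	if err != nil {
		log.Fatalf("additional trust bundle is not valid PEM: %v", err)
	}
	for name, cert := range certs {
		fmt.Printf("parsed certificate %q (%d bytes)\n", name, len(cert))
	}
}
```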
7
pkg/types/agent/OWNERS
Normal file
@@ -0,0 +1,7 @@
# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md
# This file just uses aliases defined in OWNERS_ALIASES.

approvers:
- agent-approvers
reviewers:
- agent-reviewers
34
pkg/types/agent/agent_config_type.go
Normal file
@@ -0,0 +1,34 @@
package agent

import (
	aiv1beta1 "github.com/openshift/assisted-service/api/v1beta1"
	"github.com/openshift/installer/pkg/types/baremetal"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// AgentConfigVersion is the version supported by this package.
// If you bump this, you must also update the list of convertible values in
// pkg/types/conversion/agentconfig.go
const AgentConfigVersion = "v1alpha1"

// Config, also known as AgentConfig, is the API for specifying additional
// configuration for the agent-based installer not covered by
// install-config.
type Config struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	// IP address of node0
	RendezvousIP string `json:"rendezvousIP,omitempty"`
	Hosts        []Host `json:"hosts,omitempty"`
}

// Host defines per-host configurations
type Host struct {
	Hostname        string                    `json:"hostname,omitempty"`
	Role            string                    `json:"role,omitempty"`
	RootDeviceHints baremetal.RootDeviceHints `json:"rootDeviceHints,omitempty"`
	// list of interfaces and MAC addresses
	Interfaces    []*aiv1beta1.Interface `json:"interfaces,omitempty"`
	NetworkConfig aiv1beta1.NetConfig    `json:"networkConfig,omitempty"`
}

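For orientation, a hedged Go example of how the new `Config` and `Host` types compose; every concrete value (IP, hostname, role, device path) is invented, and the `DeviceName` hint field is assumed from the baremetal `RootDeviceHints` type.

```go
// Illustrative values only: shows how Config and Host fit together.
package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/openshift/installer/pkg/types/agent"
	"github.com/openshift/installer/pkg/types/baremetal"
)

func exampleAgentConfig() agent.Config {
	return agent.Config{
		TypeMeta:     metav1.TypeMeta{APIVersion: agent.AgentConfigVersion},
		RendezvousIP: "192.168.111.20", // made-up node0 address
		Hosts: []agent.Host{
			{
				Hostname:        "control-plane-0",
				Role:            "master",
				RootDeviceHints: baremetal.RootDeviceHints{DeviceName: "/dev/sda"},
			},
		},
	}
}
```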
28
pkg/types/agent/conversion/agentconfig.go
Normal file
@@ -0,0 +1,28 @@
package conversion

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation/field"

	"github.com/openshift/installer/pkg/types/agent"
)

// ConvertAgentConfig is modeled after the k8s conversion schemes, which is
// how deprecated values are upconverted.
// This updates the APIVersion to reflect the fact that we've internally
// upconverted.
func ConvertAgentConfig(config *agent.Config) error {
	// check that the version is convertible
	switch config.APIVersion {
	case agent.AgentConfigVersion:
		// works
	case "":
		return field.Required(field.NewPath("apiVersion"), "no version was provided")
	default:
		return field.Invalid(field.NewPath("apiVersion"), config.APIVersion, fmt.Sprintf("cannot upconvert from version %s", config.APIVersion))
	}

	config.APIVersion = agent.AgentConfigVersion
	return nil
}

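A typical caller would unmarshal an agent-config document and then upconvert it; the sketch below assumes `sigs.k8s.io/yaml` for decoding and uses a placeholder file path.

```go
// Sketch only: load an agent-config file, decode it via its json tags, and
// normalize its apiVersion with ConvertAgentConfig.
package example

import (
	"os"

	"sigs.k8s.io/yaml"

	"github.com/openshift/installer/pkg/types/agent"
	agentconversion "github.com/openshift/installer/pkg/types/agent/conversion"
)

func loadAgentConfig(path string) (*agent.Config, error) {
	raw, err := os.ReadFile(path)
	if err != nil {
		return nil, err
	}
	config := &agent.Config{}
	if err := yaml.Unmarshal(raw, config); err != nil {
		return nil, err
	}
	// Rejects a missing or unknown apiVersion and pins the rest to
	// agent.AgentConfigVersion.
	if err := agentconversion.ConvertAgentConfig(config); err != nil {
		return nil, err
	}
	return config, nil
}
```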
59
pkg/types/agent/conversion/agentconfig_test.go
Normal file
@@ -0,0 +1,59 @@
package conversion

import (
	"testing"

	"github.com/stretchr/testify/assert"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/openshift/installer/pkg/types/agent"
)

func TestConvertAgentConfig(t *testing.T) {
	cases := []struct {
		name          string
		config        *agent.Config
		expected      *agent.Config
		expectedError string
	}{
		{
			name: "empty",
			config: &agent.Config{
				TypeMeta: metav1.TypeMeta{
					APIVersion: agent.AgentConfigVersion,
				},
			},
			expected: &agent.Config{
				TypeMeta: metav1.TypeMeta{
					APIVersion: agent.AgentConfigVersion,
				},
			},
		},
		{
			name:          "no version",
			config:        &agent.Config{},
			expectedError: "no version was provided",
		},
		{
			name: "bad version",
			config: &agent.Config{
				TypeMeta: metav1.TypeMeta{
					APIVersion: "v1alpha0",
				},
			},
			expectedError: "cannot upconvert from version v1alpha0",
		},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			err := ConvertAgentConfig(tc.config)
			if tc.expectedError == "" {
				assert.NoError(t, err)
				assert.Equal(t, tc.expected, tc.config, "unexpected install config")
			} else {
				assert.Regexp(t, tc.expectedError, err)
			}
		})
	}
}

3
vendor/github.com/cavaliercoder/go-cpio/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,3 @@
.fuzz/
*.zip

10
vendor/github.com/cavaliercoder/go-cpio/.travis.yml
generated
vendored
Normal file
@@ -0,0 +1,10 @@
language: go

go:
  - 1.4.3
  - 1.5.4
  - 1.6.4
  - 1.7.6
  - 1.8.3

script: make check
26
vendor/github.com/cavaliercoder/go-cpio/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,26 @@
Copyright (c) 2017 Ryan Armstrong. All rights reserved.

Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.

2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.

3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
18
vendor/github.com/cavaliercoder/go-cpio/Makefile
generated
vendored
Normal file
@@ -0,0 +1,18 @@
PACKAGE = github.com/cavaliercoder/go-cpio

all: check

check:
	go test -v

cpio-fuzz.zip: *.go
	go-fuzz-build $(PACKAGE)

fuzz: cpio-fuzz.zip
	go-fuzz -bin=./cpio-fuzz.zip -workdir=.fuzz/

clean-fuzz:
	rm -rf cpio-fuzz.zip .fuzz/crashers/* .fuzz/suppressions/*


.PHONY: all check
62
vendor/github.com/cavaliercoder/go-cpio/README.md
generated
vendored
Normal file
@@ -0,0 +1,62 @@
# go-cpio [GoDoc](https://godoc.org/github.com/cavaliercoder/go-cpio) [Build Status](https://travis-ci.org/cavaliercoder/go-cpio) [Go Report Card](https://goreportcard.com/report/github.com/cavaliercoder/go-cpio)

This package provides a Go native implementation of the CPIO archive file
format.

Currently, only the SVR4 (New ASCII) format is supported, both with and without
checksums.

```go
// Create a buffer to write our archive to.
buf := new(bytes.Buffer)

// Create a new cpio archive.
w := cpio.NewWriter(buf)

// Add some files to the archive.
var files = []struct {
	Name, Body string
}{
	{"readme.txt", "This archive contains some text files."},
	{"gopher.txt", "Gopher names:\nGeorge\nGeoffrey\nGonzo"},
	{"todo.txt", "Get animal handling license."},
}
for _, file := range files {
	hdr := &cpio.Header{
		Name: file.Name,
		Mode: 0600,
		Size: int64(len(file.Body)),
	}
	if err := w.WriteHeader(hdr); err != nil {
		log.Fatalln(err)
	}
	if _, err := w.Write([]byte(file.Body)); err != nil {
		log.Fatalln(err)
	}
}
// Make sure to check the error on Close.
if err := w.Close(); err != nil {
	log.Fatalln(err)
}

// Open the cpio archive for reading.
b := bytes.NewReader(buf.Bytes())
r := cpio.NewReader(b)

// Iterate through the files in the archive.
for {
	hdr, err := r.Next()
	if err == io.EOF {
		// end of cpio archive
		break
	}
	if err != nil {
		log.Fatalln(err)
	}
	fmt.Printf("Contents of %s:\n", hdr.Name)
	if _, err := io.Copy(os.Stdout, r); err != nil {
		log.Fatalln(err)
	}
	fmt.Println()
}
```
Some files were not shown because too many files have changed in this diff.