mirror of https://github.com/openshift/installer.git synced 2026-02-06 00:48:45 +01:00

feat: add arbiter role support to ABI

Added support for arbiter installs to the ABI flow. Agent-based installs do not
currently support the TechPreview featureSet, so this change also adds the
capability to override the featureSet that is passed to the assisted service.

Signed-off-by: ehila <ehila@redhat.com>
Author: ehila
Date: 2025-07-01 07:36:15 -04:00
parent 72f57f4fd6
commit 75fa733182
17 changed files with 209 additions and 33 deletions
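For orientation, a minimal sketch (not part of the diff) of the install-config shape this feature targets: a control plane plus a single arbiter machine pool and no compute replicas. It assumes the installer's pkg/types API and mirrors the replica counts used by the getValidOptionalInstallConfigArbiter test helper added below; the ABI assets in this commit read the ControlPlane, Arbiter, and Compute replicas from this structure to derive the ProvisionRequirements.

// Sketch only: assumes github.com/openshift/installer/pkg/types as used in the
// test helpers below.
package main

import (
    "fmt"

    "k8s.io/utils/ptr"

    "github.com/openshift/installer/pkg/types"
)

func main() {
    ic := &types.InstallConfig{
        ControlPlane: &types.MachinePool{
            Name:     "master",
            Replicas: ptr.To(int64(3)),
        },
        // New in this flow: an arbiter machine pool, threaded through the ABI assets.
        Arbiter: &types.MachinePool{
            Name:     "arbiter",
            Replicas: ptr.To(int64(1)),
        },
        Compute: []types.MachinePool{
            {Name: "worker", Replicas: ptr.To(int64(0))},
        },
    }
    fmt.Printf("masters=%d arbiters=%d workers=%d\n",
        *ic.ControlPlane.Replicas, *ic.Arbiter.Replicas, *ic.Compute[0].Replicas)
}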

View File

@@ -50,6 +50,7 @@ spec:
status:
agentLabelSelector: {}
bootArtifacts:
discoveryIgnitionURL: ""
initrd: ""
ipxeScript: ""
kernel: ""

View File

@@ -74,6 +74,7 @@ spec:
status:
agentLabelSelector: {}
bootArtifacts:
discoveryIgnitionURL: ""
initrd: ""
ipxeScript: ""
kernel: ""

View File

@@ -142,6 +142,7 @@ spec:
status:
agentLabelSelector: {}
bootArtifacts:
discoveryIgnitionURL: ""
initrd: ""
ipxeScript: ""
kernel: ""

View File

@@ -81,6 +81,7 @@ spec:
status:
agentLabelSelector: {}
bootArtifacts:
discoveryIgnitionURL: ""
initrd: ""
ipxeScript: ""
kernel: ""

View File

@@ -82,6 +82,7 @@ spec:
status:
agentLabelSelector: {}
bootArtifacts:
discoveryIgnitionURL: ""
initrd: ""
ipxeScript: ""
kernel: ""

View File

@@ -18,8 +18,9 @@ done
printf '\nInfra env id is %s\n' "${INFRA_ENV_ID}" 1>&2
total_required_nodes=$(( REQUIRED_MASTER_NODES + REQUIRED_WORKER_NODES ))
total_required_nodes=$(( REQUIRED_MASTER_NODES + REQUIRED_ARBITER_NODES + REQUIRED_WORKER_NODES ))
echo "Number of required master nodes: ${REQUIRED_MASTER_NODES}" 1>&2
echo "Number of required arbiter nodes: ${REQUIRED_ARBITER_NODES}" 1>&2
echo "Number of required worker nodes: ${REQUIRED_WORKER_NODES}" 1>&2
echo "Total number of required nodes: ${total_required_nodes}" 1>&2

View File

@@ -9,9 +9,10 @@ DISK_ENCRYPTION_SUPPORT=true
DUMMY_IGNITION=false
ENABLE_SINGLE_NODE_DNSMASQ=true
EPHEMERAL_INSTALLER_CLUSTER_TLS_CERTS_OVERRIDE_DIR=/opt/agent/tls
HW_VALIDATOR_REQUIREMENTS=[{"version":"default","master":{"cpu_cores":4,"ram_mib":16384,"disk_size_gb":100,"installation_disk_speed_threshold_ms":10,"network_latency_threshold_ms":100,"packet_loss_percentage":0},"worker":{"cpu_cores":2,"ram_mib":8192,"disk_size_gb":100,"installation_disk_speed_threshold_ms":10,"network_latency_threshold_ms":1000,"packet_loss_percentage":10},"sno":{"cpu_cores":8,"ram_mib":16384,"disk_size_gb":100,"installation_disk_speed_threshold_ms":10}}]
HW_VALIDATOR_REQUIREMENTS=[{"version":"default","master":{"cpu_cores":4,"ram_mib":16384,"disk_size_gb":100,"installation_disk_speed_threshold_ms":10,"network_latency_threshold_ms":100,"packet_loss_percentage":0},"arbiter":{"cpu_cores":2,"ram_mib":8192,"disk_size_gb":50,"installation_disk_speed_threshold_ms":10,"network_latency_threshold_ms":1000,"packet_loss_percentage":0},"worker":{"cpu_cores":2,"ram_mib":8192,"disk_size_gb":100,"installation_disk_speed_threshold_ms":10,"network_latency_threshold_ms":1000,"packet_loss_percentage":10},"sno":{"cpu_cores":8,"ram_mib":16384,"disk_size_gb":100,"installation_disk_speed_threshold_ms":10}}]
INSTALL_INVOKER=agent-installer
IPV6_SUPPORT=true
TNA_CLUSTERS_SUPPORT=true
LOG_LEVEL=debug
NTP_DEFAULT_SERVER=
PUBLIC_CONTAINER_REGISTRIES={{.PublicContainerRegistries}}
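The HW_VALIDATOR_REQUIREMENTS line above is dense; a short sketch (assumed struct shape, decoding only the fields of interest) that unmarshals it makes the new arbiter minimums easier to see: 2 CPU cores, 8192 MiB of RAM, and a 50 GB disk, versus 4 cores / 16384 MiB / 100 GB for masters.

// Sketch only: local stand-in structs, trimmed to the fields shown above.
package main

import (
    "encoding/json"
    "fmt"
)

type hostRequirements struct {
    CPUCores   int `json:"cpu_cores"`
    RAMMiB     int `json:"ram_mib"`
    DiskSizeGB int `json:"disk_size_gb"`
}

type versionedRequirements struct {
    Version string           `json:"version"`
    Master  hostRequirements `json:"master"`
    Arbiter hostRequirements `json:"arbiter"`
    Worker  hostRequirements `json:"worker"`
}

func main() {
    raw := `[{"version":"default","master":{"cpu_cores":4,"ram_mib":16384,"disk_size_gb":100},"arbiter":{"cpu_cores":2,"ram_mib":8192,"disk_size_gb":50},"worker":{"cpu_cores":2,"ram_mib":8192,"disk_size_gb":100}}]`
    var reqs []versionedRequirements
    if err := json.Unmarshal([]byte(raw), &reqs); err != nil {
        panic(err)
    }
    fmt.Printf("arbiter minimums: %+v\n", reqs[0].Arbiter)
}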

View File

@@ -1,2 +1,3 @@
REQUIRED_MASTER_NODES={{.ControlPlaneAgents}}
REQUIRED_ARBITER_NODES={{.ArbiterAgents}}
REQUIRED_WORKER_NODES={{.WorkerAgents}}

View File

@@ -26,8 +26,9 @@ var (
)
const (
masterRole string = "master"
workerRole string = "worker"
masterRole string = "master"
workerRole string = "worker"
arbiterRole string = "arbiter"
)
type nmStateInterface struct {
@@ -193,9 +194,9 @@ func (a *AgentHosts) validateHostRootDeviceHints(hostPath *field.Path, host agen
func (a *AgentHosts) validateRoles(hostPath *field.Path, host agent.Host) field.ErrorList {
var allErrs field.ErrorList
if len(host.Role) > 0 && host.Role != masterRole && host.Role != workerRole {
if len(host.Role) > 0 && host.Role != masterRole && host.Role != arbiterRole && host.Role != workerRole {
allErrs = append(allErrs, field.NotSupported(hostPath.Child("role"), host.Role,
[]string{masterRole, workerRole}))
[]string{masterRole, workerRole, arbiterRole}))
}
return allErrs

View File

@@ -171,7 +171,7 @@ func TestAgentHosts_Generate(t *testing.T) {
&workflow.AgentWorkflow{Workflow: workflow.AgentWorkflowTypeInstall},
&joiner.AddNodesConfig{},
getInstallConfigSingleHost(),
getAgentConfigMultiHost(),
getAgentConfigMultiHost("worker"),
},
expectedConfig: agentHosts().hosts(
agentHost().name("test").role("master").interfaces(iface("enp3s1", "28:d2:44:d2:b2:1a")).deviceHint().networkConfig(agentNetworkConfigOne),
@@ -240,7 +240,7 @@ func TestAgentHosts_Generate(t *testing.T) {
getInstallConfigSingleHost(),
getAgentConfigInvalidHostRole(),
},
expectedError: "invalid Hosts configuration: hosts[0].role: Unsupported value: \"invalid-role\": supported values: \"master\", \"worker\"",
expectedError: "invalid Hosts configuration: hosts[0].role: Unsupported value: \"invalid-role\": supported values: \"master\", \"worker\", \"arbiter\"",
expectedConfig: nil,
},
{
@@ -332,6 +332,18 @@ func TestAgentHosts_Generate(t *testing.T) {
agentHost().name("test").role("master").interfaces(iface("enp3s1", "28:d2:44:d2:b2:1a")).deviceHint().networkConfig(agentNetworkConfigEmbeddedRendezvousIPOne),
agentHost().name("test-2").role("worker").interfaces(iface("enp3s1", "28:d2:44:d2:b2:1b")).networkConfig(agentNetworkConfigEmbeddedRendezvousIPTwo)),
},
{
name: "multi-host-from-agent-config-with-arbiter",
dependencies: []asset.Asset{
&workflow.AgentWorkflow{Workflow: workflow.AgentWorkflowTypeInstall},
&joiner.AddNodesConfig{},
getInstallConfigSingleHost(),
getAgentConfigMultiHost("arbiter"),
},
expectedConfig: agentHosts().hosts(
agentHost().name("test").role("master").interfaces(iface("enp3s1", "28:d2:44:d2:b2:1a")).deviceHint().networkConfig(agentNetworkConfigOne),
agentHost().name("test-2").role("arbiter").interfaces(iface("enp3s1", "28:d2:44:d2:b2:1b")).networkConfig(agentNetworkConfigTwo)),
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
@@ -439,12 +451,12 @@ func getAgentConfigSingleHost() *AgentConfig {
return a
}
func getAgentConfigMultiHost() *AgentConfig {
func getAgentConfigMultiHost(role string) *AgentConfig {
a := getAgentConfigSingleHost()
a.Config.Hosts[0].NetworkConfig.Raw = []byte(agentNetworkConfigOne)
host := agent.Host{
Hostname: "test-2",
Role: "worker",
Role: role,
Interfaces: []*aiv1beta1.Interface{
{
Name: "enp3s1",
@@ -460,7 +472,7 @@ func getAgentConfigMultiHost() *AgentConfig {
}
func getAgentConfigMultiHostEmbeddedRendezvousIP() *AgentConfig {
a := getAgentConfigMultiHost()
a := getAgentConfigMultiHost("worker")
a.Config.RendezvousIP = "192.168.111.1"
a.Config.Hosts[0].NetworkConfig.Raw = []byte(agentNetworkConfigEmbeddedRendezvousIPOne)
a.Config.Hosts[1].NetworkConfig.Raw = []byte(agentNetworkConfigEmbeddedRendezvousIPTwo)
@@ -536,7 +548,7 @@ func getAgentConfigMissingInterfaces() *AgentConfig {
}
func getAgentConfigInvalidRendezvousIP() *AgentConfig {
a := getAgentConfigMultiHost()
a := getAgentConfigMultiHost("worker")
a.Config.RendezvousIP = "192.168.111.81"
return a
}

View File

@@ -65,6 +65,7 @@ type agentTemplateData struct {
ServiceProtocol string
PullSecret string
ControlPlaneAgents int
ArbiterAgents int
WorkerAgents int
ReleaseImages string
ReleaseImage string
@@ -155,6 +156,7 @@ func (a *Ignition) Generate(ctx context.Context, dependencies asset.Parents) err
clusterName := ""
imageTypeISO := "full-iso"
numMasters := 0
numArbiters := 0
numWorkers := 0
enabledServices := getDefaultEnabledServices()
openshiftVersion := ""
@@ -178,6 +180,7 @@ func (a *Ignition) Generate(ctx context.Context, dependencies asset.Parents) err
}
// Fetch the required number of master, arbiter and worker nodes.
numMasters = agentManifests.AgentClusterInstall.Spec.ProvisionRequirements.ControlPlaneAgents
numArbiters = agentManifests.AgentClusterInstall.Spec.ProvisionRequirements.ArbiterAgents
numWorkers = agentManifests.AgentClusterInstall.Spec.ProvisionRequirements.WorkerAgents
// Enable specific install services
enabledServices = append(enabledServices, "start-cluster-installation.service")
@@ -202,6 +205,7 @@ func (a *Ignition) Generate(ctx context.Context, dependencies asset.Parents) err
// is supported, so forcing the expected number of masters and arbiters to zero, and assuming implicitly
// that all the hosts defined are workers.
numMasters = 0
numArbiters = 0
numWorkers = len(addNodesConfig.Config.Hosts)
// Enable add-nodes specific services
@@ -290,7 +294,7 @@ func (a *Ignition) Generate(ctx context.Context, dependencies asset.Parents) err
authConfig.AuthTokenExpiry,
caBundleMount,
len(registriesConfig.MirrorConfig) > 0,
numMasters, numWorkers,
numMasters, numArbiters, numWorkers,
osImage,
infraEnv.Spec.Proxy,
)
@@ -413,13 +417,14 @@ func addBootstrapScripts(config *igntypes.Config, releaseImage string) (err erro
func getTemplateData(name, pullSecret, releaseImageList, releaseImage, releaseImageMirror, publicContainerRegistries,
imageTypeISO, infraEnvID, publicKey, authType, agentAuthToken, userAuthToken, watcherAuthToken, tokenExpiry, caBundleMount string,
haveMirrorConfig bool,
numMasters, numWorkers int,
numMasters, numArbiters, numWorkers int,
osImage *models.OsImage,
proxy *v1beta1.Proxy) *agentTemplateData {
return &agentTemplateData{
ServiceProtocol: "http",
PullSecret: pullSecret,
ControlPlaneAgents: numMasters,
ArbiterAgents: numArbiters,
WorkerAgents: numWorkers,
ReleaseImages: releaseImageList,
ReleaseImage: releaseImage,

View File

@@ -62,6 +62,7 @@ func TestIgnition_getTemplateData(t *testing.T) {
ProvisionRequirements: hiveext.ProvisionRequirements{
ControlPlaneAgents: 3,
WorkerAgents: 5,
ArbiterAgents: 1,
},
},
}
@@ -96,11 +97,12 @@ func TestIgnition_getTemplateData(t *testing.T) {
agentAuthToken := "agentAuthToken"
userAuthToken := "userAuthToken"
watcherAuthToken := "watcherAuthToken"
templateData := getTemplateData(clusterName, pullSecret, releaseImageList, releaseImage, releaseImageMirror, publicContainerRegistries, "minimal-iso", infraEnvID, publicKey, gencrypto.AuthType, agentAuthToken, userAuthToken, watcherAuthToken, "", "", haveMirrorConfig, agentClusterInstall.Spec.ProvisionRequirements.ControlPlaneAgents, agentClusterInstall.Spec.ProvisionRequirements.WorkerAgents, osImage, proxy)
templateData := getTemplateData(clusterName, pullSecret, releaseImageList, releaseImage, releaseImageMirror, publicContainerRegistries, "minimal-iso", infraEnvID, publicKey, gencrypto.AuthType, agentAuthToken, userAuthToken, watcherAuthToken, "", "", haveMirrorConfig, agentClusterInstall.Spec.ProvisionRequirements.ControlPlaneAgents, agentClusterInstall.Spec.ProvisionRequirements.ArbiterAgents, agentClusterInstall.Spec.ProvisionRequirements.WorkerAgents, osImage, proxy)
assert.Equal(t, clusterName, templateData.ClusterName)
assert.Equal(t, "http", templateData.ServiceProtocol)
assert.Equal(t, pullSecret, templateData.PullSecret)
assert.Equal(t, agentClusterInstall.Spec.ProvisionRequirements.ControlPlaneAgents, templateData.ControlPlaneAgents)
assert.Equal(t, agentClusterInstall.Spec.ProvisionRequirements.ArbiterAgents, templateData.ArbiterAgents)
assert.Equal(t, agentClusterInstall.Spec.ProvisionRequirements.WorkerAgents, templateData.WorkerAgents)
assert.Equal(t, releaseImageList, templateData.ReleaseImages)
assert.Equal(t, releaseImage, templateData.ReleaseImage)
@@ -649,6 +651,7 @@ func buildIgnitionAssetDefaultDependencies(t *testing.T) []asset.Asset {
ProvisionRequirements: hiveext.ProvisionRequirements{
ControlPlaneAgents: 3,
WorkerAgents: 5,
ArbiterAgents: 1,
},
},
},

View File

@@ -88,14 +88,10 @@ func (a *OptionalInstallConfig) validateInstallConfig(ctx context.Context, insta
allErrs = append(allErrs, err...)
}
if installConfig.FeatureSet != configv1.Default {
allErrs = append(allErrs, field.NotSupported(field.NewPath("featureSet"), installConfig.FeatureSet, []string{string(configv1.Default)}))
}
warnUnusedConfig(installConfig)
numMasters, numWorkers := GetReplicaCount(installConfig)
logrus.Infof(fmt.Sprintf("Configuration has %d master replicas and %d worker replicas", numMasters, numWorkers))
numMasters, numArbiters, numWorkers := GetReplicaCount(installConfig)
logrus.Infof("Configuration has %d master replicas, %d arbiter replicas, and %d worker replicas", numMasters, numArbiters, numWorkers)
if err := a.validateControlPlaneConfiguration(installConfig); err != nil {
allErrs = append(allErrs, err...)
@@ -258,6 +254,10 @@ func (a *OptionalInstallConfig) validateSNOConfiguration(installConfig *types.In
fieldPath = field.NewPath("compute", "replicas")
allErrs = append(allErrs, field.Forbidden(fieldPath, fmt.Sprintf("Total number of compute replicas must be 0 when controlPlane.replicas is 1 for platform %s or %s. Found %v", none.Name, external.Name, workers)))
}
if installConfig.Arbiter != nil && installConfig.Arbiter.Replicas != nil && *installConfig.Arbiter.Replicas > 0 {
fieldPath = field.NewPath("arbiter", "replicas")
allErrs = append(allErrs, field.Forbidden(fieldPath, fmt.Sprintf("Total number of arbiter replicas must be 0 when controlPlane.replicas is 1 for Single Node Openshift (SNO) cluster. Found %d", *installConfig.Arbiter.Replicas)))
}
}
return allErrs
}
@@ -518,12 +518,16 @@ func warnUnusedConfig(installConfig *types.InstallConfig) {
}
}
// GetReplicaCount gets the configured master and worker replicas.
func GetReplicaCount(installConfig *types.InstallConfig) (numMasters, numWorkers int64) {
// GetReplicaCount gets the configured master, arbiter and worker replicas.
func GetReplicaCount(installConfig *types.InstallConfig) (numMasters, numArbiters, numWorkers int64) {
numRequiredMasters := int64(0)
if installConfig.ControlPlane != nil && installConfig.ControlPlane.Replicas != nil {
numRequiredMasters += *installConfig.ControlPlane.Replicas
}
numRequiredArbiters := int64(0)
if installConfig.Arbiter != nil && installConfig.Arbiter.Replicas != nil {
numRequiredArbiters += *installConfig.Arbiter.Replicas
}
numRequiredWorkers := int64(0)
for _, worker := range installConfig.Compute {
@@ -532,5 +536,5 @@ func GetReplicaCount(installConfig *types.InstallConfig) (numMasters, numWorkers
}
}
return numRequiredMasters, numRequiredWorkers
return numRequiredMasters, numRequiredArbiters, numRequiredWorkers
}

View File

@@ -18,6 +18,7 @@ import (
"k8s.io/apimachinery/pkg/util/validation/field"
"sigs.k8s.io/yaml"
configv1 "github.com/openshift/api/config/v1"
operv1 "github.com/openshift/api/operator/v1"
hiveext "github.com/openshift/assisted-service/api/hiveextension/v1beta1"
aiv1beta1 "github.com/openshift/assisted-service/api/v1beta1"
@@ -122,6 +123,10 @@ type agentClusterInstallInstallConfigOverrides struct {
CPUPartitioning types.CPUPartitioningMode `json:"cpuPartitioningMode,omitempty"`
// Allow override of AdditionalTrustBundlePolicy
AdditionalTrustBundlePolicy types.PolicyType `json:"additionalTrustBundlePolicy,omitempty"`
// Allow override of FeatureSet
FeatureSet configv1.FeatureSet `json:"featureSet,omitempty"`
// Allow override of FeatureGates
FeatureGates []string `json:"featureGates,omitempty"`
}
var _ asset.WritableAsset = (*AgentClusterInstall)(nil)
@@ -167,6 +172,11 @@ func (a *AgentClusterInstall) Generate(_ context.Context, dependencies asset.Par
numberOfWorkers = numberOfWorkers + int(*compute.Replicas)
}
numberOfArbiters := 0
if installConfig.Config.IsArbiterEnabled() {
numberOfArbiters = int(*installConfig.Config.Arbiter.Replicas)
}
clusterNetwork := []hiveext.ClusterNetworkEntry{}
for _, cn := range installConfig.Config.Networking.ClusterNetwork {
entry := hiveext.ClusterNetworkEntry{
@@ -213,6 +223,7 @@ func (a *AgentClusterInstall) Generate(_ context.Context, dependencies asset.Par
SSHPublicKey: strings.Trim(installConfig.Config.SSHKey, "|\n\t"),
ProvisionRequirements: hiveext.ProvisionRequirements{
ControlPlaneAgents: int(*installConfig.Config.ControlPlane.Replicas),
ArbiterAgents: numberOfArbiters,
WorkerAgents: numberOfWorkers,
},
PlatformType: agent.HivePlatformType(installConfig.Config.Platform),
@@ -237,6 +248,16 @@ func (a *AgentClusterInstall) Generate(_ context.Context, dependencies asset.Par
icOverrides.FIPS = installConfig.Config.FIPS
}
if len(installConfig.Config.FeatureSet) > 0 {
icOverridden = true
icOverrides.FeatureSet = installConfig.Config.FeatureSet
}
if len(installConfig.Config.FeatureGates) > 0 {
icOverridden = true
icOverrides.FeatureGates = installConfig.Config.FeatureGates
}
if installConfig.Config.Proxy != nil {
rendezvousIP := ""
if agentConfig.Config != nil {
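A sketch (an assumption, not code from this commit) of the override JSON produced when featureSet and featureGates are set, using a local struct with the same json tags as agentClusterInstallInstallConfigOverrides above; the feature gate name below is a placeholder, not a real OpenShift gate.

// Sketch only: local stand-in struct mirroring the json tags in the hunk above.
package main

import (
    "encoding/json"
    "fmt"
)

type installConfigOverrides struct {
    FIPS         bool     `json:"fips,omitempty"`
    FeatureSet   string   `json:"featureSet,omitempty"`
    FeatureGates []string `json:"featureGates,omitempty"`
}

func main() {
    o := installConfigOverrides{
        FeatureSet:   "TechPreviewNoUpgrade",
        FeatureGates: []string{"MyGate=true"}, // placeholder gate name
    }
    b, _ := json.Marshal(o)
    // Prints: {"featureSet":"TechPreviewNoUpgrade","featureGates":["MyGate=true"]}
    fmt.Println(string(b))
}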

View File

@@ -150,6 +150,11 @@ func TestAgentClusterInstall_Generate(t *testing.T) {
installConfigOverrides: `{"additionalTrustBundlePolicy":"Always"}`,
})
installConfigWithArbiter := getValidOptionalInstallConfigArbiter()
goodArbiterACI := getGoodACI()
goodArbiterACI.Spec.ProvisionRequirements.ArbiterAgents = 1
goodArbiterACI.Spec.ProvisionRequirements.WorkerAgents = 0
cases := []struct {
name string
dependencies []asset.Asset
@@ -306,6 +311,16 @@ func TestAgentClusterInstall_Generate(t *testing.T) {
},
expectedConfig: goodTrustBundlePolicyACI,
},
{
name: "valid configuration with ArbiterAgents",
dependencies: []asset.Asset{
&workflow.AgentWorkflow{Workflow: workflow.AgentWorkflowTypeInstall},
installConfigWithArbiter,
&agentconfig.AgentHosts{},
&agentconfig.AgentConfig{},
},
expectedConfig: goodArbiterACI,
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
@@ -422,6 +437,86 @@ spec:
},
expectedError: "",
},
{
name: "valid-config-file-with-arbiter",
data: `
metadata:
name: test-agent-cluster-install
namespace: cluster0
spec:
apiVIP: 192.168.111.5
ingressVIP: 192.168.111.4
diskEncryption:
enableOn: workers
mode: tpmv2
platformType: BareMetal
clusterDeploymentRef:
name: ostest
imageSetRef:
name: openshift-v4.10.0
networking:
machineNetwork:
- cidr: 10.10.11.0/24
clusterNetwork:
- cidr: 10.128.0.0/14
hostPrefix: 23
serviceNetwork:
- 172.30.0.0/16
networkType: OVNKubernetes
provisionRequirements:
controlPlaneAgents: 3
workerAgents: 2
arbiterAgents: 1
sshPublicKey: |
ssh-rsa AAAAmyKey`,
expectedFound: true,
expectedConfig: &hiveext.AgentClusterInstall{
ObjectMeta: metav1.ObjectMeta{
Name: "test-agent-cluster-install",
Namespace: "cluster0",
},
Spec: hiveext.AgentClusterInstallSpec{
APIVIP: "192.168.111.5",
IngressVIP: "192.168.111.4",
DiskEncryption: &hiveext.DiskEncryption{
EnableOn: swag.String("workers"),
Mode: swag.String("tpmv2"),
},
PlatformType: hiveext.BareMetalPlatformType,
ClusterDeploymentRef: corev1.LocalObjectReference{
Name: "ostest",
},
ImageSetRef: &hivev1.ClusterImageSetReference{
Name: "openshift-v4.10.0",
},
Networking: hiveext.Networking{
MachineNetwork: []hiveext.MachineNetworkEntry{
{
CIDR: "10.10.11.0/24",
},
},
ClusterNetwork: []hiveext.ClusterNetworkEntry{
{
CIDR: "10.128.0.0/14",
HostPrefix: 23,
},
},
ServiceNetwork: []string{
"172.30.0.0/16",
},
NetworkType: "OVNKubernetes",
UserManagedNetworking: swag.Bool(false),
},
ProvisionRequirements: hiveext.ProvisionRequirements{
ControlPlaneAgents: 3,
WorkerAgents: 2,
ArbiterAgents: 1,
},
SSHPublicKey: "ssh-rsa AAAAmyKey",
},
},
expectedError: "",
},
{
name: "valid-config-file-external-oci-platform",
data: `

View File

@@ -259,7 +259,7 @@ func GetNodeZeroIP(hosts []agenttype.Host, nmStateConfigs []*aiv1beta1.NMStateCo
// Select first the configs from the hosts, if defined
// Skip worker hosts (or without an explicit role assigned)
for _, host := range hosts {
if host.Role != "master" {
if host.Role != "master" && host.Role != "arbiter" {
continue
}
rawConfigs = append(rawConfigs, host.NetworkConfig.Raw)
@@ -361,10 +361,11 @@ func buildMacInterfaceMap(nmStateConfig aiv1beta1.NMStateConfig) models.MacInter
}
func validateHostCount(installConfig *types.InstallConfig, agentHosts *agentconfig.AgentHosts) error {
numRequiredMasters, numRequiredWorkers := agent.GetReplicaCount(installConfig)
numRequiredMasters, numRequiredArbiters, numRequiredWorkers := agent.GetReplicaCount(installConfig)
numMasters := int64(0)
numWorkers := int64(0)
numArbiters := int64(0)
// Check for hosts explicitly defined
for _, host := range agentHosts.Hosts {
switch host.Role {
@@ -372,6 +373,8 @@ func validateHostCount(installConfig *types.InstallConfig, agentHosts *agentconf
numMasters++
case "worker":
numWorkers++
case "arbiter":
numArbiters++
}
}
@@ -380,6 +383,8 @@ func validateHostCount(installConfig *types.InstallConfig, agentHosts *agentconf
if host.Role == "" {
if numMasters < numRequiredMasters {
numMasters++
} else if numArbiters < numRequiredArbiters {
numArbiters++
} else {
numWorkers++
}
@@ -393,6 +398,13 @@ func validateHostCount(installConfig *types.InstallConfig, agentHosts *agentconf
return fmt.Errorf("the number of master hosts defined (%v) exceeds the configured ControlPlane replicas (%v)", numMasters, numRequiredMasters)
}
if numArbiters != 0 && numArbiters < numRequiredArbiters {
logrus.Warnf("not enough arbiter hosts defined (%v) to support all the configured Arbiter replicas (%v)", numArbiters, numRequiredArbiters)
}
if numArbiters > numRequiredArbiters {
return fmt.Errorf("the number of arbiter hosts defined (%v) exceeds the configured Arbiter replicas (%v)", numArbiters, numRequiredArbiters)
}
if numWorkers != 0 && numWorkers < numRequiredWorkers {
logrus.Warnf("not enough worker hosts defined (%v) to support all the configured Compute replicas (%v)", numWorkers, numRequiredWorkers)
}
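As the hunk above shows, hosts without an explicit role are counted toward the master requirement first, then arbiters, with any remainder treated as workers. A standalone sketch of that assignment order (names here are illustrative, not the installer's):

// Sketch only: paraphrase of the unassigned-host handling in validateHostCount.
package main

import "fmt"

func assignRoles(unassigned int, requiredMasters, requiredArbiters int64) (masters, arbiters, workers int64) {
    for i := 0; i < unassigned; i++ {
        switch {
        case masters < requiredMasters:
            masters++
        case arbiters < requiredArbiters:
            arbiters++
        default:
            workers++
        }
    }
    return masters, arbiters, workers
}

func main() {
    // Six hosts with no explicit role, against a 3-master / 1-arbiter topology.
    fmt.Println(assignRoles(6, 3, 1)) // 3 1 2
}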

View File

@@ -7,7 +7,7 @@ import (
"github.com/go-openapi/swag"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/pointer"
"k8s.io/utils/ptr"
"sigs.k8s.io/yaml"
hiveext "github.com/openshift/assisted-service/api/hiveextension/v1beta1"
@@ -84,17 +84,17 @@ func getValidOptionalInstallConfig() *agent.OptionalInstallConfig {
SSHKey: testSSHKey,
ControlPlane: &types.MachinePool{
Name: "master",
Replicas: pointer.Int64Ptr(3),
Replicas: ptr.To(int64(3)),
Platform: types.MachinePoolPlatform{},
},
Compute: []types.MachinePool{
{
Name: "worker-machine-pool-1",
Replicas: pointer.Int64Ptr(2),
Replicas: ptr.To(int64(2)),
},
{
Name: "worker-machine-pool-2",
Replicas: pointer.Int64Ptr(3),
Replicas: ptr.To(int64(3)),
},
},
Networking: &types.Networking{
@@ -145,17 +145,17 @@ func getValidOptionalInstallConfigDualStack() *agent.OptionalInstallConfig {
SSHKey: testSSHKey,
ControlPlane: &types.MachinePool{
Name: "master",
Replicas: pointer.Int64Ptr(3),
Replicas: ptr.To(int64(3)),
Platform: types.MachinePoolPlatform{},
},
Compute: []types.MachinePool{
{
Name: "worker-machine-pool-1",
Replicas: pointer.Int64Ptr(2),
Replicas: ptr.To(int64(2)),
},
{
Name: "worker-machine-pool-2",
Replicas: pointer.Int64Ptr(3),
Replicas: ptr.To(int64(3)),
},
},
Networking: &types.Networking{
@@ -200,6 +200,21 @@ func getValidOptionalInstallConfigDualStackDualVIPs() *agent.OptionalInstallConf
return installConfig
}
func getValidOptionalInstallConfigArbiter() *agent.OptionalInstallConfig {
installConfig := getValidOptionalInstallConfig()
installConfig.Config.Compute = []types.MachinePool{
{
Name: "workers",
Replicas: ptr.To(int64(0)),
},
}
installConfig.Config.Arbiter = &types.MachinePool{
Name: "arbiter",
Replicas: ptr.To(int64(1)),
}
return installConfig
}
// getProxyValidOptionalInstallConfig returns a valid optional install config for proxied installation
func getProxyValidOptionalInstallConfig() *agent.OptionalInstallConfig {
validIC := getValidOptionalInstallConfig()