mirror of
https://github.com/openshift/openshift-docs.git
synced 2026-02-05 21:46:22 +01:00
TELCODOCS-2484: Replacing SiteConfig with ClusterInstance
This commit is contained in:
committed by
openshift-cherrypick-robot
parent
c853ae651a
commit
f4cbb21feb
@@ -3462,7 +3462,7 @@ Topics:
|
||||
File: ztp-preparing-the-hub-cluster
|
||||
- Name: Updating GitOps ZTP
|
||||
File: ztp-updating-gitops
|
||||
- Name: Installing managed clusters with RHACM and SiteConfig resources
|
||||
- Name: Installing managed clusters with RHACM and ClusterInstance resources
|
||||
File: ztp-deploying-far-edge-sites
|
||||
- Name: Manually installing a single-node OpenShift cluster with GitOps ZTP
|
||||
File: ztp-manual-install
|
||||
@@ -3472,7 +3472,7 @@ Topics:
|
||||
File: ztp-reference-cluster-configuration-for-vdu
|
||||
- Name: Validating cluster tuning for vDU application workloads
|
||||
File: ztp-vdu-validating-cluster-tuning
|
||||
- Name: Advanced managed cluster configuration with SiteConfig resources
|
||||
- Name: Advanced managed cluster configuration with ClusterInstance resources
|
||||
File: ztp-advanced-install-ztp
|
||||
- Name: Managing cluster policies with PolicyGenerator resources
|
||||
Dir: policygenerator_for_ztp
|
||||
|
||||
@@ -1,17 +1,13 @@
|
||||
:_mod-docs-content-type: ASSEMBLY
|
||||
[id="ztp-advanced-install-ztp"]
|
||||
= Advanced managed cluster configuration with SiteConfig resources
|
||||
= Advanced managed cluster configuration with ClusterInstance resources
|
||||
include::_attributes/common-attributes.adoc[]
|
||||
:context: ztp-advanced-install-ztp
|
||||
|
||||
toc::[]
|
||||
|
||||
You can use `SiteConfig` custom resources (CRs) to deploy custom functionality and configurations in your managed clusters at installation time.
|
||||
|
||||
include::snippets/siteconfig-deprecation-notice.adoc[]
|
||||
You can use `ClusterInstance` custom resources (CRs) to deploy custom functionality and configurations in your managed clusters at installation time.
|
||||
|
||||
include::modules/ztp-customizing-the-install-extra-manifests.adoc[leveloffset=+1]
|
||||
|
||||
include::modules/ztp-filtering-ai-crs-using-siteconfig.adoc[leveloffset=+1]
|
||||
|
||||
include::modules/ztp-deleting-node-using-siteconfig.adoc[leveloffset=+1]
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
:_mod-docs-content-type: ASSEMBLY
|
||||
[id="ztp-deploying-far-edge-sites"]
|
||||
= Installing managed clusters with {rh-rhacm} and SiteConfig resources
|
||||
= Installing managed clusters with {rh-rhacm} and ClusterInstance resources
|
||||
include::_attributes/common-attributes.adoc[]
|
||||
:context: ztp-deploying-far-edge-sites
|
||||
|
||||
@@ -35,9 +35,7 @@ include::modules/ztp-deploying-a-site.adoc[leveloffset=+1]
|
||||
[role="_additional-resources"]
|
||||
.Additional resources
|
||||
|
||||
* xref:../edge_computing/ztp-deploying-far-edge-sites.adoc#ztp-sno-siteconfig-config-reference_ztp-deploying-far-edge-sites[{sno-caps} SiteConfig CR installation reference]
|
||||
|
||||
include::modules/ztp-sno-accelerated-ztp.adoc[leveloffset=+2]
|
||||
* xref:../edge_computing/ztp-deploying-far-edge-sites.adoc#ztp-clusterinstance-config-reference_ztp-deploying-far-edge-sites[{sno-caps} ClusterInstance CR installation reference]
|
||||
|
||||
include::modules/ztp-configuring-ipsec-using-ztp-and-siteconfig.adoc[leveloffset=+2]
|
||||
|
||||
@@ -50,7 +48,7 @@ include::modules/ztp-configuring-ipsec-using-ztp-and-siteconfig.adoc[leveloffset
|
||||
|
||||
* xref:../networking/network_security/configuring-ipsec-ovn.adoc#nw-ovn-ipsec-encryption_configuring-ipsec-ovn[Encryption protocol and IPsec mode]
|
||||
|
||||
* xref:../edge_computing/ztp-deploying-far-edge-sites.adoc#ztp-deploying-far-edge-sites[Installing managed clusters with {rh-rhacm} and SiteConfig resources]
|
||||
* xref:../edge_computing/ztp-deploying-far-edge-sites.adoc#ztp-deploying-far-edge-sites[Installing managed clusters with {rh-rhacm} and ClusterInstance resources]
|
||||
|
||||
include::modules/ztp-configuring-ipsec-using-ztp-and-siteconfig-for-mno.adoc[leveloffset=+2]
|
||||
|
||||
@@ -63,17 +61,15 @@ include::modules/ztp-configuring-ipsec-using-ztp-and-siteconfig-for-mno.adoc[lev
|
||||
|
||||
* xref:../networking/network_security/configuring-ipsec-ovn.adoc#nw-ovn-ipsec-encryption_configuring-ipsec-ovn[Encryption protocol and IPsec mode]
|
||||
|
||||
* xref:../edge_computing/ztp-deploying-far-edge-sites.adoc#ztp-deploying-far-edge-sites[Installing managed clusters with {rh-rhacm} and SiteConfig resources]
|
||||
* xref:../edge_computing/ztp-deploying-far-edge-sites.adoc#ztp-deploying-far-edge-sites[Installing managed clusters with {rh-rhacm} and ClusterInstance resources]
|
||||
|
||||
include::modules/ztp-verifying-ipsec.adoc[leveloffset=+2]
|
||||
|
||||
include::modules/ztp-sno-siteconfig-config-reference.adoc[leveloffset=+2]
|
||||
include::modules/ztp-clusterinstance-config-reference.adoc[leveloffset=+2]
|
||||
|
||||
[role="_additional-resources"]
|
||||
.Additional resources
|
||||
|
||||
* xref:../edge_computing/ztp-reference-cluster-configuration-for-vdu.adoc#ztp-sno-du-disk-encryption_sno-configure-for-vdu[About disk encryption with TPM and PCR protection].
|
||||
|
||||
* xref:../edge_computing/ztp-advanced-install-ztp.adoc#ztp-customizing-the-install-extra-manifests_ztp-advanced-install-ztp[Customizing extra installation manifests in the {ztp} pipeline]
|
||||
|
||||
* xref:../edge_computing/ztp-preparing-the-hub-cluster.adoc#ztp-preparing-the-ztp-git-repository_ztp-preparing-the-hub-cluster[Preparing the {ztp} site configuration repository]
|
||||
@@ -112,7 +108,9 @@ include::modules/ztp-site-cleanup.adoc[leveloffset=+1]
|
||||
[role="_additional-resources"]
|
||||
.Additional resources
|
||||
|
||||
* For information about removing a cluster, see link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.9/html/clusters/cluster_mce_overview#remove-managed-cluster[Removing a cluster from management].
|
||||
* link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.9/html/clusters/cluster_mce_overview#remove-managed-cluster[Removing a cluster from management].
|
||||
|
||||
* link:https://docs.redhat.com/en/documentation/red_hat_advanced_cluster_management_for_kubernetes/2.15/html/multicluster_engine_operator_with_red_hat_advanced_cluster_management/ibio-intro#deprovision-clusters[Deprovisioning clusters]
|
||||
|
||||
include::modules/ztp-removing-obsolete-content.adoc[leveloffset=+1]
|
||||
|
||||
|
||||
@@ -12,7 +12,7 @@ You can deploy a managed {sno} cluster by using {rh-rhacm-first} and the assiste
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
If you are creating multiple managed clusters, use the `SiteConfig` method described in xref:../edge_computing/ztp-deploying-far-edge-sites.adoc#ztp-deploying-far-edge-sites[Deploying far edge sites with ZTP].
|
||||
If you are creating multiple managed clusters, use the `ClusterInstance` method described in xref:../edge_computing/ztp-deploying-far-edge-sites.adoc#ztp-deploying-far-edge-sites[Deploying far edge sites with ZTP].
|
||||
====
|
||||
|
||||
[IMPORTANT]
|
||||
@@ -22,17 +22,6 @@ The target bare-metal host must meet the networking, firmware, and hardware requ
|
||||
|
||||
include::modules/ztp-generating-install-and-config-crs-manually.adoc[leveloffset=+1]
|
||||
|
||||
[role="_additional-resources"]
|
||||
.Additional resources
|
||||
|
||||
* xref:../edge_computing/ztp-reference-cluster-configuration-for-vdu.adoc#ztp-sno-du-enabling-workload-partitioning_sno-configure-for-vdu[Workload partitioning]
|
||||
|
||||
* xref:../installing/installing_bare_metal/ipi/ipi-install-installation-workflow.adoc#bmc-addressing_ipi-install-installation-workflow[BMC addressing]
|
||||
|
||||
* xref:../installing/installing_with_agent_based_installer/preparing-to-install-with-agent-based-installer.adoc#root-device-hints_preparing-to-install-with-agent-based-installer[About root device hints]
|
||||
|
||||
* xref:../edge_computing/ztp-deploying-far-edge-sites.adoc#ztp-sno-siteconfig-config-reference_ztp-deploying-far-edge-sites[{sno-caps} SiteConfig CR installation reference]
|
||||
|
||||
include::modules/ztp-creating-the-site-secrets.adoc[leveloffset=+1]
|
||||
|
||||
include::modules/ztp-configuring-kernel-arguments-for-discovery-iso-in-manual-installations.adoc[leveloffset=+1]
|
||||
@@ -42,12 +31,20 @@ include::modules/ztp-manually-install-a-single-managed-cluster.adoc[leveloffset=
|
||||
[role="_additional-resources"]
|
||||
.Additional resources
|
||||
|
||||
* xref:../installing/installing_bare_metal/ipi/ipi-install-installation-workflow.adoc#bmc-addressing_ipi-install-installation-workflow[BMC addressing]
|
||||
|
||||
* xref:../installing/installing_with_agent_based_installer/preparing-to-install-with-agent-based-installer.adoc#root-device-hints_preparing-to-install-with-agent-based-installer[About root device hints]
|
||||
|
||||
* xref:../edge_computing/ztp-deploying-far-edge-sites.adoc#ztp-clusterinstance-config-reference_ztp-deploying-far-edge-sites[{sno-caps} ClusterInstance CR installation reference]
|
||||
|
||||
* xref:../edge_computing/ztp-reference-cluster-configuration-for-vdu.adoc#ztp-managed-cluster-network-prereqs_sno-configure-for-vdu[Connectivity prerequisites for managed cluster networks]
|
||||
|
||||
* xref:../storage/persistent_storage_local/persistent-storage-using-lvms.adoc#lvms-preface-sno-ran_logical-volume-manager-storage[Deploying {lvms} on {sno} clusters]
|
||||
|
||||
* xref:../edge_computing/policygenerator_for_ztp/ztp-advanced-policygenerator-config.adoc#ztp-provisioning-lvm-storage_ztp-advanced-policy-config[Configuring {lvms} using {policy-gen-cr} CRs]
|
||||
|
||||
* xref:../edge_computing/policygenerator_for_ztp/ztp-configuring-managed-clusters-policygenerator.adoc#ztp-configuring-managed-clusters-policygenerator[Configuring managed cluster policies by using PolicyGenerator resources]
|
||||
|
||||
include::modules/ztp-checking-the-managed-cluster-status.adoc[leveloffset=+1]
|
||||
|
||||
include::modules/ztp-troubleshooting-the-managed-cluster.adoc[leveloffset=+1]
|
||||
|
||||
@@ -21,7 +21,7 @@ include::modules/ztp-creating-argocd-clusterinstance.adoc[leveloffset=+1]
|
||||
|
||||
include::modules/ztp-active-ocp-version.adoc[leveloffset=+1]
|
||||
|
||||
include::modules/ztp-migrating-sno-clusterinstnce.adoc[leveloffset=+1]
|
||||
include::modules/ztp-migrating-sno-clusterinstance.adoc[leveloffset=+1]
|
||||
|
||||
[role="_additional-resources"]
|
||||
.Additional resources
|
||||
|
||||
@@ -28,17 +28,8 @@ include::modules/ztp-enabling-workload-partitioning-sno.adoc[leveloffset=+1]
|
||||
[role="_additional-resources"]
|
||||
.Additional resources
|
||||
|
||||
* For the recommended {sno} workload partitioning configuration, see xref:../edge_computing/ztp-reference-cluster-configuration-for-vdu.adoc#ztp-sno-du-enabling-workload-partitioning_sno-configure-for-vdu[Workload partitioning].
|
||||
|
||||
include::modules/ztp-sno-du-disk-encryption.adoc[leveloffset=+1]
|
||||
|
||||
[role="_additional-resources"]
|
||||
.Additional resources
|
||||
|
||||
* xref:../security/network_bound_disk_encryption/nbde-about-disk-encryption-technology.adoc#nbde-tpm-encryption_nbde-implementation[TPM encryption]
|
||||
|
||||
* For information about enabling disk encryption, see xref:../edge_computing/ztp-reference-cluster-configuration-for-vdu.adoc#ztp-sno-du-configuring-disk-encryption-with-pcr-protection_sno-configure-for-vdu[Enabling disk encryption with TPM and PCR protection].
|
||||
|
||||
[id="ztp-sno-install-time-cluster-config"]
|
||||
== Recommended cluster install manifests
|
||||
|
||||
@@ -46,12 +37,10 @@ The ZTP pipeline applies the following custom resources (CRs) during cluster ins
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
When using the {ztp} plugin and `SiteConfig` CRs for cluster deployment, the following `MachineConfig` CRs are included by default.
|
||||
When using the {ztp} plugin and `ClusterInstance` CRs for cluster deployment, the following `MachineConfig` CRs are included by default.
|
||||
====
|
||||
|
||||
Use the `SiteConfig` `extraManifests` filter to alter the CRs that are included by default. For more information, see xref:../edge_computing/ztp-advanced-install-ztp.adoc#ztp-advanced-install-ztp[Advanced managed cluster configuration with SiteConfig CRs].
|
||||
|
||||
include::modules/ztp-sno-du-enabling-workload-partitioning.adoc[leveloffset=+2]
|
||||
Use the `ClusterInstance` `extraManifestRefs` to alter the CRs that are included by default. For more information, see xref:../edge_computing/ztp-advanced-install-ztp.adoc#ztp-advanced-install-ztp[Advanced managed cluster configuration with ClusterInstance CRs].
|
||||
|
||||
include::modules/ztp-sno-du-configuring-the-container-mountspace.adoc[leveloffset=+2]
|
||||
|
||||
@@ -65,13 +54,9 @@ include::modules/ztp-sno-du-disabling-crio-wipe.adoc[leveloffset=+2]
|
||||
|
||||
include::modules/ztp-sno-du-configuring-crun-container-runtime.adoc[leveloffset=+2]
|
||||
|
||||
include::modules/ztp-sno-du-configuring-disk-encryption-with-pcr-protection.adoc[leveloffset=+2]
|
||||
|
||||
[role="_additional-resources"]
|
||||
.Additional resources
|
||||
|
||||
* xref:../edge_computing/ztp-reference-cluster-configuration-for-vdu.adoc#ztp-sno-du-disk-encryption_sno-configure-for-vdu[About disk encryption with TPM and PCR protection]
|
||||
|
||||
[id="ztp-sno-post-install-time-cluster-config"]
|
||||
== Recommended postinstallation cluster configurations
|
||||
|
||||
@@ -79,7 +64,7 @@ When the cluster installation is complete, the ZTP pipeline applies the followin
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
In {ztp} v4.10 and earlier, you configure UEFI secure boot with a `MachineConfig` CR. This is no longer required in {ztp} v4.11 and later. In v4.11, you configure UEFI secure boot for {sno} clusters by updating the `spec.clusters.nodes.bootMode` field in the `SiteConfig` CR that you use to install the cluster. For more information, see xref:../edge_computing/ztp-deploying-far-edge-sites.adoc#ztp-deploying-a-site_ztp-deploying-far-edge-sites[Deploying a managed cluster with SiteConfig and {ztp}].
|
||||
In {ztp} v4.10 and earlier, you configure UEFI secure boot with a `MachineConfig` CR. This is no longer required in {ztp} v4.11 and later. In v4.11, you configure UEFI secure boot for {sno} clusters by updating the `spec.nodes[].bootMode` field in the `ClusterInstance` CR that you use to install the cluster. For more information, see xref:../edge_computing/ztp-deploying-far-edge-sites.adoc#ztp-deploying-a-site_ztp-deploying-far-edge-sites[Deploying a managed cluster with ClusterInstance and {ztp}].
|
||||
====
|
||||
|
||||
include::modules/ztp-sno-du-configuring-the-operators.adoc[leveloffset=+2]
|
||||
|
||||
@@ -38,29 +38,29 @@ $ oc delete managedcluster sno-worker-example
|
||||
+
|
||||
... Wait until the managed cluster is removed. After the cluster is removed, create the proper `SeedGenerator` CR. The {lcao} cleans up the {rh-rhacm} artifacts.
|
||||
+
|
||||
.. If you are using {ztp}, detach your cluster by removing the seed cluster's `SiteConfig` CR from the `kustomization.yaml`.
|
||||
.. If you are using {ztp}, detach your cluster by removing the seed cluster's `ClusterInstance` CR from the `kustomization.yaml`.
|
||||
+
|
||||
... If you have a `kustomization.yaml` file that references multiple `SiteConfig` CRs, remove your seed cluster's `SiteConfig` CR from the `kustomization.yaml`:
|
||||
... If you have a `kustomization.yaml` file that references multiple `ClusterInstance` CRs, remove your seed cluster's `ClusterInstance` CR from the `kustomization.yaml`:
|
||||
+
|
||||
[source,yaml]
|
||||
----
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
|
||||
generators:
|
||||
#- example-seed-sno1.yaml
|
||||
- example-target-sno2.yaml
|
||||
- example-target-sno3.yaml
|
||||
resources:
|
||||
#- clusterinstance-seed-sno1.yaml
|
||||
- clusterinstance-target-sno2.yaml
|
||||
- clusterinstance-target-sno3.yaml
|
||||
----
|
||||
+
|
||||
... If you have a `kustomization.yaml` that references one `SiteConfig` CR, remove your seed cluster's `SiteConfig` CR from the `kustomization.yaml` and add the `generators: {}` line:
|
||||
... If you have a `kustomization.yaml` that references one `ClusterInstance` CR, remove your seed cluster's `ClusterInstance` CR from the `kustomization.yaml` and add the `resources: []` line:
|
||||
+
|
||||
[source,yaml]
|
||||
----
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
|
||||
generators: {}
|
||||
resources: []
|
||||
----
|
||||
+
|
||||
... Commit the `kustomization.yaml` changes in your Git repository and push the changes to your repository.
|
||||
|
||||
@@ -425,7 +425,7 @@ Resolution:: Create and apply a new `ClusterGroupUpdate` CR with the same specif
|
||||
|
||||
Issue:: If there are no policies for the managed cluster when the cluster becomes `Ready`, a `ClusterGroupUpgrade` CR with no policies is auto-created.
|
||||
Upon completion of the `ClusterGroupUpgrade` CR, the managed cluster is labeled as `ztp-done`.
|
||||
If the `PolicyGenerator` or `PolicyGenTemplate` CRs were not pushed to the Git repository within the required time after `SiteConfig` resources were pushed, this might result in no policies being available for the target cluster when the cluster became `Ready`.
|
||||
If the `PolicyGenerator` or `PolicyGenTemplate` CRs were not pushed to the Git repository within the required time after `ClusterInstance` resources were pushed, this might result in no policies being available for the target cluster when the cluster became `Ready`.
|
||||
|
||||
Resolution:: Verify that the policies you want to apply are available on the hub cluster, then create a `ClusterGroupUpgrade` CR with the required policies.
|
||||
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
Long download times are unavoidable during initial deployment. Over time, there is a risk that CRI-O will erase the `/var/lib/containers/storage` directory in the case of an unexpected shutdown.
|
||||
To address long image download times, you can create a local image registry on remote managed clusters using {ztp-first}. This is useful in Edge computing scenarios where clusters are deployed at the far edge of the network.
|
||||
|
||||
Before you can set up the local image registry with {ztp}, you need to configure disk partitioning in the `SiteConfig` CR that you use to install the remote managed cluster. After installation, you configure the local image registry using a `{policy-gen-cr}` CR. Then, the {ztp} pipeline creates Persistent Volume (PV) and Persistent Volume Claim (PVC) CRs and patches the `imageregistry` configuration.
|
||||
Before you can set up the local image registry with {ztp}, you need to configure disk partitioning in the `ClusterInstance` CR that you use to install the remote managed cluster. After installation, you configure the local image registry using a `{policy-gen-cr}` CR. Then, the {ztp} pipeline creates Persistent Volume (PV) and Persistent Volume Claim (PVC) CRs and patches the `imageregistry` configuration.
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
|
||||
@@ -10,7 +10,7 @@ You can add an additional worker node to existing {sno} clusters to increase ava
|
||||
|
||||
.Prerequisites
|
||||
|
||||
* Install and configure {rh-rhacm} 2.6 or later in an {product-title} 4.11 or later bare-metal hub cluster
|
||||
* Install and configure {rh-rhacm} 2.12 or later in an {product-title} 4.11 or later bare-metal hub cluster
|
||||
* Install {cgu-operator-full} in the hub cluster
|
||||
* Install {gitops-title} in the hub cluster
|
||||
* Use the {ztp} `ztp-site-generate` container image version 4.12 or later
|
||||
@@ -20,50 +20,60 @@ You can add an additional worker node to existing {sno} clusters to increase ava
|
||||
|
||||
.Procedure
|
||||
|
||||
. If you deployed your cluster by using the `example-sno.yaml` `SiteConfig` manifest, add your new worker node to the `spec.clusters['example-sno'].nodes` list:
|
||||
. If you deployed your cluster by using the `example-sno.yaml` `ClusterInstance` CR, add your new worker node to the `spec.nodes` list:
|
||||
+
|
||||
[source,yaml]
|
||||
----
|
||||
nodes:
|
||||
- hostName: "example-node2.example.com"
|
||||
role: "worker"
|
||||
bmcAddress: "idrac-virtualmedia+https://[1111:2222:3333:4444::bbbb:1]/redfish/v1/Systems/System.Embedded.1"
|
||||
bmcCredentialsName:
|
||||
name: "example-node2-bmh-secret"
|
||||
bootMACAddress: "AA:BB:CC:DD:EE:11"
|
||||
bootMode: "UEFI"
|
||||
nodeNetwork:
|
||||
interfaces:
|
||||
- name: eno1
|
||||
macAddress: "AA:BB:CC:DD:EE:11"
|
||||
config:
|
||||
interfaces:
|
||||
- name: eno1
|
||||
type: ethernet
|
||||
state: up
|
||||
macAddress: "AA:BB:CC:DD:EE:11"
|
||||
ipv4:
|
||||
enabled: false
|
||||
ipv6:
|
||||
enabled: true
|
||||
address:
|
||||
- ip: 1111:2222:3333:4444::1
|
||||
prefix-length: 64
|
||||
dns-resolver:
|
||||
config:
|
||||
search:
|
||||
- example.com
|
||||
server:
|
||||
- 1111:2222:3333:4444::2
|
||||
routes:
|
||||
config:
|
||||
- destination: ::/0
|
||||
next-hop-interface: eno1
|
||||
next-hop-address: 1111:2222:3333:4444::1
|
||||
table-id: 254
|
||||
apiVersion: siteconfig.open-cluster-management.io/v1alpha1
|
||||
kind: ClusterInstance
|
||||
metadata:
|
||||
name: "example-sno"
|
||||
namespace: "example-sno"
|
||||
spec:
|
||||
# ... existing cluster configuration ...
|
||||
nodes:
|
||||
- hostName: "example-sno.example.com"
|
||||
role: "master"
|
||||
# ... existing master node configuration ...
|
||||
- hostName: "example-node2.example.com"
|
||||
role: "worker"
|
||||
bmcAddress: "idrac-virtualmedia+https://[1111:2222:3333:4444::bbbb:1]/redfish/v1/Systems/System.Embedded.1"
|
||||
bmcCredentialsName:
|
||||
name: "example-node2-bmh-secret"
|
||||
bootMACAddress: "AA:BB:CC:DD:EE:11"
|
||||
bootMode: "UEFI"
|
||||
nodeNetwork:
|
||||
interfaces:
|
||||
- name: eno1
|
||||
macAddress: "AA:BB:CC:DD:EE:11"
|
||||
config:
|
||||
interfaces:
|
||||
- name: eno1
|
||||
type: ethernet
|
||||
state: up
|
||||
macAddress: "AA:BB:CC:DD:EE:11"
|
||||
ipv4:
|
||||
enabled: false
|
||||
ipv6:
|
||||
enabled: true
|
||||
address:
|
||||
- ip: 1111:2222:3333:4444::1
|
||||
prefix-length: 64
|
||||
dns-resolver:
|
||||
config:
|
||||
search:
|
||||
- example.com
|
||||
server:
|
||||
- 1111:2222:3333:4444::2
|
||||
routes:
|
||||
config:
|
||||
- destination: ::/0
|
||||
next-hop-interface: eno1
|
||||
next-hop-address: 1111:2222:3333:4444::1
|
||||
table-id: 254
|
||||
----
|
||||
|
||||
. Create a BMC authentication secret for the new host, as referenced by the `bmcCredentialsName` field in the `spec.nodes` section of your `SiteConfig` file:
|
||||
. Create a BMC authentication secret for the new host, as referenced by the `bmcCredentialsName` field in the `spec.nodes` section of your `ClusterInstance` CR:
|
||||
+
|
||||
[source,yaml]
|
||||
----
|
||||
@@ -80,7 +90,7 @@ type: Opaque
|
||||
|
||||
. Commit the changes in Git, and then push to the Git repository that is being monitored by the {ztp} ArgoCD application.
|
||||
+
|
||||
When the ArgoCD `cluster` application synchronizes, two new manifests appear on the hub cluster generated by the {ztp} plugin:
|
||||
When the ArgoCD `cluster` application synchronizes, two new manifests appear on the hub cluster generated by the SiteConfig Operator:
|
||||
+
|
||||
* `BareMetalHost`
|
||||
* `NMStateConfig`
|
||||
|
||||
9
modules/ztp-clusterinstance-config-reference.adoc
Normal file
9
modules/ztp-clusterinstance-config-reference.adoc
Normal file
@@ -0,0 +1,9 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * scalability_and_performance/ztp_far_edge/ztp-deploying-far-edge-sites.adoc
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="ztp-clusterinstance-config-reference_{context}"]
|
||||
= ClusterInstance CR installation reference
|
||||
|
||||
For a detailed API reference for the `ClusterInstance` custom resource, see link:https://docs.redhat.com/en/documentation/red_hat_advanced_cluster_management_for_kubernetes/{rh-rhacm-version}/html-single/apis/index#clusterinstance-api[ClusterInstance API] in the {rh-rhacm-first} documentation.
|
||||
@@ -5,9 +5,9 @@
|
||||
|
||||
:_module-type: PROCEDURE
|
||||
[id="ztp-configuring-disk-partitioning_{context}"]
|
||||
= Configuring disk partitioning with SiteConfig
|
||||
= Configuring disk partitioning with ClusterInstance
|
||||
|
||||
Configure disk partitioning for a managed cluster using a `SiteConfig` CR and {ztp-first}. The disk partition details in the `SiteConfig` CR must match the underlying disk.
|
||||
Configure disk partitioning for a managed cluster using a `ClusterInstance` CR and {ztp-first}. The disk partition details in the `ClusterInstance` CR must match the underlying disk.
|
||||
|
||||
[IMPORTANT]
|
||||
====
|
||||
@@ -65,16 +65,22 @@ $ butane storage.bu
|
||||
|
||||
. Use a tool such as link:https://jsonformatter.org/json-pretty-print[JSON Pretty Print] to convert the output into JSON format.
|
||||
|
||||
. Copy the output into the `.spec.clusters.nodes.ignitionConfigOverride` field in the `SiteConfig` CR.
|
||||
. Copy the output into the `spec.nodes[].ignitionConfigOverride` field in the `ClusterInstance` CR.
|
||||
+
|
||||
.Example
|
||||
[source,yaml]
|
||||
----
|
||||
[...]
|
||||
apiVersion: siteconfig.open-cluster-management.io/v1alpha1
|
||||
kind: ClusterInstance
|
||||
metadata:
|
||||
name: "example-sno"
|
||||
namespace: "example-sno"
|
||||
spec:
|
||||
clusters:
|
||||
- nodes:
|
||||
- ignitionConfigOverride: |
|
||||
# ...
|
||||
nodes:
|
||||
- hostName: "node1.example.com"
|
||||
role: "master"
|
||||
ignitionConfigOverride: |
|
||||
{
|
||||
"ignition": {
|
||||
"version": "3.2.0"
|
||||
@@ -116,12 +122,11 @@ spec:
|
||||
]
|
||||
}
|
||||
}
|
||||
[...]
|
||||
----
|
||||
+
|
||||
[NOTE]
|
||||
====
|
||||
If the `.spec.clusters.nodes.ignitionConfigOverride` field does not exist, create it.
|
||||
If the `spec.nodes[].ignitionConfigOverride` field does not exist, create it.
|
||||
====
|
||||
|
||||
.Verification
|
||||
@@ -150,7 +155,7 @@ $ oc get bmh -n my-sno-ns my-sno -ojson | jq '.metadata.annotations["bmac.agent-
|
||||
$ oc debug node/my-sno-node
|
||||
----
|
||||
|
||||
.. Set `/host` as the root directory within the debug shell by running the following command. The debug pod mounts the host’s root file system in `/host` within the pod. By changing the root directory to `/host`, you can run binaries contained in the host’s executable paths:
|
||||
.. Set `/host` as the root directory within the debug shell by running the following command. The debug pod mounts the host's root file system in `/host` within the pod. By changing the root directory to `/host`, you can run binaries contained in the host's executable paths:
|
||||
+
|
||||
[source,terminal]
|
||||
----
|
||||
|
||||
@@ -13,8 +13,8 @@ Tune hosts with specific hardware profiles in your lab and ensure they are optim
|
||||
When you have completed host tuning to your satisfaction, you extract the host profile and save it in your {ztp} repository.
|
||||
Then, you use the host profile to configure firmware settings in the managed cluster hosts that you deploy with {ztp}.
|
||||
|
||||
You specify the required hardware profiles in `SiteConfig` custom resources (CRs) that you use to deploy the managed clusters.
|
||||
The {ztp} pipeline generates the required `HostFirmwareSettings` (`HFS`) and `BareMetalHost` (`BMH`) CRs that are applied to the hub cluster.
|
||||
You specify the required hardware profiles by creating `HostFirmwareSettings` CRs, packaging them in `ConfigMap` resources, and referencing them in the `templateRefs` field of your `ClusterInstance` CR.
|
||||
The SiteConfig Operator generates the required `HostFirmwareSettings` and `BareMetalHost` CRs that are applied to the hub cluster.
|
||||
|
||||
Use the following best practices to manage your host firmware profiles.
|
||||
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
|
||||
:_mod-docs-content-type: PROCEDURE
|
||||
[id="ztp-configuring-ipsec-using-ztp-and-siteconfig-for-mno_{context}"]
|
||||
= Configuring IPsec encryption for multi-node clusters using {ztp} and SiteConfig resources
|
||||
= Configuring IPsec encryption for multi-node clusters using {ztp} and ClusterInstance resources
|
||||
|
||||
You can enable IPsec encryption in managed multi-node clusters that you install using {ztp} and {rh-rhacm-first}.
|
||||
You can encrypt traffic between the managed cluster and IPsec endpoints external to the managed cluster. All network traffic between nodes on the OVN-Kubernetes cluster network is encrypted with IPsec in Transport mode.
|
||||
@@ -15,6 +15,8 @@ You can encrypt traffic between the managed cluster and IPsec endpoints external
|
||||
|
||||
* You have logged in to the hub cluster as a user with `cluster-admin` privileges.
|
||||
|
||||
* You have installed the SiteConfig Operator in the hub cluster.
|
||||
|
||||
* You have configured {rh-rhacm} and the hub cluster for generating the required installation and policy custom resources (CRs) for managed clusters.
|
||||
|
||||
* You have created a Git repository where you manage your custom site configuration data.
|
||||
@@ -119,40 +121,72 @@ out
|
||||
<1> The `ipsec/import-certs.sh` script generates the Butane and endpoint configuration CRs.
|
||||
<2> Add the `ca.pem` and `left_server.p12` certificate files that are relevant to your network.
|
||||
|
||||
. Create a `custom-manifest/` folder in the repository where you manage your custom site configuration data and add the `enable-ipsec.yaml` and `99-ipsec-*` YAML files to the directory.
|
||||
. Create an `ipsec-manifests/` folder in the repository where you manage your custom site configuration data and add the `enable-ipsec.yaml` and `99-ipsec-*` YAML files to the directory.
|
||||
+
|
||||
.Example `siteconfig` directory
|
||||
.Example site configuration directory
|
||||
[source,terminal]
|
||||
----
|
||||
siteconfig
|
||||
├── site1-mno-du.yaml
|
||||
├── extra-manifest/
|
||||
└── custom-manifest
|
||||
├── enable-ipsec.yaml
|
||||
├── 99-ipsec-master-import-certs.yaml
|
||||
└── 99-ipsec-worker-import-certs.yaml
|
||||
site-configs/
|
||||
├── hub-1/
|
||||
│ └── clusterinstance-site1-mno-du.yaml
|
||||
├── ipsec-manifests/
|
||||
│ ├── enable-ipsec.yaml
|
||||
│ ├── 99-ipsec-master-import-certs.yaml
|
||||
│ └── 99-ipsec-worker-import-certs.yaml
|
||||
└── kustomization.yaml
|
||||
----
|
||||
|
||||
. In your `SiteConfig` CR, add the `custom-manifest/` directory to the `extraManifests.searchPaths` field, as in the following example:
|
||||
. Create a `kustomization.yaml` file that uses `configMapGenerator` to package your IPsec manifests into a `ConfigMap`:
|
||||
+
|
||||
[source,yaml]
|
||||
----
|
||||
clusters:
|
||||
- clusterName: "site1-mno-du"
|
||||
networkType: "OVNKubernetes"
|
||||
extraManifests:
|
||||
searchPaths:
|
||||
- extra-manifest/
|
||||
- custom-manifest/
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
resources:
|
||||
- hub-1/clusterinstance-site1-mno-du.yaml
|
||||
configMapGenerator:
|
||||
- name: ipsec-manifests-cm
|
||||
namespace: site1-mno-du <1>
|
||||
files:
|
||||
- ipsec-manifests/enable-ipsec.yaml
|
||||
- ipsec-manifests/99-ipsec-master-import-certs.yaml
|
||||
- ipsec-manifests/99-ipsec-worker-import-certs.yaml
|
||||
generatorOptions:
|
||||
disableNameSuffixHash: true <2>
|
||||
----
|
||||
<1> The namespace must match the `ClusterInstance` namespace.
|
||||
<2> Disables the hash suffix so the `ConfigMap` name is predictable.
|
||||
|
||||
. In your `ClusterInstance` CR, reference the `ConfigMap` in the `extraManifestsRefs` field:
|
||||
+
|
||||
[source,yaml]
|
||||
----
|
||||
apiVersion: siteconfig.open-cluster-management.io/v1alpha1
|
||||
kind: ClusterInstance
|
||||
metadata:
|
||||
name: "site1-mno-du"
|
||||
namespace: "site1-mno-du"
|
||||
spec:
|
||||
clusterName: "site1-mno-du"
|
||||
networkType: "OVNKubernetes"
|
||||
extraManifestsRefs:
|
||||
- name: ipsec-manifests-cm <1>
|
||||
# ...
|
||||
----
|
||||
<1> Reference to the `ConfigMap` containing the IPsec certificate import manifests.
|
||||
+
|
||||
[NOTE]
|
||||
====
|
||||
If you have other extra manifests, you can either include them in the same `ConfigMap` or create multiple `ConfigMap` resources and reference them all in `extraManifestsRefs`.
|
||||
====
|
||||
|
||||
. Include the `ipsec-config-policy.yaml` config policy file in the `source-crs` directory in GitOps and reference the file in one of the `PolicyGenerator` CRs.
|
||||
|
||||
. Commit the `SiteConfig` CR changes and updated files in your Git repository and push the changes to provision the managed cluster and configure IPsec encryption.
|
||||
. Commit the `ClusterInstance` CR, IPsec manifest files, and `kustomization.yaml` changes in your Git repository and push the changes to provision the managed cluster and configure IPsec encryption.
|
||||
+
|
||||
The Argo CD pipeline detects the changes and begins the managed cluster deployment.
|
||||
+
|
||||
During cluster provisioning, the {ztp} pipeline appends the CRs in the `custom-manifest/` directory to the default set of extra manifests stored in the `extra-manifest/` directory.
|
||||
During cluster provisioning, the SiteConfig Operator applies the CRs contained in the referenced `ConfigMap` resources as extra manifests. The IPsec configuration policy is applied as a Day 2 operation after the cluster is provisioned.
|
||||
|
||||
.Verification
|
||||
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
|
||||
:_module-type: PROCEDURE
|
||||
[id="ztp-configuring-ipsec-using-ztp-and-siteconfig_{context}"]
|
||||
= Configuring IPsec encryption for {sno} clusters using {ztp} and SiteConfig resources
|
||||
= Configuring IPsec encryption for {sno} clusters using {ztp} and ClusterInstance resources
|
||||
|
||||
You can enable IPsec encryption in managed {sno} clusters that you install using {ztp} and {rh-rhacm-first}.
|
||||
You can encrypt traffic between the managed cluster and IPsec endpoints external to the managed cluster. All network traffic between nodes on the OVN-Kubernetes cluster network is encrypted with IPsec in Transport mode.
|
||||
@@ -20,6 +20,8 @@ You can also configure IPsec encryption for {sno} clusters with an additional wo
|
||||
|
||||
* You have logged in to the hub cluster as a user with `cluster-admin` privileges.
|
||||
|
||||
* You have installed the SiteConfig Operator in the hub cluster.
|
||||
|
||||
* You have configured {rh-rhacm} and the hub cluster for generating the required installation and policy custom resources (CRs) for managed clusters.
|
||||
|
||||
* You have created a Git repository where you manage your custom site configuration data.
|
||||
@@ -94,40 +96,71 @@ out
|
||||
<1> The `ipsec/build.sh` script generates the Butane and endpoint configuration CRs.
|
||||
<2> You provide `ca.pem` and `left_server.p12` certificate files that are relevant to your network.
|
||||
|
||||
. Create a `custom-manifest/` folder in the repository where you manage your custom site configuration data.
|
||||
. Create an `ipsec-manifests/` folder in the repository where you manage your custom site configuration data.
|
||||
Add the `enable-ipsec.yaml` and `99-ipsec-*` YAML files to the directory.
|
||||
For example:
|
||||
+
|
||||
[source,terminal]
|
||||
----
|
||||
siteconfig
|
||||
├── site1-sno-du.yaml
|
||||
├── extra-manifest/
|
||||
└── custom-manifest
|
||||
├── enable-ipsec.yaml
|
||||
├── 99-ipsec-worker-endpoint-config.yaml
|
||||
└── 99-ipsec-master-endpoint-config.yaml
|
||||
site-configs/
|
||||
├── hub-1/
|
||||
│ └── clusterinstance-site1-sno-du.yaml
|
||||
├── ipsec-manifests/
|
||||
│ ├── enable-ipsec.yaml
|
||||
│ ├── 99-ipsec-worker-endpoint-config.yaml
|
||||
│ └── 99-ipsec-master-endpoint-config.yaml
|
||||
└── kustomization.yaml
|
||||
----
|
||||
|
||||
. In your `SiteConfig` CR, add the `custom-manifest/` directory to the `extraManifests.searchPaths` field.
|
||||
For example:
|
||||
. Create a `kustomization.yaml` file that uses `configMapGenerator` to package your IPsec manifests into a `ConfigMap`:
|
||||
+
|
||||
[source,yaml]
|
||||
----
|
||||
clusters:
|
||||
- clusterName: "site1-sno-du"
|
||||
networkType: "OVNKubernetes"
|
||||
extraManifests:
|
||||
searchPaths:
|
||||
- extra-manifest/
|
||||
- custom-manifest/
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
resources:
|
||||
- hub-1/clusterinstance-site1-sno-du.yaml
|
||||
configMapGenerator:
|
||||
- name: ipsec-manifests-cm
|
||||
namespace: site1-sno-du <1>
|
||||
files:
|
||||
- ipsec-manifests/enable-ipsec.yaml
|
||||
- ipsec-manifests/99-ipsec-master-endpoint-config.yaml
|
||||
- ipsec-manifests/99-ipsec-worker-endpoint-config.yaml
|
||||
generatorOptions:
|
||||
disableNameSuffixHash: true <2>
|
||||
----
|
||||
<1> The namespace must match the `ClusterInstance` namespace.
|
||||
<2> Disables the hash suffix so the `ConfigMap` name is predictable.
|
||||
|
||||
. Commit the `SiteConfig` CR changes and updated files in your Git repository and push the changes to provision the managed cluster and configure IPsec encryption.
|
||||
. In your `ClusterInstance` CR, reference the `ConfigMap` in the `extraManifestsRefs` field:
|
||||
+
|
||||
[source,yaml]
|
||||
----
|
||||
apiVersion: siteconfig.open-cluster-management.io/v1alpha1
|
||||
kind: ClusterInstance
|
||||
metadata:
|
||||
name: "site1-sno-du"
|
||||
namespace: "site1-sno-du"
|
||||
spec:
|
||||
clusterName: "site1-sno-du"
|
||||
networkType: "OVNKubernetes"
|
||||
extraManifestsRefs:
|
||||
- name: ipsec-manifests-cm <1>
|
||||
# ...
|
||||
----
|
||||
<1> Reference to the `ConfigMap` containing the IPsec manifests.
|
||||
+
|
||||
[NOTE]
|
||||
====
|
||||
If you have other extra manifests, you can either include them in the same `ConfigMap` or create multiple `ConfigMap` resources and reference each of those in the `extraManifestsRefs` field.
|
||||
====
|
||||
|
||||
. Commit the `ClusterInstance` CR, IPsec manifest files, and `kustomization.yaml` changes in your Git repository and push the changes to provision the managed cluster and configure IPsec encryption.
|
||||
+
|
||||
The Argo CD pipeline detects the changes and begins the managed cluster deployment.
|
||||
+
|
||||
During cluster provisioning, the {ztp} pipeline appends the CRs in the `custom-manifest/` directory to the default set of extra manifests stored in the `extra-manifest/` directory.
|
||||
During cluster provisioning, the SiteConfig Operator applies the CRs contained in the referenced `ConfigMap` resources as extra manifests.
|
||||
|
||||
.Verification
|
||||
|
||||
|
||||
@@ -17,7 +17,7 @@ In {product-title} {product-version}, you can only add kernel arguments. You can
|
||||
|
||||
* You have installed the OpenShift CLI (oc).
|
||||
* You have logged in to the hub cluster as a user with cluster-admin privileges.
|
||||
* You have manually generated the installation and configuration custom resources (CRs).
|
||||
* You have applied a `ClusterInstance` CR to the hub cluster.
|
||||
|
||||
.Procedure
|
||||
|
||||
@@ -47,7 +47,7 @@ spec:
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
The `SiteConfig` CR generates the `InfraEnv` resource as part of the day-0 installation CRs.
|
||||
The `ClusterInstance` CR generates the `InfraEnv` resource as part of the day-0 installation CRs.
|
||||
====
|
||||
|
||||
.Verification
|
||||
|
||||
@@ -5,7 +5,9 @@
|
||||
[id="setting-managed-bare-metal-host-kernel-arguments_{context}"]
|
||||
= Configuring Discovery ISO kernel arguments for installations using {ztp}
|
||||
|
||||
The {ztp-first} workflow uses the Discovery ISO as part of the {product-title} installation process on managed bare-metal hosts. You can edit the `InfraEnv` resource to specify kernel arguments for the Discovery ISO. This is useful for cluster installations with specific environmental requirements. For example, configure the `rd.net.timeout.carrier` kernel argument for the Discovery ISO to facilitate static networking for the cluster or to receive a DHCP address before downloading the root file system during installation.
|
||||
The {ztp-first} workflow uses the Discovery ISO as part of the {product-title} installation process on managed bare-metal hosts. You can edit the `InfraEnv` resource to specify kernel arguments for the Discovery ISO. This is useful for cluster installations with specific environmental requirements.
|
||||
|
||||
For example, configure the `rd.net.timeout.carrier` kernel argument for the Discovery ISO to facilitate static networking for the cluster or to receive a DHCP address before downloading the root file system during installation.
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
@@ -25,7 +27,7 @@ In {product-title} {product-version}, you can only add kernel arguments. You can
|
||||
+
|
||||
[NOTE]
|
||||
====
|
||||
The `InfraEnv` CR in this example uses template syntax such as `{{ .Cluster.ClusterName }}` that is populated based on values in the `SiteConfig` CR. The `SiteConfig` CR automatically populates values for these templates during deployment. Do not edit the templates manually.
|
||||
The `InfraEnv` CR in this example uses template syntax such as `{{ .Cluster.ClusterName }}` that is populated based on values in the `ClusterInstance` CR. The `ClusterInstance` CR automatically populates values for these templates during deployment. Do not edit the templates manually.
|
||||
====
|
||||
+
|
||||
[source,yaml]
|
||||
@@ -59,28 +61,58 @@ spec:
|
||||
<1> Specify the append operation to add a kernel argument.
|
||||
<2> Specify the kernel argument you want to configure. This example configures the audit kernel argument and the trace kernel argument.
|
||||
|
||||
. Commit the `InfraEnv-example.yaml` CR to the same location in your Git repository that has the `SiteConfig` CR and push your changes. The following example shows a sample Git repository structure:
|
||||
|
||||
. Commit the `InfraEnv-example.yaml` file to your Git repository and push your changes. The following example shows a sample Git repository structure:
|
||||
+
|
||||
[source,text]
|
||||
----
|
||||
~/example-ztp/install
|
||||
└── site-install
|
||||
├── siteconfig-example.yaml
|
||||
├── clusterinstance-example.yaml
|
||||
├── InfraEnv-example.yaml
|
||||
...
|
||||
└── kustomization.yaml
|
||||
----
|
||||
|
||||
. Edit the `spec.clusters.crTemplates` specification in the `SiteConfig` CR to reference the `InfraEnv-example.yaml` CR in your Git repository:
|
||||
. Update the `kustomization.yaml` file to use the `configMapGenerator` field to package the `InfraEnv` CR into a `ConfigMap`:
|
||||
+
|
||||
[source,yaml,options="nowrap",role="white-space-pre"]
|
||||
[source,yaml]
|
||||
----
|
||||
clusters:
|
||||
crTemplates:
|
||||
InfraEnv: "InfraEnv-example.yaml"
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
resources:
|
||||
- clusterinstance-example.yaml <1>
|
||||
configMapGenerator:
|
||||
- name: custom-infraenv-cm <2>
|
||||
namespace: example-cluster <3>
|
||||
files:
|
||||
- InfraEnv-example.yaml
|
||||
generatorOptions:
|
||||
disableNameSuffixHash: true
|
||||
----
|
||||
<1> The name of the `ClusterInstance` CR.
|
||||
<2> The name of the `ConfigMap` that contains the custom `InfraEnv` CR.
|
||||
<3> The namespace must match the `ClusterInstance` namespace.
|
||||
|
||||
. In your `ClusterInstance` CR, reference the `ConfigMap` in the `spec.templateRefs` field:
|
||||
+
|
||||
When you are ready to deploy your cluster by committing and pushing the `SiteConfig` CR, the build pipeline uses the custom `InfraEnv-example` CR in your Git repository to configure the infrastructure environment, including the custom kernel arguments.
|
||||
[source,yaml]
|
||||
----
|
||||
apiVersion: siteconfig.open-cluster-management.io/v1alpha1
|
||||
kind: ClusterInstance
|
||||
metadata:
|
||||
name: "example-cluster"
|
||||
namespace: "example-cluster"
|
||||
spec:
|
||||
clusterName: "example-cluster"
|
||||
templateRefs:
|
||||
- name: custom-infraenv-cm <1>
|
||||
namespace: example-cluster
|
||||
# ...
|
||||
----
|
||||
<1> Reference to the `ConfigMap` CR that contains the custom `InfraEnv` CR template.
|
||||
|
||||
. Commit the `ClusterInstance` CR and `kustomization.yaml` to your Git repository and push your changes.
|
||||
+
|
||||
When the Argo CD pipeline syncs the changes, the SiteConfig Operator uses the custom `InfraEnv-example` CR from the generated `ConfigMap` to configure the infrastructure environment, including the custom kernel arguments.
|
||||
|
||||
.Verification
|
||||
To verify that the kernel arguments are applied, after the Discovery image verifies that {product-title} is ready for installation, you can SSH to the target host before the installation process begins. At that point, you can view the kernel arguments for the Discovery ISO in the `/proc/cmdline` file.
|
||||
|
||||
@@ -67,7 +67,7 @@ sourceFiles:
|
||||
claim: "image-registry-pvc"
|
||||
----
|
||||
<1> Set the appropriate value for `ztp-deploy-wave` depending on whether you are configuring image registries at the site, common, or group level. `ztp-deploy-wave: "100"` is suitable for development or testing because it allows you to group the referenced source files together.
|
||||
<2> In `ImageRegistryPV.yaml`, ensure that the `spec.local.path` field is set to `/var/imageregistry` to match the value set for the `mount_point` field in the `SiteConfig` CR.
|
||||
<2> In `ImageRegistryPV.yaml`, ensure that the `spec.local.path` field is set to `/var/imageregistry` to match the value set for the `mount_point` field in the `ClusterInstance` CR.
|
||||
|
||||
+
|
||||
[IMPORTANT]
|
||||
|
||||
@@ -27,45 +27,33 @@ Create a parallel Argo CD project and application to manage the new `ClusterInst
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: AppProject
|
||||
metadata:
|
||||
name: ztp-app-project-v2
|
||||
name: ztp-app-project
|
||||
namespace: openshift-gitops
|
||||
annotations:
|
||||
argocd.argoproj.io/sync-wave: "100"
|
||||
spec:
|
||||
clusterResourceWhitelist:
|
||||
- group: hive.openshift.io
|
||||
- group: 'hive.openshift.io'
|
||||
kind: ClusterImageSet
|
||||
- group: hive.openshift.io
|
||||
kind: ClusterImageSet
|
||||
- group: cluster.open-cluster-management.io
|
||||
- group: 'cluster.open-cluster-management.io'
|
||||
kind: ManagedCluster
|
||||
- group: ""
|
||||
- group: ''
|
||||
kind: Namespace
|
||||
destinations:
|
||||
- namespace: '*'
|
||||
server: '*'
|
||||
namespaceResourceWhitelist:
|
||||
- group: ""
|
||||
- group: ''
|
||||
kind: ConfigMap
|
||||
- group: ""
|
||||
- group: ''
|
||||
kind: Namespace
|
||||
- group: ""
|
||||
- group: ''
|
||||
kind: Secret
|
||||
- group: agent-install.openshift.io
|
||||
kind: InfraEnv
|
||||
- group: agent-install.openshift.io
|
||||
kind: NMStateConfig
|
||||
- group: extensions.hive.openshift.io
|
||||
kind: AgentClusterInstall
|
||||
- group: hive.openshift.io
|
||||
kind: ClusterDeployment
|
||||
- group: metal3.io
|
||||
kind: BareMetalHost
|
||||
- group: metal3.io
|
||||
kind: HostFirmwareSettings
|
||||
- group: agent.open-cluster-management.io
|
||||
kind: KlusterletAddonConfig
|
||||
- group: cluster.open-cluster-management.io
|
||||
kind: ManagedCluster
|
||||
- group: siteconfig.open-cluster-management.io
|
||||
- group: 'extensions.hive.openshift.io'
|
||||
kind: ImageClusterInstall
|
||||
- group: 'metal3.io'
|
||||
kind: DataImage
|
||||
- group: 'siteconfig.open-cluster-management.io'
|
||||
kind: ClusterInstance <1>
|
||||
sourceRepos:
|
||||
- '*'
|
||||
|
||||
@@ -11,8 +11,8 @@ Add the required `Secret` custom resources (CRs) for the managed bare-metal host
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
The secrets are referenced from the `SiteConfig` CR by name. The namespace
|
||||
must match the `SiteConfig` namespace.
|
||||
The secrets are referenced from the `ClusterInstance` CR by name. The namespace
|
||||
must match the `ClusterInstance` namespace.
|
||||
====
|
||||
|
||||
.Procedure
|
||||
@@ -42,9 +42,9 @@ data:
|
||||
.dockerconfigjson: <pull_secret> <4>
|
||||
type: kubernetes.io/dockerconfigjson
|
||||
----
|
||||
<1> Must match the namespace configured in the related `SiteConfig` CR
|
||||
<1> Must match the namespace configured in the related `ClusterInstance` CR
|
||||
<2> Base64-encoded values for `password` and `username`
|
||||
<3> Must match the namespace configured in the related `SiteConfig` CR
|
||||
<3> Must match the namespace configured in the related `ClusterInstance` CR
|
||||
<4> Base64-encoded pull secret
|
||||
|
||||
. Add the relative path to `example-sno-secret.yaml` to the `kustomization.yaml` file that you use to install the cluster.
|
||||
|
||||
@@ -4,16 +4,13 @@
|
||||
|
||||
:_mod-docs-content-type: CONCEPT
|
||||
[id="ztp-creating-ztp-crs-for-multiple-managed-clusters_{context}"]
|
||||
= Installing managed clusters with SiteConfig resources and {rh-rhacm}
|
||||
= Installing managed clusters with ClusterInstance resources and {rh-rhacm}
|
||||
|
||||
{ztp-first} uses `SiteConfig` custom resources (CRs) in a Git repository to manage the processes that install {product-title} clusters. The `SiteConfig` CR contains cluster-specific parameters required for installation. It has options for applying select configuration CRs during installation including user defined extra manifests.
|
||||
|
||||
The {ztp} plugin processes `SiteConfig` CRs to generate a collection of CRs on the hub cluster. This triggers the assisted service in {rh-rhacm-first} to install {product-title} on the bare-metal host. You can find installation status and error messages in these CRs on the hub cluster.
|
||||
{ztp-first} uses `ClusterInstance` custom resources (CRs) in a Git repository to manage the processes that install {product-title} clusters. The `ClusterInstance` CR contains cluster-specific parameters required for installation. It has options for applying select configuration CRs during installation including user defined extra manifests.
|
||||
|
||||
The {ztp} plugin processes `ClusterInstance` CRs to generate a collection of CRs on the hub cluster. This triggers the assisted service in {rh-rhacm-first} to install {product-title} on the bare-metal host. You can find installation status and error messages in these CRs on the hub cluster.
|
||||
You can provision single clusters manually or in batches with {ztp}:
|
||||
|
||||
Provisioning a single cluster:: Create a single `SiteConfig` CR and related installation and configuration CRs for the cluster, and apply them in the hub cluster to begin cluster provisioning. This is a good way to test your CRs before deploying on a larger scale.
|
||||
Provisioning a single cluster:: Create a single `ClusterInstance` CR and related configuration CRs for the cluster, and apply them in the hub cluster to begin cluster provisioning. This is a good way to test your CRs before deploying on a larger scale.
|
||||
|
||||
Provisioning many clusters:: Install managed clusters in batches of up to 400 by defining `SiteConfig` and related CRs in a Git repository. ArgoCD uses the `SiteConfig` CRs to deploy the sites. The {rh-rhacm} policy generator creates the manifests and applies them to the hub cluster. This starts the cluster provisioning process.
|
||||
|
||||
include::snippets/siteconfig-deprecation-notice.adoc[]
|
||||
Provisioning many clusters:: Install managed clusters in batches of up to 500 by defining `ClusterInstance` and related CRs in a Git repository. ArgoCD uses the `ClusterInstance` CRs to deploy the clusters. The {rh-rhacm} policy generator creates the manifests and applies them to the hub cluster. This starts the cluster provisioning process.
|
||||
|
||||
@@ -24,11 +24,11 @@ Use the following procedure to customize the policies that get applied to the ma
|
||||
|
||||
.. Choose the appropriate example for your CR from the `{argocd-folder}` folder, for example, `{policy-prefix}example-sno-site.yaml` or `{policy-prefix}example-multinode-site.yaml`.
|
||||
|
||||
.. Change the `{binding-field}` field in the example file to match the site-specific label included in the `SiteConfig` CR. In the example `SiteConfig` file, the site-specific label is `sites: example-sno`.
|
||||
.. Change the `{binding-field}` field in the example file to match the site-specific label included in the `ClusterInstance` CR. In the example `ClusterInstance` file, the site-specific label is `sites: example-sno`.
|
||||
+
|
||||
[NOTE]
|
||||
====
|
||||
Ensure that the labels defined in your `{policy-gen-cr}` `{binding-field}` field correspond to the labels that are defined in the related managed clusters `SiteConfig` CR.
|
||||
Ensure that the labels defined in your `{policy-gen-cr}` `{binding-field}` field correspond to the labels that are defined in the related managed clusters `ClusterInstance` CR.
|
||||
====
|
||||
|
||||
.. Change the content in the example file to match the desired configuration.
|
||||
@@ -65,4 +65,4 @@ Do not include the `Namespace` CR in the same file with the `{policy-gen-cr}` CR
|
||||
|
||||
. Commit the `{policy-gen-cr}` CRs, `Namespace` CR, and associated `kustomization.yaml` file in your Git repository and push the changes.
|
||||
+
|
||||
The ArgoCD pipeline detects the changes and begins the managed cluster deployment. You can push the changes to the `SiteConfig` CR and the `{policy-gen-cr}` CR simultaneously.
|
||||
The ArgoCD pipeline detects the changes and begins the managed cluster deployment. You can push the changes to the `ClusterInstance` CR and the `{policy-gen-cr}` CR simultaneously.
|
||||
|
||||
@@ -6,7 +6,9 @@
|
||||
[id="ztp-customizing-the-install-extra-manifests_{context}"]
|
||||
= Customizing extra installation manifests in the {ztp} pipeline
|
||||
|
||||
You can define a set of extra manifests for inclusion in the installation phase of the {ztp-first} pipeline. These manifests are linked to the `SiteConfig` custom resources (CRs) and are applied to the cluster during installation. Including `MachineConfig` CRs at install time makes the installation process more efficient.
|
||||
You can define a set of extra manifests for inclusion in the installation phase of the {ztp-first} pipeline. These manifests are linked to the `ClusterInstance` custom resources (CRs) and are applied to the cluster during installation. Including `MachineConfig` CRs at install time makes the installation process more efficient.
|
||||
|
||||
Extra manifests must be packaged in `ConfigMap` resources and referenced in the `extraManifestsRefs` field of the `ClusterInstance` CR.
|
||||
|
||||
.Prerequisites
|
||||
|
||||
@@ -16,53 +18,63 @@ You can define a set of extra manifests for inclusion in the installation phase
|
||||
|
||||
. Create a set of extra manifest CRs that the {ztp} pipeline uses to customize the cluster installs.
|
||||
|
||||
. In your custom `/siteconfig` directory, create a subdirectory `/custom-manifest` for your extra manifests. The following example illustrates a sample `/siteconfig` with `/custom-manifest` folder:
|
||||
. In your `/clusterinstance` directory, create a subdirectory with your extra manifests. The following example illustrates a sample folder structure:
|
||||
+
|
||||
[source,text]
|
||||
----
|
||||
siteconfig
|
||||
clusterinstance/
|
||||
├── site1-sno-du.yaml
|
||||
├── site2-standard-du.yaml
|
||||
├── extra-manifest/
|
||||
└── custom-manifest
|
||||
└── 01-example-machine-config.yaml
|
||||
│ ├── 01-example-machine-config.yaml
|
||||
│ ├── enable-crun-master.yaml
|
||||
│ └── enable-crun-worker.yaml
|
||||
└── kustomization.yaml
|
||||
----
|
||||
+
|
||||
[NOTE]
|
||||
====
|
||||
The subdirectory names `/custom-manifest` and `/extra-manifest` used throughout are example names only. There is no requirement to use these names and no restriction on how you name these subdirectories.
|
||||
In this example `/extra-manifest` refers to the Git subdirectory that stores the contents of `/extra-manifest` from the `ztp-site-generate` container.
|
||||
====
|
||||
|
||||
. Add your custom extra manifest CRs to the `siteconfig/custom-manifest` directory.
|
||||
|
||||
. In your `SiteConfig` CR, enter the directory name in the `extraManifests.searchPaths` field, for example:
|
||||
. Create or update the `kustomization.yaml` file to use `configMapGenerator` to package your extra manifests into a `ConfigMap`:
|
||||
+
|
||||
[source,yaml]
|
||||
----
|
||||
clusters:
|
||||
- clusterName: "example-sno"
|
||||
networkType: "OVNKubernetes"
|
||||
extraManifests:
|
||||
searchPaths:
|
||||
- extra-manifest/ <1>
|
||||
- custom-manifest/ <2>
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
resources:
|
||||
- site1-sno-du.yaml
|
||||
configMapGenerator:
|
||||
- name: extra-manifests-cm
|
||||
namespace: site1-sno-du <1>
|
||||
files:
|
||||
- extra-manifest/01-example-machine-config.yaml
|
||||
- extra-manifest/enable-crun-master.yaml
|
||||
- extra-manifest/enable-crun-worker.yaml
|
||||
generatorOptions:
|
||||
disableNameSuffixHash: true <2>
|
||||
----
|
||||
<1> Folder for manifests copied from the `ztp-site-generate` container.
|
||||
<2> Folder for custom manifests.
|
||||
<1> The namespace must match the `ClusterInstance` namespace.
|
||||
<2> Disables the hash suffix so the `ConfigMap` name is predictable.
|
||||
|
||||
. Save the `SiteConfig`, `/extra-manifest`, and `/custom-manifest` CRs, and push them to the site configuration repo.
|
||||
. In your `ClusterInstance` CR, reference the `ConfigMap` in the `extraManifestsRefs` field:
|
||||
+
|
||||
[source,yaml]
|
||||
----
|
||||
apiVersion: siteconfig.open-cluster-management.io/v1alpha1
|
||||
kind: ClusterInstance
|
||||
metadata:
|
||||
name: "site1-sno-du"
|
||||
namespace: "site1-sno-du"
|
||||
spec:
|
||||
clusterName: "site1-sno-du"
|
||||
networkType: "OVNKubernetes"
|
||||
extraManifestsRefs:
|
||||
- name: extra-manifests-cm <1>
|
||||
# ...
|
||||
----
|
||||
<1> Reference to the `ConfigMap` containing the extra manifests.
|
||||
|
||||
During cluster provisioning, the {ztp} pipeline appends the CRs in the `/custom-manifest` directory to the default set of extra manifests stored in `extra-manifest/`.
|
||||
. Commit the `ClusterInstance` CR, extra manifest files, and `kustomization.yaml` to your Git repository and push the changes.
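+
For example, the following commands show a minimal sketch of committing and pushing the changes. The file names, commit message, and branch name are illustrative:
+
[source,terminal]
----
$ git add site1-sno-du.yaml extra-manifest/ kustomization.yaml
$ git commit -m "Add extra manifest ConfigMap for site1-sno-du"
$ git push origin main
----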
|
||||
|
||||
During cluster provisioning, the SiteConfig Operator applies the CRs contained in the referenced `ConfigMap` resources as extra manifests.
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
As of version 4.14 `extraManifestPath` is subject to a deprecation warning.
|
||||
|
||||
While `extraManifestPath` is still supported, we recommend that you use `extraManifests.searchPaths`.
|
||||
If you define `extraManifests.searchPaths` in the `SiteConfig` file, the {ztp} pipeline does not fetch manifests from the `ztp-site-generate` container during site installation.
|
||||
|
||||
If you define both `extraManifestPath` and `extraManifests.searchPaths` in the `Siteconfig` CR, the setting defined for `extraManifests.searchPaths` takes precedence.
|
||||
|
||||
It is strongly recommended that you extract the contents of `/extra-manifest` from the `ztp-site-generate` container and push it to the GIT repository.
|
||||
====
|
||||
You can reference multiple `ConfigMap` resources in `extraManifestsRefs` to organize your manifests logically. For example, you might have separate `ConfigMap` resources for crun configuration, custom `MachineConfig` CRs, and other Day 0 configurations, as shown in the sketch that follows this note.
|
||||
====
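The following is a minimal sketch of referencing multiple `ConfigMap` resources. The `ConfigMap` names are illustrative:

[source,yaml]
----
spec:
  # ...
  extraManifestsRefs:
  - name: crun-configs-cm
  - name: custom-machineconfigs-cm
  - name: day0-configs-cm
----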
|
||||
|
||||
@@ -3,10 +3,10 @@
|
||||
// * edge_computing/ztp-advanced-install-ztp.adoc
|
||||
|
||||
:_mod-docs-content-type: PROCEDURE
|
||||
[id="ztp-deleting-node-siteconfig_{context}"]
|
||||
= Deleting a node by using the SiteConfig CR
|
||||
[id="ztp-deleting-node-clusterinstance_{context}"]
|
||||
= Deleting a node by using the ClusterInstance CR
|
||||
|
||||
By using a `SiteConfig` custom resource (CR), you can delete and reprovision a node.
|
||||
By using a `ClusterInstance` custom resource (CR), you can delete and reprovision a node.
|
||||
This method is more efficient than manually deleting the node.
|
||||
|
||||
.Prerequisites
|
||||
@@ -18,32 +18,31 @@ This method is more efficient than manually deleting the node.
|
||||
|
||||
.Procedure
|
||||
|
||||
. Update the `SiteConfig` CR to include the `bmac.agent-install.openshift.io/remove-agent-and-node-on-delete=true` annotation and push the changes to the Git repository:
|
||||
. Update the `ClusterInstance` CR to add the `bmac.agent-install.openshift.io/remove-agent-and-node-on-delete=true` annotation to the `BareMetalHost` resource for the node, and push the changes to the Git repository:
|
||||
+
|
||||
[source,yaml]
|
||||
----
|
||||
apiVersion: ran.openshift.io/v1
|
||||
kind: SiteConfig
|
||||
apiVersion: siteconfig.open-cluster-management.io/v1alpha1
|
||||
kind: ClusterInstance
|
||||
metadata:
|
||||
name: "cnfdf20"
|
||||
namespace: "cnfdf20"
|
||||
name: "example-cluster"
|
||||
namespace: "example-cluster"
|
||||
spec:
|
||||
clusters:
|
||||
nodes:
|
||||
- hostname: node6
|
||||
# ...
|
||||
nodes:
|
||||
- hostName: "worker-node2.example.com"
|
||||
role: "worker"
|
||||
crAnnotations:
|
||||
add:
|
||||
BareMetalHost:
|
||||
bmac.agent-install.openshift.io/remove-agent-and-node-on-delete: true
|
||||
extraAnnotations:
|
||||
BareMetalHost:
|
||||
bmac.agent-install.openshift.io/remove-agent-and-node-on-delete: "true"
|
||||
# ...
|
||||
----
|
||||
|
||||
. Verify that the `BareMetalHost` object is annotated by running the following command:
|
||||
+
|
||||
[source,yaml]
|
||||
[source,terminal]
|
||||
----
|
||||
oc get bmh -n <managed-cluster-namespace> <bmh-object> -ojsonpath='{.metadata}' | jq -r '.annotations["bmac.agent-install.openshift.io/remove-agent-and-node-on-delete"]'
|
||||
$ oc get bmh -n <cluster_namespace> <bmh_name> -ojsonpath='{.metadata}' | jq -r '.annotations["bmac.agent-install.openshift.io/remove-agent-and-node-on-delete"]'
|
||||
----
|
||||
+
|
||||
.Example output
|
||||
@@ -52,22 +51,23 @@ oc get bmh -n <managed-cluster-namespace> <bmh-object> -ojsonpath='{.metadata}'
|
||||
true
|
||||
----
|
||||
|
||||
. Suppress the generation of the `BareMetalHost` CR by updating the `SiteConfig` CR to include the `crSuppression.BareMetalHost` annotation:
|
||||
. Delete the `BareMetalHost` CR by configuring the `pruneManifests` field in the `ClusterInstance` CR to remove the target `BareMetalHost` resource:
|
||||
+
|
||||
[source,yaml]
|
||||
----
|
||||
apiVersion: ran.openshift.io/v1
|
||||
kind: SiteConfig
|
||||
apiVersion: siteconfig.open-cluster-management.io/v1alpha1
|
||||
kind: ClusterInstance
|
||||
metadata:
|
||||
name: "cnfdf20"
|
||||
namespace: "cnfdf20"
|
||||
name: "example-cluster"
|
||||
namespace: "example-cluster"
|
||||
spec:
|
||||
clusters:
|
||||
- nodes:
|
||||
- hostName: node6
|
||||
# ...
|
||||
nodes:
|
||||
- hostName: "worker-node2.example.com"
|
||||
role: "worker"
|
||||
crSuppression:
|
||||
- BareMetalHost
|
||||
pruneManifests:
|
||||
- apiVersion: metal3.io/v1alpha1
|
||||
kind: BareMetalHost
|
||||
# ...
|
||||
----
|
||||
|
||||
@@ -80,12 +80,12 @@ The status of the `BareMetalHost` CR should change to `deprovisioning`. Wait for
|
||||
+
|
||||
[source,terminal]
|
||||
----
|
||||
$ oc get bmh -n <cluster-ns>
|
||||
$ oc get bmh -n <cluster_namespace>
|
||||
----
|
||||
+
|
||||
[source,terminal]
|
||||
----
|
||||
$ oc get agent -n <cluster-ns>
|
||||
$ oc get agent -n <cluster_namespace>
|
||||
----
|
||||
|
||||
. Verify that the node record has been deleted from the spoke cluster by running the following command:
|
||||
@@ -101,7 +101,9 @@ If you are working with secrets, deleting a secret too early can cause an issue
|
||||
Delete the secret only after the node cleanup, when the current ArgoCD synchronization is complete.
|
||||
====
|
||||
|
||||
. After the `BareMetalHost` object is successfully deleted, remove the worker node definition from the `spec.nodes` section in the `ClusterInstance` CR and push the changes to the Git repository.
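+
For example, the following is a minimal sketch of the `spec.nodes` section after the worker node entry is removed. The remaining control plane host name is illustrative:
+
[source,yaml]
----
apiVersion: siteconfig.open-cluster-management.io/v1alpha1
kind: ClusterInstance
metadata:
  name: "example-cluster"
  namespace: "example-cluster"
spec:
  # ...
  nodes:
  - hostName: "master-node1.example.com" <1>
    role: "master"
# ...
----
<1> The `worker-node2.example.com` entry is removed from the list.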
|
||||
|
||||
.Next steps
|
||||
|
||||
To reprovision a node, delete the changes previously added to the `SiteConfig`, push the changes to the Git repository, and wait for the synchronization to complete.
|
||||
To reprovision a node, add the node definition back to the `spec.nodes` section in the `ClusterInstance` CR, push the changes to the Git repository, and wait for the synchronization to complete.
|
||||
This regenerates the `BareMetalHost` CR of the worker node and triggers the re-install of the node.
|
||||
|
||||
@@ -4,16 +4,21 @@
|
||||
|
||||
:_mod-docs-content-type: PROCEDURE
|
||||
[id="ztp-deploying-a-site_{context}"]
|
||||
= Deploying a managed cluster with SiteConfig and {ztp}
|
||||
= Deploying a managed cluster with ClusterInstance and {ztp}
|
||||
|
||||
Use the following procedure to create a `SiteConfig` custom resource (CR) and related files and initiate the {ztp-first} cluster deployment.
|
||||
Use the following procedure to create a `ClusterInstance` custom resource (CR) and related files and initiate the {ztp-first} cluster deployment.
|
||||
|
||||
include::snippets/siteconfig-deprecation-notice.adoc[]
|
||||
[NOTE]
|
||||
====
|
||||
You require {rh-rhacm-first} version 2.12 or later to install the SiteConfig Operator and use the `ClusterInstance` CR.
|
||||
====
|
||||
|
||||
.Prerequisites
|
||||
|
||||
* You have installed the OpenShift CLI (`oc`).
|
||||
|
||||
* You have installed the SiteConfig Operator in the hub cluster.
|
||||
|
||||
* You have logged in to the hub cluster as a user with `cluster-admin` privileges.
|
||||
|
||||
* You configured the hub cluster for generating the required installation and policy CRs.
|
||||
@@ -29,11 +34,11 @@ When you create the source repository, ensure that you patch the ArgoCD applicat
|
||||
+
|
||||
Network connectivity:: Your network requires DNS. Managed cluster hosts should be reachable from the hub cluster. Ensure that Layer 3 connectivity exists between the hub cluster and the managed cluster host.
|
||||
+
|
||||
Baseboard Management Controller (BMC) details:: {ztp} uses BMC username and password details to connect to the BMC during cluster installation. The {ztp} plugin manages the `ManagedCluster` CRs on the hub cluster based on the `SiteConfig` CR in your site Git repo. You create individual `BMCSecret` CRs for each host manually.
|
||||
Baseboard Management Controller (BMC) details:: {ztp} uses BMC username and password details to connect to the BMC during cluster installation. The {ztp} plugin manages the `ManagedCluster` CRs on the hub cluster based on the `ClusterInstance` CR in your site Git repo. You create individual `BMCSecret` CRs for each host manually.
|
||||
|
||||
.Procedure
|
||||
|
||||
. Create the required managed cluster secrets on the hub cluster. These resources must be in a namespace with a name matching the cluster name. For example, in `out/argocd/example/siteconfig/example-sno.yaml`, the cluster name and namespace is `example-sno`.
|
||||
. Create the required managed cluster secrets on the hub cluster. These resources must be in a namespace with a name matching the cluster name. For example, in `out/argocd/example/clusterinstance/example-sno.yaml`, the cluster name and namespace is `example-sno`.
|
||||
|
||||
.. Export the cluster namespace by running the following command:
|
||||
+
|
||||
@@ -53,12 +58,12 @@ $ oc create namespace $CLUSTERNS
|
||||
+
|
||||
[NOTE]
|
||||
====
|
||||
The secrets are referenced from the `SiteConfig` custom resource (CR) by name. The namespace must match the `SiteConfig` namespace.
|
||||
The secrets are referenced from the `ClusterInstance` custom resource (CR) by name. The namespace must match the `ClusterInstance` namespace.
|
||||
====
|
||||
|
||||
. Create a `SiteConfig` CR for your cluster in your local clone of the Git repository:
|
||||
. Create a `ClusterInstance` CR for your cluster in your local clone of the Git repository:
|
||||
|
||||
.. Choose the appropriate example for your CR from the `out/argocd/example/siteconfig/` folder.
|
||||
.. Choose the appropriate example for your CR from the `out/argocd/example/clusterinstance/` folder.
|
||||
The folder includes example files for single node, three-node, and standard clusters:
|
||||
+
|
||||
*** `example-sno.yaml`
|
||||
@@ -67,7 +72,7 @@ The folder includes example files for single node, three-node, and standard clus
|
||||
|
||||
.. Change the cluster and host details in the example file to match the type of cluster you want. For example:
|
||||
+
|
||||
.Example {sno} SiteConfig CR
|
||||
.Example {sno} ClusterInstance CR
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_example-sno.yaml[]
|
||||
@@ -80,14 +85,13 @@ For more information about BMC addressing, see the "Additional resources" sectio
|
||||
+
|
||||
[NOTE]
|
||||
====
|
||||
To override the default `BareMetalHost` CR for a node, you can reference the override CR in the node-level `crTemplates` field in the `SiteConfig` CR. Ensure that you set the `argocd.argoproj.io/sync-wave: "3"` annotation in your override `BareMetalHost` CR.
|
||||
To override the default `BareMetalHost` CR for a node, create a custom node template in a `ConfigMap` and reference it in the node-level `spec.nodes.templateRefs` field in the `ClusterInstance` CR. Ensure that you set the `argocd.argoproj.io/sync-wave: "3"` annotation in your override `BareMetalHost` CR. A minimal sketch of such a node-level template reference follows this note.
|
||||
====
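+
The following is a minimal sketch of a node-level template reference. The `ConfigMap` name is hypothetical, and the `ConfigMap` must contain your override `BareMetalHost` template:
+
[source,yaml]
----
spec:
  nodes:
  - hostName: "example-node1.example.com"
    # ...
    templateRefs:
    - name: custom-bmh-template-cm
      namespace: "example-sno"
----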
|
||||
|
||||
.. You can inspect the default set of extra-manifest `MachineConfig` CRs in `out/argocd/extra-manifest`. It is automatically applied to the cluster when it is installed.
|
||||
|
||||
.. Optional: To provision additional install-time manifests on the provisioned cluster, create a directory in your Git repository, for example, `sno-extra-manifest/`, and add your custom manifest CRs to this directory. If your `SiteConfig.yaml` refers to this directory in the `extraManifestPath` field, any CRs in this referenced directory are appended to the default set of extra manifests.
|
||||
.. Optional: To provision additional install-time manifests on the provisioned cluster, package your extra manifest CRs in a `ConfigMap` and reference it in the `extraManifestsRefs` field of the `ClusterInstance` CR. For more information, see "Customizing extra installation manifests in the {ztp} pipeline".
|
||||
+
|
||||
.Enabling the crun OCI container runtime
|
||||
[IMPORTANT]
|
||||
====
|
||||
For optimal cluster performance, enable crun for master and worker nodes in {sno}, {sno} with additional worker nodes, {3no}, and standard clusters.
|
||||
@@ -95,12 +99,11 @@ For optimal cluster performance, enable crun for master and worker nodes in {sno
|
||||
Enable crun in a `ContainerRuntimeConfig` CR as an additional Day 0 install-time manifest to avoid the cluster having to reboot.
|
||||
|
||||
The `enable-crun-master.yaml` and `enable-crun-worker.yaml` CR files are in the `out/source-crs/optional-extra-manifest/` folder that you can extract from the `ztp-site-generate` container.
|
||||
For more information, see "Customizing extra installation manifests in the {ztp} pipeline".
|
||||
====
|
||||
|
||||
. Add the `SiteConfig` CR to the `kustomization.yaml` file in the `generators` section, similar to the example shown in `out/argocd/example/siteconfig/kustomization.yaml`.
|
||||
. Add the `ClusterInstance` CR to the `kustomization.yaml` file in the `generators` section, similar to the example shown in `out/argocd/example/clusterinstance/kustomization.yaml`.
|
||||
|
||||
. Commit the `SiteConfig` CR and associated `kustomization.yaml` changes in your Git repository and push the changes.
|
||||
. Commit the `ClusterInstance` CR and associated `kustomization.yaml` changes in your Git repository and push the changes.
|
||||
+
|
||||
The ArgoCD pipeline detects the changes and begins the managed cluster deployment.
|
||||
|
||||
|
||||
@@ -6,112 +6,136 @@
|
||||
[id="ztp-deploying-user-defined-firmware-configuration-with-gitops-ztp_{context}"]
|
||||
= Deploying user-defined firmware to cluster hosts with {ztp}
|
||||
|
||||
You can deploy user-defined firmware settings to cluster hosts by configuring the `SiteConfig` custom resource (CR) to include a hardware profile that you want to apply during cluster host provisioning.
|
||||
You can deploy user-defined firmware settings to cluster hosts by creating custom node templates that include `HostFirmwareSettings` CRs, and referencing them in the `ClusterInstance` CR.
|
||||
You can configure hardware profiles to apply to hosts in the following scenarios:
|
||||
|
||||
* All hosts site-wide
|
||||
* Only cluster hosts that meet certain criteria
|
||||
* Individual cluster hosts
|
||||
* All hosts in the cluster
|
||||
* Individual hosts in the cluster
|
||||
|
||||
[IMPORTANT]
|
||||
====
|
||||
You can configure host hardware profiles to be applied in a hierarchy.
|
||||
Cluster-level settings override site-wide settings.
|
||||
Node level profiles override cluster and site-wide settings.
|
||||
Node-level profiles override cluster-wide settings.
|
||||
====
|
||||
|
||||
.Prerequisites
|
||||
|
||||
* You have installed the OpenShift CLI (`oc`).
|
||||
|
||||
* You have installed {rh-rhacm-first} and logged in to the hub cluster as a user with `cluster-admin` privileges.
|
||||
* You have installed {rh-rhacm-first} version 2.12 or later and logged in to the hub cluster as a user with `cluster-admin` privileges.
|
||||
|
||||
* You have provisioned a cluster that is managed by {rh-rhacm}.
|
||||
* You have installed the SiteConfig Operator in the hub cluster.
|
||||
|
||||
* You created a Git repository where you manage your custom site configuration data.
|
||||
The repository must be accessible from the hub cluster and be defined as a source repository for the Argo CD application.
|
||||
|
||||
.Procedure
|
||||
|
||||
. Create the host firmware profile that contain the firmware settings you want to apply.
|
||||
. Create the `HostFirmwareSettings` CR that contains the firmware settings you want to apply.
|
||||
For example, create the following YAML file:
|
||||
+
|
||||
.host-firmware.profile
|
||||
.host-firmware-settings.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
BootMode: Uefi
|
||||
LogicalProc: Enabled
|
||||
ProcVirtualization: Enabled
|
||||
apiVersion: metal3.io/v1alpha1
|
||||
kind: HostFirmwareSettings
|
||||
metadata:
|
||||
name: "site1-sno-du"
|
||||
namespace: "site1-sno-du"
|
||||
spec:
|
||||
settings:
|
||||
BootMode: "Uefi"
|
||||
LogicalProc: "Enabled"
|
||||
ProcVirtualization: "Enabled"
|
||||
----
|
||||
|
||||
. Save the hardware profile YAML file relative to the `kustomization.yaml` file that you use to define how to provision the cluster, for example:
|
||||
. Save the `HostFirmwareSettings` CR file relative to the `kustomization.yaml` file that you use to provision the cluster.
|
||||
For example:
|
||||
+
|
||||
[source,terminal]
|
||||
----
|
||||
example-ztp/install
|
||||
└── site-install
|
||||
├── siteconfig-example.yaml
|
||||
├── kustomization.yaml
|
||||
└── host-firmware.profile
|
||||
site-configs/
|
||||
└── site1-sno-du/
|
||||
├── clusterinstance-site1-sno-du.yaml
|
||||
├── kustomization.yaml
|
||||
└── host-firmware-settings.yaml
|
||||
----
|
||||
|
||||
. Edit the `SiteConfig` CR to include the firmware profile that you want to apply in the cluster.
|
||||
. Create a `ConfigMap` to store the `HostFirmwareSettings` CR.
|
||||
You can use a `kustomization.yaml` file with `configMapGenerator` to create the `ConfigMap`.
|
||||
For example:
|
||||
+
|
||||
[source,yaml]
|
||||
----
|
||||
apiVersion: ran.openshift.io/v1
|
||||
kind: SiteConfig
|
||||
metadata:
|
||||
name: "site-plan-cluster"
|
||||
namespace: "example-cluster-namespace"
|
||||
spec:
|
||||
baseDomain: "example.com"
|
||||
# ...
|
||||
biosConfigRef:
|
||||
filePath: "./host-firmware.profile" <1>
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
resources:
|
||||
- clusterinstance-site1-sno-du.yaml
|
||||
configMapGenerator:
|
||||
- name: host-firmware-settings-cm
|
||||
namespace: site1-sno-du <1>
|
||||
files:
|
||||
- host-firmware-settings.yaml <2>
|
||||
generatorOptions:
|
||||
disableNameSuffixHash: true
|
||||
----
|
||||
<1> Applies the hardware profile to all cluster hosts site-wide
|
||||
<1> The namespace must match the `ClusterInstance` namespace.
|
||||
<2> The file that contains the `HostFirmwareSettings` CR.
|
||||
|
||||
. To apply a hardware profile to all hosts in the cluster, reference the `ConfigMap` in the `spec.templateRefs` field of your `ClusterInstance` CR.
|
||||
For example:
|
||||
+
|
||||
[source,yaml]
|
||||
----
|
||||
apiVersion: siteconfig.open-cluster-management.io/v1alpha1
|
||||
kind: ClusterInstance
|
||||
metadata:
|
||||
name: "site1-sno-du"
|
||||
namespace: "site1-sno-du"
|
||||
spec:
|
||||
clusterName: "site1-sno-du"
|
||||
# ...
|
||||
templateRefs:
|
||||
- name: host-firmware-settings-cm <1>
|
||||
namespace: site1-sno-du
|
||||
nodes:
|
||||
- hostName: "node1.example.com"
|
||||
# ...
|
||||
----
|
||||
<1> Applies the firmware profile to all hosts in the cluster.
|
||||
|
||||
. Optional: To apply a hardware profile to a specific host in the cluster, reference the `ConfigMap` in the `spec.nodes[].templateRefs` field.
|
||||
For example:
|
||||
+
|
||||
[source,yaml]
|
||||
----
|
||||
apiVersion: siteconfig.open-cluster-management.io/v1alpha1
|
||||
kind: ClusterInstance
|
||||
metadata:
|
||||
name: "site1-sno-du"
|
||||
namespace: "site1-sno-du"
|
||||
spec:
|
||||
clusterName: "site1-sno-du"
|
||||
# ...
|
||||
nodes:
|
||||
- hostName: "node1.example.com"
|
||||
# ...
|
||||
templateRefs:
|
||||
- name: host-firmware-node1-cm <1>
|
||||
namespace: site1-sno-du
|
||||
- hostName: "node2.example.com"
|
||||
# ...
|
||||
----
|
||||
<1> Applies the firmware profile only to the `node1.example.com` host.
|
||||
+
|
||||
[NOTE]
|
||||
====
|
||||
Where possible, use a single `SiteConfig` CR per cluster.
|
||||
Node-level `templateRefs` settings override cluster-level `templateRefs` settings.
|
||||
====
|
||||
|
||||
. Optional. To apply a hardware profile to hosts in a specific cluster, update `clusters.biosConfigRef.filePath` with the hardware profile that you want to apply.
|
||||
For example:
|
||||
. Commit the `ClusterInstance` CR, `ConfigMap`, and associated `kustomization.yaml` changes in your Git repository and push the changes.
|
||||
+
|
||||
[source,yaml]
|
||||
----
|
||||
clusters:
|
||||
- clusterName: "cluster-1"
|
||||
# ...
|
||||
biosConfigRef:
|
||||
filePath: "./host-firmware.profile" <1>
|
||||
----
|
||||
<1> Applies to all hosts in the `cluster-1` cluster
|
||||
|
||||
. Optional. To apply a hardware profile to a specific host in the cluster, update `clusters.nodes.biosConfigRef.filePath` with the hardware profile that you want to apply.
|
||||
For example:
|
||||
+
|
||||
[source,yaml]
|
||||
----
|
||||
clusters:
|
||||
- clusterName: "cluster-1"
|
||||
# ...
|
||||
nodes:
|
||||
- hostName: "compute-1.example.com"
|
||||
# ...
|
||||
bootMode: "UEFI"
|
||||
biosConfigRef:
|
||||
filePath: "./host-firmware.profile" <1>
|
||||
----
|
||||
<1> Applies the firmware profile to the `compute-1.example.com` host in the cluster
|
||||
|
||||
. Commit the `SiteConfig` CR and associated `kustomization.yaml` changes in your Git repository and push the changes.
|
||||
+
|
||||
The ArgoCD pipeline detects the changes and begins the managed cluster deployment.
|
||||
The Argo CD pipeline detects the changes and begins the managed cluster deployment.
|
||||
+
|
||||
[NOTE]
|
||||
====
|
||||
@@ -129,6 +153,8 @@ For example, run the following command:
|
||||
$ oc get hfs -n <managed_cluster_namespace> <managed_cluster_name> -o jsonpath='{.status.conditions[?(@.type=="Valid")].status}'
|
||||
----
|
||||
+
|
||||
** where `<managed_cluster_namespace>` is the namespace of the managed cluster and `<managed_cluster_name>` is the name of the managed cluster.
|
||||
+
|
||||
.Example output
|
||||
[source,terminal]
|
||||
----
|
||||
|
||||
@@ -8,21 +8,11 @@
|
||||
|
||||
Workload partitioning configures {product-title} services, cluster management workloads, and infrastructure pods to run on a reserved number of host CPUs.
|
||||
|
||||
To configure workload partitioning with {ztp-first}, you configure a `cpuPartitioningMode` field in the `SiteConfig` custom resource (CR) that you use to install the cluster and you apply a `PerformanceProfile` CR that configures the `isolated` and `reserved` CPUs on the host.
|
||||
To configure workload partitioning with {ztp-first}, you configure a `cpuPartitioningMode` field in the `ClusterInstance` custom resource (CR) that you use to install the cluster, and you apply a `PerformanceProfile` CR that configures the `isolated` and `reserved` CPUs on the host.
|
||||
|
||||
Configuring the `SiteConfig` CR enables workload partitioning at cluster installation time and applying the `PerformanceProfile` CR configures the specific allocation of CPUs to reserved and isolated sets.
|
||||
Configuring the `ClusterInstance` CR enables workload partitioning at cluster installation time, and applying the `PerformanceProfile` CR configures the specific allocation of CPUs to reserved and isolated sets.
|
||||
Both of these steps happen at different points during cluster provisioning.
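For example, the following is a minimal sketch of enabling workload partitioning in the `ClusterInstance` CR. The cluster name is illustrative, and `AllNodes` enables partitioning on every node in the cluster:

[source,yaml]
----
apiVersion: siteconfig.open-cluster-management.io/v1alpha1
kind: ClusterInstance
metadata:
  name: "example-sno"
  namespace: "example-sno"
spec:
  clusterName: "example-sno"
  cpuPartitioningMode: AllNodes
# ...
----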
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
Configuring workload partitioning by using the `cpuPartitioningMode` field in the `SiteConfig` CR is a Tech Preview feature in {product-title} 4.13.
|
||||
|
||||
Alternatively, you can specify cluster management CPU resources with the `cpuset` field of the `SiteConfig` custom resource (CR) and the `reserved` field of the group `PolicyGenerator` or `PolicyGentemplate` CR.
|
||||
The `{policy-gen-cr}` CR is the recommended approach.
|
||||
The {ztp} pipeline uses these values to populate the required fields in the workload partitioning `MachineConfig` CR (`cpuset`) and the `PerformanceProfile` CR (`reserved`) that configure the {sno} cluster.
|
||||
This method is a General Availability feature in {product-title} 4.14.
|
||||
====
|
||||
|
||||
The workload partitioning configuration pins the {product-title} infrastructure pods to the `reserved` CPU set.
|
||||
Platform services such as systemd, CRI-O, and kubelet run on the `reserved` CPU set.
|
||||
The `isolated` CPU sets are exclusively allocated to your container workloads.
|
||||
|
||||
@@ -1,88 +0,0 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * scalability_and_performance/ztp_far_edge/ztp-advanced-install-ztp.adoc
|
||||
|
||||
:_module-type: PROCEDURE
|
||||
[id="ztp-filtering-ai-crs-using-siteconfig_{context}"]
|
||||
= Filtering custom resources using SiteConfig filters
|
||||
|
||||
By using filters, you can easily customize `SiteConfig` custom resources (CRs) to include or exclude other CRs for use in the installation phase of the {ztp-first} pipeline.
|
||||
|
||||
You can specify an `inclusionDefault` value of `include` or `exclude` for the `SiteConfig` CR, along with a list of the specific `extraManifest` RAN CRs that you want to include or exclude. Setting `inclusionDefault` to `include` makes the {ztp} pipeline apply all the files in `/source-crs/extra-manifest` during installation. Setting `inclusionDefault` to `exclude` does the opposite.
|
||||
|
||||
You can exclude individual CRs from the `/source-crs/extra-manifest` folder that are otherwise included by default. The following example configures a custom {sno} `SiteConfig` CR to exclude the `/source-crs/extra-manifest/03-sctp-machine-config-worker.yaml` CR at installation time.
|
||||
|
||||
Some additional optional filtering scenarios are also described.
|
||||
|
||||
.Prerequisites
|
||||
|
||||
* You configured the hub cluster for generating the required installation and policy CRs.
|
||||
|
||||
* You created a Git repository where you manage your custom site configuration data. The repository must be accessible from the hub cluster and be defined as a source repository for the Argo CD application.
|
||||
|
||||
.Procedure
|
||||
|
||||
. To prevent the {ztp} pipeline from applying the `03-sctp-machine-config-worker.yaml` CR file, apply the following YAML in the `SiteConfig` CR:
|
||||
+
|
||||
[source,yaml,subs="attributes+"]
|
||||
----
|
||||
apiVersion: ran.openshift.io/v1
|
||||
kind: SiteConfig
|
||||
metadata:
|
||||
name: "site1-sno-du"
|
||||
namespace: "site1-sno-du"
|
||||
spec:
|
||||
baseDomain: "example.com"
|
||||
pullSecretRef:
|
||||
name: "assisted-deployment-pull-secret"
|
||||
clusterImageSetNameRef: "openshift-{product-version}"
|
||||
sshPublicKey: "<ssh_public_key>"
|
||||
clusters:
|
||||
- clusterName: "site1-sno-du"
|
||||
extraManifests:
|
||||
filter:
|
||||
exclude:
|
||||
- 03-sctp-machine-config-worker.yaml
|
||||
----
|
||||
+
|
||||
The {ztp} pipeline skips the `03-sctp-machine-config-worker.yaml` CR during installation. All other CRs in `/source-crs/extra-manifest` are applied.
|
||||
|
||||
. Save the `SiteConfig` CR and push the changes to the site configuration repository.
|
||||
+
|
||||
The {ztp} pipeline monitors and adjusts what CRs it applies based on the `SiteConfig` filter instructions.
|
||||
|
||||
. Optional: To prevent the {ztp} pipeline from applying all the `/source-crs/extra-manifest` CRs during cluster installation, apply the following YAML in the `SiteConfig` CR:
|
||||
+
|
||||
[source,yaml]
|
||||
----
|
||||
- clusterName: "site1-sno-du"
|
||||
extraManifests:
|
||||
filter:
|
||||
inclusionDefault: exclude
|
||||
----
|
||||
|
||||
. Optional: To exclude all the `/source-crs/extra-manifest` RAN CRs and instead include a custom CR file during installation, edit the custom `SiteConfig` CR to set the custom manifests folder and the `include` file, for example:
|
||||
+
|
||||
[source,yaml,subs="attributes+"]
|
||||
----
|
||||
clusters:
|
||||
- clusterName: "site1-sno-du"
|
||||
extraManifestPath: "<custom_manifest_folder>" <1>
|
||||
extraManifests:
|
||||
filter:
|
||||
inclusionDefault: exclude <2>
|
||||
include:
|
||||
- custom-sctp-machine-config-worker.yaml
|
||||
----
|
||||
<1> Replace `<custom_manifest_folder>` with the name of the folder that contains the custom installation CRs, for example, `user-custom-manifest/`.
|
||||
<2> Set `inclusionDefault` to `exclude` to prevent the {ztp} pipeline from applying the files in `/source-crs/extra-manifest` during installation.
|
||||
+
|
||||
The following example illustrates the custom folder structure:
|
||||
+
|
||||
[source,text]
|
||||
----
|
||||
siteconfig
|
||||
├── site1-sno-du.yaml
|
||||
└── user-custom-manifest
|
||||
└── custom-sctp-machine-config-worker.yaml
|
||||
----
|
||||
@@ -4,11 +4,9 @@
|
||||
|
||||
:_mod-docs-content-type: PROCEDURE
|
||||
[id="ztp-generating-install-and-config-crs-manually_{context}"]
|
||||
= Generating {ztp} installation and configuration CRs manually
|
||||
= Extracting reference and example CRs from the ztp-site-generate container
|
||||
|
||||
Use the `generator` entrypoint for the `ztp-site-generate` container to generate the site installation and configuration custom resource (CRs) for a cluster based on `SiteConfig` and `{policy-gen-cr}` CRs.
|
||||
|
||||
include::snippets/siteconfig-deprecation-notice.adoc[]
|
||||
Use the `ztp-site-generate` container to extract reference custom resources (CRs) and example `ClusterInstance` CRs to prepare for cluster installation and Day 2 configuration.
|
||||
|
||||
.Prerequisites
|
||||
|
||||
@@ -16,6 +14,8 @@ include::snippets/siteconfig-deprecation-notice.adoc[]
|
||||
|
||||
* You have logged in to the hub cluster as a user with `cluster-admin` privileges.
|
||||
|
||||
* You have installed `podman`.
|
||||
|
||||
.Procedure
|
||||
|
||||
. Create an output folder by running the following command:
|
||||
@@ -25,14 +25,21 @@ include::snippets/siteconfig-deprecation-notice.adoc[]
|
||||
$ mkdir -p ./out
|
||||
----
|
||||
|
||||
. Export the `argocd` directory from the `ztp-site-generate` container image:
|
||||
. Log in to the `registry.redhat.io` container registry with your credentials by running the following command:
|
||||
+
|
||||
[source,terminal]
|
||||
----
|
||||
$ podman login registry.redhat.io
|
||||
----
|
||||
|
||||
. Extract the reference and example CRs from the `ztp-site-generate` container image by running the following command:
|
||||
+
|
||||
[source,terminal,subs="attributes+"]
|
||||
----
|
||||
$ podman run --log-driver=none --rm registry.redhat.io/openshift4/ztp-site-generate-rhel8:v{product-version} extract /home/ztp --tar | tar x -C ./out
|
||||
----
|
||||
+
|
||||
The `./out` directory has the reference `{policy-gen-cr}` and `SiteConfig` CRs in the `out/argocd/example/` folder.
|
||||
The `./out` directory contains the reference `{policy-gen-cr}` and `ClusterInstance` CRs in the `out/argocd/example/` folder.
|
||||
+
|
||||
.Example output
|
||||
[source,terminal]
|
||||
@@ -44,25 +51,27 @@ out
|
||||
│ ├── {policy-prefix}common-ranGen.yaml
|
||||
│ ├── {policy-prefix}example-sno-site.yaml
|
||||
│ ├── {policy-prefix}group-du-sno-ranGen.yaml
|
||||
│ ├── {policy-prefix}group-du-sno-validator-ranGen.yaml
|
||||
│ ├── ...
|
||||
│ ├── kustomization.yaml
|
||||
│ └── ns.yaml
|
||||
└── siteconfig
|
||||
├── example-sno.yaml
|
||||
├── KlusterletAddonConfigOverride.yaml
|
||||
└── kustomization.yaml
|
||||
└── clusterinstance
|
||||
├── example-sno.yaml
|
||||
├── example-3node.yaml
|
||||
├── example-standard.yaml
|
||||
└── ...
|
||||
----
|
||||
|
||||
. Create an output folder for the site installation CRs:
|
||||
. Create a `ClusterInstance` CR for your cluster.
|
||||
+
|
||||
[source,terminal]
|
||||
----
|
||||
$ mkdir -p ./site-install
|
||||
----
|
||||
|
||||
. Modify the example `SiteConfig` CR for the cluster type that you want to install. Copy `example-sno.yaml` to `site-1-sno.yaml` and modify the CR to match the details of the site and bare-metal host that you want to install, for example:
|
||||
As a reference, use the example `ClusterInstance` CRs in the `out/argocd/example/clusterinstance/` folder that you previously extracted from the `ztp-site-generate` container. The folder includes example files for single node, three-node, and standard clusters:
|
||||
+
|
||||
* `example-sno.yaml`
|
||||
* `example-3node.yaml`
|
||||
* `example-standard.yaml`
|
||||
+
|
||||
Change the cluster and host details in the example file to match the type of cluster you want to install. For example:
|
||||
+
|
||||
.Example {sno} ClusterInstance CR
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_example-sno.yaml[]
|
||||
@@ -70,125 +79,23 @@ include::snippets/ztp_example-sno.yaml[]
|
||||
+
|
||||
[NOTE]
|
||||
====
|
||||
Once you have extracted reference CR configuration files from the `out/extra-manifest` directory of the `ztp-site-generate` container, you can use `extraManifests.searchPaths` to include the path to the git directory containing those files.
|
||||
This allows the {ztp} pipeline to apply those CR files during cluster installation.
|
||||
If you configure a `searchPaths` directory, the {ztp} pipeline does not fetch manifests from the `ztp-site-generate` container during site installation.
|
||||
Optional: To provision additional install-time manifests on the provisioned cluster, create the extra manifest CRs, package them in one or more `ConfigMap` resources, and apply the `ConfigMap` resources to the hub cluster. Then reference the `ConfigMap` resources in the `extraManifestsRefs` field of the `ClusterInstance` CR. For more information, see "Customizing extra installation manifests in the {ztp} pipeline". A minimal example of creating such a `ConfigMap` follows this note.
|
||||
====
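+
The following is a minimal sketch of creating such a `ConfigMap` directly on the hub cluster. The `ConfigMap` name, namespace, and file names are illustrative:
+
[source,terminal]
----
$ oc create configmap extra-manifests-cm -n example-sno \
  --from-file=enable-crun-master.yaml \
  --from-file=enable-crun-worker.yaml
----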
|
||||
|
||||
. Generate the Day 0 installation CRs by processing the modified `SiteConfig` CR `site-1-sno.yaml` by running the following command:
|
||||
+
|
||||
[source,terminal,subs="attributes+"]
|
||||
----
|
||||
$ podman run -it --rm -v `pwd`/out/argocd/example/siteconfig:/resources:Z -v `pwd`/site-install:/output:Z,U registry.redhat.io/openshift4/ztp-site-generate-rhel8:v{product-version} generator install site-1-sno.yaml /output
|
||||
----
|
||||
+
|
||||
.Example output
|
||||
[source,terminal]
|
||||
----
|
||||
site-install
|
||||
└── site-1-sno
|
||||
├── site-1_agentclusterinstall_example-sno.yaml
|
||||
├── site-1-sno_baremetalhost_example-node1.example.com.yaml
|
||||
├── site-1-sno_clusterdeployment_example-sno.yaml
|
||||
├── site-1-sno_configmap_example-sno.yaml
|
||||
├── site-1-sno_infraenv_example-sno.yaml
|
||||
├── site-1-sno_klusterletaddonconfig_example-sno.yaml
|
||||
├── site-1-sno_machineconfig_02-master-workload-partitioning.yaml
|
||||
├── site-1-sno_machineconfig_predefined-extra-manifests-master.yaml
|
||||
├── site-1-sno_machineconfig_predefined-extra-manifests-worker.yaml
|
||||
├── site-1-sno_managedcluster_example-sno.yaml
|
||||
├── site-1-sno_namespace_example-sno.yaml
|
||||
└── site-1-sno_nmstateconfig_example-node1.example.com.yaml
|
||||
----
|
||||
. Optional: Generate Day 2 configuration CRs from the reference `{policy-gen-cr}` CRs:
|
||||
|
||||
. Optional: Generate just the Day 0 `MachineConfig` installation CRs for a particular cluster type by processing the reference `SiteConfig` CR with the `-E` option. For example, run the following commands:
|
||||
|
||||
.. Create an output folder for the `MachineConfig` CRs:
|
||||
+
|
||||
[source,terminal]
|
||||
----
|
||||
$ mkdir -p ./site-machineconfig
|
||||
----
|
||||
|
||||
.. Generate the `MachineConfig` installation CRs:
|
||||
+
|
||||
[source,terminal,subs="attributes+"]
|
||||
----
|
||||
$ podman run -it --rm -v `pwd`/out/argocd/example/siteconfig:/resources:Z -v `pwd`/site-machineconfig:/output:Z,U registry.redhat.io/openshift4/ztp-site-generate-rhel8:v{product-version} generator install -E site-1-sno.yaml /output
|
||||
----
|
||||
+
|
||||
.Example output
|
||||
[source,terminal]
|
||||
----
|
||||
site-machineconfig
|
||||
└── site-1-sno
|
||||
├── site-1-sno_machineconfig_02-master-workload-partitioning.yaml
|
||||
├── site-1-sno_machineconfig_predefined-extra-manifests-master.yaml
|
||||
└── site-1-sno_machineconfig_predefined-extra-manifests-worker.yaml
|
||||
----
|
||||
|
||||
. Generate and export the Day 2 configuration CRs using the reference `{policy-gen-cr}` CRs from the previous step. Run the following commands:
|
||||
|
||||
.. Create an output folder for the Day 2 CRs:
|
||||
.. Create an output folder for the configuration CRs by running the following command:
|
||||
+
|
||||
[source,terminal]
|
||||
----
|
||||
$ mkdir -p ./ref
|
||||
----
|
||||
|
||||
.. Generate and export the Day 2 configuration CRs:
|
||||
.. Generate the configuration CRs by running the following command:
|
||||
+
|
||||
[source,terminal,subs="attributes+"]
|
||||
----
|
||||
$ podman run -it --rm -v `pwd`/out/argocd/example/acmpolicygenerator:/resources:Z -v `pwd`/ref:/output:Z,U registry.redhat.io/openshift4/ztp-site-generate-rhel8:v{product-version} generator config -N . /output
|
||||
$ podman run -it --rm -v `pwd`/out/argocd/example/policygentemplates:/resources:Z -v `pwd`/ref:/output:Z,U registry.redhat.io/openshift4/ztp-site-generate-rhel8:v{product-version} generator config -N . /output
|
||||
----
|
||||
+
|
||||
The command generates example group and site-specific `{policy-gen-cr}` CRs for {sno}, three-node clusters, and standard clusters in the `./ref` folder.
|
||||
+
|
||||
.Example output
|
||||
[source,terminal]
|
||||
----
|
||||
ref
|
||||
└── customResource
|
||||
├── common
|
||||
├── example-multinode-site
|
||||
├── example-sno
|
||||
├── group-du-3node
|
||||
├── group-du-3node-validator
|
||||
│ └── Multiple-validatorCRs
|
||||
├── group-du-sno
|
||||
├── group-du-sno-validator
|
||||
├── group-du-standard
|
||||
└── group-du-standard-validator
|
||||
└── Multiple-validatorCRs
|
||||
----
|
||||
|
||||
. Use the generated CRs as the basis for the CRs that you use to install the cluster. You apply the installation CRs to the hub cluster as described in "Installing a single managed cluster". The configuration CRs can be applied to the cluster after cluster installation is complete.
|
||||
|
||||
.Verification
|
||||
|
||||
* Verify that the custom roles and labels are applied after the node is deployed:
|
||||
+
|
||||
[source,terminal]
|
||||
----
|
||||
$ oc describe node example-node.example.com
|
||||
----
|
||||
|
||||
.Example output
|
||||
[source,terminal]
|
||||
----
|
||||
Name: example-node.example.com
|
||||
Roles: control-plane,example-label,master,worker
|
||||
Labels: beta.kubernetes.io/arch=amd64
|
||||
beta.kubernetes.io/os=linux
|
||||
custom-label/parameter1=true
|
||||
kubernetes.io/arch=amd64
|
||||
kubernetes.io/hostname=cnfdf03.telco5gran.eng.rdu2.redhat.com
|
||||
kubernetes.io/os=linux
|
||||
node-role.kubernetes.io/control-plane=
|
||||
node-role.kubernetes.io/example-label= <1>
|
||||
node-role.kubernetes.io/master=
|
||||
node-role.kubernetes.io/worker=
|
||||
node.openshift.io/os_id=rhcos
|
||||
----
|
||||
<1> The custom label is applied to the node.
|
||||
The command generates example group and cluster-specific configuration CRs in the `./ref` folder. You can apply these CRs to the cluster after installation is complete.
|
||||
|
||||
@@ -110,17 +110,20 @@ ztp-group.example-group-ibu-rollback-stage-policy inform NonC
|
||||
----
|
||||
--
|
||||
|
||||
. Update the `du-profile` cluster label to the target platform version or the corresponding policy-binding label in the `SiteConfig` CR.
|
||||
. Update the `du-profile` cluster label to the target platform version or the corresponding policy-binding label in the `ClusterInstance` CR.
|
||||
+
|
||||
--
|
||||
[source,yaml]
|
||||
----
|
||||
apiVersion: ran.openshift.io/v1
|
||||
kind: SiteConfig
|
||||
[...]
|
||||
apiVersion: siteconfig.open-cluster-management.io/v1alpha1
|
||||
kind: ClusterInstance
|
||||
metadata:
|
||||
name: "example-sno"
|
||||
namespace: "example-sno"
|
||||
spec:
|
||||
[...]
|
||||
clusterLabels:
|
||||
# ...
|
||||
extraLabels:
|
||||
ManagedCluster:
|
||||
du-profile: "4.15.0"
|
||||
----
|
||||
|
||||
@@ -130,7 +133,7 @@ Updating the labels to the target platform version unbinds the existing set of p
|
||||
====
|
||||
--
|
||||
|
||||
. Commit and push the updated `SiteConfig` CR to the Git repository.
|
||||
. Commit and push the updated `ClusterInstance` CR to the Git repository.
|
||||
|
||||
. When you are ready to move to the `Prep` stage, create the `ClusterGroupUpgrade` CR on the target hub cluster with the `Prep` and {oadp-short} `ConfigMap` policies:
|
||||
+
|
||||
|
||||
@@ -13,16 +13,19 @@ If you encounter an issue after upgrade, you can start a manual rollback.
|
||||
|
||||
.Procedure
|
||||
|
||||
. Revert the `du-profile` or the corresponding policy-binding label to the original platform version in the `SiteConfig` CR:
|
||||
. Revert the `du-profile` or the corresponding policy-binding label to the original platform version in the `ClusterInstance` CR:
|
||||
+
|
||||
[source,yaml]
|
||||
----
|
||||
apiVersion: ran.openshift.io/v1
|
||||
kind: SiteConfig
|
||||
[...]
|
||||
apiVersion: siteconfig.open-cluster-management.io/v1alpha1
|
||||
kind: ClusterInstance
|
||||
metadata:
|
||||
name: "example-sno"
|
||||
namespace: "example-sno"
|
||||
spec:
|
||||
[...]
|
||||
clusterLabels:
|
||||
# ...
|
||||
extraLabels:
|
||||
ManagedCluster:
|
||||
du-profile: "4.14.x"
|
||||
----
|
||||
|
||||
|
||||
@@ -61,17 +61,21 @@ $ butane storage.bu
|
||||
----
|
||||
--
|
||||
|
||||
. Copy the output into the `.spec.clusters.nodes.ignitionConfigOverride` field in the `SiteConfig` CR:
|
||||
. Copy the output into the `spec.nodes[].ignitionConfigOverride` field in the `ClusterInstance` CR:
|
||||
+
|
||||
[source,yaml]
|
||||
----
|
||||
[...]
|
||||
apiVersion: siteconfig.open-cluster-management.io/v1alpha1
|
||||
kind: ClusterInstance
|
||||
metadata:
|
||||
name: "example-sno"
|
||||
namespace: "example-sno"
|
||||
spec:
|
||||
clusters:
|
||||
- nodes:
|
||||
- hostName: <name>
|
||||
# ...
|
||||
nodes:
|
||||
- hostName: "node1.example.com"
|
||||
role: "master"
|
||||
ignitionConfigOverride: '{"ignition":{"version":"3.2.0"},"storage":{"disks":[{"device":"/dev/disk/by-path/pci-0000:00:17.0-ata-1.0","partitions":[{"label":"var-lib-containers","sizeMiB":0,"startMiB":250000}],"wipeTable":false}],"filesystems":[{"device":"/dev/disk/by-partlabel/var-lib-containers","format":"xfs","mountOptions":["defaults","prjquota"],"path":"/var/lib/containers","wipeFilesystem":true}]},"systemd":{"units":[{"contents":"# Generated by Butane\n[Unit]\nRequires=systemd-fsck@dev-disk-by\\x2dpartlabel-var\\x2dlib\\x2dcontainers.service\nAfter=systemd-fsck@dev-disk-by\\x2dpartlabel-var\\x2dlib\\x2dcontainers.service\n\n[Mount]\nWhere=/var/lib/containers\nWhat=/dev/disk/by-partlabel/var-lib-containers\nType=xfs\nOptions=defaults,prjquota\n\n[Install]\nRequiredBy=local-fs.target","enabled":true,"name":"var-lib-containers.mount"}]}}'
|
||||
[...]
|
||||
----
|
||||
|
||||
.Verification
|
||||
|
||||
@@ -6,14 +6,14 @@
|
||||
[id="ztp-installation-crs_{context}"]
|
||||
= {rh-rhacm} generated cluster installation CRs reference
|
||||
|
||||
{rh-rhacm-first} supports deploying {product-title} on single-node clusters, three-node clusters, and standard clusters with a specific set of installation custom resources (CRs) that you generate using `SiteConfig` CRs for each site.
|
||||
{rh-rhacm-first} supports deploying {product-title} on single-node clusters, three-node clusters, and standard clusters with a specific set of installation custom resources (CRs) that you generate using `ClusterInstance` CRs for each cluster.
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
Every managed cluster has its own namespace, and all of the installation CRs except for `ManagedCluster` and `ClusterImageSet` are under that namespace. `ManagedCluster` and `ClusterImageSet` are cluster-scoped, not namespace-scoped. The namespace and the CR names match the cluster name.
|
||||
====
|
||||
|
||||
The following table lists the installation CRs that are automatically applied by the {rh-rhacm} assisted service when it installs clusters using the `SiteConfig` CRs that you configure.
|
||||
The following table lists the installation CRs that are automatically applied by the {rh-rhacm} assisted service when it installs clusters using the `ClusterInstance` CRs that you configure.
|
||||
|
||||
.Cluster installation CRs generated by {rh-rhacm}
|
||||
[cols="1,3,3", options="header"]
|
||||
|
||||
@@ -14,6 +14,8 @@ You can manually deploy a single managed cluster using the assisted service and
|
||||
|
||||
* You have logged in to the hub cluster as a user with `cluster-admin` privileges.
|
||||
|
||||
* You have extracted the reference and example CRs from the `ztp-site-generate` container and you configured the `ClusterInstance` CR.
|
||||
|
||||
* You have created the baseboard management controller (BMC) `Secret` and the image pull-secret `Secret` custom resources (CRs). See "Creating the managed bare-metal host secrets" for details.
|
||||
|
||||
* Your target bare-metal host meets the networking and hardware requirements for managed clusters.
|
||||
@@ -61,9 +63,11 @@ metadata:
|
||||
$ oc apply -f cluster-namespace.yaml
|
||||
----
|
||||
|
||||
. Apply the generated day-0 CRs that you extracted from the `ztp-site-generate` container and customized to meet your requirements:
|
||||
. Apply the `ClusterInstance` CR that you configured to the hub cluster by running the following command:
|
||||
+
|
||||
[source,terminal]
|
||||
----
|
||||
$ oc apply -R ./site-install/site-sno-1
|
||||
$ oc apply -f clusterinstance.yaml
|
||||
----
|
||||
+
|
||||
The SiteConfig Operator processes the `ClusterInstance` CR and automatically generates the required installation CRs, including `BareMetalHost`, `AgentClusterInstall`, `ClusterDeployment`, `InfraEnv`, and `NMStateConfig`. The assisted service then begins the cluster installation.
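+
For example, you can inspect the `ClusterInstance` status conditions to follow the rendering progress. The placeholders are illustrative, and the exact condition names can vary:
+
[source,terminal]
----
$ oc get clusterinstance <cluster_name> -n <cluster_namespace> -o yaml
----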
|
||||
@@ -133,24 +133,17 @@ $ podman run -v "${PWD}":/resources:Z,U -it registry.redhat.io/openshift4/ztp-si
|
||||
----
|
||||
Successfully read SiteConfig: sno1/sno1
|
||||
Converted cluster 1 (sno1) to ClusterInstance: /resources/output/sno1.yaml
|
||||
WARNING: extraManifests field is not supported in ClusterInstance and will be ignored. Create one or more configmaps with the exact desired set of CRs for the cluster and include them in the extraManifestsRefs.
|
||||
WARNING: Added default extraManifest ConfigMap 'extra-manifests-cm' to extraManifestsRefs. This configmap is created automatically.
|
||||
Successfully converted 1 cluster(s) to ClusterInstance files in /resources/output: sno1.yaml
|
||||
Generating ConfigMap kustomization files...
|
||||
Using ConfigMap name: extra-manifests-cm, namespace: sno1, manifests directory: extra-manifests
|
||||
Generating ConfigMap kustomization files with name: extra-manifests-cm, namespace: sno1, manifests directory: extra-manifests
|
||||
Generating extraManifests for SiteConfig: /resources/sno1.yaml
|
||||
Using absolute path for input file: /resources/sno1.yaml
|
||||
Running siteconfig-generator from directory: /resources
|
||||
Found extraManifests directory: /resources/output/extra-manifests/sno1
|
||||
Moved sno1_containerruntimeconfig_enable-crun-master.yaml to /resources/output/extra-manifests/sno1_containerruntimeconfig_enable-crun-master.yaml
|
||||
Moved sno1_containerruntimeconfig_enable-crun-worker.yaml to /resources/output/extra-manifests/sno1_containerruntimeconfig_enable-crun-worker.yaml
|
||||
Moved 2 extraManifest files from /resources/output/extra-manifests/sno1 to /resources/output/extra-manifests
|
||||
Removed directory: /resources/output/extra-manifests/sno1
|
||||
Successfully generated extra manifests in /resources/output/extra-manifests
|
||||
--- Kustomization.yaml Generator ---
|
||||
Scanning directory: /resources/output/extra-manifests
|
||||
Found and adding: extra-manifests/sno1_containerruntimeconfig_enable-crun-master.yaml
|
||||
Found and adding: extra-manifests/sno1_containerruntimeconfig_enable-crun-worker.yaml
|
||||
Found and adding: extra-manifests/enable-crun-master.yaml
|
||||
Found and adding: extra-manifests/enable-crun-worker.yaml
|
||||
------------------------------------
|
||||
kustomization-configMapGenerator-snippet.yaml generated successfully at: /resources/output/kustomization-configMapGenerator-snippet.yaml
|
||||
Content:
|
||||
@@ -158,8 +151,8 @@ apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
configMapGenerator:
|
||||
- files:
|
||||
- extra-manifests/sno1_containerruntimeconfig_enable-crun-master.yaml
|
||||
- extra-manifests/sno1_containerruntimeconfig_enable-crun-worker.yaml
|
||||
- extra-manifests/enable-crun-master.yaml
|
||||
- extra-manifests/enable-crun-worker.yaml
|
||||
name: extra-manifests-cm
|
||||
namespace: sno1
|
||||
generatorOptions:
|
||||
@@ -251,5 +244,5 @@ site-configs-v2/
|
||||
└── kustomization.yaml
|
||||
----
|
||||
<1> The `ClusterInstance` CR for the `sno1` cluster.
|
||||
<2> The tool automatically generates the extra manifests referenced by the `ClusterInstance` CR. After generation, the file names might change. You can rename the files to match the original naming convention in the associated `kustomization.yaml` file.
|
||||
<2> The tool automatically generates the extra manifests referenced by the `ClusterInstance` CR.
|
||||
<3> The tool generates a `kustomization.yaml` file snippet to create the `ConfigMap` resources that specify the extra manifests. You can merge the generated `kustomization` snippet with your original `kustomization.yaml` file.
|
||||
@@ -6,7 +6,7 @@
|
||||
[id="ztp-monitoring-deployment-progress_{context}"]
|
||||
= Monitoring managed cluster installation progress
|
||||
|
||||
The ArgoCD pipeline uses the `SiteConfig` CR to generate the cluster configuration CRs and syncs it with the hub cluster. You can monitor the progress of the synchronization in the ArgoCD dashboard.
|
||||
The Argo CD pipeline syncs the `ClusterInstance` CR from the Git repository to the hub cluster. The SiteConfig Operator then processes the `ClusterInstance` CR and generates the required cluster configuration CRs. You can monitor the progress of the cluster installation from the {rh-rhacm} dashboard or from the command line.
|
||||
|
||||
.Prerequisites
|
||||
|
||||
|
||||
@@ -12,7 +12,7 @@ Consider the following best practices when customizing site configuration `{poli
|
||||
|
||||
* In disconnected environments, use a single catalog source for all Operators by configuring the registry as a single index containing all Operators. Each additional `CatalogSource` CR on the managed clusters increases CPU usage.
|
||||
|
||||
* `MachineConfig` CRs should be included as `extraManifests` in the `SiteConfig` CR so that they are applied during installation. This can reduce the overall time taken until the cluster is ready to deploy applications.
|
||||
* Reduce the overall time taken until the cluster is ready to deploy applications by including `MachineConfig` CRs as extra manifests in the installation. To do this, package the `MachineConfig` CRs in one or more `ConfigMap` resources and reference the `ConfigMap` resources in the `extraManifestsRefs` field of the `ClusterInstance` CR.
|
||||
|
||||
* `{policy-gen-cr}` CRs should override the channel field to explicitly identify the desired version. This ensures that changes in the source CR during upgrades do not update the generated subscription.
|
||||
|
||||
|
||||
@@ -6,162 +6,167 @@
|
||||
[id="ztp-pre-caching-config-con_{context}"]
|
||||
= Pre-caching images in {ztp}
|
||||
|
||||
The `SiteConfig` manifest defines how an OpenShift cluster is to be installed and configured.
|
||||
In the {ztp-first} provisioning workflow, the {factory-prestaging-tool} requires the following additional fields in the `SiteConfig` manifest:
|
||||
The `ClusterInstance` manifest defines the installation and configuration parameters for an {product-title} cluster.
|
||||
In the {ztp-first} provisioning workflow, the {factory-prestaging-tool} uses the following fields in the `ClusterInstance` manifest to load the pre-cached images:
|
||||
|
||||
* `clusters.ignitionConfigOverride`
|
||||
* `nodes.installerArgs`
|
||||
* `nodes.ignitionConfigOverride`
|
||||
* `spec.ignitionConfigOverride`
|
||||
* `spec.nodes[].ignitionConfigOverride`
|
||||
* `spec.nodes[].installerArgs`
|
||||
|
||||
include::snippets/siteconfig-deprecation-notice.adoc[]
|
||||
|
||||
.Example SiteConfig with additional fields
|
||||
.Example ClusterInstance with pre-caching fields
|
||||
[source,yaml]
|
||||
----
|
||||
apiVersion: ran.openshift.io/v1
|
||||
kind: SiteConfig
|
||||
apiVersion: siteconfig.open-cluster-management.io/v1alpha1
|
||||
kind: ClusterInstance
|
||||
metadata:
|
||||
name: "example-5g-lab"
|
||||
namespace: "example-5g-lab"
|
||||
name: "sno-worker-0"
|
||||
namespace: "sno-worker-0"
|
||||
spec:
|
||||
baseDomain: "example.domain.redhat.com"
|
||||
pullSecretRef:
|
||||
name: "assisted-deployment-pull-secret"
|
||||
clusterImageSetNameRef: "img4.9.10-x86-64-appsub" <1>
|
||||
clusterImageSetNameRef: "openshift-4.21" <1>
|
||||
sshPublicKey: "ssh-rsa ..."
|
||||
clusters:
|
||||
- clusterName: "sno-worker-0"
|
||||
clusterImageSetNameRef: "eko4-img4.11.5-x86-64-appsub" <2>
|
||||
clusterLabels:
|
||||
clusterName: "sno-worker-0"
|
||||
extraLabels:
|
||||
ManagedCluster:
|
||||
group-du-sno: ""
|
||||
common-411: true
|
||||
sites : "example-5g-lab"
|
||||
common-411: "true"
|
||||
sites: "example-5g-lab"
|
||||
vendor: "OpenShift"
|
||||
clusterNetwork:
|
||||
- cidr: 10.128.0.0/14
|
||||
hostPrefix: 23
|
||||
machineNetwork:
|
||||
- cidr: 10.19.32.192/26
|
||||
serviceNetwork:
|
||||
- 172.30.0.0/16
|
||||
networkType: "OVNKubernetes"
|
||||
additionalNTPSources:
|
||||
- clock.corp.redhat.com
|
||||
ignitionConfigOverride:
|
||||
'{
|
||||
"ignition": {
|
||||
"version": "3.1.0"
|
||||
},
|
||||
"systemd": {
|
||||
"units": [
|
||||
{
|
||||
"name": "var-mnt.mount",
|
||||
"enabled": true,
|
||||
"contents": "[Unit]\nDescription=Mount partition with artifacts\nBefore=precache-images.service\nBindsTo=precache-images.service\nStopWhenUnneeded=true\n\n[Mount]\nWhat=/dev/disk/by-partlabel/data\nWhere=/var/mnt\nType=xfs\nTimeoutSec=30\n\n[Install]\nRequiredBy=precache-images.service"
|
||||
clusterNetwork:
|
||||
- cidr: 10.128.0.0/14
|
||||
hostPrefix: 23
|
||||
machineNetwork:
|
||||
- cidr: 10.19.32.192/26
|
||||
serviceNetwork:
|
||||
- cidr: 172.30.0.0/16
|
||||
networkType: "OVNKubernetes"
|
||||
additionalNTPSources:
|
||||
- 1111:2222:3333:4444::2
|
||||
templateRefs:
|
||||
- name: ai-cluster-templates-v1
|
||||
namespace: siteconfig-system
|
||||
ignitionConfigOverride: | <2>
|
||||
{
|
||||
"ignition": {
|
||||
"version": "3.2.0"
|
||||
},
|
||||
"systemd": {
|
||||
"units": [
|
||||
{
|
||||
"name": "var-mnt.mount",
|
||||
"enabled": true,
|
||||
"contents": "[Unit]\nDescription=Mount partition with artifacts\nBefore=precache-images.service\nBindsTo=precache-images.service\nStopWhenUnneeded=true\n\n[Mount]\nWhat=/dev/disk/by-partlabel/data\nWhere=/var/mnt\nType=xfs\nTimeoutSec=30\n\n[Install]\nRequiredBy=precache-images.service"
|
||||
},
|
||||
{
|
||||
"name": "precache-images.service",
|
||||
"enabled": true,
|
||||
"contents": "[Unit]\nDescription=Extracts the precached images in discovery stage\nAfter=var-mnt.mount\nBefore=agent.service\n\n[Service]\nType=oneshot\nUser=root\nWorkingDirectory=/var/mnt\nExecStart=bash /usr/local/bin/extract-ai.sh\n\n[Install]\nWantedBy=multi-user.target default.target\nWantedBy=agent.service"
|
||||
}
|
||||
]
|
||||
},
|
||||
"storage": {
|
||||
"files": [
|
||||
{
|
||||
"overwrite": true,
|
||||
"path": "/usr/local/bin/extract-ai.sh",
|
||||
"mode": 755,
|
||||
"user": {
|
||||
"name": "root"
|
||||
},
|
||||
{
|
||||
"name": "precache-images.service",
|
||||
"enabled": true,
|
||||
"contents": "[Unit]\nDescription=Extracts the precached images in discovery stage\nAfter=var-mnt.mount\nBefore=agent.service\n\n[Service]\nType=oneshot\nUser=root\nWorkingDirectory=/var/mnt\nExecStart=bash /usr/local/bin/extract-ai.sh\n#TimeoutStopSec=30\n\n[Install]\nWantedBy=multi-user.target default.target\nWantedBy=agent.service"
|
||||
"contents": {
|
||||
"source": "data:,%23%21%2Fbin%2Fbash%0A%0AFOLDER%3D%22%24%7BFOLDER%3A-%24%28pwd%29%7D%22%0AOCP_RELEASE_LIST%3D%22%24%7BOCP_RELEASE_LIST%3A-ai-images.txt%7D%22%0ABINARY_FOLDER%3D%2Fvar%2Fmnt%0A%0Apushd%20%24FOLDER%0A%0Atotal_copies%3D%24%28sort%20-u%20%24BINARY_FOLDER%2F%24OCP_RELEASE_LIST%20%7C%20wc%20-l%29%20%20%23%20Required%20to%20keep%20track%20of%20the%20pull%20task%20vs%20total%0Acurrent_copy%3D1%0A%0Awhile%20read%20-r%20line%3B%0Ado%0A%20%20uri%3D%24%28echo%20%22%24line%22%20%7C%20awk%20%27%7Bprint%241%7D%27%29%0A%20%20%23tar%3D%24%28echo%20%22%24line%22%20%7C%20awk%20%27%7Bprint%242%7D%27%29%0A%20%20podman%20image%20exists%20%24uri%0A%20%20if%20%5B%5B%20%24%3F%20-eq%200%20%5D%5D%3B%20then%0A%20%20%20%20%20%20echo%20%22Skipping%20existing%20image%20%24tar%22%0A%20%20%20%20%20%20echo%20%22Copying%20%24%7Buri%7D%20%5B%24%7Bcurrent_copy%7D%2F%24%7Btotal_copies%7D%5D%22%0A%20%20%20%20%20%20current_copy%3D%24%28%28current_copy%20%2B%201%29%29%0A%20%20%20%20%20%20continue%0A%20%20fi%0A%20%20tar%3D%24%28echo%20%22%24uri%22%20%7C%20%20rev%20%7C%20cut%20-d%20%22%2F%22%20-f1%20%7C%20rev%20%7C%20tr%20%22%3A%22%20%22_%22%29%0A%20%20tar%20zxvf%20%24%7Btar%7D.tgz%0A%20%20if%20%5B%20%24%3F%20-eq%200%20%5D%3B%20then%20rm%20-f%20%24%7Btar%7D.gz%3B%20fi%0A%20%20echo%20%22Copying%20%24%7Buri%7D%20%5B%24%7Bcurrent_copy%7D%2F%24%7Btotal_copies%7D%5D%22%0A%20%20skopeo%20copy%20dir%3A%2F%2F%24%28pwd%29%2F%24%7Btar%7D%20containers-storage%3A%24%7Buri%7D%0A%20%20if%20%5B%20%24%3F%20-eq%200%20%5D%3B%20then%20rm%20-rf%20%24%7Btar%7D%3B%20current_copy%3D%24%28%28current_copy%20%2B%201%29%29%3B%20fi%0Adone%20%3C%20%24%7BBINARY_FOLDER%7D%2F%24%7BOCP_RELEASE_LIST%7D%0A%0A%23%20workaround%20while%20https%3A%2F%2Fgithub.com%2Fopenshift%2Fassisted-service%2Fpull%2F3546%0A%23cp%20%2Fvar%2Fmnt%2Fmodified-rhcos-4.10.3-x86_64-metal.x86_64.raw.gz%20%2Fvar%2Ftmp%2F.%0A%0Aexit%200"
|
||||
}
|
||||
]
|
||||
},
|
||||
"storage": {
|
||||
"files": [
|
||||
{
|
||||
"overwrite": true,
|
||||
"path": "/usr/local/bin/extract-ai.sh",
|
||||
"mode": 755,
|
||||
"user": {
|
||||
"name": "root"
|
||||
},
|
||||
"contents": {
|
||||
"source": "data:,%23%21%2Fbin%2Fbash%0A%0AFOLDER%3D%22%24%7BFOLDER%3A-%24%28pwd%29%7D%22%0AOCP_RELEASE_LIST%3D%22%24%7BOCP_RELEASE_LIST%3A-ai-images.txt%7D%22%0ABINARY_FOLDER%3D%2Fvar%2Fmnt%0A%0Apushd%20%24FOLDER%0A%0Atotal_copies%3D%24%28sort%20-u%20%24BINARY_FOLDER%2F%24OCP_RELEASE_LIST%20%7C%20wc%20-l%29%20%20%23%20Required%20to%20keep%20track%20of%20the%20pull%20task%20vs%20total%0Acurrent_copy%3D1%0A%0Awhile%20read%20-r%20line%3B%0Ado%0A%20%20uri%3D%24%28echo%20%22%24line%22%20%7C%20awk%20%27%7Bprint%241%7D%27%29%0A%20%20%23tar%3D%24%28echo%20%22%24line%22%20%7C%20awk%20%27%7Bprint%242%7D%27%29%0A%20%20podman%20image%20exists%20%24uri%0A%20%20if%20%5B%5B%20%24%3F%20-eq%200%20%5D%5D%3B%20then%0A%20%20%20%20%20%20echo%20%22Skipping%20existing%20image%20%24tar%22%0A%20%20%20%20%20%20echo%20%22Copying%20%24%7Buri%7D%20%5B%24%7Bcurrent_copy%7D%2F%24%7Btotal_copies%7D%5D%22%0A%20%20%20%20%20%20current_copy%3D%24%28%28current_copy%20%2B%201%29%29%0A%20%20%20%20%20%20continue%0A%20%20fi%0A%20%20tar%3D%24%28echo%20%22%24uri%22%20%7C%20%20rev%20%7C%20cut%20-d%20%22%2F%22%20-f1%20%7C%20rev%20%7C%20tr%20%22%3A%22%20%22_%22%29%0A%20%20tar%20zxvf%20%24%7Btar%7D.tgz%0A%20%20if%20%5B%20%24%3F%20-eq%200%20%5D%3B%20then%20rm%20-f%20%24%7Btar%7D.gz%3B%20fi%0A%20%20echo%20%22Copying%20%24%7Buri%7D%20%5B%24%7Bcurrent_copy%7D%2F%24%7Btotal_copies%7D%5D%22%0A%20%20skopeo%20copy%20dir%3A%2F%2F%24%28pwd%29%2F%24%7Btar%7D%20containers-storage%3A%24%7Buri%7D%0A%20%20if%20%5B%20%24%3F%20-eq%200%20%5D%3B%20then%20rm%20-rf%20%24%7Btar%7D%3B%20current_copy%3D%24%28%28current_copy%20%2B%201%29%29%3B%20fi%0Adone%20%3C%20%24%7BBINARY_FOLDER%7D%2F%24%7BOCP_RELEASE_LIST%7D%0A%0A%23%20workaround%20while%20https%3A%2F%2Fgithub.com%2Fopenshift%2Fassisted-service%2Fpull%2F3546%0A%23cp%20%2Fvar%2Fmnt%2Fmodified-rhcos-4.10.3-x86_64-metal.x86_64.raw.gz%20%2Fvar%2Ftmp%2F.%0A%0Aexit%200"
|
||||
}
|
||||
},
|
||||
{
|
||||
"overwrite": true,
|
||||
"path": "/usr/local/bin/agent-fix-bz1964591",
|
||||
"mode": 755,
|
||||
"user": {
|
||||
"name": "root"
|
||||
},
|
||||
{
|
||||
"overwrite": true,
|
||||
"path": "/usr/local/bin/agent-fix-bz1964591",
|
||||
"mode": 755,
|
||||
"user": {
|
||||
"name": "root"
|
||||
},
|
||||
"contents": {
|
||||
"source": "data:,%23%21%2Fusr%2Fbin%2Fsh%0A%0A%23%20This%20script%20is%20a%20workaround%20for%20bugzilla%201964591%20where%20symlinks%20inside%20%2Fvar%2Flib%2Fcontainers%2F%20get%0A%23%20corrupted%20under%20some%20circumstances.%0A%23%0A%23%20In%20order%20to%20let%20agent.service%20start%20correctly%20we%20are%20checking%20here%20whether%20the%20requested%0A%23%20container%20image%20exists%20and%20in%20case%20%22podman%20images%22%20returns%20an%20error%20we%20try%20removing%20the%20faulty%0A%23%20image.%0A%23%0A%23%20In%20such%20a%20scenario%20agent.service%20will%20detect%20the%20image%20is%20not%20present%20and%20pull%20it%20again.%20In%20case%0A%23%20the%20image%20is%20present%20and%20can%20be%20detected%20correctly%2C%20no%20any%20action%20is%20required.%0A%0AIMAGE%3D%24%28echo%20%241%20%7C%20sed%20%27s%2F%3A.%2A%2F%2F%27%29%0Apodman%20image%20exists%20%24IMAGE%20%7C%7C%20echo%20%22already%20loaded%22%20%7C%7C%20echo%20%22need%20to%20be%20pulled%22%0A%23podman%20images%20%7C%20grep%20%24IMAGE%20%7C%7C%20podman%20rmi%20--force%20%241%20%7C%7C%20true"
|
||||
}
|
||||
"contents": {
|
||||
"source": "data:,%23%21%2Fusr%2Fbin%2Fsh%0A%0A%23%20This%20script%20is%20a%20workaround%20for%20bugzilla%201964591%20where%20symlinks%20inside%20%2Fvar%2Flib%2Fcontainers%2F%20get%0A%23%20corrupted%20under%20some%20circumstances.%0A%23%0A%23%20In%20order%20to%20let%20agent.service%20start%20correctly%20we%20are%20checking%20here%20whether%20the%20requested%0A%23%20container%20image%20exists%20and%20in%20case%20%22podman%20images%22%20returns%20an%20error%20we%20try%20removing%20the%20faulty%0A%23%20image.%0A%23%0A%23%20In%20such%20a%20scenario%20agent.service%20will%20detect%20the%20image%20is%20not%20present%20and%20pull%20it%20again.%20In%20case%0A%23%20the%20image%20is%20present%20and%20can%20be%20detected%20correctly%2C%20no%20any%20action%20is%20required.%0A%0AIMAGE%3D%24%28echo%20%241%20%7C%20sed%20%27s%2F%3A.%2A%2F%2F%27%29%0Apodman%20image%20exists%20%24IMAGE%20%7C%7C%20echo%20%22already%20loaded%22%20%7C%7C%20echo%20%22need%20to%20be%20pulled%22%0A%23podman%20images%20%7C%20grep%20%24IMAGE%20%7C%7C%20podman%20rmi%20--force%20%241%20%7C%7C%20true"
|
||||
}
|
||||
]
|
||||
}
|
||||
}'
|
||||
nodes:
|
||||
- hostName: "snonode.sno-worker-0.example.domain.redhat.com"
|
||||
role: "master"
|
||||
bmcAddress: "idrac-virtualmedia+https://10.19.28.53/redfish/v1/Systems/System.Embedded.1"
|
||||
bmcCredentialsName:
|
||||
name: "worker0-bmh-secret"
|
||||
bootMACAddress: "e4:43:4b:bd:90:46"
|
||||
bootMode: "UEFI"
|
||||
rootDeviceHints:
|
||||
deviceName: /dev/disk/by-path/pci-0000:01:00.0-scsi-0:2:0:0
|
||||
installerArgs: '["--save-partlabel", "data"]'
|
||||
ignitionConfigOverride: |
|
||||
{
|
||||
"ignition": {
|
||||
"version": "3.1.0"
|
||||
},
|
||||
"systemd": {
|
||||
"units": [
|
||||
{
|
||||
"name": "var-mnt.mount",
|
||||
"enabled": true,
|
||||
"contents": "[Unit]\nDescription=Mount partition with artifacts\nBefore=precache-ocp-images.service\nBindsTo=precache-ocp-images.service\nStopWhenUnneeded=true\n\n[Mount]\nWhat=/dev/disk/by-partlabel/data\nWhere=/var/mnt\nType=xfs\nTimeoutSec=30\n\n[Install]\nRequiredBy=precache-ocp-images.service"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
nodes:
|
||||
- hostName: "snonode.sno-worker-0.example.domain.redhat.com"
|
||||
role: "master"
|
||||
bmcAddress: "idrac-virtualmedia+https://[1111:2222:3333:4444::bbbb:1]/redfish/v1/Systems/System.Embedded.1"
|
||||
bmcCredentialsName:
|
||||
name: "worker0-bmh-secret"
|
||||
bootMACAddress: "AA:BB:CC:DD:EE:11"
|
||||
bootMode: "UEFI"
|
||||
rootDeviceHints:
|
||||
deviceName: /dev/disk/by-path/pci-0000:01:00.0-scsi-0:2:0:0
|
||||
installerArgs: '["--save-partlabel", "data"]' <3>
|
||||
ignitionConfigOverride: | <4>
|
||||
{
|
||||
"ignition": {
|
||||
"version": "3.2.0"
|
||||
},
|
||||
"systemd": {
|
||||
"units": [
|
||||
{
|
||||
"name": "var-mnt.mount",
|
||||
"enabled": true,
|
||||
"contents": "[Unit]\nDescription=Mount partition with artifacts\nBefore=precache-ocp-images.service\nBindsTo=precache-ocp-images.service\nStopWhenUnneeded=true\n\n[Mount]\nWhat=/dev/disk/by-partlabel/data\nWhere=/var/mnt\nType=xfs\nTimeoutSec=30\n\n[Install]\nRequiredBy=precache-ocp-images.service"
|
||||
},
|
||||
{
|
||||
"name": "precache-ocp-images.service",
|
||||
"enabled": true,
|
||||
"contents": "[Unit]\nDescription=Extracts the precached OCP images into containers storage\nAfter=var-mnt.mount\nBefore=machine-config-daemon-pull.service nodeip-configuration.service\n\n[Service]\nType=oneshot\nUser=root\nWorkingDirectory=/var/mnt\nExecStart=bash /usr/local/bin/extract-ocp.sh\nTimeoutStopSec=60\n\n[Install]\nWantedBy=multi-user.target"
|
||||
}
|
||||
]
|
||||
},
|
||||
"storage": {
|
||||
"files": [
|
||||
{
|
||||
"overwrite": true,
|
||||
"path": "/usr/local/bin/extract-ocp.sh",
|
||||
"mode": 755,
|
||||
"user": {
|
||||
"name": "root"
|
||||
},
|
||||
{
|
||||
"name": "precache-ocp-images.service",
|
||||
"enabled": true,
|
||||
"contents": "[Unit]\nDescription=Extracts the precached OCP images into containers storage\nAfter=var-mnt.mount\nBefore=machine-config-daemon-pull.service nodeip-configuration.service\n\n[Service]\nType=oneshot\nUser=root\nWorkingDirectory=/var/mnt\nExecStart=bash /usr/local/bin/extract-ocp.sh\nTimeoutStopSec=60\n\n[Install]\nWantedBy=multi-user.target"
|
||||
"contents": {
|
||||
"source": "data:,%23%21%2Fbin%2Fbash%0A%0AFOLDER%3D%22%24%7BFOLDER%3A-%24%28pwd%29%7D%22%0AOCP_RELEASE_LIST%3D%22%24%7BOCP_RELEASE_LIST%3A-ocp-images.txt%7D%22%0ABINARY_FOLDER%3D%2Fvar%2Fmnt%0A%0Apushd%20%24FOLDER%0A%0Atotal_copies%3D%24%28sort%20-u%20%24BINARY_FOLDER%2F%24OCP_RELEASE_LIST%20%7C%20wc%20-l%29%20%20%23%20Required%20to%20keep%20track%20of%20the%20pull%20task%20vs%20total%0Acurrent_copy%3D1%0A%0Awhile%20read%20-r%20line%3B%0Ado%0A%20%20uri%3D%24%28echo%20%22%24line%22%20%7C%20awk%20%27%7Bprint%241%7D%27%29%0A%20%20%23tar%3D%24%28echo%20%22%24line%22%20%7C%20awk%20%27%7Bprint%242%7D%27%29%0A%20%20podman%20image%20exists%20%24uri%0A%20%20if%20%5B%5B%20%24%3F%20-eq%200%20%5D%5D%3B%20then%0A%20%20%20%20%20%20echo%20%22Skipping%20existing%20image%20%24tar%22%0A%20%20%20%20%20%20echo%20%22Copying%20%24%7Buri%7D%20%5B%24%7Bcurrent_copy%7D%2F%24%7Btotal_copies%7D%5D%22%0A%20%20%20%20%20%20current_copy%3D%24%28%28current_copy%20%2B%201%29%29%0A%20%20%20%20%20%20continue%0A%20%20fi%0A%20%20tar%3D%24%28echo%20%22%24uri%22%20%7C%20%20rev%20%7C%20cut%20-d%20%22%2F%22%20-f1%20%7C%20rev%20%7C%20tr%20%22%3A%22%20%22_%22%29%0A%20%20tar%20zxvf%20%24%7Btar%7D.tgz%0A%20%20if%20%5B%20%24%3F%20-eq%200%20%5D%3B%20then%20rm%20-f%20%24%7Btar%7D.gz%3B%20fi%0A%20%20echo%20%22Copying%20%24%7Buri%7D%20%5B%24%7Bcurrent_copy%7D%2F%24%7Btotal_copies%7D%5D%22%0A%20%20skopeo%20copy%20dir%3A%2F%2F%24%28pwd%29%2F%24%7Btar%7D%20containers-storage%3A%24%7Buri%7D%0A%20%20if%20%5B%20%24%3F%20-eq%200%20%5D%3B%20then%20rm%20-rf%20%24%7Btar%7D%3B%20current_copy%3D%24%28%28current_copy%20%2B%201%29%29%3B%20fi%0Adone%20%3C%20%24%7BBINARY_FOLDER%7D%2F%24%7BOCP_RELEASE_LIST%7D%0A%0Aexit%200"
|
||||
}
|
||||
]
|
||||
},
|
||||
"storage": {
|
||||
"files": [
|
||||
{
|
||||
"overwrite": true,
|
||||
"path": "/usr/local/bin/extract-ocp.sh",
|
||||
"mode": 755,
|
||||
"user": {
|
||||
"name": "root"
|
||||
},
|
||||
"contents": {
|
||||
"source": "data:,%23%21%2Fbin%2Fbash%0A%0AFOLDER%3D%22%24%7BFOLDER%3A-%24%28pwd%29%7D%22%0AOCP_RELEASE_LIST%3D%22%24%7BOCP_RELEASE_LIST%3A-ocp-images.txt%7D%22%0ABINARY_FOLDER%3D%2Fvar%2Fmnt%0A%0Apushd%20%24FOLDER%0A%0Atotal_copies%3D%24%28sort%20-u%20%24BINARY_FOLDER%2F%24OCP_RELEASE_LIST%20%7C%20wc%20-l%29%20%20%23%20Required%20to%20keep%20track%20of%20the%20pull%20task%20vs%20total%0Acurrent_copy%3D1%0A%0Awhile%20read%20-r%20line%3B%0Ado%0A%20%20uri%3D%24%28echo%20%22%24line%22%20%7C%20awk%20%27%7Bprint%241%7D%27%29%0A%20%20%23tar%3D%24%28echo%20%22%24line%22%20%7C%20awk%20%27%7Bprint%242%7D%27%29%0A%20%20podman%20image%20exists%20%24uri%0A%20%20if%20%5B%5B%20%24%3F%20-eq%200%20%5D%5D%3B%20then%0A%20%20%20%20%20%20echo%20%22Skipping%20existing%20image%20%24tar%22%0A%20%20%20%20%20%20echo%20%22Copying%20%24%7Buri%7D%20%5B%24%7Bcurrent_copy%7D%2F%24%7Btotal_copies%7D%5D%22%0A%20%20%20%20%20%20current_copy%3D%24%28%28current_copy%20%2B%201%29%29%0A%20%20%20%20%20%20continue%0A%20%20fi%0A%20%20tar%3D%24%28echo%20%22%24uri%22%20%7C%20%20rev%20%7C%20cut%20-d%20%22%2F%22%20-f1%20%7C%20rev%20%7C%20tr%20%22%3A%22%20%22_%22%29%0A%20%20tar%20zxvf%20%24%7Btar%7D.tgz%0A%20%20if%20%5B%20%24%3F%20-eq%200%20%5D%3B%20then%20rm%20-f%20%24%7Btar%7D.gz%3B%20fi%0A%20%20echo%20%22Copying%20%24%7Buri%7D%20%5B%24%7Bcurrent_copy%7D%2F%24%7Btotal_copies%7D%5D%22%0A%20%20skopeo%20copy%20dir%3A%2F%2F%24%28pwd%29%2F%24%7Btar%7D%20containers-storage%3A%24%7Buri%7D%0A%20%20if%20%5B%20%24%3F%20-eq%200%20%5D%3B%20then%20rm%20-rf%20%24%7Btar%7D%3B%20current_copy%3D%24%28%28current_copy%20%2B%201%29%29%3B%20fi%0Adone%20%3C%20%24%7BBINARY_FOLDER%7D%2F%24%7BOCP_RELEASE_LIST%7D%0A%0Aexit%200"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
nodeNetwork:
|
||||
config:
|
||||
interfaces:
|
||||
- name: ens1f0
|
||||
type: ethernet
|
||||
state: up
|
||||
macAddress: "AA:BB:CC:11:22:33"
|
||||
ipv4:
|
||||
enabled: true
|
||||
dhcp: true
|
||||
ipv6:
|
||||
enabled: false
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
nodeNetwork:
|
||||
interfaces:
|
||||
- name: "ens1f0"
|
||||
macAddress: "AA:BB:CC:11:22:33"
|
||||
config:
|
||||
interfaces:
|
||||
- name: "ens1f0"
|
||||
- name: ens1f0
|
||||
type: ethernet
|
||||
state: up
|
||||
macAddress: "AA:BB:CC:11:22:33"
|
||||
ipv4:
|
||||
enabled: true
|
||||
dhcp: true
|
||||
ipv6:
|
||||
enabled: false
|
||||
templateRefs:
|
||||
- name: ai-node-templates-v1
|
||||
namespace: siteconfig-system
|
||||
----
|
||||
<1> Specifies the cluster image set used for deployment, unless you specify a different image set in the `spec.clusters.clusterImageSetNameRef` field.
|
||||
<2> Specifies the cluster image set used to deploy an individual cluster. If defined, it overrides the `spec.clusterImageSetNameRef` at the site level.
|
||||
<1> Specifies the cluster image set used for deployment.
|
||||
<2> Configures the cluster-level ignition config override for the discovery stage.
|
||||
<3> Specifies the installation program arguments to preserve the data partition.
|
||||
<4> Configures the node-level ignition config override for the installation stage.
|
||||
|
||||
[id="ztp-pre-caching-config-clusters-ignitionconfigoverride_{context}"]
|
||||
== Understanding the clusters.ignitionConfigOverride field
|
||||
[id="ztp-pre-caching-config-spec-ignitionconfigoverride_{context}"]
|
||||
== Understanding the spec.ignitionConfigOverride field
|
||||
|
||||
The `clusters.ignitionConfigOverride` field adds a configuration in Ignition format during the {ztp} discovery stage.
|
||||
The `spec.ignitionConfigOverride` field adds a configuration in Ignition format during the {ztp} discovery stage.
|
||||
The configuration adds `systemd` services to the ISO that is mounted through virtual media. As a result, the scripts are part of the discovery {op-system} live ISO and can be used to load the Assisted Installer (AI) images.
|
||||
|
||||
`systemd` services:: The `systemd` services are `var-mnt.mount` and `precache-images.services`. The `precache-images.service` depends on the disk partition to be mounted in `/var/mnt` by the `var-mnt.mount` unit.
|
||||
@@ -171,10 +176,10 @@ When the script finishes successfully, you can use the images locally.
|
||||
`agent-fix-bz1964591`:: The `agent-fix-bz1964591` script is a workaround for an AI issue.
|
||||
To prevent AI from removing the images, which can force the `agent.service` to pull the images again from the registry, the `agent-fix-bz1964591` script checks if the requested container images exist.
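For orientation, the following is a minimal, decoded sketch of the shape of that override, abbreviated from the full `ClusterInstance` example earlier in this section. The unit contents and script sources are truncated here and must be provided in full, as shown in the complete example.

[source,yaml]
----
spec:
  ignitionConfigOverride: |
    {
      "ignition": { "version": "3.2.0" },
      "systemd": {
        "units": [
          { "name": "var-mnt.mount", "enabled": true, "contents": "..." },
          { "name": "precache-images.service", "enabled": true, "contents": "..." }
        ]
      },
      "storage": {
        "files": [
          { "overwrite": true, "path": "/usr/local/bin/extract-ai.sh", "mode": 755, "contents": { "source": "data:,..." } },
          { "overwrite": true, "path": "/usr/local/bin/agent-fix-bz1964591", "mode": 755, "contents": { "source": "data:,..." } }
        ]
      }
    }
----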
|
||||
|
||||
[id="ztp-pre-caching-config-nodes-installerargs_{context}"]
|
||||
== Understanding the nodes.installerArgs field
|
||||
[id="ztp-pre-caching-config-spec-nodes-installerargs_{context}"]
|
||||
== Understanding the spec.nodes[].installerArgs field
|
||||
|
||||
The `nodes.installerArgs` field allows you to configure how the `coreos-installer` utility writes the {op-system} live ISO to disk. You need to indicate to save the disk partition labeled as `data` because the artifacts saved in the `data` partition are needed during the {product-title} installation stage.
|
||||
The `spec.nodes[].installerArgs` field allows you to configure how the `coreos-installer` utility writes the {op-system} live ISO to disk. You must instruct the utility to preserve the disk partition labeled `data`, because the artifacts saved in the `data` partition are needed during the {product-title} installation stage.
|
||||
|
||||
The extra parameters are passed directly to the `coreos-installer` utility that writes the live {op-system} to disk.
|
||||
On the next reboot, the operating system starts from the disk.
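As a point of reference, the relevant node entry from the earlier example has the following shape; only the fields related to preserving the partition are shown:

[source,yaml]
----
spec:
  nodes:
    - hostName: "snonode.sno-worker-0.example.domain.redhat.com"
      rootDeviceHints:
        deviceName: /dev/disk/by-path/pci-0000:01:00.0-scsi-0:2:0:0
      # Extra arguments passed to coreos-installer: keep the partition labeled "data"
      installerArgs: '["--save-partlabel", "data"]'
----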
|
||||
@@ -207,10 +212,10 @@ OPTIONS:
|
||||
Allow Ignition URL without HTTPS or hash
|
||||
----
|
||||
|
||||
[id="ztp-pre-caching-config-nodes-ignitionconfigoverride_{context}"]
|
||||
== Understanding the nodes.ignitionConfigOverride field
|
||||
[id="ztp-pre-caching-config-spec-nodes-ignitionconfigoverride_{context}"]
|
||||
== Understanding the spec.nodes[].ignitionConfigOverride field
|
||||
|
||||
Similarly to `clusters.ignitionConfigOverride`, the `nodes.ignitionConfigOverride` field allows the addition of configurations in Ignition format to the `coreos-installer` utility, but at the {product-title} installation stage.
|
||||
Similarly to `spec.ignitionConfigOverride`, the `spec.nodes[].ignitionConfigOverride` field allows the addition of configurations in Ignition format to the `coreos-installer` utility, but at the {product-title} installation stage.
|
||||
When the {op-system} is written to disk, the extra configuration included in the {ztp} discovery ISO is no longer available. During the discovery stage, the extra configuration is stored in the memory of the live OS.
|
||||
|
||||
[NOTE]
|
||||
@@ -230,4 +235,4 @@ To extract all the images before the {product-title} installation, you must exec
|
||||
|
||||
`extract-ocp.sh`:: The `extract-ocp.sh` script extracts and loads the required images from the disk partition to the local container storage.
|
||||
|
||||
When you commit the `SiteConfig` and optional `PolicyGenerator` or `PolicyGenTemplate` custom resources (CRs) to the Git repo that Argo CD is monitoring, you can start the {ztp} workflow by syncing the CRs with the hub cluster.
|
||||
When you commit the `ClusterInstance` and optional `PolicyGenerator` or `PolicyGenTemplate` custom resources (CRs) to the Git repo that Argo CD is monitoring, you can start the {ztp} workflow by syncing the CRs with the hub cluster.
|
||||
|
||||
@@ -26,10 +26,10 @@ $ podman run --log-driver=none --rm registry.redhat.io/openshift4/ztp-site-gener
|
||||
+
|
||||
The `/update` directory contains the following subdirectories:
|
||||
+
|
||||
* `update/extra-manifest`: contains the source CR files that the `SiteConfig` CR uses to generate the extra manifest `configMap`.
|
||||
* `update/extra-manifest`: contains the source CR files that you package into a `ConfigMap` and reference in the `ClusterInstance` CR using the `extraManifestsRefs` field.
|
||||
* `update/source-crs`: contains the source CR files that the `PolicyGenerator` or `PolicyGentemplate` CR uses to generate the {rh-rhacm-first} policies.
|
||||
* `update/argocd/deployment`: contains patches and YAML files to apply on the hub cluster for use in the next step of this procedure.
|
||||
* `update/argocd/example`: contains example `SiteConfig` and `PolicyGenerator` or `PolicyGentemplate` files that represent the recommended configuration.
|
||||
* `update/argocd/example`: contains example `ClusterInstance` and `PolicyGenerator` or `PolicyGentemplate` files that represent the recommended configuration.
|
||||
|
||||
. Update the `clusters-app.yaml` and `policies-app.yaml` files to reflect the name of your applications and the URL, branch, and path for your Git repository.
|
||||
+
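A minimal sketch of the fields to update in each Argo CD `Application` resource follows; the repository URL, branch, and path values are placeholders for your own Git repository:
+
[source,yaml]
----
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: clusters
  namespace: openshift-gitops
spec:
  source:
    repoURL: https://example.com/your-org/ztp-site-configs.git # URL of your Git repository
    targetRevision: main # branch that Argo CD monitors
    path: clusterinstance # path to the ClusterInstance CRs in the repository
# ...
----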
|
||||
|
||||
@@ -10,7 +10,7 @@ You can configure the hub cluster with a set of ArgoCD applications that generat
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
{rh-rhacm-first} uses `SiteConfig` CRs to generate the Day 1 managed cluster installation CRs for ArgoCD. Each ArgoCD application can manage a maximum of 300 `SiteConfig` CRs.
|
||||
{rh-rhacm-first} uses `ClusterInstance` CRs to generate the Day 1 managed cluster installation CRs for ArgoCD. Each ArgoCD application can manage a maximum of 1000 `ClusterInstance` CRs.
|
||||
====
|
||||
|
||||
.Prerequisites
|
||||
@@ -37,7 +37,7 @@ You can configure the hub cluster with a set of ArgoCD applications that generat
|
||||
|
||||
*** The `targetRevision` indicates which Git repository branch to monitor.
|
||||
|
||||
*** `path` specifies the path to the `SiteConfig` and `PolicyGenerator` or `PolicyGentemplate` CRs, respectively.
|
||||
*** `path` specifies the path to the `ClusterInstance` and `PolicyGenerator` or `PolicyGentemplate` CRs, respectively.
|
||||
|
||||
[start=2]
|
||||
include::snippets/ztp-patch-argocd-hub-cluster.adoc[]
|
||||
|
||||
@@ -22,7 +22,7 @@ The following procedure assumes you are using `PolicyGenerator` resources instea
|
||||
|
||||
.Procedure
|
||||
|
||||
. Create a directory structure with separate paths for the `SiteConfig` and `PolicyGenerator` CRs.
|
||||
. Create a directory structure with separate paths for the `ClusterInstance` and `PolicyGenerator` CRs.
|
||||
|
||||
. Within the `PolicyGenerator` directory, create a directory for each {product-title} version you want to make available.
|
||||
For each version, create the following resources:
|
||||
@@ -31,7 +31,7 @@ For each version, create the following resources:
|
||||
+
|
||||
If you want to work with user-provided CRs, you must create a separate directory for them.
|
||||
|
||||
. In the `/siteconfig` directory, create a subdirectory for each {product-title} version you want to make available. For each version, create at least one directory for reference CRs to be copied from the container. There is no restriction on the naming of directories or on the number of reference directories. If you want to work with custom manifests, you must create a separate directory for them.
|
||||
. In the `/clusterinstance` directory, create a subdirectory for each {product-title} version you want to make available. For each version, create at least one directory for reference CRs to be copied from the container. There is no restriction on the naming of directories or on the number of reference directories. If you want to work with custom manifests, you must create a separate directory for them.
|
||||
+
|
||||
The following example describes a structure using user-provided manifests and CRs for different versions of {product-title}:
|
||||
+
|
||||
@@ -59,7 +59,7 @@ The following example describes a structure using user-provided manifests and CR
|
||||
│ └── source-crs/ <4>
|
||||
│ └── reference-crs/ <5>
|
||||
│ └── custom-crs/ <6>
|
||||
└── siteconfig
|
||||
└── clusterinstance
|
||||
├── kustomization.yaml
|
||||
├── version_4.13
|
||||
│ ├── helix56-v413.yaml
|
||||
@@ -69,8 +69,8 @@ The following example describes a structure using user-provided manifests and CR
|
||||
└── version_4.14
|
||||
├── helix57-v414.yaml
|
||||
├── kustomization.yaml
|
||||
├── extra-manifest/ <7>
|
||||
└── custom-manifest/ <8>
|
||||
├── extra-manifest/
|
||||
└── custom-manifest/
|
||||
|
||||
----
|
||||
<1> Create a top-level `kustomization` YAML file.
|
||||
@@ -79,32 +79,56 @@ The following example describes a structure using user-provided manifests and CR
|
||||
<4> Create a `source-crs` directory for each version to contain reference CRs from the `ztp-site-generate` container.
|
||||
<5> Create the `reference-crs` directory for policy CRs that are extracted from the ZTP container.
|
||||
<6> Optional: Create a `custom-crs` directory for user-provided CRs.
|
||||
<7> Create a directory within the custom `/siteconfig` directory to contain extra manifests from the `ztp-site-generate` container.
|
||||
<7> Create a directory within the custom `/clusterinstance` directory to contain extra manifests from the `ztp-site-generate` container.
|
||||
<8> Create a folder to hold user-provided manifests.
|
||||
+
|
||||
[NOTE]
|
||||
====
|
||||
In the previous example, each version subdirectory in the custom `/siteconfig` directory contains two further subdirectories, one containing the reference manifests copied from the container, the other for custom manifests that you provide.
|
||||
In the example directory structure, each version subdirectory in the custom `/clusterinstance` directory contains two further subdirectories, one containing the reference manifests copied from the container, the other for custom manifests that you provide.
|
||||
The names assigned to those directories are examples.
|
||||
If you use user-provided CRs, the last directory listed under `extraManifests.searchPaths` in the `SiteConfig` CR must be the directory containing user-provided CRs.
|
||||
====
|
||||
|
||||
. Edit the `SiteConfig` CR to include the search paths of any directories you have created.
|
||||
The first directory that is listed under `extraManifests.searchPaths` must be the directory containing the reference manifests.
|
||||
Consider the order in which the directories are listed.
|
||||
In cases where directories contain files with the same name, the file in the final directory takes precedence.
|
||||
. Create ConfigMaps from the manifest directories and reference them in the `ClusterInstance` CR using the `extraManifestsRefs` field.
|
||||
+
|
||||
.Example SiteConfig CR
|
||||
.Example kustomization.yaml with configMapGenerator
|
||||
+
|
||||
[source,yaml]
|
||||
----
|
||||
extraManifests:
|
||||
searchPaths:
|
||||
- extra-manifest/ <1>
|
||||
- custom-manifest/ <2>
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
|
||||
configMapGenerator:
|
||||
- name: extra-manifests-cm
|
||||
namespace: helix56-v413
|
||||
files:
|
||||
- extra-manifest/workload-partitioning.yaml <1>
|
||||
- extra-manifest/enable-crun-master.yaml
|
||||
- custom-manifest/custom-config.yaml <2>
|
||||
# ...
|
||||
|
||||
generatorOptions:
|
||||
disableNameSuffixHash: true
|
||||
----
|
||||
<1> The directory containing the reference manifests must be listed first under `extraManifests.searchPaths`.
|
||||
<2> If you are using user-provided CRs, the last directory listed under `extraManifests.searchPaths` in the `SiteConfig` CR must be the directory containing those user-provided CRs.
|
||||
<1> Extra manifest files from the `ztp-site-generate` container.
|
||||
<2> User-provided custom manifest files.
|
||||
|
||||
. Edit the `ClusterInstance` CR to reference the `ConfigMap` CR:
|
||||
+
|
||||
.Example ClusterInstance CR
|
||||
+
|
||||
[source,yaml]
|
||||
----
|
||||
apiVersion: siteconfig.open-cluster-management.io/v1alpha1
|
||||
kind: ClusterInstance
|
||||
metadata:
|
||||
name: helix56-v413
|
||||
namespace: helix56-v413
|
||||
spec:
|
||||
# ...
|
||||
extraManifestsRefs:
|
||||
- name: extra-manifests-cm <1>
|
||||
----
|
||||
<1> Reference the ConfigMap containing the extra manifests.
|
||||
|
||||
. Edit the top-level `kustomization.yaml` file to control which {product-title} versions are active. The following is an example of a `kustomization.yaml` file at the top level:
|
||||
+
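A minimal sketch of such a top-level file, assuming the version directories from the earlier layout, follows; commenting out a directory makes that version inactive:
+
[source,yaml]
----
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

resources:
- version_4.13
# Comment out a version directory to deactivate that version:
#- version_4.14
----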
|
||||
|
||||
@@ -16,12 +16,12 @@ Before you can use the {ztp-first} pipeline, you need to prepare the Git reposit
|
||||
|
||||
.Procedure
|
||||
|
||||
. Create a directory structure with separate paths for the `SiteConfig` and `PolicyGenerator` or `PolicyGentemplate` CRs.
|
||||
. Create a directory structure with separate paths for the `ClusterInstance` and `PolicyGenerator` or `PolicyGentemplate` CRs.
|
||||
+
|
||||
[NOTE]
|
||||
====
|
||||
Keep `SiteConfig` and `PolicyGenerator` or `PolicyGentemplate` CRs in separate directories.
|
||||
Both the `SiteConfig` and `PolicyGenerator` or `PolicyGentemplate` directories must contain a `kustomization.yaml` file that explicitly includes the files in that directory.
|
||||
Keep `ClusterInstance` and `PolicyGenerator` or `PolicyGentemplate` CRs in separate directories.
|
||||
Both the `ClusterInstance` and `PolicyGenerator` or `PolicyGentemplate` directories must contain a `kustomization.yaml` file that explicitly includes the files in that directory.
|
||||
====
|
||||
|
||||
. Export the `argocd` directory from the `ztp-site-generate` container image using the following commands:
|
||||
@@ -43,15 +43,15 @@ $ podman run --log-driver=none --rm registry.redhat.io/openshift4/ztp-site-gener
|
||||
|
||||
. Check that the `out` directory contains the following subdirectories:
|
||||
+
|
||||
* `out/extra-manifest` contains the source CR files that `SiteConfig` uses to generate extra manifest `configMap`.
|
||||
* `out/extra-manifest` contains the source CR files that you use to create extra manifest `ConfigMap` resources through the `configMapGenerator` in the `kustomization.yaml` file. The `ClusterInstance` CR references these `ConfigMap` resources using the `extraManifestsRefs` field.
|
||||
* `out/source-crs` contains the source CR files that `PolicyGenerator` uses to generate the {rh-rhacm-first} policies.
|
||||
* `out/argocd/deployment` contains patches and YAML files to apply on the hub cluster for use in the next step of this procedure.
|
||||
* `out/argocd/example` contains the examples for `SiteConfig` and `PolicyGenerator` or `PolicyGentemplate` files that represent the recommended configuration.
|
||||
* `out/argocd/example/clusterinstance` contains the examples for `ClusterInstance` and `PolicyGenerator` or `PolicyGentemplate` files that represent the recommended configuration.
|
||||
|
||||
. Copy the `out/source-crs` folder and contents to the `PolicyGenerator` or `PolicyGentemplate` directory.
|
||||
|
||||
. The `out/extra-manifests` directory contains the reference manifests for a RAN DU cluster.
|
||||
Copy the `out/extra-manifests` directory into the `SiteConfig` folder.
|
||||
Copy the `out/extra-manifests` directory into the `ClusterInstance` folder.
|
||||
This directory should contain CRs from the `ztp-site-generate` container only.
|
||||
Do not add user-provided CRs here.
|
||||
If you want to work with user-provided CRs, you must create another directory for that content.
|
||||
@@ -66,7 +66,7 @@ example/
|
||||
├── policygentemplates <1>
|
||||
│ ├── kustomization.yaml
|
||||
│ └── source-crs/
|
||||
└── siteconfig
|
||||
└── clusterinstance
|
||||
├── extra-manifests
|
||||
└── kustomization.yaml
|
||||
----
|
||||
@@ -77,13 +77,13 @@ Equivalent and improved functionality is available by using {rh-rhacm-first} and
|
||||
The initial push to Git should include the `kustomization.yaml` files.
|
||||
|
||||
You can use the directory structure under `out/argocd/example` as a reference for the structure and content of your Git repository.
|
||||
That structure includes `SiteConfig` and `PolicyGenerator` or `PolicyGentemplate` reference CRs for single-node, three-node, and standard clusters.
|
||||
That structure includes `ClusterInstance` and `PolicyGenerator` or `PolicyGentemplate` reference CRs for single-node, three-node, and standard clusters.
|
||||
Remove references to cluster types that you are not using.
|
||||
|
||||
For all cluster types, you must:
|
||||
|
||||
* Add the `source-crs` subdirectory to the `acmpolicygenerator` or `policygentemplates` directory.
|
||||
* Add the `extra-manifests` directory to the `siteconfig` directory.
|
||||
* Add the `extra-manifests` directory to the `clusterinstance` directory.
|
||||
|
||||
The following example describes a set of CRs for a network of single-node clusters:
|
||||
|
||||
@@ -98,7 +98,7 @@ example/
|
||||
│ ├── kustomization.yaml
|
||||
│ ├── source-crs/
|
||||
│ └── ns.yaml
|
||||
└── siteconfig
|
||||
└── clusterinstance
|
||||
├── example-sno.yaml
|
||||
├── extra-manifests/ <1>
|
||||
├── custom-manifests/ <2>
|
||||
|
||||
@@ -62,5 +62,5 @@ a|`extra-manifest/enable-cgroups-v1.yaml`
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
In {product-title} 4.14 and later, you configure workload partitioning with the `cpuPartitioningMode` field in the `SiteConfig` CR.
|
||||
In {product-title} 4.14 and later, you configure workload partitioning with the `cpuPartitioningMode` field in the `ClusterInstance` CR.
|
||||
====
|
||||
|
||||
@@ -19,7 +19,7 @@ All `PolicyGenerator` files must be created in a `Namespace` prefixed with `ztp`
|
||||
|
||||
* Add the `kustomization.yaml` file to the repository:
|
||||
+
|
||||
All `SiteConfig` and `PolicyGenerator` CRs must be included in a `kustomization.yaml` file under their respective directory trees. For example:
|
||||
All `ClusterInstance` and `PolicyGenerator` CRs must be included in a `kustomization.yaml` file under their respective directory trees. For example:
|
||||
+
|
||||
[source,terminal]
|
||||
----
|
||||
@@ -33,7 +33,7 @@ All `SiteConfig` and `PolicyGenerator` CRs must be included in a `kustomization.
|
||||
│ ├── group-du-sno-ranGen-ns.yaml
|
||||
│ ├── group-du-sno-ranGen.yaml
|
||||
│ └── kustomization.yaml
|
||||
└── siteconfig
|
||||
└── clusterinstance
|
||||
├── site1.yaml
|
||||
├── site2.yaml
|
||||
└── kustomization.yaml
|
||||
@@ -41,7 +41,7 @@ All `SiteConfig` and `PolicyGenerator` CRs must be included in a `kustomization.
|
||||
+
|
||||
[NOTE]
|
||||
====
|
||||
The files listed in the `generator` sections must contain either `SiteConfig` or `{policy-gen-cr}` CRs only. If your existing YAML files contain other CRs, for example, `Namespace`, these other CRs must be pulled out into separate files and listed in the `resources` section.
|
||||
The files listed in the `generator` sections must contain either `ClusterInstance` or `{policy-gen-cr}` CRs only. If your existing YAML files contain other CRs, for example, `Namespace`, these other CRs must be pulled out into separate files and listed in the `resources` section.
|
||||
====
|
||||
+
|
||||
The `PolicyGenerator` kustomization file must contain all `PolicyGenerator` YAML files in the `generator` section and `Namespace` CRs in the `resources` section. For example:
|
||||
@@ -64,7 +64,7 @@ resources:
|
||||
- site2-ns.yaml
|
||||
----
|
||||
+
|
||||
The `SiteConfig` kustomization file must contain all `SiteConfig` YAML files in the `generator` section and any other CRs in the resources:
|
||||
The `ClusterInstance` kustomization file must contain all `ClusterInstance` YAML files in the `generator` section and any other CRs in the `resources` section:
|
||||
+
|
||||
[source,terminal]
|
||||
----
|
||||
@@ -82,11 +82,11 @@ In {product-title} 4.10 and later, the `pre-sync.yaml` and `post-sync.yaml` file
|
||||
+
|
||||
[NOTE]
|
||||
====
|
||||
There is a set of `pre-sync.yaml` and `post-sync.yaml` files under both the `SiteConfig` and `{policy-gen-cr}` trees.
|
||||
There is a set of `pre-sync.yaml` and `post-sync.yaml` files under both the `ClusterInstance` and `{policy-gen-cr}` trees.
|
||||
====
|
||||
|
||||
* Review and incorporate recommended changes
|
||||
+
|
||||
Each release may include additional recommended changes to the configuration applied to deployed clusters. Typically these changes result in lower CPU use by the OpenShift platform, additional features, or improved tuning of the platform.
|
||||
+
|
||||
Review the reference `SiteConfig` and `PolicyGenerator` CRs applicable to the types of cluster in your network. These examples can be found in the `argocd/example` directory extracted from the {ztp} container.
|
||||
Review the reference `ClusterInstance` and `PolicyGenerator` CRs applicable to the types of cluster in your network. These examples can be found in the `argocd/example` directory extracted from the {ztp} container.
|
||||
|
||||
@@ -16,9 +16,9 @@ You can remove a managed site and the associated installation and configuration
|
||||
|
||||
.Procedure
|
||||
|
||||
. Remove a site and the associated CRs by removing the associated `SiteConfig` and `PolicyGenerator` or `PolicyGentemplate` files from the `kustomization.yaml` file.
|
||||
. Remove a site and the associated CRs by removing the associated `ClusterInstance` and `PolicyGenerator` or `PolicyGentemplate` files from the `kustomization.yaml` file.
|
||||
|
||||
. Add the following `syncOptions` field to your `SiteConfig` application.
|
||||
. Add the following `syncOptions` field to the ArgoCD application that manages the target site.
|
||||
+
|
||||
[source,yaml]
|
||||
----
|
||||
@@ -31,6 +31,6 @@ spec:
|
||||
+
|
||||
When you run the {ztp} pipeline again, the generated CRs are removed.
|
||||
|
||||
. Optional: If you want to permanently remove a site, you should also remove the `SiteConfig` and site-specific `PolicyGenerator` or `PolicyGentemplate` files from the Git repository.
|
||||
. Optional: If you want to permanently remove a site, you should also remove the `ClusterInstance` and site-specific `PolicyGenerator` or `PolicyGentemplate` files from the Git repository.
|
||||
|
||||
. Optional: If you want to remove a site temporarily, for example when redeploying a site, you can leave the `SiteConfig` and site-specific `PolicyGenerator` or `PolicyGentemplate` CRs in the Git repository.
|
||||
. Optional: If you want to remove a site temporarily, for example when redeploying a site, you can leave the `ClusterInstance` and site-specific `PolicyGenerator` or `PolicyGentemplate` CRs in the Git repository.
|
||||
@@ -22,11 +22,11 @@ Accelerated provisioning of {ztp} is supported only when installing {sno} with A
|
||||
|
||||
You can activate accelerated ZTP using the `spec.clusters.clusterLabels.accelerated-ztp` label, as in the following example:
|
||||
|
||||
.Example Accelerated ZTP `SiteConfig` CR.
|
||||
.Example Accelerated ZTP `ClusterInstance` CR.
|
||||
[source,yaml]
|
||||
----
|
||||
apiVersion: ran.openshift.io/v2
|
||||
kind: SiteConfig
|
||||
apiVersion: siteconfig.open-cluster-management.io/v1alpha1
|
||||
kind: ClusterInstance
|
||||
metadata:
|
||||
name: "example-sno"
|
||||
namespace: "example-sno"
|
||||
@@ -36,14 +36,13 @@ spec:
|
||||
name: "assisted-deployment-pull-secret"
|
||||
clusterImageSetNameRef: "openshift-4.21"
|
||||
sshPublicKey: "ssh-rsa AAAA..."
|
||||
clusters:
|
||||
# ...
|
||||
clusterLabels:
|
||||
common: true
|
||||
group-du-sno: ""
|
||||
sites : "example-sno"
|
||||
accelerated-ztp: full
|
||||
|
||||
extraLabels:
|
||||
ManagedCluster: # <-- Resource type as outer key
|
||||
common: "true"
|
||||
group-du-sno: ""
|
||||
sites: "example-sno"
|
||||
accelerated-ztp: full # <-- Accelerated ZTP label
|
||||
# ...
|
||||
----
|
||||
|
||||
You can use `accelerated-ztp: full` to fully automate the accelerated process.
|
||||
@@ -74,7 +73,7 @@ Accelerated ZTP uses an additional `ConfigMap` to create the resources derived f
|
||||
The standard `ConfigMap` includes manifests that the {ztp} workflow uses to customize cluster installs.
|
||||
|
||||
{cgu-operator} detects that the `accelerated-ztp` label is set and then creates a second `ConfigMap`.
|
||||
As part of accelerated ZTP, the `SiteConfig` generator adds a reference to that second `ConfigMap` using the naming convention `<spoke-cluster-name>-aztp`.
|
||||
As part of accelerated ZTP, the SiteConfig Operator adds a reference to that second `ConfigMap` using the naming convention `<spoke-cluster-name>-aztp`.
|
||||
|
||||
After {cgu-operator} creates that second `ConfigMap`, it finds all policies bound to the managed cluster and extracts the {ztp} profile information.
|
||||
{cgu-operator} adds the {ztp} profile information to the `<spoke-cluster-name>-aztp` `ConfigMap` custom resource (CR) and applies the CR to the hub cluster API.
|
||||
|
||||
@@ -6,9 +6,9 @@
|
||||
[id="ztp-sno-du-configuring-disk-encryption-with-pcr-protection_{context}"]
|
||||
= Enabling disk encryption with TPM and PCR protection
|
||||
|
||||
You can use the `diskEncryption` field in the `SiteConfig` custom resource (CR) to configure disk encryption with Trusted Platform Module (TPM) and Platform Configuration Registers (PCRs) protection.
|
||||
You can use the `diskEncryption` field in the `ClusterInstance` custom resource (CR) to configure disk encryption with Trusted Platform Module (TPM) and Platform Configuration Registers (PCRs) protection.
|
||||
|
||||
Configuring the `SiteConfig` CR enables disk encryption at the time of cluster installation.
|
||||
Configuring the `ClusterInstance` CR enables disk encryption at the time of cluster installation.
|
||||
|
||||
.Prerequisites
|
||||
|
||||
@@ -18,20 +18,20 @@ Configuring the `SiteConfig` CR enables disk encryption at the time of cluster i
|
||||
|
||||
.Procedure
|
||||
|
||||
* Configure the `spec.clusters.diskEncryption` field in the `SiteConfig` CR:
|
||||
* Configure the `spec.clusters.diskEncryption` field in the `ClusterInstance` CR:
|
||||
+
|
||||
.Recommended `SiteConfig` CR configuration to enable disk encryption with PCR protection
|
||||
.Recommended `ClusterInstance` CR configuration to enable disk encryption with PCR protection
|
||||
[source,yaml]
|
||||
----
|
||||
apiVersion: ran.openshift.io/v1
|
||||
kind: SiteConfig
|
||||
kind: ClusterInstance
|
||||
metadata:
|
||||
name: "encryption-tpm2"
|
||||
namespace: "encryption-tpm2"
|
||||
spec:
|
||||
clusters:
|
||||
- clusterName: "encryption-tpm2"
|
||||
clusterImageSetNameRef: "openshift-v4.13.0"
|
||||
clusterImageSetNameRef: "openshift-v4.21.0"
|
||||
diskEncryption:
|
||||
type: "tpm2" <1>
|
||||
tpm2:
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
[id="ztp-sno-du-disk-encryption_{context}"]
|
||||
= About disk encryption with TPM and PCR protection
|
||||
|
||||
You can use the `diskEncryption` field in the `SiteConfig` custom resource (CR) to configure disk encryption with Trusted Platform Module (TPM) and Platform Configuration Registers (PCRs) protection.
|
||||
You can use the `diskEncryption` field in the `ClusterInstance` custom resource (CR) to configure disk encryption with Trusted Platform Module (TPM) and Platform Configuration Registers (PCRs) protection.
|
||||
|
||||
TPM is a hardware component that stores cryptographic keys and evaluates the security state of your system. PCRs within the TPM store hash values that represent the current hardware and software configuration of your system. You can use the following PCR registers to protect the encryption keys for disk encryption:
|
||||
|
||||
@@ -17,6 +17,6 @@ The TPM safeguards encryption keys by linking them to the system's current state
|
||||
|
||||
During the system boot process, the `dmcrypt` utility uses the TPM PCR values to unlock the disk. If the current PCR values match with the previously linked values, the unlock succeeds. If the PCR values do not match, the encryption keys cannot be released, and the disk remains encrypted and inaccessible.
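As a conceptual sketch only, the `diskEncryption` field has the following shape; the `pcrList` value format shown here is an assumption, and the registers to use are PCR 1 and PCR 7:

[source,yaml]
----
diskEncryption:
  type: "tpm2" # enable TPM v2-based disk encryption
  tpm2:
    pcrList: "1,7" # assumed value format; bind the encryption keys to PCR registers 1 and 7
----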
|
||||
|
||||
:FeatureName: Configuring disk encryption by using the `diskEncryption` field in the `SiteConfig` CR
|
||||
:FeatureName: Configuring disk encryption by using the `diskEncryption` field in the `ClusterInstance` CR
|
||||
include::snippets/technology-preview.adoc[]
|
||||
:!FeatureName:
|
||||
@@ -22,11 +22,11 @@ Changes to CPU settings cause the node to reboot.
|
||||
When transitioning to using `cpuPartitioningMode` for enabling workload partitioning, remove the workload partitioning `MachineConfig` CRs from the `/extra-manifest` folder that you use to provision the cluster.
|
||||
====
|
||||
|
||||
.Recommended `SiteConfig` CR configuration for workload partitioning
|
||||
.Recommended `ClusterInstance` CR configuration for workload partitioning
|
||||
[source,yaml]
|
||||
----
|
||||
apiVersion: ran.openshift.io/v1
|
||||
kind: SiteConfig
|
||||
kind: ClusterInstance
|
||||
metadata:
|
||||
name: "<site_name>"
|
||||
namespace: "<site_name>"
|
||||
|
||||
@@ -11,7 +11,7 @@ When the node is centrally managed it is not needed.
|
||||
Removing the Operator provides additional space and capacity for application workloads.
|
||||
|
||||
|
||||
To disable the Console Operator during the installation of the managed cluster, set the following in the `spec.clusters.0.installConfigOverrides` field of the `SiteConfig` custom resource (CR):
|
||||
To disable the Console Operator during the installation of the managed cluster, set the following in the `spec.installConfigOverrides` field of the `ClusterInstance` custom resource (CR):
|
||||
|
||||
[source,yaml]
|
||||
----
|
||||
|
||||
@@ -1,110 +0,0 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * scalability_and_performance/ztp_far_edge/ztp-deploying-far-edge-sites.adoc
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="ztp-sno-siteconfig-config-reference_{context}"]
|
||||
= {sno-caps} SiteConfig CR installation reference
|
||||
|
||||
.SiteConfig CR installation options for {sno} clusters
|
||||
[cols="1,3", options="header"]
|
||||
|====
|
||||
|SiteConfig CR field
|
||||
|Description
|
||||
|
||||
|`spec.cpuPartitioningMode`
|
||||
a|Configure workload partitioning by setting the value for `cpuPartitioningMode` to `AllNodes`.
|
||||
To complete the configuration, specify the `isolated` and `reserved` CPUs in the `PerformanceProfile` CR.
|
||||
|
||||
|`metadata.name`
|
||||
|Set `name` to `assisted-deployment-pull-secret` and create the `assisted-deployment-pull-secret` CR in the same namespace as the `SiteConfig` CR.
|
||||
|
||||
|`spec.clusterImageSetNameRef`
|
||||
|Configure the image set available on the hub cluster for all the clusters in the site.
|
||||
To see the list of supported versions on your hub cluster, run `oc get clusterimagesets`.
|
||||
|
||||
|`installConfigOverrides`
|
||||
a|Set the `installConfigOverrides` field to enable or disable optional components prior to cluster installation.
|
||||
[IMPORTANT]
|
||||
====
|
||||
Use the reference configuration as specified in the example `SiteConfig` CR.
|
||||
Adding additional components back into the system might require additional reserved CPU capacity.
|
||||
====
|
||||
|
||||
|`spec.clusters.clusterImageSetNameRef`
|
||||
|Specifies the cluster image set used to deploy an individual cluster. If defined, it overrides the `spec.clusterImageSetNameRef` at the site level.
|
||||
|
||||
|`spec.clusters.clusterLabels`
|
||||
|Configure cluster labels to correspond to the binding rules in the `PolicyGenerator` or `PolicyGentemplate` CRs that you define.
|
||||
`PolicyGenerator` CRs use the `policyDefaults.placement.labelSelector` field.
|
||||
`PolicyGentemplate` CRs use the `spec.bindingRules` field.
|
||||
|
||||
For example, `acmpolicygenerator/acm-common-ranGen.yaml` applies to all clusters with `common: true` set, `acmpolicygenerator/acm-group-du-sno-ranGen.yaml` applies to all clusters with `group-du-sno: ""` set.
|
||||
|
||||
|`spec.clusters.crTemplates.KlusterletAddonConfig`
|
||||
|Optional. Set `KlusterletAddonConfig` to `KlusterletAddonConfigOverride.yaml` to override the default `KlusterletAddonConfig` that is created for the cluster.
|
||||
|
||||
|`spec.clusters.diskEncryption`
|
||||
a|Configure this field to enable disk encryption with Trusted Platform Module (TPM) and Platform Configuration Registers (PCRs) protection. For more information, see "About disk encryption with TPM and PCR protection".
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
Configuring disk encryption by using the `diskEncryption` field in the `SiteConfig` CR is a Technology Preview feature in {product-title} 4.21.
|
||||
====
|
||||
|
||||
|`spec.clusters.diskEncryption.type`
|
||||
|Set the disk encryption type to `tpm2`.
|
||||
|
||||
|`spec.clusters.diskEncryption.tpm2`
|
||||
|Configure the Platform Configuration Registers (PCRs) protection for disk encryption.
|
||||
|
||||
|`spec.clusters.diskEncryption.tpm2.pcrList`
|
||||
|Configure the list of Platform Configuration Registers (PCRs) to be used for disk encryption. You must use PCR registers 1 and 7.
|
||||
|
||||
|`spec.clusters.nodes.hostName`
|
||||
|For single-node deployments, define a single host.
|
||||
For three-node deployments, define three hosts.
|
||||
For standard deployments, define three hosts with `role: master` and two or more hosts defined with `role: worker`.
|
||||
|
||||
|`spec.clusters.nodes.nodeLabels`
|
||||
|Specify custom roles for your nodes in your managed clusters. These additional roles are not used by any {product-title} components, only by the user. When you add a custom role, it can be associated with a custom machine config pool that references a specific configuration for that role. Adding custom labels or roles during installation makes the deployment process more effective and prevents the need for additional reboots after the installation is complete.
|
||||
|
||||
|`spec.clusters.nodes.automatedCleaningMode`
|
||||
|Optional. Uncomment and set the value to `metadata` to enable the removal of the disk's partitioning table only, without fully wiping the disk. The default value is `disabled`.
|
||||
|
||||
|`spec.clusters.nodes.bmcAddress`
|
||||
|BMC address that you use to access the host. Applies to all cluster types. {ztp} supports iPXE and virtual media booting by using Redfish or IPMI protocols. To use iPXE booting, you must use {rh-rhacm} 2.8 or later. For more information about BMC addressing, see the "Additional resources" section.
|
||||
|
||||
|`spec.clusters.nodes.bmcAddress`
|
||||
a|BMC address that you use to access the host.
|
||||
Applies to all cluster types.
|
||||
{ztp} supports iPXE and virtual media booting by using Redfish or IPMI protocols.
|
||||
To use iPXE booting, you must use {rh-rhacm} 2.8 or later.
|
||||
For more information about BMC addressing, see the "Additional resources" section.
|
||||
[NOTE]
|
||||
====
|
||||
In far edge Telco use cases, only virtual media is supported for use with {ztp}.
|
||||
====
|
||||
|
||||
|`spec.clusters.nodes.bmcCredentialsName`
|
||||
|Configure the `bmh-secret` CR that you separately create with the host BMC credentials.
|
||||
When creating the `bmh-secret` CR, use the same namespace as the `SiteConfig` CR that provisions the host.
|
||||
|
||||
|`spec.clusters.nodes.bootMode`
|
||||
|Set the boot mode for the host to `UEFI`.
|
||||
The default value is `UEFI`. Use `UEFISecureBoot` to enable secure boot on the host.
|
||||
|
||||
|`spec.clusters.nodes.rootDeviceHints`
|
||||
|Specifies the device for deployment. Identifiers that are stable across reboots are recommended. For example, `wwn: <disk_wwn>` or `deviceName: /dev/disk/by-path/<device_path>`. Values that use `by-path` are preferred. For a detailed list of stable identifiers, see the "About root device hints" section.
|
||||
|
||||
|`spec.clusters.nodes.ignitionConfigOverride`
|
||||
|Optional. Use this field to assign partitions for persistent storage.
|
||||
Adjust disk ID and size to the specific hardware.
|
||||
|
||||
|`spec.clusters.nodes.nodeNetwork`
|
||||
|Configure the network settings for the node.
|
||||
|
||||
|`spec.clusters.nodes.nodeNetwork.config.interfaces.ipv6`
|
||||
|Configure the IPv6 address for the host.
|
||||
For {sno} clusters with static IP addresses, the node-specific API and Ingress IPs should be the same.
|
||||
|====
|
||||
@@ -6,7 +6,7 @@
|
||||
[id="ztp-troubleshooting-ztp-gitops-installation-crs_{context}"]
|
||||
= Troubleshooting {ztp} by validating the installation CRs
|
||||
|
||||
The ArgoCD pipeline uses the `SiteConfig` and `PolicyGenerator` or `PolicyGentemplate` custom resources (CRs) to generate the cluster configuration CRs and {rh-rhacm-first} policies. Use the following steps to troubleshoot issues that might occur during this process.
|
||||
The ArgoCD pipeline uses the `ClusterInstance` and `PolicyGenerator` or `PolicyGentemplate` custom resources (CRs) to generate the cluster configuration CRs and {rh-rhacm-first} policies. Use the following steps to troubleshoot issues that might occur during this process.
|
||||
|
||||
.Prerequisites
|
||||
|
||||
@@ -23,9 +23,9 @@ The ArgoCD pipeline uses the `SiteConfig` and `PolicyGenerator` or `PolicyGentem
|
||||
$ oc get AgentClusterInstall -n <cluster_name>
|
||||
----
|
||||
+
|
||||
If no object is returned, use the following steps to troubleshoot the ArgoCD pipeline flow from `SiteConfig` files to the installation CRs.
|
||||
If no object is returned, use the following steps to troubleshoot the ArgoCD pipeline flow from `ClusterInstance` files to the installation CRs.
|
||||
|
||||
. Verify that the `ManagedCluster` CR was generated using the `SiteConfig` CR on the hub cluster:
|
||||
. Verify that the `ManagedCluster` CR was generated using the `ClusterInstance` CR on the hub cluster:
|
||||
+
|
||||
[source,terminal]
|
||||
----
|
||||
@@ -38,44 +38,3 @@ $ oc get managedcluster
|
||||
----
|
||||
$ oc get applications.argoproj.io -n openshift-gitops clusters -o yaml
|
||||
----
|
||||
|
||||
.. To identify error logs for the managed cluster, inspect the `status.operationState.syncResult.resources` field. For example, if an invalid value is assigned to the `extraManifestPath` in the `SiteConfig` CR, an error similar to the following is generated:
|
||||
+
|
||||
[source,text]
|
||||
----
|
||||
syncResult:
|
||||
resources:
|
||||
- group: ran.openshift.io
|
||||
kind: SiteConfig
|
||||
message: The Kubernetes API could not find ran.openshift.io/SiteConfig for
|
||||
requested resource spoke-sno/spoke-sno. Make sure the "SiteConfig" CRD is
|
||||
installed on the destination cluster
|
||||
----
|
||||
|
||||
.. To see a more detailed `SiteConfig` error, complete the following steps:
|
||||
|
||||
... In the Argo CD dashboard, click the *SiteConfig* resource that Argo CD is trying to sync.
|
||||
|
||||
... Check the *DESIRED MANIFEST* tab to find the `siteConfigError` field.
|
||||
+
|
||||
[source,text]
|
||||
----
|
||||
siteConfigError: >- Error: could not build the entire SiteConfig defined by /tmp/kust-plugin-config-1081291903: stat sno-extra-manifest: no such file or directory
|
||||
----
|
||||
|
||||
.. Check the `Status.Sync` field. If there are log errors, the `Status.Sync` field could indicate an `Unknown` error:
|
||||
+
|
||||
[source,text]
|
||||
----
|
||||
Status:
|
||||
Sync:
|
||||
Compared To:
|
||||
Destination:
|
||||
Namespace: clusters-sub
|
||||
Server: https://kubernetes.default.svc
|
||||
Source:
|
||||
Path: sites-config
|
||||
Repo URL: https://git.com/ran-sites/siteconfigs/.git
|
||||
Target Revision: master
|
||||
Status: Unknown
|
||||
----

@@ -41,8 +41,3 @@ include::snippets/ztp_PerformanceProfile.yaml[]
----

include::snippets/performance-profile-workload-partitioning.adoc[]

[role="_additional-resources"]
.Additional resources

* xref:../edge_computing/ztp-reference-cluster-configuration-for-vdu.adoc#ztp-sno-du-enabling-workload-partitioning_sno-configure-for-vdu[Recommended single-node OpenShift cluster configuration for vDU application workloads -> Workload partitioning]

@@ -20,7 +20,7 @@ spec:
<1> The name of the `{policy-gen-crs}` object. This name is also used as part of the names
for the `placementBinding`, `placementRule`, and `policy` that are created in the requested `namespace`.
<2> This value should match the `namespace` used in the group `policy-gen-crs`.
<3> The `group-du-*` label defined in `bindingRules` must exist in the `SiteConfig` files.
<3> The `group-du-*` label defined in `bindingRules` must exist in the `ClusterInstance` files.
<4> The label defined in `bindingExcludedRules` must be `ztp-done:`. The `ztp-done` label is used in coordination with the {cgu-operator-full}.
<5> `mcp` defines the `MachineConfigPool` object that is used in the source file `validatorCRs/informDuValidator.yaml`. It should be `master` for single-node and three-node cluster deployments and `worker` for standard cluster deployments.
<6> Optional. The default value is `inform`.
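As a reference for the `bindingRules` callout, the labels that the rules match against are set in the `ClusterInstance` CR under `spec.extraLabels.ManagedCluster`. The following minimal excerpt reuses the label names from the example later in this commit:

[source,yaml]
----
spec:
  extraLabels:
    ManagedCluster:
      common: "true"
      group-du-sno: ""
      sites: "example-sno"
----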

@@ -1,154 +1,152 @@
# example-node1-bmh-secret & assisted-deployment-pull-secret need to be created under same namespace example-sno
# example-node1-bmh-secret & assisted-deployment-pull-secret need to be created under same namespace example-ai-sno
---
apiVersion: ran.openshift.io/v1
kind: SiteConfig
apiVersion: siteconfig.open-cluster-management.io/v1alpha1
kind: ClusterInstance
metadata:
name: "example-sno"
namespace: "example-sno"
name: "example-ai-sno"
namespace: "example-ai-sno"
spec:
baseDomain: "example.com"
pullSecretRef:
name: "assisted-deployment-pull-secret"
clusterImageSetNameRef: "openshift-4.18"
clusterImageSetNameRef: "openshift-4.21"
sshPublicKey: "ssh-rsa AAAA..."
clusters:
- clusterName: "example-sno"
networkType: "OVNKubernetes"
# installConfigOverrides is a generic way of passing install-config
# parameters through the siteConfig. The 'capabilities' field configures
# the composable openshift feature. In this 'capabilities' setting, we
# remove all the optional set of components.
# Notes:
# - OperatorLifecycleManager is needed for 4.15 and later
# - NodeTuning is needed for 4.13 and later, not for 4.12 and earlier
# - Ingress is needed for 4.16 and later
installConfigOverrides: |
clusterName: "example-ai-sno"
networkType: "OVNKubernetes"
# installConfigOverrides is a generic way of passing install-config
# parameters through the siteConfig. The 'capabilities' field configures
# the composable openshift feature. In this 'capabilities' setting, we
# remove all the optional set of components.
# Notes:
# - OperatorLifecycleManager is needed for 4.15 and later
# - NodeTuning is needed for 4.13 and later, not for 4.12 and earlier
# - Ingress is needed for 4.16 and later
installConfigOverrides: |
{
"capabilities": {
"baselineCapabilitySet": "None",
"additionalEnabledCapabilities": [
"NodeTuning",
"OperatorLifecycleManager",
"Ingress"
]
}
}
# Include references to extraManifest ConfigMaps.
extraManifestsRefs:
- name: sno-extra-manifest-configmap
extraLabels:
ManagedCluster:
# These example cluster labels correspond to the bindingRules in the PolicyGenTemplate examples
du-profile: "latest"
# These example cluster labels correspond to the bindingRules in the PolicyGenTemplate examples in ../policygentemplates:
# ../policygentemplates/common-ranGen.yaml will apply to all clusters with 'common: true'
common: "true"
# ../policygentemplates/group-du-sno-ranGen.yaml will apply to all clusters with 'group-du-sno: ""'
group-du-sno: ""
# ../policygentemplates/example-sno-site.yaml will apply to all clusters with 'sites: "example-sno"'
# Normally this should match or contain the cluster name so it only applies to a single cluster
sites : "example-sno"
clusterNetwork:
- cidr: 1001:1::/48
hostPrefix: 64
machineNetwork:
- cidr: 1111:2222:3333:4444::/64
serviceNetwork:
- cidr: 1001:2::/112
additionalNTPSources:
- 1111:2222:3333:4444::2
# Initiates the cluster for workload partitioning. Setting specific reserved/isolated CPUSets is done via PolicyTemplate
# please see Workload Partitioning Feature for a complete guide.
cpuPartitioningMode: AllNodes
templateRefs:
- name: ai-cluster-templates-v1
namespace: open-cluster-management
nodes:
- hostName: "example-node1.example.com"
role: "master"
bmcAddress: "idrac-virtualmedia+https://[1111:2222:3333:4444::bbbb:1]/redfish/v1/Systems/System.Embedded.1"
bmcCredentialsName:
name: "example-node1-bmh-secret"
bootMACAddress: "AA:BB:CC:DD:EE:11"
# Use UEFISecureBoot to enable secure boot, UEFI to disable.
bootMode: "UEFISecureBoot"
rootDeviceHints:
deviceName: "/dev/disk/by-path/pci-0000:01:00.0-scsi-0:2:0:0"
# disk partition at `/var/lib/containers` with ignitionConfigOverride. Some values must be updated. See DiskPartitionContainer.md in argocd folder for more details
ignitionConfigOverride: |
{
"capabilities": {
"baselineCapabilitySet": "None",
"additionalEnabledCapabilities": [
"NodeTuning",
"OperatorLifecycleManager",
"Ingress"
"ignition": {
"version": "3.2.0"
},
"storage": {
"disks": [
{
"device": "/dev/disk/by-path/pci-0000:01:00.0-scsi-0:2:0:0",
"partitions": [
{
"label": "var-lib-containers",
"sizeMiB": 0,
"startMiB": 250000
}
],
"wipeTable": false
}
],
"filesystems": [
{
"device": "/dev/disk/by-partlabel/var-lib-containers",
"format": "xfs",
"mountOptions": [
"defaults",
"prjquota"
],
"path": "/var/lib/containers",
"wipeFilesystem": true
}
]
},
"systemd": {
"units": [
{
"contents": "# Generated by Butane\n[Unit]\nRequires=systemd-fsck@dev-disk-by\\x2dpartlabel-var\\x2dlib\\x2dcontainers.service\nAfter=systemd-fsck@dev-disk-by\\x2dpartlabel-var\\x2dlib\\x2dcontainers.service\n\n[Mount]\nWhere=/var/lib/containers\nWhat=/dev/disk/by-partlabel/var-lib-containers\nType=xfs\nOptions=defaults,prjquota\n\n[Install]\nRequiredBy=local-fs.target",
"enabled": true,
"name": "var-lib-containers.mount"
}
]
}
}
# It is strongly recommended to include crun manifests as part of the additional install-time manifests for 4.13+.
# The crun manifests can be obtained from source-crs/optional-extra-manifest/ and added to the git repo ie.sno-extra-manifest.
# extraManifestPath: sno-extra-manifest
clusterLabels:
# These example cluster labels correspond to the bindingRules in the PolicyGenTemplate examples
du-profile: "latest"
# These example cluster labels correspond to the bindingRules in the PolicyGenTemplate examples in ../policygentemplates:
# ../acmpolicygenerator/common-ranGen.yaml will apply to all clusters with 'common: true'
common: true
# ../policygentemplates/group-du-sno-ranGen.yaml will apply to all clusters with 'group-du-sno: ""'
group-du-sno: ""
# ../policygentemplates/example-sno-site.yaml will apply to all clusters with 'sites: "example-sno"'
# Normally this should match or contain the cluster name so it only applies to a single cluster
sites: "example-sno"
clusterNetwork:
- cidr: 1001:1::/48
hostPrefix: 64
machineNetwork:
- cidr: 1111:2222:3333:4444::/64
serviceNetwork:
- 1001:2::/112
additionalNTPSources:
- 1111:2222:3333:4444::2
# Initiates the cluster for workload partitioning. Setting specific reserved/isolated CPUSets is done via PolicyTemplate
# please see Workload Partitioning Feature for a complete guide.
cpuPartitioningMode: AllNodes
# Optionally; This can be used to override the KlusterletAddonConfig that is created for this cluster:
#crTemplates:
# KlusterletAddonConfig: "KlusterletAddonConfigOverride.yaml"
nodes:
- hostName: "example-node1.example.com"
role: "master"
# Optionally; This can be used to configure desired BIOS setting on a host:
#biosConfigRef:
# filePath: "example-hw.profile"
bmcAddress: "idrac-virtualmedia+https://[1111:2222:3333:4444::bbbb:1]/redfish/v1/Systems/System.Embedded.1"
bmcCredentialsName:
name: "example-node1-bmh-secret"
bootMACAddress: "AA:BB:CC:DD:EE:11"
# Use UEFISecureBoot to enable secure boot.
bootMode: "UEFISecureBoot"
rootDeviceHints:
deviceName: "/dev/disk/by-path/pci-0000:01:00.0-scsi-0:2:0:0"
#crTemplates:
# BareMetalHost: "bmhOverride.yaml"
# disk partition at `/var/lib/containers` with ignitionConfigOverride. Some values must be updated. See DiskPartitionContainer.md for more details
{
"ignition": {
"version": "3.2.0"
},
"storage": {
"disks": [
{
"device": "/dev/disk/by-id/wwn-0x6b07b250ebb9d0002a33509f24af1f62",
"partitions": [
{
"label": "var-lib-containers",
"sizeMiB": 0,
"startMiB": 250000
}
],
"wipeTable": false
}
],
"filesystems": [
{
"device": "/dev/disk/by-partlabel/var-lib-containers",
"format": "xfs",
"mountOptions": [
"defaults",
"prjquota"
],
"path": "/var/lib/containers",
"wipeFilesystem": true
}
]
},
"systemd": {
"units": [
{
"contents": "# Generated by Butane\n[Unit]\nRequires=systemd-fsck@dev-disk-by\\x2dpartlabel-var\\x2dlib\\x2dcontainers.service\nAfter=systemd-fsck@dev-disk-by\\x2dpartlabel-var\\x2dlib\\x2dcontainers.service\n\n[Mount]\nWhere=/var/lib/containers\nWhat=/dev/disk/by-partlabel/var-lib-containers\nType=xfs\nOptions=defaults,prjquota\n\n[Install]\nRequiredBy=local-fs.target",
"enabled": true,
"name": "var-lib-containers.mount"
}
]
}
}
nodeNetwork:
interfaces:
- name: eno1
macAddress: "AA:BB:CC:DD:EE:11"
nodeNetwork:
interfaces:
- name: eno1
macAddress: "AA:BB:CC:DD:EE:11"
config:
interfaces:
- name: eno1
type: ethernet
state: up
ipv4:
enabled: false
ipv6:
enabled: true
address:
# For SNO sites with static IP addresses, the node-specific,
# API and Ingress IPs should all be the same and configured on
# the interface
- ip: 1111:2222:3333:4444::aaaa:1
prefix-length: 64
dns-resolver:
config:
interfaces:
- name: eno1
type: ethernet
state: up
ipv4:
enabled: false
ipv6:
enabled: true
address:
# For SNO sites with static IP addresses, the node-specific,
# API and Ingress IPs should all be the same and configured on
# the interface
- ip: 1111:2222:3333:4444::aaaa:1
prefix-length: 64
dns-resolver:
config:
search:
- example.com
server:
- 1111:2222:3333:4444::2
routes:
config:
- destination: ::/0
next-hop-interface: eno1
next-hop-address: 1111:2222:3333:4444::1
table-id: 254
search:
- example.com
server:
- 1111:2222:3333:4444::2
routes:
config:
- destination: ::/0
next-hop-interface: eno1
next-hop-address: 1111:2222:3333:4444::1
table-id: 254
templateRefs:
- name: ai-node-templates-v1
namespace: open-cluster-management
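As a quick sanity check before committing a `ClusterInstance` file like this one to the Git repository, you can run a server-side dry run against the hub cluster to catch schema errors; the file name here is illustrative:

[source,terminal]
----
$ oc apply --dry-run=server -f example-sno.yaml
----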