From 3e5160ff84be5f573dbda863509ee86e17e8eba3 Mon Sep 17 00:00:00 2001 From: dfitzmau Date: Fri, 7 Nov 2025 14:14:06 +0000 Subject: [PATCH] OSDOCS-17072-batch4 --- modules/installation-complete-user-infra.adoc | 36 ++++---- ...llation-gcp-user-infra-adding-ingress.adoc | 5 ++ ...orting-common-variables-arm-templates.adoc | 72 ++++++++++----- ...user-infra-exporting-common-variables.adoc | 81 ++++++++++++++--- ...anced-customizing-live-network-config.adoc | 19 ++-- ...ines-advanced-enabling-serial-console.adoc | 7 +- ...allation-user-infra-machines-advanced.adoc | 52 +++++++---- .../installation-user-infra-machines-iso.adoc | 18 ++-- ...acing-a-bare-metal-control-plane-node.adoc | 4 + modules/kmm-gathering-data-for-kmm-hub.adoc | 8 +- modules/kmm-gathering-data-for-kmm.adoc | 8 +- modules/kmm-running-depmod.adoc | 9 +- modules/log6x-oc-explain.adoc | 12 +++ modules/machineconfig-modify-journald.adoc | 19 ++++ ...anually-restoring-cluster-etcd-backup.adoc | 87 ++++++++++--------- ...icroshift-disconnected-host-procedure.adoc | 14 ++- 16 files changed, 312 insertions(+), 139 deletions(-) diff --git a/modules/installation-complete-user-infra.adoc b/modules/installation-complete-user-infra.adoc index 6658042621..6e164aefa7 100644 --- a/modules/installation-complete-user-infra.adoc +++ b/modules/installation-complete-user-infra.adoc @@ -49,6 +49,7 @@ ifeval::["{context}" == "installing-restricted-networks-ibm-power"] :ibm-power: :restricted: endif::[] + :_mod-docs-content-type: PROCEDURE [id="installation-complete-user-infra_{context}"] = Completing installation on user-provisioned infrastructure @@ -133,6 +134,7 @@ The command succeeds when the Cluster Version Operator finishes deploying the ==== . Confirm that the Kubernetes API server is communicating with the pods. ++ .. 
To view a list of all pods, use the following command: + [source,terminal] @@ -149,20 +151,17 @@ openshift-apiserver apiserver-67b9g openshift-apiserver apiserver-ljcmx 1/1 Running 0 1m openshift-apiserver apiserver-z25h4 1/1 Running 0 2m openshift-authentication-operator authentication-operator-69d5d8bf84-vh2n8 1/1 Running 0 5m -... ---- - ++ .. View the logs for a pod that is listed in the output of the previous command by using the following command: + [source,terminal] ---- -$ oc logs -n <1> +$ oc logs -n ---- -<1> Specify the pod name and namespace, as shown in the output of the previous -command. +* ``: Specify the pod name and namespace, as shown in the output of an earlier command. + -If the pod logs display, the Kubernetes API server can communicate with the -cluster machines. +If the pod logs display, the Kubernetes API server can communicate with the cluster machines. ifndef::ibm-power[] . For an installation with Fibre Channel Protocol (FCP), additional steps are required to enable multipathing. Do not enable multipathing during installation. @@ -187,23 +186,22 @@ If you have enabled secure boot during the {product-title} bootstrap process, th [source,terminal] ---- $ oc debug node/ -chroot /host ----- -+ -. Confirm that secure boot is enabled by running the following command: -+ -[source,terminal] ----- -$ cat /sys/firmware/ipl/secure ---- + .Example output [source,terminal] ---- -1 <1> +chroot /host +---- + +. Confirm that secure boot is enabled by running the following command. Example output states `1` if secure boot is enabled and `0` if secure boot is not enabled. ++ +[source,terminal] +---- +$ cat /sys/firmware/ipl/secure ---- -<1> The value is `1` if secure boot is enabled and `0` if secure boot is not enabled. endif::ibm-z,ibm-z-lpar[] + ifdef::ibm-z-lpar[] . 
List the re-IPL configuration by running the following command: + @@ -213,7 +211,7 @@ ifdef::ibm-z-lpar[] ---- + .Example output for an FCP disk -[source,terminal] +[source,terminal,subs="attributes+"] ---- Re-IPL type: fcp WWPN: 0x500507630400d1e3 @@ -227,7 +225,7 @@ clear: 0 ---- + .Example output for a DASD disk -[source,terminal] +[source,terminal,subs="attributes+"] ---- for DASD output: Re-IPL type: ccw diff --git a/modules/installation-gcp-user-infra-adding-ingress.adoc b/modules/installation-gcp-user-infra-adding-ingress.adoc index 5acba31df6..2cd991276e 100644 --- a/modules/installation-gcp-user-infra-adding-ingress.adoc +++ b/modules/installation-gcp-user-infra-adding-ingress.adoc @@ -53,13 +53,16 @@ router-default LoadBalancer 172.30.18.154 35.233.157.184 80:32288/TCP,44 ---- . Add the A record to your zones: ++ ** To use A records: ++ ... Export the variable for the router IP address: + [source,terminal] ---- $ export ROUTER_IP=`oc -n openshift-ingress get service router-default --no-headers | awk '{print $4}'` ---- ++ ... 
Add the A record to the private zones: + ifndef::shared-vpc[] @@ -71,6 +74,7 @@ $ gcloud dns record-sets transaction add ${ROUTER_IP} --name \*.apps.${CLUSTER_N $ gcloud dns record-sets transaction execute --zone ${INFRA_ID}-private-zone ---- endif::shared-vpc[] ++ ifdef::shared-vpc[] [source,terminal] ---- @@ -91,6 +95,7 @@ $ gcloud dns record-sets transaction add ${ROUTER_IP} --name \*.apps.${CLUSTER_N $ gcloud dns record-sets transaction execute --zone ${BASE_DOMAIN_ZONE_NAME} ---- endif::shared-vpc[] ++ ifdef::shared-vpc[] [source,terminal] ---- diff --git a/modules/installation-user-infra-exporting-common-variables-arm-templates.adoc b/modules/installation-user-infra-exporting-common-variables-arm-templates.adoc index ab7868d935..cbf9971fdd 100644 --- a/modules/installation-user-infra-exporting-common-variables-arm-templates.adoc +++ b/modules/installation-user-infra-exporting-common-variables-arm-templates.adoc @@ -19,14 +19,11 @@ endif::[] [id="installation-user-infra-exporting-common-variables-arm-templates_{context}"] = Exporting common variables for ARM templates -You must export a common set of variables that are used with the provided Azure -Resource Manager (ARM) templates used to assist in completing a user-provided -infrastructure install on Microsoft {cp}. +You must export a common set of variables that are used with the provided Azure Resource Manager (ARM) templates used to assist in completing a user-provided infrastructure install on Microsoft {cp}. [NOTE] ==== -Specific ARM templates can also require additional exported variables, which are -detailed in their related procedures. +Specific ARM templates can also require additional exported variables, which are detailed in their related procedures. ==== .Prerequisites @@ -35,36 +32,51 @@ detailed in their related procedures. .Procedure -. Export common variables found in the `install-config.yaml` to be used by the -provided ARM templates: +. 
Export common variables found in the `install-config.yaml` to be used by the provided ARM templates:
+
[source,terminal]
----
-$ export CLUSTER_NAME=<1>
-$ export AZURE_REGION=<2>
-$ export SSH_KEY=<3>
-$ export BASE_DOMAIN=<4>
-$ export BASE_DOMAIN_RESOURCE_GROUP=<5>
+$ export CLUSTER_NAME=<cluster_name>
+----
+* `<cluster_name>`: The value of the `.metadata.name` attribute from the `install-config.yaml` file.
++
+[source,terminal]
+----
+$ export AZURE_REGION=<azure_region>
----
-<1> The value of the `.metadata.name` attribute from the `install-config.yaml` file.
ifndef::ash[]
-<2> The region to deploy the cluster into, for example `centralus`. This is the value of the `.platform.azure.region` attribute from the `install-config.yaml` file.
+* `<azure_region>`: The region to deploy the cluster into, for example `centralus`. This is the value of the `.platform.azure.region` attribute from the `install-config.yaml` file.
endif::ash[]
ifdef::ash[]
-<2> The region to deploy the cluster into. This is the value of the `.platform.azure.region` attribute from the `install-config.yaml` file.
+* `<azure_region>`: The region to deploy the cluster into. This is the value of the `.platform.azure.region` attribute from the `install-config.yaml` file.
endif::ash[]
-<3> The SSH RSA public key file as a string. You must enclose the SSH key in quotes since it contains spaces. This is the value of the `.sshKey` attribute from the `install-config.yaml` file.
++
+[source,terminal]
+----
+$ export SSH_KEY=<ssh_key>
+----
+* `<ssh_key>`: The SSH RSA public key file as a string. You must enclose the SSH key in quotes since it contains spaces. This is the value of the `.sshKey` attribute from the `install-config.yaml` file.
++
+[source,terminal]
+----
+$ export BASE_DOMAIN=<base_domain>
+----
ifndef::ash[]
-<4> The base domain to deploy the cluster to. The base domain corresponds to the public DNS zone that you created for your cluster. This is the value of the `.baseDomain` attribute from the `install-config.yaml` file.
+* `<base_domain>`: The base domain to deploy the cluster to. 
The base domain corresponds to the public DNS zone that you created for your cluster. This is the value of the `.baseDomain` attribute from the `install-config.yaml` file.
endif::ash[]
ifdef::ash[]
-<4> The base domain to deploy the cluster to. The base domain corresponds to the DNS zone that you created for your cluster. This is the value of the `.baseDomain` attribute from the `install-config.yaml` file.
+* `<base_domain>`: The base domain to deploy the cluster to. The base domain corresponds to the DNS zone that you created for your cluster. This is the value of the `.baseDomain` attribute from the `install-config.yaml` file.
endif::ash[]
++
+[source,terminal]
+----
+$ export BASE_DOMAIN_RESOURCE_GROUP=<base_domain_resource_group>
+----
ifndef::ash[]
-<5> The resource group where the public DNS zone exists. This is the value of the `.platform.azure.baseDomainResourceGroupName` attribute from the `install-config.yaml` file.
+* `<base_domain_resource_group>`: The resource group where the public DNS zone exists. This is the value of the `.platform.azure.baseDomainResourceGroupName` attribute from the `install-config.yaml` file.
endif::ash[]
ifdef::ash[]
-<5> The resource group where the DNS zone exists. This is the value of the `.platform.azure.baseDomainResourceGroupName` attribute from the `install-config.yaml` file.
+* `<base_domain_resource_group>`: The resource group where the DNS zone exists. This is the value of the `.platform.azure.baseDomainResourceGroupName` attribute from the `install-config.yaml` file. 
endif::ash[]
+
For example:
+
[source,terminal]
----
$ export CLUSTER_NAME=test-cluster
+----
++
+[source,terminal]
+----
$ export AZURE_REGION=centralus
+----
++
+[source,terminal]
+----
$ export SSH_KEY="ssh-rsa xxx/xxx/xxx= user@email.com"
+----
++
+[source,terminal]
+----
$ export BASE_DOMAIN=example.com
+----
++
+[source,terminal]
+----
$ export BASE_DOMAIN_RESOURCE_GROUP=ocp-cluster
----

@@ -82,9 +110,9 @@ $ export BASE_DOMAIN_RESOURCE_GROUP=ocp-cluster
+
[source,terminal]
----
-$ export KUBECONFIG=/auth/kubeconfig <1>
+$ export KUBECONFIG=<installation_directory>/auth/kubeconfig
----
-<1> For ``, specify the path to the directory that you stored the installation files in.
+* `<installation_directory>`: Specify the path to the directory that you stored the installation files in.

ifeval::["{context}" == "installing-azure-user-infra"]
:!cp:
diff --git a/modules/installation-user-infra-exporting-common-variables.adoc b/modules/installation-user-infra-exporting-common-variables.adoc
index 4b64b29805..9663057cc9 100644
--- a/modules/installation-user-infra-exporting-common-variables.adoc
+++ b/modules/installation-user-infra-exporting-common-variables.adoc
@@ -44,8 +44,7 @@ infrastructure install on {cp-first}.

[NOTE]
====
-Specific {cp-template} templates can also require additional exported
-variables, which are detailed in their related procedures.
+Specific {cp-template} templates can also require additional exported variables, which are detailed in their related procedures. 
==== .Procedure @@ -57,35 +56,93 @@ ifndef::shared-vpc[] [source,terminal] ---- $ export BASE_DOMAIN='' +---- ++ +[source,terminal] +---- $ export BASE_DOMAIN_ZONE_NAME='' +---- ++ +[source,terminal] +---- $ export NETWORK_CIDR='10.0.0.0/16' +---- ++ +[source,terminal] +---- $ export MASTER_SUBNET_CIDR='10.0.0.0/17' +---- ++ +[source,terminal] +---- $ export WORKER_SUBNET_CIDR='10.0.128.0/17' - -$ export KUBECONFIG=/auth/kubeconfig <1> +---- ++ +[source,terminal] +---- +$ export KUBECONFIG=/auth/kubeconfig +---- +* ``: Specify the path to the directory that you stored the installation files in. ++ +[source,terminal] +---- $ export CLUSTER_NAME=`jq -r .clusterName /metadata.json` +---- ++ +[source,terminal] +---- $ export INFRA_ID=`jq -r .infraID /metadata.json` +---- ++ +[source,terminal] +---- $ export PROJECT_NAME=`jq -r .gcp.projectID /metadata.json` +---- ++ +[source,terminal] +---- $ export REGION=`jq -r .gcp.region /metadata.json` ---- -<1> For ``, specify the path to the directory that you stored the installation files in. endif::shared-vpc[] -//you need some of these variables for the VPC, and you do that ifdef::shared-vpc[] [source,terminal] ---- -$ export BASE_DOMAIN='' <1> -$ export BASE_DOMAIN_ZONE_NAME='' <1> +$ export BASE_DOMAIN='' +---- +* ``: Supply the values for the host project. ++ +[source,terminal] +---- +$ export BASE_DOMAIN_ZONE_NAME='' +---- +* ``: Supply the values for the host project. ++ +[source,terminal] +---- $ export NETWORK_CIDR='10.0.0.0/16' - -$ export KUBECONFIG=/auth/kubeconfig <2> +---- ++ +[source,terminal] +---- +$ export KUBECONFIG=/auth/kubeconfig +---- +* ``: Specify the path to the directory that you stored the installation files in. 
++ +[source,terminal] +---- $ export CLUSTER_NAME=`jq -r .clusterName /metadata.json` +---- ++ +[source,terminal] +---- $ export INFRA_ID=`jq -r .infraID /metadata.json` +---- ++ +[source,terminal] +---- $ export PROJECT_NAME=`jq -r .gcp.projectID /metadata.json` ---- -<1> Supply the values for the host project. -<2> For ``, specify the path to the directory that you stored the installation files in. endif::shared-vpc[] ifeval::["{context}" == "installing-gcp-user-infra-vpc"] diff --git a/modules/installation-user-infra-machines-advanced-customizing-live-network-config.adoc b/modules/installation-user-infra-machines-advanced-customizing-live-network-config.adoc index e08f2cefce..e61c0c983d 100644 --- a/modules/installation-user-infra-machines-advanced-customizing-live-network-config.adoc +++ b/modules/installation-user-infra-machines-advanced-customizing-live-network-config.adoc @@ -20,30 +20,30 @@ When creating a connection profile, you must use a `.nmconnection` filename exte . Create a connection profile for a bonded interface. For example, create the `bond0.nmconnection` file in your local directory with the following content: + -[source,ini] +[source,terminal,subs="quotes,verbatim"] ---- -[connection] +/[connection] id=bond0 type=bond interface-name=bond0 multi-connect=1 -[bond] +/[bond] miimon=100 mode=active-backup -[ipv4] +/[ipv4] method=auto -[ipv6] +/[ipv6] method=auto ---- . Create a connection profile for a secondary interface to add to the bond. For example, create the `bond0-proxy-em1.nmconnection` file in your local directory with the following content: + -[source,ini] +[source,terminal,subs="quotes,verbatim"] ---- -[connection] +/[connection] id=em1 type=ethernet interface-name=em1 @@ -54,9 +54,9 @@ slave-type=bond . Create a connection profile for a secondary interface to add to the bond. 
For example, create the `bond0-proxy-em2.nmconnection` file in your local directory with the following content: + -[source,ini] +[source,terminal,subs="quotes,verbatim"] ---- -[connection] +/[connection] id=em2 type=ethernet interface-name=em2 @@ -93,3 +93,4 @@ $ coreos-installer pxe customize rhcos--live-initramfs.x86_64.img \ endif::[] + Network settings are applied to the live system and are carried over to the destination system. + diff --git a/modules/installation-user-infra-machines-advanced-enabling-serial-console.adoc b/modules/installation-user-infra-machines-advanced-enabling-serial-console.adoc index 2655fb7084..92ef9bbb9b 100644 --- a/modules/installation-user-infra-machines-advanced-enabling-serial-console.adoc +++ b/modules/installation-user-infra-machines-advanced-enabling-serial-console.adoc @@ -20,23 +20,26 @@ By default, the {op-system-first} serial console is disabled and all output is w . Run the `coreos-installer` command to install the system, adding the `--console` option once to specify the graphical console, and a second time to specify the serial console: + +ifndef::restricted[] [source,terminal] ---- -ifndef::restricted[] $ coreos-installer install \ --console=tty0 \//<1> --console=ttyS0, \//<2> --ignition-url=http://host/worker.ign /dev/disk/by-id/scsi- +---- endif::[] ifdef::restricted[] +[source,terminal] +---- $ coreos-installer install \ --console=tty0 \//<1> --console=ttyS0, \//<2> --ignition-url=http://host/worker.ign \ --offline \ /dev/disk/by-id/scsi- -endif::[] ---- +endif::[] + <1> The desired secondary console. In this case, the graphical console. Omitting this option will disable the graphical console. <2> The desired primary console. In this case the serial console. The `options` field defines the baud rate and other settings. A common value for this field is `115200n8`. If no options are provided, the default kernel value of `9600n8` is used. 
For more information on the format of this option, see link:https://www.kernel.org/doc/html/latest/admin-guide/serial-console.html[Linux kernel serial console] documentation. diff --git a/modules/installation-user-infra-machines-advanced.adoc b/modules/installation-user-infra-machines-advanced.adoc index fe011a4e7a..7cd4c884e0 100644 --- a/modules/installation-user-infra-machines-advanced.adoc +++ b/modules/installation-user-infra-machines-advanced.adoc @@ -51,23 +51,28 @@ To configure an ISO installation, use the following procedure. .Procedure . Boot the ISO installer. -. From the live system shell prompt, configure networking for the live -system using available RHEL tools, such as `nmcli` or `nmtui`. + +. From the live system shell prompt, configure networking for the live system using available RHEL tools, such as `nmcli` or `nmtui`. + . Run the `coreos-installer` command to install the system, adding the `--copy-network` option to copy networking configuration. For example: + +ifndef::restricted[] +[source,terminal] +---- +$ sudo coreos-installer install --copy-network \ + --ignition-url=http://host/worker.ign /dev/disk/by-id/scsi- +---- +endif::[] +ifdef::restricted[] ++ [source,terminal] ---- -ifndef::restricted[] -$ sudo coreos-installer install --copy-network \ - --ignition-url=http://host/worker.ign /dev/disk/by-id/scsi- -endif::[] -ifdef::restricted[] $ sudo coreos-installer install --copy-network \ --ignition-url=http://host/worker.ign \ --offline \ /dev/disk/by-id/scsi- -endif::[] ---- +endif::[] + [IMPORTANT] ==== @@ -241,53 +246,65 @@ If you save existing partitions, and those partitions do not leave enough space This example preserves any partition in which the partition label begins with `data` (`data*`): +ifndef::restricted[] [source,terminal] ---- -ifndef::restricted[] # coreos-installer install --ignition-url http://10.0.2.2:8080/user.ign \ --save-partlabel 'data*' \ /dev/disk/by-id/scsi- +---- endif::[] + ifdef::restricted[] +[source,terminal] 
+---- # coreos-installer install --ignition-url http://10.0.2.2:8080/user.ign \ --save-partlabel 'data*' \ --offline \ /dev/disk/by-id/scsi- -endif::[] ---- +endif::[] The following example illustrates running the `coreos-installer` in a way that preserves the sixth (6) partition on the disk: +ifndef::restricted[] [source,terminal] ---- -ifndef::restricted[] # coreos-installer install --ignition-url http://10.0.2.2:8080/user.ign \ --save-partindex 6 /dev/disk/by-id/scsi- +---- endif::[] + ifdef::restricted[] +[source,terminal] +---- # coreos-installer install --ignition-url http://10.0.2.2:8080/user.ign \ --save-partindex 6 \ --offline \ /dev/disk/by-id/scsi- -endif::[] ---- +endif::[] This example preserves partitions 5 and higher: +ifndef::restricted[] [source,terminal] ---- -ifndef::restricted[] # coreos-installer install --ignition-url http://10.0.2.2:8080/user.ign \ --save-partindex 5- /dev/disk/by-id/scsi- +---- endif::[] + ifdef::restricted[] +[source,terminal] +---- # coreos-installer install --ignition-url http://10.0.2.2:8080/user.ign \ --save-partindex 5- \ --offline \ /dev/disk/by-id/scsi- -endif::[] ---- +endif::[] In the previous examples where partition saving is used, `coreos-installer` recreates the partition immediately. @@ -322,21 +339,20 @@ When doing an {op-system} manual installation, there are two types of Ignition c needs to pass one of the Ignition config files generated by `openshift-installer`, such as `bootstrap.ign`, `master.ign` and `worker.ign`, to carry out the installation. -+ + [IMPORTANT] ==== It is not recommended to modify these Ignition config files directly. You can update the manifest files that are wrapped into the Ignition config files, as outlined in examples in the preceding sections. ==== -+ + For PXE installations, you pass the Ignition configs on the `APPEND` line using the `coreos.inst.ignition_url=` option. 
For ISO installations, after the ISO boots to the shell prompt, you identify the Ignition config on the `coreos-installer` command line with the `--ignition-url=` option. In both cases, only HTTP and HTTPS protocols are supported. -+ * **Live install Ignition config**: This type can be created by using the `coreos-installer` `customize` subcommand and its various options. With this method, the Ignition config passes to the live install medium, runs immediately upon booting, and performs setup tasks before or after the {op-system} system installs to disk. This method should only be used for performing tasks that must be done once and not applied again later, such as with advanced partitioning that cannot be done using a machine config. -+ + For PXE or ISO boots, you can create the Ignition config and `APPEND` the `ignition.config.url=` option to identify the location of the Ignition config. You also need to append `ignition.firstboot ignition.platform.id=metal` diff --git a/modules/installation-user-infra-machines-iso.adoc b/modules/installation-user-infra-machines-iso.adoc index 1503ae260c..7c96d47156 100644 --- a/modules/installation-user-infra-machines-iso.adoc +++ b/modules/installation-user-infra-machines-iso.adoc @@ -95,6 +95,8 @@ ifndef::openshift-origin[] ---- endif::openshift-origin[] ifdef::openshift-origin[] ++ +[source,terminal] ---- "location": "/prod/streams/stable/builds//x86_64/fedora-coreos--live.x86_64.iso", ---- @@ -127,17 +129,20 @@ It is possible to interrupt the {op-system} installation boot process to add ker . Run the `coreos-installer` command and specify the options that meet your installation requirements. 
At a minimum, you must specify the URL that points to the Ignition config file for the node type, and the device that you are installing to: + +ifdef::restricted[] [source,terminal] ---- -ifdef::restricted[] $ sudo coreos-installer install --ignition-url=http:///.ign \ <1> --ignition-hash=sha512- --offline <2> +---- endif::[] ifndef::restricted[] +[source,terminal] +---- $ sudo coreos-installer install --ignition-url=http:///.ign \ <1> --ignition-hash=sha512- <2> -endif::[] ---- +endif::[] <1> You must run the `coreos-installer` command by using `sudo`, because the `core` user does not have the required root privileges to perform the installation. <2> The `--ignition-hash` option is required when the Ignition config file is obtained through an HTTP URL to validate the authenticity of the Ignition config file on the cluster node. `` is the Ignition config file SHA512 digest obtained in a preceding step. + @@ -148,19 +153,22 @@ If you want to provide your Ignition config files through an HTTPS server that u + The following example initializes a bootstrap node installation to the `/dev/sda` device. The Ignition config file for the bootstrap node is obtained from an HTTP web server with the IP address 192.168.1.2: + +ifdef::restricted[] [source,terminal] ---- - -ifdef::restricted[] $ sudo coreos-installer install --ignition-url=http://192.168.1.2:80/installation_directory/bootstrap.ign /dev/sda \ --ignition-hash=sha512-a5a2d43879223273c9b60af66b44202a1d1248fc01cf156c46d4a79f552b6bad47bc8cc78ddf0116e80c59d2ea9e32ba53bc807afbca581aa059311def2c3e3b \ --offline +---- endif::[] ifndef::restricted[] ++ +[source,terminal] +---- $ sudo coreos-installer install --ignition-url=http://192.168.1.2:80/installation_directory/bootstrap.ign /dev/sda \ --ignition-hash=sha512-a5a2d43879223273c9b60af66b44202a1d1248fc01cf156c46d4a79f552b6bad47bc8cc78ddf0116e80c59d2ea9e32ba53bc807afbca581aa059311def2c3e3b -endif::[] ---- +endif::[] . 
Monitor the progress of the {op-system} installation on the console of the machine. + diff --git a/modules/ipi-install-replacing-a-bare-metal-control-plane-node.adoc b/modules/ipi-install-replacing-a-bare-metal-control-plane-node.adoc index b2b7c5a577..376fe8d5d4 100644 --- a/modules/ipi-install-replacing-a-bare-metal-control-plane-node.adoc +++ b/modules/ipi-install-replacing-a-bare-metal-control-plane-node.adoc @@ -47,6 +47,10 @@ baremetal {product-version} True False False 3d15h [source,terminal] ---- $ oc delete bmh -n openshift-machine-api +---- ++ +[source,terminal] +---- $ oc delete machine -n openshift-machine-api ---- + diff --git a/modules/kmm-gathering-data-for-kmm-hub.adoc b/modules/kmm-gathering-data-for-kmm-hub.adoc index b812de51b9..cd686ca8ba 100644 --- a/modules/kmm-gathering-data-for-kmm-hub.adoc +++ b/modules/kmm-gathering-data-for-kmm-hub.adoc @@ -9,12 +9,16 @@ .Procedure . Gather the data for the KMM Operator hub controller manager: - ++ .. Set the `MUST_GATHER_IMAGE` variable: + [source,terminal] ---- $ export MUST_GATHER_IMAGE=$(oc get deployment -n openshift-kmm-hub kmm-operator-hub-controller -ojsonpath='{.spec.template.spec.containers[?(@.name=="manager")].env[?(@.name=="RELATED_IMAGE_MUST_GATHER")].value}') +---- ++ +[source,terminal] +---- $ oc adm must-gather --image="${MUST_GATHER_IMAGE}" -- /usr/bin/gather -u ---- + @@ -22,7 +26,7 @@ $ oc adm must-gather --image="${MUST_GATHER_IMAGE}" -- /usr/bin/gather -u ==== Use the `-n ` switch to specify a namespace if you installed KMM in a custom namespace. ==== - ++ .. Run the `must-gather` tool: + [source,terminal] diff --git a/modules/kmm-gathering-data-for-kmm.adoc b/modules/kmm-gathering-data-for-kmm.adoc index c1c098ec72..ae17686e91 100644 --- a/modules/kmm-gathering-data-for-kmm.adoc +++ b/modules/kmm-gathering-data-for-kmm.adoc @@ -9,12 +9,16 @@ .Procedure . Gather the data for the KMM Operator controller manager: - ++ .. 
Set the `MUST_GATHER_IMAGE` variable: + [source,terminal] ---- $ export MUST_GATHER_IMAGE=$(oc get deployment -n openshift-kmm kmm-operator-controller -ojsonpath='{.spec.template.spec.containers[?(@.name=="manager")].env[?(@.name=="RELATED_IMAGE_MUST_GATHER")].value}') +---- ++ +[source,terminal] +---- $ oc adm must-gather --image="${MUST_GATHER_IMAGE}" -- /usr/bin/gather ---- + @@ -22,7 +26,7 @@ $ oc adm must-gather --image="${MUST_GATHER_IMAGE}" -- /usr/bin/gather ==== Use the `-n ` switch to specify a namespace if you installed KMM in a custom namespace. ==== - ++ .. Run the `must-gather` tool: + [source,terminal] diff --git a/modules/kmm-running-depmod.adoc b/modules/kmm-running-depmod.adoc index 115805b98f..9eac7b3b6c 100644 --- a/modules/kmm-running-depmod.adoc +++ b/modules/kmm-running-depmod.adoc @@ -22,14 +22,12 @@ You must have a Red Hat subscription to download the `kernel-devel` package. ---- $ depmod -b /opt ${KERNEL_FULL_VERSION}+`. ---- - -[id="example-dockerfile_{context}"] ++ .Example Dockerfile -==== If you are building your image on {product-title}, consider using the Driver Toolkit (DTK). - ++ For further information, see link:https://cloud.redhat.com/blog/how-to-use-entitled-image-builds-to-build-drivercontainers-with-ubi-on-openshift[using an entitled build]. 
- ++ [source,yaml] ---- apiVersion: v1 @@ -52,4 +50,3 @@ data: COPY --from=builder /usr/src/kernel-module-management/ci/kmm-kmod/kmm_ci_b.ko /opt/lib/modules/${KERNEL_FULL_VERSION}/ RUN depmod -b /opt ${KERNEL_FULL_VERSION} ---- -==== diff --git a/modules/log6x-oc-explain.adoc b/modules/log6x-oc-explain.adoc index 963ae57eb9..695605a620 100644 --- a/modules/log6x-oc-explain.adoc +++ b/modules/log6x-oc-explain.adoc @@ -32,8 +32,20 @@ For instance, here's how you can drill down into the `storage` configuration for [source,terminal] ---- $ oc explain lokistacks.loki.grafana.com +---- + +[source,terminal] +---- $ oc explain lokistacks.loki.grafana.com.spec +---- + +[source,terminal] +---- $ oc explain lokistacks.loki.grafana.com.spec.storage +---- + +[source,terminal] +---- $ oc explain lokistacks.loki.grafana.com.spec.storage.schemas ---- diff --git a/modules/machineconfig-modify-journald.adoc b/modules/machineconfig-modify-journald.adoc index e456dc3ef3..824d779264 100644 --- a/modules/machineconfig-modify-journald.adoc +++ b/modules/machineconfig-modify-journald.adoc @@ -65,6 +65,11 @@ $ oc apply -f 40-worker-custom-journald.yaml [source,terminal] ---- $ oc get machineconfigpool +---- ++ +.Example output +[source,terminal] +---- NAME CONFIG UPDATED UPDATING DEGRADED MACHINECOUNT READYMACHINECOUNT UPDATEDMACHINECOUNT DEGRADEDMACHINECOUNT AGE master rendered-master-35 True False False 3 3 3 0 34m worker rendered-worker-d8 False True False 3 1 1 0 34m @@ -75,8 +80,22 @@ worker rendered-worker-d8 False True False 3 1 [source,terminal] ---- $ oc get node | grep worker +---- ++ +.Example output +[source,terminal] +---- ip-10-0-0-1.us-east-2.compute.internal Ready worker 39m v0.0.0-master+$Format:%h$ +---- ++ +[source,terminal] +---- $ oc debug node/ip-10-0-0-1.us-east-2.compute.internal +---- ++ +.Example output +[source,terminal] +---- Starting pod/ip-10-0-141-142us-east-2computeinternal-debug ... ... 
sh-4.2# chroot /host diff --git a/modules/manually-restoring-cluster-etcd-backup.adoc b/modules/manually-restoring-cluster-etcd-backup.adoc index 6644e865fc..4829b955d8 100644 --- a/modules/manually-restoring-cluster-etcd-backup.adoc +++ b/modules/manually-restoring-cluster-etcd-backup.adoc @@ -46,15 +46,19 @@ If you do not complete this step, you will not be able to access the control pla This procedure assumes that you copied the `backup` directory containing the etcd snapshot and the resources for the static pods to the `/home/core/assets` directory of each control plane host. You might need to create such `assets` folder if it does not exist yet. . Stop the static pods on all the control plane nodes; one host at a time. - ++ .. Move the existing Kubernetes API Server static pod manifest out of the kubelet manifest directory. + [source,terminal] ---- $ mkdir -p /root/manifests-backup +---- ++ +[source,terminal] +---- $ mv /etc/kubernetes/manifests/kube-apiserver-pod.yaml /root/manifests-backup/ ---- - ++ .. Verify that the Kubernetes API Server containers have stopped with the command: + [source,terminal] @@ -63,51 +67,51 @@ $ crictl ps | grep kube-apiserver | grep -E -v "operator|guard" ---- + The output of this command should be empty. If it is not empty, wait a few minutes and check again. - ++ .. If the Kubernetes API Server containers are still running, terminate them manually with the following command: + [source,terminal] ---- $ crictl stop ---- - ++ .. Repeat the same steps for `kube-controller-manager-pod.yaml`, `kube-scheduler-pod.yaml` and **finally** `etcd-pod.yaml`. - ++ ... Stop the `kube-controller-manager` pod with the following command: + [source,terminal] ---- $ mv /etc/kubernetes/manifests/kube-controller-manager-pod.yaml /root/manifests-backup/ ---- - ++ ... Check if the containers are stopped using the following command: + [source,terminal] ---- $ crictl ps | grep kube-controller-manager | grep -E -v "operator|guard" ---- - ++ ... 
Stop the `kube-scheduler` pod using the following command: + [source,terminal] ---- $ mv /etc/kubernetes/manifests/kube-scheduler-pod.yaml /root/manifests-backup/ ---- - ++ ... Check if the containers are stopped using the following command: + [source,terminal] ---- $ crictl ps | grep kube-scheduler | grep -E -v "operator|guard" ---- - ++ ... Stop the `etcd` pod using the following command: + [source,terminal] ---- $ mv /etc/kubernetes/manifests/etcd-pod.yaml /root/manifests-backup/ ---- - ++ ... Check if the containers are stopped using the following command: + [source,terminal] @@ -120,13 +124,17 @@ $ crictl ps | grep etcd | grep -E -v "operator|guard" [source,terminal] ---- $ mkdir /home/core/assets/old-member-data +---- ++ +[source,terminal] +---- $ mv /var/lib/etcd/member /home/core/assets/old-member-data ---- + This data will be useful in case the `etcd` backup restore does not work and the `etcd` cluster must be restored to the current state. . Find the correct etcd parameters for each control plane host. - ++ .. The value for `` is unique for the each control plane host, and it is equal to the value of the `ETCD_NAME` variable in the manifest `/etc/kubernetes/static-pod-resources/etcd-certs/configmaps/restore-etcd-pod/pod.yaml` file in the specific control plane host. It can be found with the command: + [source,terminal] @@ -136,7 +144,7 @@ cat $RESTORE_ETCD_POD_YAML | \ grep -A 1 $(cat $RESTORE_ETCD_POD_YAML | grep 'export ETCD_NAME' | grep -Eo 'NODE_.+_ETCD_NAME') | \ grep -Po '(?<=value: ").+(?=")' ---- - ++ .. The value for `` can be generated in a control plane host with the command: + [source,terminal] @@ -148,10 +156,10 @@ $ uuidgen ==== The value for `` must be generated only once. After generating `UUID` on one control plane host, do not generate it again on the others. The same `UUID` will be used in the next steps on all control plane hosts. ==== - ++ .. 
The value for `ETCD_NODE_PEER_URL` should be set like the following example: + -[source,yaml] +[source,terminal,subs="attributes+"] ---- https://:2380 ---- @@ -165,7 +173,7 @@ $ echo | \ xargs -I {} grep {} /etc/kubernetes/static-pod-resources/etcd-certs/configmaps/etcd-scripts/etcd.env | \ grep "IP" | grep -Po '(?<=").+(?=")' ---- - ++ .. The value for `` should be set like the following, where `` is the `` of each control plane host. + [NOTE] @@ -174,7 +182,7 @@ The port used must be 2380 and not 2379. The port 2379 is used for etcd database ==== + .Example output -[source,terminal] +[source,terminal,subs="attributes+"] ---- =,=,= <1> ---- @@ -185,17 +193,17 @@ The `` value remains same across all control plane hosts. . Regenerate the etcd database from the backup. + Such operation must be executed on each control plane host. - ++ .. Copy the `etcd` backup to `/var/lib/etcd` directory with the command: + -[source,terminal] +[source,terminal,subs="attributes+"] ---- $ cp /home/core/assets/backup/.db /var/lib/etcd ---- - ++ .. Identify the correct `etcdctl` image before proceeding. Use the following command to retrieve the image from the backup of the pod manifest: + -[source,terminal] +[source,terminal,subs="attributes+"] ---- $ jq -r '.spec.containers[]|select(.name=="etcdctl")|.image' /root/manifests-backup/etcd-pod.yaml ---- @@ -204,14 +212,14 @@ $ jq -r '.spec.containers[]|select(.name=="etcdctl")|.image' /root/manifests-bac ---- $ podman run --rm -it --entrypoint="/bin/bash" -v /var/lib/etcd:/var/lib/etcd:z ---- - ++ .. Check that the version of the `etcdctl` tool is the version of the `etcd` server where the backup was created: + [source,terminal] ---- $ etcdctl version ---- - ++ .. Run the following command to regenerate the `etcd` database, using the correct values for the current host: + [source,terminal] @@ -238,29 +246,29 @@ The quotes are mandatory when regenerating the `etcd` database. 
2022-06-28T19:52:43Z info membership/cluster.go:421 added member {"cluster-id": "c5996b7c11c30d6b", "local-member-id": "0", "added-peer-id": "1f63d01b31bb9a9e", "added-peer-peer-urls": ["https://10.0.90.221:2380"], "added-peer-is-learner": false} 2022-06-28T19:52:43Z info membership/cluster.go:421 added member {"cluster-id": "c5996b7c11c30d6b", "local-member-id": "0", "added-peer-id": "fdc2725b3b70127c", "added-peer-peer-urls": ["https://10.0.94.214:2380"], "added-peer-is-learner": false} ---- - ++ .. Exit from the container. - ++ .. Repeat these steps on the other control plane hosts, checking that the values printed in the `added member` logs are the same for all control plane hosts. . Move the regenerated `etcd` database to the default location. + Such operation must be executed on each control plane host. - ++ .. Move the regenerated database (the `member` folder created by the previous `etcdctl snapshot restore` command) to the default etcd location `/var/lib/etcd`: + [source,terminal] ---- $ mv /var/lib/etcd/restore-/member /var/lib/etcd ---- - ++ .. Restore the SELinux context for `/var/lib/etcd/member` folder on `/var/lib/etcd` directory: + [source,terminal] ---- $ restorecon -vR /var/lib/etcd/ ---- - ++ .. Remove the leftover files and directories: + [source,terminal] @@ -277,20 +285,20 @@ $ rm /var/lib/etcd/.db ==== When you are finished the `/var/lib/etcd` directory must contain only the folder `member`. ==== - ++ .. Repeat these steps on the other control plane hosts. . Restart the etcd cluster. - ++ .. The following steps must be executed on all control plane hosts, but **one host at a time**. - ++ .. Move the `etcd` static pod manifest back to the kubelet manifest directory, in order to make kubelet start the related containers : + [source,terminal] ---- $ mv /tmp/etcd-pod.yaml /etc/kubernetes/manifests ---- - ++ .. 
Verify that all the `etcd` containers have started: + [source,terminal] @@ -310,7 +318,7 @@ e1646b15207c6 9d28c15860870e85c91d0e36b45f7a6edd3da757b113ec4abb4507df88b1 If the output of this command is empty, wait a few minutes and check again. . Check the status of the `etcd` cluster. - ++ .. On any of the control plane hosts, check the status of the `etcd` cluster with the following command: + [source,terminal] @@ -333,14 +341,14 @@ $ crictl exec -it $(crictl ps | grep etcdctl | awk '{print $1}') etcdctl endpoin . Restart the other static pods. + The following steps must be executed on all control plane hosts, but one host at a time. - ++ .. Move the Kubernetes API Server static pod manifest back to the kubelet manifest directory to make kubelet start the related containers with the command: + [source,terminal] ---- $ mv /root/manifests-backup/kube-apiserver-pod.yaml /etc/kubernetes/manifests ---- - ++ .. Verify that all the Kubernetes API Server containers have started: + [source,terminal] @@ -352,30 +360,30 @@ $ crictl ps | grep kube-apiserver | grep -v operator ==== if the output of the following command is empty, wait a few minutes and check again. ==== - ++ .. Repeat the same steps for `kube-controller-manager-pod.yaml` and `kube-scheduler-pod.yaml` files. - ++ ... Restart the kubelets in all nodes using the following command: + [source,terminal] ---- $ systemctl restart kubelet ---- - ++ ... Start the remaining control plane pods using the following command: + [source,terminal] ---- $ mv /root/manifests-backup/kube-* /etc/kubernetes/manifests/ ---- - ++ ... Check if the `kube-apiserver`, `kube-scheduler` and `kube-controller-manager` pods start correctly: + [source,terminal] ---- $ crictl ps | grep -E 'kube-(apiserver|scheduler|controller-manager)' | grep -v -E 'operator|guard' ---- - ++ ... 
Wipe the OVN databases using the following commands: + [source,terminal] @@ -387,4 +395,3 @@ do oc -n openshift-ovn-kubernetes wait pod -l app=ovnkube-node --field-selector=spec.nodeName=${NODE} --for condition=ContainersReady --timeout=600s done ---- - diff --git a/modules/microshift-disconnected-host-procedure.adoc b/modules/microshift-disconnected-host-procedure.adoc index 77e7ddc597..3d68a75d2c 100644 --- a/modules/microshift-disconnected-host-procedure.adoc +++ b/modules/microshift-disconnected-host-procedure.adoc @@ -27,10 +27,14 @@ The following procedure is for use cases in which access to the {microshift-shor [source,terminal] ---- $ IP="10.44.0.1" <1> -$ sudo nmcli con add type loopback con-name stable-microshift ifname lo ip4 ${IP}/32 ---- <1> The fake IP address used in this example is “10.44.0.1”. + +[source,terminal] +---- +$ sudo nmcli con add type loopback con-name stable-microshift ifname lo ip4 ${IP}/32 +---- ++ [NOTE] ==== Any valid IP works if it avoids both internal {microshift-short} and potential future external IP conflicts. This can be any subnet that does not collide with the {microshift-short} node subnet or is be accessed by other services on the device. @@ -78,9 +82,11 @@ EOF ---- . {microshift-short} is now ready to use the loopback device for intra-node communications. Finish preparing the device for offline use. - ++ .. If the device currently has a NIC attached, disconnect the device from the network. ++ .. Shut down the device and disconnect the NIC. ++ .. Restart the device for the offline configuration to take effect. . Restart the {microshift-short} host to apply the configuration changes by running the following command: @@ -100,6 +106,10 @@ At this point, network access to the {microshift-short} host has been severed. I [source,terminal] ---- $ export KUBECONFIG=/var/lib/microshift/resources/kubeadmin/kubeconfig +---- ++ +[source,terminal] +---- $ sudo -E oc get pods -A ---- +