Merge pull request #102596 from openshift-cherrypick-robot/cherry-pick-102543-to-enterprise-4.21
[enterprise-4.21] OSDOCS-17072-batch6
@@ -31,9 +31,12 @@ $ alias velero='oc -n openshift-adp exec deployment/velero -c velero -it -- ./ve
. Check that the alias is working by running the following command:
+
[source,terminal]
----
$ velero version
----
+
.Example output
[source,terminal]
----
Client:
	Version: v1.12.1-OADP
	Git commit: -
----
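If the alias does not respond, you can first confirm that the Velero deployment exists; a minimal sketch, using the same `openshift-adp` namespace as the alias:

[source,terminal]
----
$ oc get deployment velero -n openshift-adp
----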
@@ -142,15 +142,18 @@ $ cosign verify-attestation --key cosign.pub $REGISTRY/kaniko-chains
+
[source,terminal]
----
$ rekor-cli search --sha <image_digest>
----
* `<image_digest>`: Substitute with the `sha256` digest of the image.
+
[source,terminal]
----
<uuid_1>
<uuid_2>
...
----
* `<uuid_1>`: The first matching universally unique identifier (UUID).
* `<uuid_2>`: The second matching UUID.
+
The search result displays UUIDs of the matching entries. One of those UUIDs holds the attestation.
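For example, you can then fetch an entry by UUID to inspect it for the attestation; a sketch, assuming `<uuid_1>` from the search output:

[source,terminal]
----
$ rekor-cli get --uuid <uuid_1> --format json
----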
@@ -80,12 +80,9 @@ $ diff -s /tmp/ca-cert.crt.txt /tmp/pod-cert-chain-ca.crt.txt
You should see the following result:
`Files /tmp/ca-cert.crt.txt and /tmp/pod-cert-chain-ca.crt.txt are identical.`

. Verify the certificate chain from the root certificate to the workload certificate. Replace `<path>` with the path to your certificates. After you run the command, the expected output shows `./proxy-cert-1.pem: OK`.
+
[source,terminal]
----
$ openssl verify -CAfile <(cat <path>/ca-cert.pem <path>/root-cert.pem) ./proxy-cert-1.pem
----
@@ -6,6 +6,8 @@
[id="persistent-storage-csi-azure-file-cross-sub-dynamic-provisioning-procedure_{context}"]
= Dynamic provisioning across subscriptions for Azure File

You can use Azure File dynamic provisioning across subscriptions by completing this procedure.

.Prerequisites
* An {product-title} cluster installed on Azure with the service principal or managed identity as an Azure identity in one subscription (call it Subscription A)

@@ -14,7 +16,6 @@
* Logged in to the Azure CLI

.Procedure

. Record the Azure identity (service principal or managed identity) by running the following applicable commands. The Azure identity is needed in a later step:
+
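For example, for a managed identity you can capture its client ID; a minimal sketch, assuming placeholder values for the resource group and identity name:

[source,terminal]
----
$ az identity show --resource-group <resource_group> --name <identity_name> --query clientId --output tsv
----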
@@ -11,6 +11,7 @@
An {aws-full} account must be prepared and configured to accept an {oadp-first} installation.

.Procedure

. Create the following environment variables by running the following commands:
+
[IMPORTANT]
@@ -20,32 +21,72 @@ Change the cluster name to match your cluster, and ensure you are logged into th
+
[source,terminal]
----
$ export CLUSTER_NAME=my-cluster
----
+
--
* `my-cluster`: Replace `my-cluster` with your cluster name.
--
+
[source,terminal]
----
$ export ROSA_CLUSTER_ID=$(rosa describe cluster -c ${CLUSTER_NAME} --output json | jq -r .id)
----
+
[source,terminal]
----
$ export REGION=$(rosa describe cluster -c ${CLUSTER_NAME} --output json | jq -r .region.id)
----
+
[source,terminal]
----
$ export OIDC_ENDPOINT=$(oc get authentication.config.openshift.io cluster -o jsonpath='{.spec.serviceAccountIssuer}' | sed 's|^https://||')
----
+
[source,terminal]
----
$ export AWS_ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text)
----
+
[source,terminal]
----
$ export CLUSTER_VERSION=$(rosa describe cluster -c ${CLUSTER_NAME} -o json | jq -r .version.raw_id | cut -f -2 -d '.')
----
+
[source,terminal]
----
$ export ROLE_NAME="${CLUSTER_NAME}-openshift-oadp-aws-cloud-credentials"
----
+
[source,terminal]
----
$ export SCRATCH="/tmp/${CLUSTER_NAME}/oadp"
----
+
[source,terminal]
----
$ mkdir -p ${SCRATCH}
----
+
[source,terminal]
----
$ echo "Cluster ID: ${ROSA_CLUSTER_ID}, Region: ${REGION}, OIDC Endpoint:
${OIDC_ENDPOINT}, AWS Account ID: ${AWS_ACCOUNT_ID}"
----
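The echo command prints a one-line summary; a sketch of the output, with placeholder values:

[source,terminal]
----
Cluster ID: <rosa_cluster_id>, Region: <region>, OIDC Endpoint: <oidc_endpoint>, AWS Account ID: <aws_account_id>
----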

. On the {aws-short} account, create an IAM policy to allow access to {aws-short} S3:
+
.. Check to see if the policy exists by running the following command:
+
[source,terminal]
----
$ POLICY_ARN=$(aws iam list-policies --query "Policies[?PolicyName=='RosaOadpVer1'].{ARN:Arn}" --output text)
----
+
--
* `RosaOadpVer1`: Replace `RosaOadpVer1` with your policy name.
--
+
.. Enter the following command to create the policy JSON file and then create the policy:
+
[NOTE]
@@ -56,7 +97,7 @@ If the policy ARN is not found, the command creates the policy. If the policy AR
[source,terminal]
----
$ if [[ -z "${POLICY_ARN}" ]]; then
cat << EOF > ${SCRATCH}/policy.json
{
  "Version": "2012-10-17",
  "Statement": [
@@ -101,8 +142,10 @@ EOF
fi
----
+
--
* `SCRATCH`: The name of a temporary directory created for the environment variables.
--
+
.. View the policy ARN by running the following command:
+
[source,terminal]
@@ -110,9 +153,8 @@ EOF
$ echo ${POLICY_ARN}
----

. Create an IAM role trust policy for the cluster:
+
.. Create the trust policy file by running the following command:
+
[source,terminal]
@@ -137,7 +179,7 @@ $ cat <<EOF > ${SCRATCH}/trust-policy.json
}
EOF
----
+
.. Create the role by running the following command:
+
[source,terminal]
@@ -152,7 +194,7 @@ $ ROLE_ARN=$(aws iam create-role --role-name \
Key=operator_name,Value=openshift-oadp \
--query Role.Arn --output text)
----
+
.. View the role ARN by running the following command:
+
[source,terminal]
@@ -10,6 +10,7 @@
An {aws-full} account must be prepared and configured to accept an {oadp-first} installation. Prepare the {aws-short} credentials by using the following procedure.

.Procedure

. Define the `CLUSTER_NAME` environment variable by running the following command:
+
[source,terminal]
@@ -23,17 +24,33 @@ $ export CLUSTER_NAME= <AWS_cluster_name> <1>
[source,terminal]
----
$ export CLUSTER_VERSION=$(oc get clusterversion version -o jsonpath='{.status.desired.version}{"\n"}')
----
+
[source,terminal]
----
$ export AWS_CLUSTER_ID=$(oc get clusterversion version -o jsonpath='{.spec.clusterID}{"\n"}')
----
+
[source,terminal]
----
$ export OIDC_ENDPOINT=$(oc get authentication.config.openshift.io cluster -o jsonpath='{.spec.serviceAccountIssuer}' | sed 's|^https://||')
----
+
[source,terminal]
----
$ export REGION=$(oc get infrastructures cluster -o jsonpath='{.status.platformStatus.aws.region}' --allow-missing-template-keys=false || echo us-east-2)
----
+
[source,terminal]
----
$ export AWS_ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text)
----
+
[source,terminal]
----
$ export ROLE_NAME="${CLUSTER_NAME}-openshift-oadp-aws-cloud-credentials"
----

. Create a temporary directory to store all of the files by running the following command:
+
[source,terminal]
@@ -41,6 +58,7 @@ export ROLE_NAME="${CLUSTER_NAME}-openshift-oadp-aws-cloud-credentials"
$ export SCRATCH="/tmp/${CLUSTER_NAME}/oadp"
mkdir -p ${SCRATCH}
----

. Display all of the gathered details by running the following command:
+
[source,terminal]
@@ -48,20 +66,25 @@ mkdir -p ${SCRATCH}
$ echo "Cluster ID: ${AWS_CLUSTER_ID}, Region: ${REGION}, OIDC Endpoint:
${OIDC_ENDPOINT}, AWS Account ID: ${AWS_ACCOUNT_ID}"
----

. On the {aws-short} account, create an IAM policy to allow access to {aws-short} S3:
+
.. Check to see if the policy exists by running the following commands:
+
[source,terminal]
----
$ export POLICY_NAME="OadpVer1"
----
+
--
* `POLICY_NAME`: The variable can be set to any value.
--
+
[source,terminal]
----
$ POLICY_ARN=$(aws iam list-policies --query "Policies[?PolicyName=='$POLICY_NAME'].{ARN:Arn}" --output text)
----
+
.. Enter the following command to create the policy JSON file and then create the policy:
+
[NOTE]
@@ -113,12 +136,11 @@ EOF
POLICY_ARN=$(aws iam create-policy --policy-name $POLICY_NAME \
--policy-document file:///${SCRATCH}/policy.json --query Policy.Arn \
--tags Key=openshift_version,Value=${CLUSTER_VERSION} Key=operator_namespace,Value=openshift-adp Key=operator_name,Value=oadp \
--output text)
fi
----
+
--
* `SCRATCH`: The name of a temporary directory created for storing the files.
--

.. View the policy ARN by running the following command:
+
[source,terminal]
@@ -127,7 +149,7 @@ $ echo ${POLICY_ARN}
----

. Create an IAM role trust policy for the cluster:
+
.. Create the trust policy file by running the following command:
+
[source,terminal]
@@ -152,7 +174,7 @@ $ cat <<EOF > ${SCRATCH}/trust-policy.json
}
EOF
----
+
.. Create the role by running the following command:
+
[source,terminal]
@@ -162,7 +184,7 @@ $ ROLE_ARN=$(aws iam create-role --role-name \
--assume-role-policy-document file://${SCRATCH}/trust-policy.json \
--tags Key=cluster_id,Value=${AWS_CLUSTER_ID} Key=openshift_version,Value=${CLUSTER_VERSION} Key=operator_namespace,Value=openshift-adp Key=operator_name,Value=oadp --query Role.Arn --output text)
----
+
.. View the role ARN by running the following command:
+
[source,terminal]
@@ -35,19 +35,23 @@ ifdef::openshift-rosa-hcp[]
* Query `kubelet` `journald` unit logs from {product-title} cluster nodes. The following example queries worker nodes only:
endif::openshift-rosa-hcp[]
+
ifndef::openshift-rosa-hcp[]
[source,terminal]
----
$ oc adm node-logs --role=master -u kubelet
----
endif::openshift-rosa-hcp[]
ifdef::openshift-rosa-hcp[]
[source,terminal]
----
$ oc adm node-logs --role=worker -u kubelet
----
endif::openshift-rosa-hcp[]
* `kubelet`: Replace as appropriate to query other unit logs.
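For example, to query the CRI-O unit logs instead; a sketch, assuming the default `crio` systemd unit name on {op-system} nodes:

[source,terminal]
----
$ oc adm node-logs --role=worker -u crio
----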

ifndef::openshift-rosa-hcp[]
. Collect logs from specific subdirectories under `/var/log/` on cluster nodes.
+
.. Retrieve a list of logs contained within a `/var/log/` subdirectory. The following example lists files in `/var/log/openshift-apiserver/` on all control plane nodes:
+
[source,terminal]
@@ -28,8 +28,15 @@ EOF
[source,terminal]
----
$ PERSIST=`cat 70-persistent-net.rules | base64 -w 0`
----
+
[source,terminal]
----
$ echo $PERSIST
----
+
.Example output
[source,terminal]
----
U1VCU1lTVEVNPT0ibmV0IixBQ1RJT049PSJhZGQiLEFUVFJ7YWRkcmVzc309PSJiODozZjpkMjozYjo1MToyOCIsQVRUUnt0eXBlfT09IjEiLE5BTUU9ImliczJmMCIKU1VCU1lTVEVNPT0ibmV0IixBQ1RJT049PSJhZGQiLEFUVFJ7YWRkcmVzc309PSJiODozZjpkMjozYjo1MToyOSIsQVRUUnt0eXBlfT09IjEiLE5BTUU9ImVuczhmMG5wMCIKU1VCU1lTVEVNPT0ibmV0IixBQ1RJT049PSJhZGQiLEFUVFJ7YWRkcmVzc309PSJiODozZjpkMjpmMDozNjpkMCIsQVRUUnt0eXBlfT09IjEiLE5BTUU9ImliczJmMCIKU1VCU1lTVEVNPT0ibmV0IixBQ1RJT049PSJhZGQiLEFUVFJ7YWRkcmVzc309PSJiODozZjpkMjpmMDozNjpkMSIsQVRUUnt0eXBlfT09IjEiLE5BTUU9ImVuczhmMG5wMCIK
----
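To verify that the encoding round-trips, you can decode the variable back to the original rules content; a minimal sketch:

[source,terminal]
----
$ echo $PERSIST | base64 --decode
----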
@@ -61,18 +68,12 @@ spec:
path: /etc/udev/rules.d/70-persistent-net.rules
----

. Create the machine configuration on the cluster by running the following command. After running the command, the expected output shows `machineconfig.machineconfiguration.openshift.io/99-machine-config-udev-network created`.
+
[source,terminal]
----
$ oc create -f 99-machine-config-udev-network.yaml
----

. Use the `get mcp` command to view the machine configuration status:
+
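A minimal sketch of that command:

[source,terminal]
----
$ oc get mcp
----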
@@ -45,53 +45,66 @@ The following procedure enables multipath at installation time and appends kerne
----
$ mpathconf --enable && systemctl start multipathd.service
----
+
.. Optional: If booting the PXE or ISO, you can instead enable multipath by adding `rd.multipath=default` to the kernel command line.

. Append the kernel arguments by invoking the `coreos-installer` program:
+
* If there is only one multipath device connected to the machine, it should be available at path `/dev/mapper/mpatha`. For example:
+
ifndef::restricted[]
[source,terminal]
----
$ coreos-installer install /dev/mapper/mpatha \
--ignition-url=http://host/worker.ign \
--append-karg rd.multipath=default \
--append-karg root=/dev/disk/by-label/dm-mpath-root \
--append-karg rw
----
endif::[]
ifdef::restricted[]
[source,terminal]
----
$ coreos-installer install /dev/mapper/mpatha \
--ignition-url=http://host/worker.ign \
--append-karg rd.multipath=default \
--append-karg root=/dev/disk/by-label/dm-mpath-root \
--append-karg rw \
--offline
----
endif::[]
+
--
* `/dev/mapper/mpatha`: Indicates the path of the single multipathed device.
--
+
* If there are multiple multipath devices connected to the machine, or to be more explicit about the target device, use the World Wide Name (WWN) symlink available in `/dev/disk/by-id` instead of `/dev/mapper/mpatha`. For example:
+
ifndef::restricted[]
[source,terminal]
----
$ coreos-installer install /dev/disk/by-id/wwn-<wwn_ID> \
--ignition-url=http://host/worker.ign \
--append-karg rd.multipath=default \
--append-karg root=/dev/disk/by-label/dm-mpath-root \
--append-karg rw
----
endif::[]
ifdef::restricted[]
[source,terminal]
----
$ coreos-installer install /dev/disk/by-id/wwn-<wwn_ID> \
--ignition-url=http://host/worker.ign \
--append-karg rd.multipath=default \
--append-karg root=/dev/disk/by-label/dm-mpath-root \
--append-karg rw \
--offline
----
endif::[]
+
--
* `<wwn_ID>`: Indicates the WWN ID of the target multipathed device. For example, `0xx194e957fcedb4841`.
--
+
This symlink can also be used as the `coreos.inst.install_dev` kernel argument when using special `coreos.inst.*` arguments to direct the live installer. For more information, see "Installing {op-system} and starting the {product-title} bootstrap process".
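For instance, a live-installer kernel-argument line can reference the symlink directly; a sketch, with the Ignition URL as a placeholder:

[source,terminal]
----
coreos.inst.install_dev=/dev/disk/by-id/wwn-<wwn_ID> coreos.inst.ignition_url=http://host/worker.ign rd.multipath=default
----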
@@ -25,10 +25,13 @@ $ rosa create tuning-config -c <cluster_id> --name <name_of_tuning> --spec-path
+
You must supply the path to the `spec.json` file or the command returns an error.
+
.Example output
[source,terminal]
----
I: Tuning config 'sample-tuning' has been created on cluster 'cluster-example'.
----
+
[source,terminal]
----
I: To view all tuning configs, run 'rosa list tuning-configs -c cluster-example'
----
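As the message suggests, you can then list the tuning configs; a sketch using the same example cluster name:

[source,terminal]
----
$ rosa list tuning-configs -c cluster-example
----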
@@ -12,16 +12,28 @@ If you have already created your first cluster and users, this list can serve as
----
## Configures your AWS account and ensures everything is set up correctly
$ rosa init
----

[source,terminal]
----
## Starts the cluster creation process (~30-40 minutes)
$ rosa create cluster --cluster-name=<cluster_name>
----

[source,terminal]
----
## Connects your IDP to your cluster
$ rosa create idp --cluster=<cluster_name> --interactive
----

[source,terminal]
----
## Promotes a user from your IDP to dedicated-admin level
$ rosa grant user dedicated-admin --user=<idp_user_name> --cluster=<cluster_name>
----

[source,terminal]
----
## Checks if your install is ready (look for State: Ready),
## and provides your Console URL to log in to the web console.
$ rosa describe cluster --cluster=<cluster_name>
----
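While cluster creation runs, you can also follow the installation logs; a sketch, assuming the same `<cluster_name>` placeholder:

[source,terminal]
----
$ rosa logs install --cluster=<cluster_name> --watch
----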
@@ -23,14 +23,26 @@ Ensure that the prerequisites are met by reviewing _Detailed requirements for de
include::snippets/rosa-sts.adoc[]

. If needed, you can re-create the permissions and policies by using the `-f` flag:
+
[source,terminal]
----
$ rosa create ocm-role -f
----
+
[source,terminal]
----
$ rosa create user-role -f
----
+
[source,terminal]
----
$ rosa create account-roles -f
----
+
[source,terminal]
----
$ rosa create operator-roles -c ${CLUSTER} -f
----

. Validate all the prerequisites and attempt cluster reinstallation.

@@ -2,7 +2,7 @@
//
// * updating/understanding_updates/how-updates-work.adoc

:_mod-docs-content-type: REFERENCE
[id="update-release-images_{context}"]
= Release images

@@ -16,13 +16,19 @@ You can inspect the content of a specific release image by running the following
$ oc adm release extract <release image>
----

.Example output
[source,terminal]
----
$ oc adm release extract quay.io/openshift-release-dev/ocp-release:4.12.6-x86_64
Extracted release payload from digest sha256:800d1e39d145664975a3bb7cbc6e674fbf78e3c45b5dde9ff2c5a11a8690c87b created at 2023-03-01T12:46:29Z
----

[source,terminal]
----
$ ls
----

.Example output
[source,terminal]
----
0000_03_authorization-openshift_01_rolebindingrestriction.crd.yaml
0000_03_config-operator_01_proxy.crd.yaml
0000_03_marketplace-operator_01_operatorhub.crd.yaml
----
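To keep the extracted manifests out of your working directory, you can extract to a dedicated path; a sketch, with `/tmp/release` as an assumed destination:

[source,terminal]
----
$ oc adm release extract --to=/tmp/release quay.io/openshift-release-dev/ocp-release:4.12.6-x86_64
----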
@@ -56,10 +56,15 @@ WantedBy=multi-user.target
[source,terminal]
----
$ sudo systemctl enable node_exporter.service
----
+
[source,terminal]
----
$ sudo systemctl start node_exporter.service
----

.Verification

* Verify that the node-exporter agent is reporting metrics from the virtual machine.
+
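A quick check is to query the agent's metrics endpoint on the virtual machine; a minimal sketch, assuming the default node_exporter port `9100`:

[source,terminal]
----
$ curl http://localhost:9100/metrics
----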
@@ -17,7 +17,10 @@ You need to Create a new User Managed Identity and then obtain the Client ID of
$ az identity create \
--name ${USER_ASSIGNED_IDENTITY_NAME} \
--resource-group ${RESOURCE_GROUP}
----
+
[source,terminal]
----
$ export IDENTITY_CLIENT_ID=$(az identity show --resource-group "${RESOURCE_GROUP}" --name "${USER_ASSIGNED_IDENTITY_NAME}" --query 'clientId' -otsv)
----
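To confirm that the client ID was captured, you can print the variable; a minimal sketch:

[source,terminal]
----
$ echo ${IDENTITY_CLIENT_ID}
----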