From d759018481d570db89bc9366b39806d7271f5b96 Mon Sep 17 00:00:00 2001 From: Andrea Hoffer Date: Wed, 9 Apr 2025 09:02:15 -0400 Subject: [PATCH] Sweep to update 'command line tool/interface' to 'command-line tool/interface' --- applications/index.adoc | 2 +- .../aws-sts/oadp-aws-sts.adoc | 2 +- cicd/jenkins/images-other-jenkins-agent.adoc | 2 +- .../rosa_cli/rosa-cli-permission-examples.adoc | 2 +- .../cloud-experts-getting-started-accessing.adoc | 2 +- .../cloud-experts-getting-started-admin-rights.adoc | 2 +- .../cloud-experts-getting-started-deleting.adoc | 2 +- .../cloud-experts-getting-started-hcp.adoc | 12 ++++++------ ...oud-experts-getting-started-simple-cli-guide.adoc | 2 +- ...xperts-getting-started-managing-worker-nodes.adoc | 2 +- .../cloud-experts-getting-started-upgrading.adoc | 2 +- .../cloud-experts-getting-started-what-is-rosa.adoc | 2 +- .../cloud-experts-rosa-sts-explained.adoc | 2 +- ...-hcp-activation-and-account-linking-tutorial.adoc | 2 +- .../cloud-experts-update-component-routes.adoc | 2 +- .../rosa-mobb-cli-quickstart.adoc | 2 +- .../preparing-to-install-on-ibm-z.adoc | 2 +- .../advanced-migration-options-3-4.adoc | 2 +- .../installing-restricted-3-4.adoc | 2 +- .../advanced-migration-options-mtc.adoc | 2 +- .../installing-mtc-restricted.adoc | 2 +- modules/about-administrator-perspective.adoc | 2 +- modules/about-developer-perspective.adoc | 2 +- ...bout-manually-maintained-credentials-upgrade.adoc | 2 +- modules/applications-create-using-cli-modify.adoc | 2 +- modules/builds-understanding-openshift-pipeline.adoc | 2 +- ...cy-tuning-debugging-data-for-red-hat-support.adoc | 2 +- .../cnf-performance-profile-creator-arguments.adoc | 2 +- modules/compliance-evaluate-kubeletconfig-rules.adoc | 2 +- modules/developer-cli-odo-developer-setup.adoc | 2 +- modules/images-other-jenkins-agent-gradle.adoc | 4 ++-- .../installation-aws-user-infra-requirements.adoc | 2 +- modules/installation-gcp-shared-vpc-ingress.adoc | 2 +- 
...ra-machines-advanced-enabling-serial-console.adoc | 2 +- modules/load-and-merge-rules.adoc | 4 ++-- modules/machine-health-checks-creating.adoc | 2 +- modules/manually-rotating-cloud-creds.adoc | 2 +- modules/microshift-gitops-debug.adoc | 2 +- modules/migration-rolling-back-migration-cli.adoc | 4 ++-- modules/migration-viewing-migration-plan-log.adoc | 2 +- .../migration-viewing-migration-plan-resources.adoc | 2 +- modules/mirror-registry-introduction.adoc | 2 +- ...etwork-observability-netobserv-cli-reference.adoc | 2 +- modules/nodes-containers-copying-files-rsync.adoc | 4 ++-- modules/nodes-nodes-kernel-arguments.adoc | 2 +- modules/nodes-pods-configmap-overview.adoc | 2 +- .../nw-ingress-configuring-application-domain.adoc | 2 +- ...-kubernetes-examine-nb-database-contents-ref.adoc | 6 +++--- ...-kubernetes-examine-sb-database-contents-ref.adoc | 6 +++--- modules/odc-access-web-terminal.adoc | 2 +- ...t-of-tekton-hub-in-the-developer-perspective.adoc | 2 +- modules/op-tkn-clustertask-management.adoc | 2 +- modules/openshift-architecture-common-terms.adoc | 2 +- modules/ossm-cr-example.adoc | 2 +- modules/ossm-cr-status.adoc | 2 +- modules/ossm-migrating-to-20.adoc | 2 +- modules/ossm-vs-istio-1x.adoc | 4 ++-- modules/ossm-vs-istio.adoc | 4 ++-- modules/pod-using-a-different-service-account.adoc | 2 +- modules/psap-configuring-node-feature-discovery.adoc | 10 +++++----- ...discovery-topology-updater-command-reference.adoc | 4 ++-- modules/rhcos-enabling-multipath-day-2.adoc | 2 +- modules/rhcos-enabling-multipath.adoc | 2 +- modules/rosa-adding-tags-cli.adoc | 2 +- modules/rosa-getting-started-environment-setup.adoc | 2 +- ...-getting-started-install-configure-cli-tools.adoc | 2 +- modules/rosa-hcp-aws-private-create-cluster.adoc | 2 +- modules/rosa-hcp-deleting-cluster.adoc | 2 +- modules/rosa-installing.adoc | 2 +- modules/rosa-sts-setting-up-environment.adoc | 2 +- modules/running-insights-operator-gather-cli.adoc | 3 +-- modules/sre-cluster-access.adoc | 
2 +- modules/telco-ran-sr-iov-operator.adoc | 2 +- ...using-aws-cli-create-iam-role-alb-controller.adoc | 4 ++-- modules/virt-connecting-vm-virtctl.adoc | 2 +- modules/virt-creating-service-virtctl.adoc | 4 ++-- modules/virt-deleting-vms.adoc | 2 +- modules/virt-uploading-image-virtctl.adoc | 2 +- modules/virt-using-virtctl-ssh-command.adoc | 2 +- modules/ztp-checking-du-cluster-config.adoc | 2 +- nodes/containers/nodes-containers-using.adoc | 2 +- .../eco-about-remediation-fencing-maintenance.adoc | 2 +- .../accessing-third-party-monitoring-apis.adoc | 2 +- .../netobserv_cli/netobserv-cli-reference.adoc | 2 +- .../alertmanager-monitoring-coreos-com-v1.adoc | 2 +- rosa_architecture/rosa-understanding.adoc | 2 +- rosa_hcp/rosa-hcp-deleting-cluster.adoc | 2 +- .../cert-manager-operator-release-notes.adoc | 2 +- serverless/functions/serverless-functions-yaml.adoc | 2 +- service_mesh/v1x/preparing-ossm-installation.adoc | 2 +- service_mesh/v2x/preparing-ossm-installation.adoc | 2 +- virt/getting_started/virt-getting-started.adoc | 2 +- virt/getting_started/virt-using-the-cli-tools.adoc | 4 ++-- virt/install/uninstalling-virt.adoc | 2 +- .../virtual_machines/virt-accessing-vm-consoles.adoc | 4 ++-- virt/virtual_machines/virt-accessing-vm-ssh.adoc | 2 +- virt/virtual_machines/virt-delete-vms.adoc | 2 +- virt/virtual_machines/virt-exporting-vms.adoc | 2 +- web_console/customizing-the-web-console.adoc | 2 +- .../web_terminal/installing-web-terminal.adoc | 2 +- web_console/web_terminal/odc-using-web-terminal.adoc | 2 +- welcome/cloud-experts-rosa-hcp-sts-explained.adoc | 2 +- welcome/oke_about.adoc | 2 +- 103 files changed, 127 insertions(+), 128 deletions(-) diff --git a/applications/index.adoc b/applications/index.adoc index 258dd157f9..4dd3b414aa 100644 --- a/applications/index.adoc +++ b/applications/index.adoc @@ -6,7 +6,7 @@ include::_attributes/common-attributes.adoc[] toc::[] -Using {product-title}, you can create, edit, delete, and manage applications using the web 
console or command line interface (CLI). +Using {product-title}, you can create, edit, delete, and manage applications using the web console or command-line interface (CLI). [id="working-on-a-project"] == Working on a project diff --git a/backup_and_restore/application_backup_and_restore/aws-sts/oadp-aws-sts.adoc b/backup_and_restore/application_backup_and_restore/aws-sts/oadp-aws-sts.adoc index b867312d98..458bc19a1e 100644 --- a/backup_and_restore/application_backup_and_restore/aws-sts/oadp-aws-sts.adoc +++ b/backup_and_restore/application_backup_and_restore/aws-sts/oadp-aws-sts.adoc @@ -14,7 +14,7 @@ You configure {aws-short} for Velero, create a default `Secret`, and then instal To install the OADP Operator in a restricted network environment, you must first disable the default OperatorHub sources and mirror the Operator catalog. See xref:../../../operators/admin/olm-restricted-networks.adoc#olm-restricted-networks[Using Operator Lifecycle Manager on restricted networks] for details. -You can install {oadp-short} on an AWS {sts-first} (AWS STS) cluster manually. Amazon {aws-short} provides {aws-short} STS as a web service that enables you to request temporary, limited-privilege credentials for users. You use STS to provide trusted users with temporary access to resources via API calls, your {aws-short} console, or the {aws-short} command line interface (CLI). +You can install {oadp-short} on an AWS {sts-first} (AWS STS) cluster manually. Amazon {aws-short} provides {aws-short} STS as a web service that enables you to request temporary, limited-privilege credentials for users. You use STS to provide trusted users with temporary access to resources via API calls, your {aws-short} console, or the {aws-short} command-line interface (CLI). Before installing {oadp-first}, you must set up role and policy credentials for {oadp-short} so that it can use the {aws-full} API. 
diff --git a/cicd/jenkins/images-other-jenkins-agent.adoc b/cicd/jenkins/images-other-jenkins-agent.adoc index fa160f667a..99130538d0 100644 --- a/cicd/jenkins/images-other-jenkins-agent.adoc +++ b/cicd/jenkins/images-other-jenkins-agent.adoc @@ -12,7 +12,7 @@ The Base image for Jenkins agents does the following: * Pulls in both the required tools, headless Java, the Jenkins JNLP client, and the useful ones, including `git`, `tar`, `zip`, and `nss`, among others. * Establishes the JNLP agent as the entry point. -* Includes the `oc` client tool for invoking command line operations from within Jenkins jobs. +* Includes the `oc` client tool for invoking command-line operations from within Jenkins jobs. * Provides Dockerfiles for both Red Hat Enterprise Linux (RHEL) and `localdev` images. [IMPORTANT] diff --git a/cli_reference/rosa_cli/rosa-cli-permission-examples.adoc b/cli_reference/rosa_cli/rosa-cli-permission-examples.adoc index 3328d47337..1870b679f7 100644 --- a/cli_reference/rosa_cli/rosa-cli-permission-examples.adoc +++ b/cli_reference/rosa_cli/rosa-cli-permission-examples.adoc @@ -5,7 +5,7 @@ include::_attributes/attributes-openshift-dedicated.adoc[] :context: rosa-cli-permission-examples toc::[] -You can create roles with permissions that adhere to the principal of least privilege, in which the users assigned the roles have no other permissions assigned to them outside the scope of the specific action they need to perform. These policies contain only the minimum required permissions needed to perform specific actions by using the {product-title} (ROSA) command line interface (CLI). +You can create roles with permissions that adhere to the principle of least privilege, in which the users assigned the roles have no other permissions assigned to them outside the scope of the specific action they need to perform.
These policies contain only the minimum required permissions needed to perform specific actions by using the {product-title} (ROSA) command-line interface (CLI). [IMPORTANT] ==== diff --git a/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-accessing.adoc b/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-accessing.adoc index 0d9ec26ca1..a879aca63b 100644 --- a/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-accessing.adoc +++ b/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-accessing.adoc @@ -9,7 +9,7 @@ toc::[] //rosaworkshop.io content metadata //Brought into ROSA product docs 2023-11-30 -You can connect to your cluster using the command line interface (CLI) or the {hybrid-console} user interface (UI). +You can connect to your cluster using the command-line interface (CLI) or the {hybrid-console} user interface (UI). == Accessing your cluster using the CLI diff --git a/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-admin-rights.adoc b/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-admin-rights.adoc index 93c57f476b..bee4783a32 100644 --- a/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-admin-rights.adoc +++ b/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-admin-rights.adoc @@ -9,7 +9,7 @@ toc::[] //rosaworkshop.io content metadata //Brought into ROSA product docs 2023-11-30 -Administration (admin) privileges are not automatically granted to users that you add to your cluster. If you want to grant admin-level privileges to certain users, you will need to manually grant them to each user. You can grant admin privileges from either the ROSA command line interface (CLI) or the Red{nbsp}Hat OpenShift Cluster Manager web user interface (UI). 
+Administration (admin) privileges are not automatically granted to users that you add to your cluster. If you want to grant admin-level privileges to certain users, you will need to manually grant them to each user. You can grant admin privileges from either the ROSA command-line interface (CLI) or the Red{nbsp}Hat OpenShift Cluster Manager web user interface (UI). Red{nbsp}Hat offers two types of admin privileges: diff --git a/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-deleting.adoc b/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-deleting.adoc index 085c201436..a0eac9f568 100644 --- a/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-deleting.adoc +++ b/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-deleting.adoc @@ -9,7 +9,7 @@ toc::[] //rosaworkshop.io content metadata //Brought into ROSA product docs 2024-01-11 -You can delete your {product-title} (ROSA) cluster using either the command line interface (CLI) or the user interface (UI). +You can delete your {product-title} (ROSA) cluster using either the command-line interface (CLI) or the user interface (UI). 
== Deleting a ROSA cluster using the CLI diff --git a/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-deploying/cloud-experts-getting-started-hcp.adoc b/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-deploying/cloud-experts-getting-started-hcp.adoc index 19c72b095f..eb551ba2a0 100644 --- a/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-deploying/cloud-experts-getting-started-hcp.adoc +++ b/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-deploying/cloud-experts-getting-started-hcp.adoc @@ -9,7 +9,7 @@ toc::[] //rosaworkshop.io content metadata //Brought into ROSA product docs 2023-11-21 -This tutorial outlines deploying a {hcp-title-first} cluster. +This tutorial outlines deploying a {hcp-title-first} cluster. With {hcp-title}, you can decouple the control plane from the data plane. This is a new deployment model for ROSA in which the control plane is hosted in a Red{nbsp}Hat-owned AWS account. The control plane is no longer hosted in your AWS account, reducing your AWS infrastructure expenses. The control plane is dedicated to a single cluster and is highly available. For more information, see the xref:../../../rosa_hcp/rosa-hcp-sts-creating-a-cluster-quickly.adoc#rosa-hcp-sts-creating-a-cluster-quickly[{hcp-title} documentation]. @@ -30,7 +30,7 @@ In this tutorial, we will create these resources first. We will also set up some ---- rosa list regions --hosted-cp ---- - + . Create the VPC. For this tutorial, the following link:https://github.com/openshift-cs/rosaworkshop/blob/master/rosa-workshop/rosa/resources/setup-vpc.sh[script] creates the VPC and its required components for you. It uses the region configured for the `aws` CLI. + [source,bash] @@ -118,7 +118,7 @@ echo -n "Creating a route table for the private subnet to the NAT gateway..." 
PRIVATE_ROUTE_TABLE_ID=$(aws ec2 create-route-table --vpc-id $VPC_ID --query RouteTable.RouteTableId --output text) aws ec2 create-tags --resources $PRIVATE_ROUTE_TABLE_ID $NAT_IP_ADDRESS --tags Key=Name,Value=$CLUSTER_NAME-private - + aws ec2 create-route --route-table-id $PRIVATE_ROUTE_TABLE_ID --destination-cidr-block 0.0.0.0/0 --gateway-id $NAT_GATEWAY_ID > /dev/null 2>&1 aws ec2 associate-route-table --subnet-id $PRIVATE_SUBNET_ID --route-table-id $PRIVATE_ROUTE_TABLE_ID > /dev/null 2>&1 @@ -169,7 +169,7 @@ Private Subnet: subnet-011fe340000000000 === Creating your OIDC configuration -In this tutorial, we will use the automatic mode when creating the OIDC configuration. We will also store the OIDC ID as an environment variable for later use. The command uses the ROSA CLI to create your cluster's unique OIDC configuration. +In this tutorial, we will use the automatic mode when creating the OIDC configuration. We will also store the OIDC ID as an environment variable for later use. The command uses the ROSA CLI to create your cluster's unique OIDC configuration. * To create the OIDC configuration for this tutorial, run the following command: + @@ -215,7 +215,7 @@ rosa create cluster --cluster-name $CLUSTER_NAME \ --sts --mode auto --yes ---- -The cluster is ready and completely usable after about 10 minutes. The cluster will have a control plane across three AWS availability zones in your selected region and create two worker nodes in your AWS account. +The cluster is ready and completely usable after about 10 minutes. The cluster will have a control plane across three AWS availability zones in your selected region and create two worker nodes in your AWS account. == Checking the installation status . Run one of the following commands to check the status of the cluster: @@ -241,4 +241,4 @@ rosa list clusters rosa logs install --cluster $CLUSTER_NAME --watch ---- -. Once the state changes to “ready” your cluster is installed. 
It might take a few more minutes for the worker nodes to come online. \ No newline at end of file +. Once the state changes to “ready” your cluster is installed. It might take a few more minutes for the worker nodes to come online. diff --git a/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-deploying/cloud-experts-getting-started-simple-cli-guide.adoc b/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-deploying/cloud-experts-getting-started-simple-cli-guide.adoc index d54db7a21c..5158740529 100644 --- a/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-deploying/cloud-experts-getting-started-simple-cli-guide.adoc +++ b/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-deploying/cloud-experts-getting-started-simple-cli-guide.adoc @@ -9,7 +9,7 @@ toc::[] //rosaworkshop.io content metadata //Brought into ROSA product docs 2023-11-16 -This page outlines the minimum list of commands to deploy a {product-title} (ROSA) cluster using the command line interface (CLI). +This page outlines the minimum list of commands to deploy a {product-title} (ROSA) cluster using the command-line interface (CLI). [NOTE] ==== diff --git a/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-managing-worker-nodes.adoc b/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-managing-worker-nodes.adoc index 2050275fa4..9208e9b1f4 100644 --- a/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-managing-worker-nodes.adoc +++ b/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-managing-worker-nodes.adoc @@ -13,7 +13,7 @@ In {product-title} (ROSA), changing aspects of your worker nodes is performed th == Creating a machine pool -You can create a machine pool with either the command line interface (CLI) or the user interface (UI). 
+You can create a machine pool with either the command-line interface (CLI) or the user interface (UI). === Creating a machine pool with the CLI . Run the following command: diff --git a/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-upgrading.adoc b/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-upgrading.adoc index 3fb8ec4348..8dc8d98999 100644 --- a/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-upgrading.adoc +++ b/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-upgrading.adoc @@ -13,7 +13,7 @@ toc::[] Ways to schedule a cluster upgrade include: -* *Manually using the command line interface (CLI)*: Start a one-time immediate upgrade or schedule a one-time upgrade for a future date and time. +* *Manually using the command-line interface (CLI)*: Start a one-time immediate upgrade or schedule a one-time upgrade for a future date and time. * *Manually using the Red{nbsp}Hat OpenShift Cluster Manager user interface (UI)*: Start a one-time immediate upgrade or schedule a one-time upgrade for a future date and time. * *Automated upgrades*: Set an upgrade window for recurring y-stream upgrades whenever a new version is available without needing to manually schedule it. Minor versions have to be manually scheduled. 
diff --git a/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-what-is-rosa.adoc b/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-what-is-rosa.adoc index ca2c4ca7ca..44071c38d4 100644 --- a/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-what-is-rosa.adoc +++ b/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-what-is-rosa.adoc @@ -11,7 +11,7 @@ toc::[] Red{nbsp}Hat OpenShift Service on AWS (ROSA) is a fully-managed turnkey application platform that allows you to focus on what matters most, delivering value to your customers by building and deploying applications. Red{nbsp}Hat and AWS SRE experts manage the underlying platform so you do not have to worry about infrastructure management. ROSA provides seamless integration with a wide range of AWS compute, database, analytics, machine learning, networking, mobile, and other services to further accelerate the building and delivering of differentiating experiences to your customers. -ROSA makes use of AWS Security Token Service (STS) to obtain credentials to manage infrastructure in your AWS account. AWS STS is a global web service that creates temporary credentials for IAM users or federated users. ROSA uses this to assign short-term, limited-privilege, security credentials. These credentials are associated with IAM roles that are specific to each component that makes AWS API calls. This method aligns with the principals of least privilege and secure practices in cloud service resource management. The ROSA command line interface (CLI) tool manages the STS credentials that are assigned for unique tasks and takes action on AWS resources as part of OpenShift functionality. +ROSA makes use of AWS Security Token Service (STS) to obtain credentials to manage infrastructure in your AWS account. 
AWS STS is a global web service that creates temporary credentials for IAM users or federated users. ROSA uses this to assign short-term, limited-privilege, security credentials. These credentials are associated with IAM roles that are specific to each component that makes AWS API calls. This method aligns with the principles of least privilege and secure practices in cloud service resource management. The ROSA command-line interface (CLI) tool manages the STS credentials that are assigned for unique tasks and takes action on AWS resources as part of OpenShift functionality. //For a detailed explanation, see "ROSA with STS Explained" (add xref when page is migrated). == Key features of ROSA diff --git a/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-rosa-sts-explained.adoc b/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-rosa-sts-explained.adoc index 10c83c2039..b7a0760c00 100644 --- a/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-rosa-sts-explained.adoc +++ b/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-rosa-sts-explained.adoc @@ -55,7 +55,7 @@ Several crucial components make ROSA with STS more secure than ROSA with IAM Use [id="sts-explained"] == AWS STS explained -ROSA uses AWS STS to grant least-privilege permissions with short-term security credentials to specific and segregated IAM roles. The credentials are associated with IAM roles specific to each component and cluster that makes AWS API calls. This method aligns with principles of least-privilege and secure practices in cloud service resource management. The ROSA command line interface (CLI) tool manages the STS roles and policies that are assigned for unique tasks and takes action upon AWS resources as part of OpenShift functionality. +ROSA uses AWS STS to grant least-privilege permissions with short-term security credentials to specific and segregated IAM roles.
The credentials are associated with IAM roles specific to each component and cluster that makes AWS API calls. This method aligns with principles of least-privilege and secure practices in cloud service resource management. The ROSA command-line interface (CLI) tool manages the STS roles and policies that are assigned for unique tasks and takes action upon AWS resources as part of OpenShift functionality. STS roles and policies must be created for each ROSA cluster. To make this easier, the installation tools provide all the commands and files needed to create the roles as policies and an option to allow the CLI to automatically create the roles and policies. See xref:../../rosa_install_access_delete_clusters/rosa-sts-creating-a-cluster-with-customizations.adoc#rosa-sts-creating-cluster-customizations_rosa-sts-creating-a-cluster-with-customizations[Creating a ROSA cluster with STS using customizations] for more information about the different `--mode` options. diff --git a/cloud_experts_tutorials/cloud-experts-rosa-hcp-activation-and-account-linking-tutorial.adoc b/cloud_experts_tutorials/cloud-experts-rosa-hcp-activation-and-account-linking-tutorial.adoc index a365020dac..1fd4641cf8 100644 --- a/cloud_experts_tutorials/cloud-experts-rosa-hcp-activation-and-account-linking-tutorial.adoc +++ b/cloud_experts_tutorials/cloud-experts-rosa-hcp-activation-and-account-linking-tutorial.adoc @@ -116,7 +116,7 @@ image::rosa-cli-ui-12.png[] == {hcp-title} cluster deployment using the CLI -. Click the *Download the ROSA CLI* button to download the ROSA command line interface (CLI) for your operating system and set it up as described in the xref:../cli_reference/rosa_cli/rosa-get-started-cli.adoc#rosa-get-started-cli[Help with ROSA CLI setup]. +. 
Click the *Download the ROSA CLI* button to download the ROSA command-line interface (CLI) for your operating system and set it up as described in the xref:../cli_reference/rosa_cli/rosa-get-started-cli.adoc#rosa-get-started-cli[Help with ROSA CLI setup]. + [IMPORTANT] ==== diff --git a/cloud_experts_tutorials/cloud-experts-update-component-routes.adoc b/cloud_experts_tutorials/cloud-experts-update-component-routes.adoc index 4092df9c95..d67e6f7143 100644 --- a/cloud_experts_tutorials/cloud-experts-update-component-routes.adoc +++ b/cloud_experts_tutorials/cloud-experts-update-component-routes.adoc @@ -109,7 +109,7 @@ By running these commands you can see that the default component routes for our * `downloads-openshift-console.apps.my-example-cluster-aws.z9a9.p1.openshiftapps.com` for Downloads * `oauth-openshift.apps.my-example-cluster-aws.z9a9.p1.openshiftapps.com` for OAuth -We can use the `rosa edit ingress` command to change the hostname of each service and add a TLS certificate for all of our component routes. The relevant parameters are shown in this excerpt of the command line help for the `rosa edit ingress` command: +We can use the `rosa edit ingress` command to change the hostname of each service and add a TLS certificate for all of our component routes. The relevant parameters are shown in this excerpt of the command-line help for the `rosa edit ingress` command: [source,bash] ---- diff --git a/cloud_experts_tutorials/rosa-mobb-cli-quickstart.adoc b/cloud_experts_tutorials/rosa-mobb-cli-quickstart.adoc index fae9856dec..98571f0d56 100644 --- a/cloud_experts_tutorials/rosa-mobb-cli-quickstart.adoc +++ b/cloud_experts_tutorials/rosa-mobb-cli-quickstart.adoc @@ -200,7 +200,7 @@ Initialize the ROSA CLI to complete the remaining validation checks and configur ### Interactive Installation -ROSA can be installed using command line parameters or in interactive mode. 
For an interactive installation run the following command +ROSA can be installed using command-line parameters or in interactive mode. For an interactive installation run the following command ```bash rosa create cluster --interactive --mode auto diff --git a/installing/installing_ibm_z/preparing-to-install-on-ibm-z.adoc b/installing/installing_ibm_z/preparing-to-install-on-ibm-z.adoc index 1528470aa3..73788e6785 100644 --- a/installing/installing_ibm_z/preparing-to-install-on-ibm-z.adoc +++ b/installing/installing_ibm_z/preparing-to-install-on-ibm-z.adoc @@ -27,7 +27,7 @@ The {product-title} installation program offers the following methods for deploy * *Interactive*: You can deploy a cluster with the web-based link:https://access.redhat.com/documentation/en-us/assisted_installer_for_openshift_container_platform[{ai-full}]. This method requires no setup for the installer, and is ideal for connected environments like {ibm-z-name}. -* *Local Agent-based*: You can deploy a cluster locally with the xref:../../installing/installing_with_agent_based_installer/preparing-to-install-with-agent-based-installer.adoc#preparing-to-install-with-agent-based-installer[Agent-based Installer]. It provides many of the benefits of the {ai-full}, but you must download and configure the Agent-based Installer first. Configuration is done with a command line interface (CLI). This approach is ideal for disconnected networks. +* *Local Agent-based*: You can deploy a cluster locally with the xref:../../installing/installing_with_agent_based_installer/preparing-to-install-with-agent-based-installer.adoc#preparing-to-install-with-agent-based-installer[Agent-based Installer]. It provides many of the benefits of the {ai-full}, but you must download and configure the Agent-based Installer first. Configuration is done with a command-line interface (CLI). This approach is ideal for disconnected networks. // Tile on the console not available yet. 
once available add: the link:https://console.redhat.com/openshift/install/ibmz/agent-based[Agent-based Installer] * *Full control*: You can deploy a cluster on xref:../../installing/installing_ibm_z/installing-ibm-z.adoc#installing-ibm-z[infrastructure that you prepare and maintain], which provides maximum customizability. You can deploy clusters in connected or disconnected environments. diff --git a/migrating_from_ocp_3_to_4/advanced-migration-options-3-4.adoc b/migrating_from_ocp_3_to_4/advanced-migration-options-3-4.adoc index 4119e132a9..80cab06da0 100644 --- a/migrating_from_ocp_3_to_4/advanced-migration-options-3-4.adoc +++ b/migrating_from_ocp_3_to_4/advanced-migration-options-3-4.adoc @@ -21,7 +21,7 @@ include::modules/migration-migrating-on-prem-to-cloud.adoc[leveloffset=+1] [id="migrating-applications-cli_{context}"] == Migrating applications by using the command line -You can migrate applications with the {mtc-short} API by using the command line interface (CLI) in order to automate the migration. +You can migrate applications with the {mtc-short} API by using the command-line interface (CLI) in order to automate the migration. include::modules/migration-prerequisites.adoc[leveloffset=+2] include::modules/migration-creating-registry-route-for-dim.adoc[leveloffset=+2] diff --git a/migrating_from_ocp_3_to_4/installing-restricted-3-4.adoc b/migrating_from_ocp_3_to_4/installing-restricted-3-4.adoc index ed81fc9668..bb9041a10b 100644 --- a/migrating_from_ocp_3_to_4/installing-restricted-3-4.adoc +++ b/migrating_from_ocp_3_to_4/installing-restricted-3-4.adoc @@ -16,7 +16,7 @@ This process creates a `mapping.txt` file, which contains the mapping between th + By default, the {mtc-short} web console and the `Migration Controller` pod run on the target cluster. 
You can configure the `Migration Controller` custom resource manifest to run the {mtc-short} web console and the `Migration Controller` pod on a link:https://access.redhat.com/articles/5064151[source cluster or on a remote cluster]. -. Install the _legacy_ {mtc-full} Operator on the {product-title} 3 source cluster from the command line interface. +. Install the _legacy_ {mtc-full} Operator on the {product-title} 3 source cluster from the command-line interface. . Configure object storage to use as a replication repository. To uninstall {mtc-short}, see xref:../migrating_from_ocp_3_to_4/installing-restricted-3-4.adoc#migration-uninstalling-mtc-clean-up_installing-restricted-3-4[Uninstalling {mtc-short} and deleting resources]. diff --git a/migration_toolkit_for_containers/advanced-migration-options-mtc.adoc b/migration_toolkit_for_containers/advanced-migration-options-mtc.adoc index ffa2794c93..75b40c6933 100644 --- a/migration_toolkit_for_containers/advanced-migration-options-mtc.adoc +++ b/migration_toolkit_for_containers/advanced-migration-options-mtc.adoc @@ -13,7 +13,7 @@ include::modules/migration-terminology.adoc[leveloffset=+1] [id="migrating-applications-cli_{context}"] == Migrating applications by using the command line -You can migrate applications with the {mtc-short} API by using the command line interface (CLI) in order to automate the migration. +You can migrate applications with the {mtc-short} API by using the command-line interface (CLI) in order to automate the migration. 
include::modules/migration-prerequisites.adoc[leveloffset=+2] include::modules/migration-creating-registry-route-for-dim.adoc[leveloffset=+2] diff --git a/migration_toolkit_for_containers/installing-mtc-restricted.adoc b/migration_toolkit_for_containers/installing-mtc-restricted.adoc index 303355ed82..0485145c3d 100644 --- a/migration_toolkit_for_containers/installing-mtc-restricted.adoc +++ b/migration_toolkit_for_containers/installing-mtc-restricted.adoc @@ -19,7 +19,7 @@ By default, the {mtc-short} web console and the `Migration Controller` pod run o . Install the {mtc-full} Operator on the source cluster: * {product-title} 4.6 or later: Install the {mtc-full} Operator by using Operator Lifecycle Manager. -* {product-title} 4.2 to 4.5: Install the legacy {mtc-full} Operator from the command line interface. +* {product-title} 4.2 to 4.5: Install the legacy {mtc-full} Operator from the command-line interface. . Configure object storage to use as a replication repository. diff --git a/modules/about-administrator-perspective.adoc b/modules/about-administrator-perspective.adoc index 9cad69baf8..3b68d41a4a 100644 --- a/modules/about-administrator-perspective.adoc +++ b/modules/about-administrator-perspective.adoc @@ -8,7 +8,7 @@ The *Administrator* perspective enables you to view the cluster inventory, capacity, general and specific utilization information, and the stream of important events, all of which help you to simplify planning and troubleshooting tasks. Both project administrators and cluster administrators can view the *Administrator* perspective. -Cluster administrators can also open an embedded command line terminal instance with the web terminal Operator in {product-title} 4.7 and later. +Cluster administrators can also open an embedded command-line terminal instance with the web terminal Operator in {product-title} 4.7 and later. 
[NOTE] ==== diff --git a/modules/about-developer-perspective.adoc b/modules/about-developer-perspective.adoc index e4ca78a8e9..d202ee7472 100644 --- a/modules/about-developer-perspective.adoc +++ b/modules/about-developer-perspective.adoc @@ -13,7 +13,7 @@ The *Developer* perspective offers several built-in ways to deploy applications, * Share your project with others. * Troubleshoot problems with your applications by running Prometheus Query Language (PromQL) queries on your project and examining the metrics visualized on a plot. The metrics provide information about the state of a cluster and any user-defined workloads that you are monitoring. -Cluster administrators can also open an embedded command line terminal instance in the web console in {product-title} 4.7 and later. +Cluster administrators can also open an embedded command-line terminal instance in the web console in {product-title} 4.7 and later. [NOTE] ==== diff --git a/modules/about-manually-maintained-credentials-upgrade.adoc b/modules/about-manually-maintained-credentials-upgrade.adoc index 39bcece996..4b6714b789 100644 --- a/modules/about-manually-maintained-credentials-upgrade.adoc +++ b/modules/about-manually-maintained-credentials-upgrade.adoc @@ -15,7 +15,7 @@ After updating the cloud provider resources, you must update the `upgradeable-to [NOTE] ==== -The process to update the cloud provider resources and the `upgradeable-to` annotation can only be completed by using command line tools. +The process to update the cloud provider resources and the `upgradeable-to` annotation can only be completed by using command-line tools. 
==== [id="cco-platform-options_{context}"] diff --git a/modules/applications-create-using-cli-modify.adoc b/modules/applications-create-using-cli-modify.adoc index 00d5995362..3089a14e3e 100644 --- a/modules/applications-create-using-cli-modify.adoc +++ b/modules/applications-create-using-cli-modify.adoc @@ -182,7 +182,7 @@ If a source code repository and a builder image are specified as separate argume [id="grouping-images-source-single-pod"] == Grouping images and source in a single pod -The `new-app` command allows deploying multiple images together in a single pod. To specify which images to group together, use the `+` separator. The `--group` command line argument can also be used to specify the images that should be grouped together. To group the image built from a source repository with other images, specify its builder image in the group: +The `new-app` command allows deploying multiple images together in a single pod. To specify which images to group together, use the `+` separator. The `--group` command-line argument can also be used to specify the images that should be grouped together. To group the image built from a source repository with other images, specify its builder image in the group: [source,terminal] ---- diff --git a/modules/builds-understanding-openshift-pipeline.adoc b/modules/builds-understanding-openshift-pipeline.adoc index 9ec09cc0e2..ba00f5b12f 100644 --- a/modules/builds-understanding-openshift-pipeline.adoc +++ b/modules/builds-understanding-openshift-pipeline.adoc @@ -27,7 +27,7 @@ The {product-title} Jenkins Sync Plugin keeps the build configuration and build *{product-title} Jenkins Client Plugin* -The {product-title} Jenkins Client Plugin is a Jenkins plugin which aims to provide a readable, concise, comprehensive, and fluent Jenkins Pipeline syntax for rich interactions with an {product-title} API Server. The plugin uses the {product-title} command line tool, `oc`, which must be available on the nodes executing the script. 
+The {product-title} Jenkins Client Plugin is a Jenkins plugin which aims to provide a readable, concise, comprehensive, and fluent Jenkins Pipeline syntax for rich interactions with an {product-title} API Server. The plugin uses the {product-title} command-line tool, `oc`, which must be available on the nodes executing the script. The Jenkins Client Plugin must be installed on your Jenkins master so the {product-title} DSL will be available to use within the `jenkinsfile` for your application. This plugin is installed and enabled by default when using the {product-title} Jenkins image. diff --git a/modules/cnf-collecting-low-latency-tuning-debugging-data-for-red-hat-support.adoc b/modules/cnf-collecting-low-latency-tuning-debugging-data-for-red-hat-support.adoc index 07dd7ca884..924a2e9370 100644 --- a/modules/cnf-collecting-low-latency-tuning-debugging-data-for-red-hat-support.adoc +++ b/modules/cnf-collecting-low-latency-tuning-debugging-data-for-red-hat-support.adoc @@ -32,7 +32,7 @@ Use the `oc adm must-gather` CLI command to collect information about your clust * The Node Tuning Operator namespaces and child objects. * `MachineConfigPool` and associated `MachineConfig` objects. * The Node Tuning Operator and associated Tuned objects. -* Linux kernel command line options. +* Linux kernel command-line options. * CPU and NUMA topology * Basic PCI device information and NUMA locality. diff --git a/modules/cnf-performance-profile-creator-arguments.adoc b/modules/cnf-performance-profile-creator-arguments.adoc index 3e25611c50..c89ac29e4a 100644 --- a/modules/cnf-performance-profile-creator-arguments.adoc +++ b/modules/cnf-performance-profile-creator-arguments.adoc @@ -43,7 +43,7 @@ Default: `false`. [WARNING] ==== -If this argument is set to `true` you should not disable Hyper-Threading in the BIOS. Disabling Hyper-Threading is accomplished with a kernel command line argument. +If this argument is set to `true` you should not disable Hyper-Threading in the BIOS. 
Disabling Hyper-Threading is accomplished with a kernel command-line argument. ==== | `info` diff --git a/modules/compliance-evaluate-kubeletconfig-rules.adoc b/modules/compliance-evaluate-kubeletconfig-rules.adoc index b2536dab7d..4fa6d28c03 100644 --- a/modules/compliance-evaluate-kubeletconfig-rules.adoc +++ b/modules/compliance-evaluate-kubeletconfig-rules.adoc @@ -6,7 +6,7 @@ [id="compliance-evaluate-kubeletconfig-rules_{context}"] = Evaluating KubeletConfig rules against default configuration values -{product-title} infrastructure might contain incomplete configuration files at run time, and nodes assume default configuration values for missing configuration options. Some configuration options can be passed as command line arguments. As a result, the Compliance Operator cannot verify if the configuration file on the node is complete because it might be missing options used in the rule checks. +{product-title} infrastructure might contain incomplete configuration files at run time, and nodes assume default configuration values for missing configuration options. Some configuration options can be passed as command-line arguments. As a result, the Compliance Operator cannot verify if the configuration file on the node is complete because it might be missing options used in the rule checks. To prevent false negative results where the default configuration value passes a check, the Compliance Operator uses the Node/Proxy API to fetch the configuration for each node in a node pool, then all configuration options that are consistent across nodes in the node pool are stored in a file that represents the configuration for all nodes within that node pool. This increases the accuracy of the scan results. 
diff --git a/modules/developer-cli-odo-developer-setup.adoc b/modules/developer-cli-odo-developer-setup.adoc index 6f12c5abc8..f4ba225477 100644 --- a/modules/developer-cli-odo-developer-setup.adoc +++ b/modules/developer-cli-odo-developer-setup.adoc @@ -8,6 +8,6 @@ With {odo-title} you can create and deploy application on {product-title} clusters from a terminal. Code editor plugins use {odo-title} which allows users to interact with {product-title} clusters from their IDE terminals. Examples of plugins that use {odo-title}: VS Code OpenShift Connector, OpenShift Connector for Intellij, Codewind for Eclipse Che. -{odo-title} works on Windows, macOS, and Linux operating systems and from any terminal. {odo-title} provides autocompletion for bash and zsh command line shells. +{odo-title} works on Windows, macOS, and Linux operating systems and from any terminal. {odo-title} provides autocompletion for bash and zsh command-line shells. {odo-title} supports Node.js and Java components. diff --git a/modules/images-other-jenkins-agent-gradle.adoc b/modules/images-other-jenkins-agent-gradle.adoc index 76aa553cef..3e6b7f1a16 100644 --- a/modules/images-other-jenkins-agent-gradle.adoc +++ b/modules/images-other-jenkins-agent-gradle.adoc @@ -12,8 +12,8 @@ Hosting Gradle builds in the Jenkins agent on {product-title} presents additiona The following settings are suggested as a starting point for running Gradle builds in a memory constrained Jenkins agent on {product-title}. You can modify these settings as required. * Ensure the long-lived Gradle daemon is disabled by adding `org.gradle.daemon=false` to the `gradle.properties` file. -* Disable parallel build execution by ensuring `org.gradle.parallel=true` is not set in the `gradle.properties` file and that `--parallel` is not set as a command line argument. 
+* Disable parallel build execution by ensuring `org.gradle.parallel=true` is not set in the `gradle.properties` file and that `--parallel` is not set as a command-line argument. * To prevent Java compilations running out-of-process, set `java { options.fork = false }` in the `build.gradle` file. * Disable multiple additional test processes by ensuring `test { maxParallelForks = 1 }` is set in the `build.gradle` file. * Override the Gradle JVM memory parameters by the `GRADLE_OPTS`, `JAVA_OPTS` or `JAVA_TOOL_OPTIONS` environment variables. -* Set the maximum heap size and JVM arguments for any Gradle test JVM by defining the `maxHeapSize` and `jvmArgs` settings in `build.gradle`, or through the `-Dorg.gradle.jvmargs` command line argument. +* Set the maximum heap size and JVM arguments for any Gradle test JVM by defining the `maxHeapSize` and `jvmArgs` settings in `build.gradle`, or through the `-Dorg.gradle.jvmargs` command-line argument. diff --git a/modules/installation-aws-user-infra-requirements.adoc b/modules/installation-aws-user-infra-requirements.adoc index 4f98ac7b8f..b53fc09f6b 100644 --- a/modules/installation-aws-user-infra-requirements.adoc +++ b/modules/installation-aws-user-infra-requirements.adoc @@ -560,5 +560,5 @@ You need `AWS::EC2::Instance` objects for the following machines: //// You can also create and control them by using a MachineSet after your control plane initializes and you can access the cluster API by using the `oc` -command line interface. +command-line interface. //// diff --git a/modules/installation-gcp-shared-vpc-ingress.adoc b/modules/installation-gcp-shared-vpc-ingress.adoc index d0982f8ad2..38aabec405 100644 --- a/modules/installation-gcp-shared-vpc-ingress.adoc +++ b/modules/installation-gcp-shared-vpc-ingress.adoc @@ -30,7 +30,7 @@ router-default LoadBalancer 172.30.18.154 35.233.157.184 80:32288/TCP,44 ---- $ oc -n openshift-ingress get service router-default --no-headers | awk '{print $4}' ---- -. 
Add a record to your GCP public zone with the router's external IP address and the name `*.apps..`. You can use the `gcloud` command line utility or the GCP web console. +. Add a record to your GCP public zone with the router's external IP address and the name `*.apps..`. You can use the `gcloud` command-line utility or the GCP web console. . To add manual records instead of a wildcard record, create entries for each of the cluster's current routes. You can gather these routes by running the following command: + [source,terminal] diff --git a/modules/installation-user-infra-machines-advanced-enabling-serial-console.adoc b/modules/installation-user-infra-machines-advanced-enabling-serial-console.adoc index 7597a00a8d..b627eba5c7 100644 --- a/modules/installation-user-infra-machines-advanced-enabling-serial-console.adoc +++ b/modules/installation-user-infra-machines-advanced-enabling-serial-console.adoc @@ -34,5 +34,5 @@ $ coreos-installer install \ A similar outcome can be obtained by using the `coreos-installer install --append-karg` option, and specifying the console with `console=`. However, this will only set the console for the kernel and not the bootloader. ==== -To configure a PXE installation, make sure the `coreos.inst.install_dev` kernel command line option is omitted, and use the shell prompt to run `coreos-installer` manually using the above ISO installation procedure. +To configure a PXE installation, make sure the `coreos.inst.install_dev` kernel command-line option is omitted, and use the shell prompt to run `coreos-installer` manually using the above ISO installation procedure. diff --git a/modules/load-and-merge-rules.adoc b/modules/load-and-merge-rules.adoc index ca388757ea..3c3802fad7 100644 --- a/modules/load-and-merge-rules.adoc +++ b/modules/load-and-merge-rules.adoc @@ -26,14 +26,14 @@ cluster name. ** If the `--context` option is present, then use the context's value. ** An empty value is allowed at this stage. 
* The actual cluster information to use is determined. At this point, you may or may not have cluster information. Each piece of the cluster information is built based on the first match in the following flow: -** The values of any of the following command line options: +** The values of any of the following command-line options: *** `--server`, *** `--api-version` *** `--certificate-authority` *** `--insecure-skip-tls-verify` ** If cluster information and a value for the attribute is present, then use it. ** If you do not have a server location, then there is an error. -* The actual user information to use is determined. Users are built using the same rules as clusters, except that you can only have one authentication technique per user; conflicting techniques cause the operation to fail. Command line options take precedence over config file values. Valid command line options are: +* The actual user information to use is determined. Users are built using the same rules as clusters, except that you can only have one authentication technique per user; conflicting techniques cause the operation to fail. Command-line options take precedence over config file values. Valid command-line options are: ** `--auth-path` ** `--client-certificate` ** `--client-key` diff --git a/modules/machine-health-checks-creating.adoc b/modules/machine-health-checks-creating.adoc index f3b6f269f8..79e51bbd89 100644 --- a/modules/machine-health-checks-creating.adoc +++ b/modules/machine-health-checks-creating.adoc @@ -16,7 +16,7 @@ You can only apply a machine health check to machines that are managed by comput .Prerequisites -* Install the `oc` command line interface. +* Install the `oc` command-line interface. 
.Procedure diff --git a/modules/manually-rotating-cloud-creds.adoc b/modules/manually-rotating-cloud-creds.adoc index 6a8d1ebf08..a46888ae61 100644 --- a/modules/manually-rotating-cloud-creds.adoc +++ b/modules/manually-rotating-cloud-creds.adoc @@ -26,7 +26,7 @@ The process for rotating cloud credentials depends on the mode that the CCO is c //// [NOTE] ==== -You can also use the command line interface to complete all parts of this procedure. +You can also use the command-line interface to complete all parts of this procedure. ==== //// diff --git a/modules/microshift-gitops-debug.adoc b/modules/microshift-gitops-debug.adoc index 680a3da6a6..2a7c2879ad 100644 --- a/modules/microshift-gitops-debug.adoc +++ b/modules/microshift-gitops-debug.adoc @@ -10,7 +10,7 @@ You can debug GitOps by using the {oc-first}. .Prerequisites -* The `oc` command line tool is installed. +* The `oc` command-line tool is installed. .Procedure diff --git a/modules/migration-rolling-back-migration-cli.adoc b/modules/migration-rolling-back-migration-cli.adoc index 25eef94bab..561d09ad20 100644 --- a/modules/migration-rolling-back-migration-cli.adoc +++ b/modules/migration-rolling-back-migration-cli.adoc @@ -5,9 +5,9 @@ :_mod-docs-content-type: PROCEDURE [id="migration-rolling-back-migration-cli_{context}"] -= Rolling back a migration from the command line interface += Rolling back a migration from the command-line interface -You can roll back a migration by creating a `MigMigration` custom resource (CR) from the command line interface. +You can roll back a migration by creating a `MigMigration` custom resource (CR) from the command-line interface. 
[NOTE] ==== diff --git a/modules/migration-viewing-migration-plan-log.adoc b/modules/migration-viewing-migration-plan-log.adoc index 550dc4a9fd..ad0b14fa5b 100644 --- a/modules/migration-viewing-migration-plan-log.adoc +++ b/modules/migration-viewing-migration-plan-log.adoc @@ -7,7 +7,7 @@ [id="migration-viewing-migration-plan-log_{context}"] = Viewing a migration plan log -You can view an aggregated log for a migration plan. You use the {mtc-short} web console to copy a command to your clipboard and then run the command from the command line interface (CLI). +You can view an aggregated log for a migration plan. You use the {mtc-short} web console to copy a command to your clipboard and then run the command from the command-line interface (CLI). The command displays the filtered logs of the following pods: diff --git a/modules/migration-viewing-migration-plan-resources.adoc b/modules/migration-viewing-migration-plan-resources.adoc index 30944eaacc..04c3ebacda 100644 --- a/modules/migration-viewing-migration-plan-resources.adoc +++ b/modules/migration-viewing-migration-plan-resources.adoc @@ -7,7 +7,7 @@ [id="migration-viewing-migration-plan-resources_{context}"] = Viewing migration plan resources -You can view migration plan resources to monitor a running migration or to troubleshoot a failed migration by using the {mtc-short} web console and the command line interface (CLI). +You can view migration plan resources to monitor a running migration or to troubleshoot a failed migration by using the {mtc-short} web console and the command-line interface (CLI). .Procedure diff --git a/modules/mirror-registry-introduction.adoc b/modules/mirror-registry-introduction.adoc index 49aee5869d..365219b348 100644 --- a/modules/mirror-registry-introduction.adoc +++ b/modules/mirror-registry-introduction.adoc @@ -7,7 +7,7 @@ For disconnected deployments of {product-title}, a container registry is required to carry out the installation of the clusters. 
To run a production-grade registry service on such a cluster, you must create a separate registry deployment to install the first cluster. The _mirror registry for Red Hat OpenShift_ addresses this need and is included in every OpenShift subscription. It is available for download on the link:https://console.redhat.com/openshift/downloads#tool-mirror-registry[OpenShift console *Downloads*] page. -The _mirror registry for Red Hat OpenShift_ allows users to install a small-scale version of Red Hat Quay and its required components using the `mirror-registry` command line interface (CLI) tool. The _mirror registry for Red Hat OpenShift_ is deployed automatically with preconfigured local storage and a local database. It also includes auto-generated user credentials and access permissions with a single set of inputs and no additional configuration choices to get started. +The _mirror registry for Red Hat OpenShift_ allows users to install a small-scale version of Red Hat Quay and its required components using the `mirror-registry` command-line interface (CLI) tool. The _mirror registry for Red Hat OpenShift_ is deployed automatically with preconfigured local storage and a local database. It also includes auto-generated user credentials and access permissions with a single set of inputs and no additional configuration choices to get started. The _mirror registry for Red Hat OpenShift_ provides a pre-determined network configuration and reports deployed component credentials and access URLs upon success. A limited set of optional configuration inputs like fully qualified domain name (FQDN) services, superuser name and password, and custom TLS certificates are also provided. This provides users with a container registry so that they can easily create an offline mirror of all {product-title} release content when running {product-title} in restricted network environments. 
diff --git a/modules/network-observability-netobserv-cli-reference.adoc b/modules/network-observability-netobserv-cli-reference.adoc index 7bdc6aba82..9bc475bce8 100644 --- a/modules/network-observability-netobserv-cli-reference.adoc +++ b/modules/network-observability-netobserv-cli-reference.adoc @@ -4,7 +4,7 @@ [id="network-observability-netobserv-cli-reference_{context}"] = Network Observability CLI usage -You can use the Network Observability CLI (`oc netobserv`) to pass command line arguments to capture flows data, packets data, and metrics for further analysis and enable features supported by the Network Observability Operator. +You can use the Network Observability CLI (`oc netobserv`) to pass command-line arguments to capture flows data, packets data, and metrics for further analysis and enable features supported by the Network Observability Operator. [id="cli-syntax_{context}"] == Syntax diff --git a/modules/nodes-containers-copying-files-rsync.adoc b/modules/nodes-containers-copying-files-rsync.adoc index b9d008ee9b..74e41a7d97 100644 --- a/modules/nodes-containers-copying-files-rsync.adoc +++ b/modules/nodes-containers-copying-files-rsync.adoc @@ -5,8 +5,8 @@ [id="nodes-containers-copying-files-rsync_{context}"] = Using advanced Rsync features -The `oc rsync` command exposes fewer command line options than standard `rsync`. -In the case that you want to use a standard `rsync` command line option that is +The `oc rsync` command exposes fewer command-line options than standard `rsync`. 
+In the case that you want to use a standard `rsync` command-line option that is not available in `oc rsync`, for example the `--exclude-from=FILE` option, it might be possible to use standard `rsync` 's `--rsh` (`-e`) option or `RSYNC_RSH` environment variable as a workaround, as follows: diff --git a/modules/nodes-nodes-kernel-arguments.adoc b/modules/nodes-nodes-kernel-arguments.adoc index fb11474aa8..a390f20349 100644 --- a/modules/nodes-nodes-kernel-arguments.adoc +++ b/modules/nodes-nodes-kernel-arguments.adoc @@ -188,7 +188,7 @@ ip-10-0-153-150.ec2.internal Ready master 34m v1.28.5 You can see that scheduling on each worker node is disabled as the change is being applied. . Check that the kernel argument worked by going to one of the worker nodes and listing -the kernel command line arguments (in `/proc/cmdline` on the host): +the kernel command-line arguments (in `/proc/cmdline` on the host): + [source,terminal] ---- diff --git a/modules/nodes-pods-configmap-overview.adoc b/modules/nodes-pods-configmap-overview.adoc index d8899b9ae7..6bc04d7b83 100644 --- a/modules/nodes-pods-configmap-overview.adoc +++ b/modules/nodes-pods-configmap-overview.adoc @@ -6,7 +6,7 @@ [id="nodes-pods-configmap-overview_{context}"] = Understanding config maps -Many applications require configuration by using some combination of configuration files, command line arguments, and environment variables. In {product-title}, these configuration artifacts are decoupled from image content to keep containerized applications portable. +Many applications require configuration by using some combination of configuration files, command-line arguments, and environment variables. In {product-title}, these configuration artifacts are decoupled from image content to keep containerized applications portable. The `ConfigMap` object provides mechanisms to inject containers with configuration data while keeping containers agnostic of {product-title}. 
A config map can be used to store fine-grained information like individual properties or coarse-grained information like entire configuration files or JSON blobs. diff --git a/modules/nw-ingress-configuring-application-domain.adoc b/modules/nw-ingress-configuring-application-domain.adoc index 9ecea63231..7418ec96e8 100644 --- a/modules/nw-ingress-configuring-application-domain.adoc +++ b/modules/nw-ingress-configuring-application-domain.adoc @@ -17,7 +17,7 @@ For example, you can use the DNS domain for your company as the default domain f //* You deployed an {OSD} cluster. * You deployed an {product-title} cluster. -* You installed the `oc` command line interface. +* You installed the `oc` command-line interface. .Procedure diff --git a/modules/nw-ovn-kubernetes-examine-nb-database-contents-ref.adoc b/modules/nw-ovn-kubernetes-examine-nb-database-contents-ref.adoc index 4a293bf0fd..a5071d1e13 100644 --- a/modules/nw-ovn-kubernetes-examine-nb-database-contents-ref.adoc +++ b/modules/nw-ovn-kubernetes-examine-nb-database-contents-ref.adoc @@ -4,9 +4,9 @@ :_mod-docs-content-type: REFERENCE [id="nw-ovn-kubernetes-examine-nb-database-contents-ref_{context}"] -= Command line arguments for ovn-nbctl to examine northbound database contents += Command-line arguments for ovn-nbctl to examine northbound database contents -The following table describes the command line arguments that can be used with `ovn-nbctl` to examine the contents of the northbound database. +The following table describes the command-line arguments that can be used with `ovn-nbctl` to examine the contents of the northbound database. [NOTE] @@ -14,7 +14,7 @@ The following table describes the command line arguments that can be used with ` Open a remote shell in the pod you want to view the contents of and then run the `ovn-nbctl` commands. 
==== -.Command line arguments to examine northbound database contents +.Command-line arguments to examine northbound database contents [cols="30%,70%",options="header"] |=== |Argument |Description diff --git a/modules/nw-ovn-kubernetes-examine-sb-database-contents-ref.adoc b/modules/nw-ovn-kubernetes-examine-sb-database-contents-ref.adoc index 0cb3f168b5..e48bdeac93 100644 --- a/modules/nw-ovn-kubernetes-examine-sb-database-contents-ref.adoc +++ b/modules/nw-ovn-kubernetes-examine-sb-database-contents-ref.adoc @@ -4,16 +4,16 @@ :_mod-docs-content-type: REFERENCE [id="nw-ovn-kubernetes-examine-sb-database-contents-ref_{context}"] -= Command line arguments for ovn-sbctl to examine southbound database contents += Command-line arguments for ovn-sbctl to examine southbound database contents -The following table describes the command line arguments that can be used with `ovn-sbctl` to examine the contents of the southbound database. +The following table describes the command-line arguments that can be used with `ovn-sbctl` to examine the contents of the southbound database. [NOTE] ==== Open a remote shell in the pod you wish to view the contents of and then run the `ovn-sbctl` commands. ==== -.Command line arguments to examine southbound database contents +.Command-line arguments to examine southbound database contents [cols="30%,70%",options="header"] |=== |Argument |Description diff --git a/modules/odc-access-web-terminal.adoc b/modules/odc-access-web-terminal.adoc index 3353a9dcf3..df686a0699 100644 --- a/modules/odc-access-web-terminal.adoc +++ b/modules/odc-access-web-terminal.adoc @@ -17,7 +17,7 @@ The web terminal remains open until you close it or until you close the browser .Procedure -. To launch the web terminal, click the command line terminal icon (image:odc-wto-icon.png[title="wto icon"]) in the masthead of the console. A web terminal instance is displayed in the *Command line terminal* pane. This instance is automatically logged in with your credentials. 
+. To launch the web terminal, click the command-line terminal icon (image:odc-wto-icon.png[title="wto icon"]) in the masthead of the console. A web terminal instance is displayed in the *Command line terminal* pane. This instance is automatically logged in with your credentials. . If a project has not been selected in the current session, select the project where the `DevWorkspace` CR must be created from the *Project* drop-down list. By default, the current project is selected. + diff --git a/modules/op-opting-out-of-tekton-hub-in-the-developer-perspective.adoc b/modules/op-opting-out-of-tekton-hub-in-the-developer-perspective.adoc index e99bcd78e1..6147242fa8 100644 --- a/modules/op-opting-out-of-tekton-hub-in-the-developer-perspective.adoc +++ b/modules/op-opting-out-of-tekton-hub-in-the-developer-perspective.adoc @@ -12,7 +12,7 @@ Cluster administrators can opt out of displaying {tekton-hub} resources, such as [discrete] .Prerequisite -* Ensure that the {pipelines-title} Operator is installed on the cluster, and the `oc` command line tool is available. +* Ensure that the {pipelines-title} Operator is installed on the cluster, and the `oc` command-line tool is available. [discrete] .Procedure diff --git a/modules/op-tkn-clustertask-management.adoc b/modules/op-tkn-clustertask-management.adoc index 167ddb9dc1..04404cef3b 100644 --- a/modules/op-tkn-clustertask-management.adoc +++ b/modules/op-tkn-clustertask-management.adoc @@ -7,7 +7,7 @@ [IMPORTANT] ==== -In {pipelines-title} 1.10, ClusterTask functionality of the `tkn` command line utility is deprecated and is planned to be removed in a future release. +In {pipelines-title} 1.10, ClusterTask functionality of the `tkn` command-line utility is deprecated and is planned to be removed in a future release. 
==== == clustertask diff --git a/modules/openshift-architecture-common-terms.adoc b/modules/openshift-architecture-common-terms.adoc index 87128cfa9c..f9046f84ad 100644 --- a/modules/openshift-architecture-common-terms.adoc +++ b/modules/openshift-architecture-common-terms.adoc @@ -129,7 +129,7 @@ node:: A worker machine in the {product-title} cluster. A node is either a virtual machine (VM) or a physical machine. OpenShift CLI (`oc`):: -A command line tool to run {product-title} commands on the terminal. +A command-line tool to run {product-title} commands on the terminal. ifndef::openshift-dedicated,openshift-rosa[] OpenShift Dedicated:: diff --git a/modules/ossm-cr-example.adoc b/modules/ossm-cr-example.adoc index a95e171131..11b2517e73 100644 --- a/modules/ossm-cr-example.adoc +++ b/modules/ossm-cr-example.adoc @@ -99,7 +99,7 @@ The following table lists the specifications for the `ServiceMeshControlPlane` r |Name |Description |Type |`annotations` -|The `annotations` parameter stores additional, usually redundant status information, such as the number of components deployed by the `ServiceMeshControlPlane`. These statuses are used by the command line tool, `oc`, which does not yet allow counting objects in JSONPath expressions. +|The `annotations` parameter stores additional, usually redundant status information, such as the number of components deployed by the `ServiceMeshControlPlane`. These statuses are used by the command-line tool, `oc`, which does not yet allow counting objects in JSONPath expressions. |Not configurable |`conditions` diff --git a/modules/ossm-cr-status.adoc b/modules/ossm-cr-status.adoc index 3cd888f894..c0151467db 100644 --- a/modules/ossm-cr-status.adoc +++ b/modules/ossm-cr-status.adoc @@ -17,7 +17,7 @@ The `status` parameter describes the current state of your service mesh. 
This in |integer |`annotations` -|The `annotations` parameter stores additional, usually redundant status information, such as the number of components deployed by the `ServiceMeshControlPlane` object. These statuses are used by the command line tool, `oc`, which does not yet allow counting objects in JSONPath expressions. +|The `annotations` parameter stores additional, usually redundant status information, such as the number of components deployed by the `ServiceMeshControlPlane` object. These statuses are used by the command-line tool, `oc`, which does not yet allow counting objects in JSONPath expressions. |Not configurable |`readiness` diff --git a/modules/ossm-migrating-to-20.adoc b/modules/ossm-migrating-to-20.adoc index 671c87b741..93783ea1d4 100644 --- a/modules/ossm-migrating-to-20.adoc +++ b/modules/ossm-migrating-to-20.adoc @@ -48,7 +48,7 @@ $ oc replace -f smcp-resource.yaml $ oc patch smcp.v1.maistra.io --type json --patch '[{"op": "replace","path":"/spec/path/to/bad/setting","value":"corrected-value"}]' ---- + -.. To fix the resource by editing with command line tools, use `oc edit`. +.. To fix the resource by editing with command-line tools, use `oc edit`. + [source,terminal] ---- diff --git a/modules/ossm-vs-istio-1x.adoc b/modules/ossm-vs-istio-1x.adoc index 1160613290..139763a358 100644 --- a/modules/ossm-vs-istio-1x.adoc +++ b/modules/ossm-vs-istio-1x.adoc @@ -9,9 +9,9 @@ Module included in the following assemblies: An installation of {SMProductName} differs from an installation of Istio in multiple ways. The modifications to {SMProductName} are sometimes necessary to resolve issues, provide additional features, or to handle differences when deploying on {product-title}. [id="ossm-cli-tool_{context}"] -== Command line tool +== Command-line tool -The command line tool for {SMProductName} is oc.  {SMProductName}  does not support istioctl. +The command-line tool for {SMProductName} is oc.  {SMProductName}  does not support istioctl. 
[id="ossm-automatic-injection_{context}"] == Automatic injection diff --git a/modules/ossm-vs-istio.adoc b/modules/ossm-vs-istio.adoc index 08891aa5e6..cba258390e 100644 --- a/modules/ossm-vs-istio.adoc +++ b/modules/ossm-vs-istio.adoc @@ -9,9 +9,9 @@ Module included in the following assemblies: The following features are different in {SMProductShortName} and Istio. [id="ossm-cli-tool_{context}"] -== Command line tool +== Command-line tool -The command line tool for {SMProductName} is `oc`.  {SMProductName} does not support `istioctl`. +The command-line tool for {SMProductName} is `oc`.  {SMProductName} does not support `istioctl`. [id="ossm-installation-upgrade_{context}"] diff --git a/modules/pod-using-a-different-service-account.adoc b/modules/pod-using-a-different-service-account.adoc index 29794c9f85..d543c53d43 100644 --- a/modules/pod-using-a-different-service-account.adoc +++ b/modules/pod-using-a-different-service-account.adoc @@ -9,7 +9,7 @@ You can run a pod with a service account other than the default: .Prerequisites -* Install the `oc` command line interface. +* Install the `oc` command-line interface. * Configure a service account. * Create a DeploymentConfig. diff --git a/modules/psap-configuring-node-feature-discovery.adoc b/modules/psap-configuring-node-feature-discovery.adoc index fbec39fc80..4995a7e748 100644 --- a/modules/psap-configuring-node-feature-discovery.adoc +++ b/modules/psap-configuring-node-feature-discovery.adoc @@ -17,7 +17,7 @@ The `core` section contains common configuration settings that are not specific `core.sleepInterval` specifies the interval between consecutive passes of feature detection or re-detection, and thus also the interval between node re-labeling. A non-positive value implies infinite sleep interval; no re-detection or re-labeling is done. -This value is overridden by the deprecated `--sleep-interval` command line flag, if specified. 
+This value is overridden by the deprecated `--sleep-interval` command-line flag, if specified. .Example usage [source,yaml] @@ -33,7 +33,7 @@ The default value is `60s`. `core.sources` specifies the list of enabled feature sources. A special value `all` enables all feature sources. -This value is overridden by the deprecated `--sources` command line flag, if specified. +This value is overridden by the deprecated `--sources` command-line flag, if specified. Default: `[all]` @@ -54,7 +54,7 @@ core: The regular expression is only matched against the basename part of the label, the part of the name after '/'. The label prefix, or namespace, is omitted. -This value is overridden by the deprecated `--label-whitelist` command line flag, if specified. +This value is overridden by the deprecated `--label-whitelist` command-line flag, if specified. Default: `null` @@ -71,7 +71,7 @@ core: Setting `core.noPublish` to `true` disables all communication with the `nfd-master`. It is effectively a dry run flag; `nfd-worker` runs feature detection normally, but no labeling requests are sent to `nfd-master`. -This value is overridden by the `--no-publish` command line flag, if specified. +This value is overridden by the `--no-publish` command-line flag, if specified. Example: @@ -89,7 +89,7 @@ The default value is `false`. The following options specify the logger configuration, most of which can be dynamically adjusted at run-time. -The logger options can also be specified using command line flags, which take precedence over any corresponding config file options. +The logger options can also be specified using command-line flags, which take precedence over any corresponding config file options. 
[discrete] [id="configuring-node-feature-discovery-operator-core-klog-adddirheader_{context}"] diff --git a/modules/psap-node-feature-discovery-topology-updater-command-reference.adoc b/modules/psap-node-feature-discovery-topology-updater-command-reference.adoc index 8abb06130c..3826b2bdd2 100644 --- a/modules/psap-node-feature-discovery-topology-updater-command-reference.adoc +++ b/modules/psap-node-feature-discovery-topology-updater-command-reference.adoc @@ -4,9 +4,9 @@ :_mod-docs-content-type: REFERENCE [id="nfd-topology-updater-command-line-flags_{context}"] -= NFD Topology Updater command line flags += NFD Topology Updater command-line flags -To view available command line flags, run the `nfd-topology-updater -help` command. For example, in a podman container, run the following command: +To view available command-line flags, run the `nfd-topology-updater -help` command. For example, in a podman container, run the following command: [source,terminal] ---- diff --git a/modules/rhcos-enabling-multipath-day-2.adoc b/modules/rhcos-enabling-multipath-day-2.adoc index ea3ef2c5ec..beef5c4ed7 100644 --- a/modules/rhcos-enabling-multipath-day-2.adoc +++ b/modules/rhcos-enabling-multipath-day-2.adoc @@ -121,7 +121,7 @@ ip-10-0-153-150.ec2.internal Ready master 34m v1.28.5 You can see that scheduling on each worker node is disabled as the change is being applied. . Check that the kernel argument worked by going to one of the worker nodes and listing -the kernel command line arguments (in `/proc/cmdline` on the host): +the kernel command-line arguments (in `/proc/cmdline` on the host): + [source,terminal] ---- diff --git a/modules/rhcos-enabling-multipath.adoc b/modules/rhcos-enabling-multipath.adoc index 132a525a8c..35d6629342 100644 --- a/modules/rhcos-enabling-multipath.adoc +++ b/modules/rhcos-enabling-multipath.adoc @@ -73,7 +73,7 @@ This symlink can also be used as the `coreos.inst.install_dev` kernel argument w . Reboot into the installed system. -. 
Check that the kernel arguments worked by going to one of the worker nodes and listing the kernel command line arguments (in `/proc/cmdline` on the host): +. Check that the kernel arguments worked by going to one of the worker nodes and listing the kernel command-line arguments (in `/proc/cmdline` on the host): + [source,terminal] ---- diff --git a/modules/rosa-adding-tags-cli.adoc b/modules/rosa-adding-tags-cli.adoc index 6ad6a87f07..31098f69cd 100644 --- a/modules/rosa-adding-tags-cli.adoc +++ b/modules/rosa-adding-tags-cli.adoc @@ -6,7 +6,7 @@ [id="rosa-adding-tags-cli{context}"] = Adding tags to a machine pool using the ROSA CLI -You can add tags to a machine pool for your {product-title} cluster by using the ROSA command line interface (CLI). +You can add tags to a machine pool for your {product-title} cluster by using the ROSA command-line interface (CLI). [IMPORTANT] ==== diff --git a/modules/rosa-getting-started-environment-setup.adoc b/modules/rosa-getting-started-environment-setup.adoc index 4895ac3952..153cb329ee 100644 --- a/modules/rosa-getting-started-environment-setup.adoc +++ b/modules/rosa-getting-started-environment-setup.adoc @@ -9,7 +9,7 @@ Before you create a {product-title} (ROSA) cluster, you must set up your environment by completing the following tasks: * Verify ROSA prerequisites against your AWS and Red{nbsp}Hat accounts. -* Install and configure the required command line interface (CLI) tools. +* Install and configure the required command-line interface (CLI) tools. * Verify the configuration of the CLI tools. You can follow the procedures in this section to complete these setup requirements. 
diff --git a/modules/rosa-getting-started-install-configure-cli-tools.adoc b/modules/rosa-getting-started-install-configure-cli-tools.adoc index 3324e3b851..bf2e690e66 100644 --- a/modules/rosa-getting-started-install-configure-cli-tools.adoc +++ b/modules/rosa-getting-started-install-configure-cli-tools.adoc @@ -190,4 +190,4 @@ ifeval::["{context}" == "rosa-getting-started"] endif::[] ifeval::["{context}" == "rosa-quickstart"] :quickstart: -endif::[] \ No newline at end of file +endif::[] diff --git a/modules/rosa-hcp-aws-private-create-cluster.adoc b/modules/rosa-hcp-aws-private-create-cluster.adoc index 2d0c57eb73..f444a65d3d 100644 --- a/modules/rosa-hcp-aws-private-create-cluster.adoc +++ b/modules/rosa-hcp-aws-private-create-cluster.adoc @@ -5,7 +5,7 @@ [id="rosa-hcp-aws-private-create-cluster_{context}"] = Creating a private {hcp-title} cluster using the ROSA CLI -You can create a private cluster with multiple availability zones (Multi-AZ) on {hcp-title} using the ROSA command line interface (CLI), `rosa`. +You can create a private cluster with multiple availability zones (Multi-AZ) on {hcp-title} using the ROSA command-line interface (CLI), `rosa`. .Prerequisites diff --git a/modules/rosa-hcp-deleting-cluster.adoc b/modules/rosa-hcp-deleting-cluster.adoc index 511317999d..498ca1b7c4 100644 --- a/modules/rosa-hcp-deleting-cluster.adoc +++ b/modules/rosa-hcp-deleting-cluster.adoc @@ -6,7 +6,7 @@ [id="rosa-hcp-deleting-cluster_{context}"] = Deleting a {hcp-title} cluster and the cluster-specific IAM resources -You can delete a {hcp-title} cluster by using the ROSA command line interface (CLI) (`rosa`) or {cluster-manager-first}. +You can delete a {hcp-title} cluster by using the ROSA command-line interface (CLI) (`rosa`) or {cluster-manager-first}. After deleting the cluster, you can clean up the cluster-specific Identity and Access Management (IAM) resources in your AWS account by using the ROSA CLI. 
 The cluster-specific resources include the Operator roles and the OpenID Connect (OIDC) provider.
diff --git a/modules/rosa-installing.adoc b/modules/rosa-installing.adoc
index 662ec5fa30..04af4d9cb0 100644
--- a/modules/rosa-installing.adoc
+++ b/modules/rosa-installing.adoc
@@ -38,7 +38,7 @@ $ rosa
 .Example output
 [source,terminal]
 ----
-Command line tool for Red Hat OpenShift Service on AWS.
+Command line tool for Red Hat OpenShift Service on AWS.
 For further documentation visit https://access.redhat.com/documentation/en-us/red_hat_openshift_service_on_aws
 
 Usage:
diff --git a/modules/rosa-sts-setting-up-environment.adoc b/modules/rosa-sts-setting-up-environment.adoc
index 0d6b5fcf4b..7b090bed00 100644
--- a/modules/rosa-sts-setting-up-environment.adoc
+++ b/modules/rosa-sts-setting-up-environment.adoc
@@ -77,7 +77,7 @@ $ rosa
 .Example output
 [source,terminal]
 ----
-Command line tool for Red Hat OpenShift Service on AWS.
+Command line tool for Red Hat OpenShift Service on AWS.
 For further documentation visit https://access.redhat.com/documentation/en-us/red_hat_openshift_service_on_aws
 
 Usage:
diff --git a/modules/running-insights-operator-gather-cli.adoc b/modules/running-insights-operator-gather-cli.adoc
index ca683c6f71..2dd748497c 100644
--- a/modules/running-insights-operator-gather-cli.adoc
+++ b/modules/running-insights-operator-gather-cli.adoc
@@ -6,7 +6,7 @@
 :_mod-docs-content-type: PROCEDURE
 [id="running-insights-operator-gather-openshift-cli_{context}"]
 = Running an Insights Operator gather operation using the OpenShift CLI
-You can run an Insights Operator gather operation using the {product-title} command line interface.
+You can run an Insights Operator gather operation using the {product-title} command-line interface.
 
 .Prerequisites
 
@@ -42,4 +42,3 @@ spec:
 .Verification
 * Check that your new gather operation is prefixed with your chosen name under the list of pods in the `openshift-insights` project.
Upon completion, the Insights Operator automatically uploads the data to Red Hat for processing. - diff --git a/modules/sre-cluster-access.adoc b/modules/sre-cluster-access.adoc index 7c822420bb..c05ad6f455 100644 --- a/modules/sre-cluster-access.adoc +++ b/modules/sre-cluster-access.adoc @@ -29,7 +29,7 @@ The information presented below is an overview of the process an SRE must perfor *** Accessing a PrivateLink cluster: Request is sent to the Red Hat Transit Gateway, which then connects to a Red Hat VPC per region. The VPC that receives the request will be dependent on the target private cluster's region. Within the VPC, there is a private subnet that contains the PrivateLink endpoint to the customer's PrivateLink cluster. ifdef::openshift-rosa[] -SREs access ROSA clusters through the web console or command line interface (CLI) tools. Authentication requires multi-factor authentication (MFA) with industry-standard requirements for password complexity and account lockouts. SREs must authenticate as individuals to ensure auditability. All authentication attempts are logged to a Security Information and Event Management (SIEM) system. +SREs access ROSA clusters through the web console or command-line interface (CLI) tools. Authentication requires multi-factor authentication (MFA) with industry-standard requirements for password complexity and account lockouts. SREs must authenticate as individuals to ensure auditability. All authentication attempts are logged to a Security Information and Event Management (SIEM) system. SREs access private clusters using an encrypted HTTP connection. Connections are permitted only from a secured Red Hat network using either an IP allowlist or a private cloud provider link. 
diff --git a/modules/telco-ran-sr-iov-operator.adoc b/modules/telco-ran-sr-iov-operator.adoc index a7ac9a2757..4426eb0a94 100644 --- a/modules/telco-ran-sr-iov-operator.adoc +++ b/modules/telco-ran-sr-iov-operator.adoc @@ -16,4 +16,4 @@ Both `netdevice` (kernel VFs) and `vfio` (DPDK) devices are supported. Engineering considerations:: * Customer variation on the configuration and number of `SriovNetwork` and `SriovNetworkNodePolicy` custom resources (CRs) is expected. -* IOMMU kernel command line settings are applied with a `MachineConfig` CR at install time. This ensures that the `SriovOperator` CR does not cause a reboot of the node when adding them. +* IOMMU kernel command-line settings are applied with a `MachineConfig` CR at install time. This ensures that the `SriovOperator` CR does not cause a reboot of the node when adding them. diff --git a/modules/using-aws-cli-create-iam-role-alb-controller.adoc b/modules/using-aws-cli-create-iam-role-alb-controller.adoc index d3895b7397..5401c13f45 100644 --- a/modules/using-aws-cli-create-iam-role-alb-controller.adoc +++ b/modules/using-aws-cli-create-iam-role-alb-controller.adoc @@ -6,11 +6,11 @@ [id="using-aws-cli-create-iam-role-alb-controller_{context}"] = Creating an AWS IAM role for the controller by using the AWS CLI -You can use the AWS command line interface to create an AWS IAM role for the AWS Load Balancer Controller. An AWS IAM role is used to interact with subnets and Virtual Private Clouds (VPCs). +You can use the AWS command-line interface to create an AWS IAM role for the AWS Load Balancer Controller. An AWS IAM role is used to interact with subnets and Virtual Private Clouds (VPCs). .Prerequisites -* You must have access to the AWS command line interface (`aws`). +* You must have access to the AWS command-line interface (`aws`). 
.Procedure diff --git a/modules/virt-connecting-vm-virtctl.adoc b/modules/virt-connecting-vm-virtctl.adoc index b2b8d460df..e69f0c4443 100644 --- a/modules/virt-connecting-vm-virtctl.adoc +++ b/modules/virt-connecting-vm-virtctl.adoc @@ -15,7 +15,7 @@ endif::[] [id="virt-connecting-vm-virtctl_{context}"] = Connecting to the {console} by using virtctl -You can use the `virtctl` command line tool to connect to the {console} of a running virtual machine. +You can use the `virtctl` command-line tool to connect to the {console} of a running virtual machine. ifdef::vnc-console[] [NOTE] diff --git a/modules/virt-creating-service-virtctl.adoc b/modules/virt-creating-service-virtctl.adoc index 56c71e9db4..7482105d92 100644 --- a/modules/virt-creating-service-virtctl.adoc +++ b/modules/virt-creating-service-virtctl.adoc @@ -6,11 +6,11 @@ [id="virt-creating-service-virtctl_{context}"] = Creating a service by using virtctl -You can create a service for a virtual machine (VM) by using the `virtctl` command line tool. +You can create a service for a virtual machine (VM) by using the `virtctl` command-line tool. .Prerequisites -* You installed the `virtctl` command line tool. +* You installed the `virtctl` command-line tool. * You configured the cluster network to support the service. * The environment where you installed `virtctl` has the cluster permissions required to access the VM. For example, you ran `oc login` or you set the `KUBECONFIG` environment variable. diff --git a/modules/virt-deleting-vms.adoc b/modules/virt-deleting-vms.adoc index f1ce99cf4f..fafb63449e 100644 --- a/modules/virt-deleting-vms.adoc +++ b/modules/virt-deleting-vms.adoc @@ -7,7 +7,7 @@ = Deleting a virtual machine by using the CLI -You can delete a virtual machine by using the `oc` command line interface (CLI). The `oc` client enables you to perform actions on multiple virtual machines. +You can delete a virtual machine by using the `oc` command-line interface (CLI). 
The `oc` client enables you to perform actions on multiple virtual machines. .Prerequisites diff --git a/modules/virt-uploading-image-virtctl.adoc b/modules/virt-uploading-image-virtctl.adoc index e331d05945..5b8d312979 100644 --- a/modules/virt-uploading-image-virtctl.adoc +++ b/modules/virt-uploading-image-virtctl.adoc @@ -6,7 +6,7 @@ [id="virt-uploading-image-virtctl_{context}"] = Creating a VM from an uploaded image by using the command line -You can upload an operating system image by using the `virtctl` command line tool. You can use an existing data volume or create a new data volume for the image. +You can upload an operating system image by using the `virtctl` command-line tool. You can use an existing data volume or create a new data volume for the image. .Prerequisites diff --git a/modules/virt-using-virtctl-ssh-command.adoc b/modules/virt-using-virtctl-ssh-command.adoc index 2b0c3cc172..efbafb8bdf 100644 --- a/modules/virt-using-virtctl-ssh-command.adoc +++ b/modules/virt-using-virtctl-ssh-command.adoc @@ -10,7 +10,7 @@ You can access a running virtual machine (VM) by using the `virtcl ssh` command. .Prerequisites -* You installed the `virtctl` command line tool. +* You installed the `virtctl` command-line tool. * You added a public SSH key to the VM. * You have an SSH client installed. * The environment where you installed the `virtctl` tool has the cluster permissions required to access the VM. For example, you ran `oc login` or you set the `KUBECONFIG` environment variable. diff --git a/modules/ztp-checking-du-cluster-config.adoc b/modules/ztp-checking-du-cluster-config.adoc index 2bc29683b9..6d7fd85ccb 100644 --- a/modules/ztp-checking-du-cluster-config.adoc +++ b/modules/ztp-checking-du-cluster-config.adoc @@ -554,7 +554,7 @@ $ oc get route -n openshift-monitoring grafana + Both queries should return `Error from server (NotFound)` messages. -. 
Check that there is a minimum of 4 CPUs allocated as `reserved` for each of the `PerformanceProfile`, `Tuned` performance-patch, workload partitioning, and kernel command line arguments by running the following command: +. Check that there is a minimum of 4 CPUs allocated as `reserved` for each of the `PerformanceProfile`, `Tuned` performance-patch, workload partitioning, and kernel command-line arguments by running the following command: + [source,terminal] ---- diff --git a/nodes/containers/nodes-containers-using.adoc b/nodes/containers/nodes-containers-using.adoc index 18eb3ab830..583f548aed 100644 --- a/nodes/containers/nodes-containers-using.adoc +++ b/nodes/containers/nodes-containers-using.adoc @@ -42,7 +42,7 @@ $(nproc) X 1/2 MiB [id="nodes-containers-runtimes"] == About the container engine and container runtime -A _container engine_ is a piece of software that processes user requests, including command line options and image pulls. The container engine uses a _container runtime_, also called a _lower-level container runtime_, to run and manage the components required to deploy and operate containers. You likely will not need to interact with the container engine or container runtime. +A _container engine_ is a piece of software that processes user requests, including command-line options and image pulls. The container engine uses a _container runtime_, also called a _lower-level container runtime_, to run and manage the components required to deploy and operate containers. You likely will not need to interact with the container engine or container runtime. 
[NOTE] ==== diff --git a/nodes/nodes/ecosystems/eco-about-remediation-fencing-maintenance.adoc b/nodes/nodes/ecosystems/eco-about-remediation-fencing-maintenance.adoc index 249c14e143..4258935241 100644 --- a/nodes/nodes/ecosystems/eco-about-remediation-fencing-maintenance.adoc +++ b/nodes/nodes/ecosystems/eco-about-remediation-fencing-maintenance.adoc @@ -36,4 +36,4 @@ Administrators face situations where they need to interrupt the cluster, for exa In advance of this maintenance, affected nodes should be cordoned and drained. When a node is cordoned, new workloads cannot be scheduled on that node. When a node is drained, to avoid or minimize downtime, workloads on the affected node are transferred to other nodes. -While this maintenance can be achieved using command line tools, the Node Maintenance Operator offers a declarative approach to achieve this by using a custom resource. When such a resource exists for a node, the operator cordons and drains the node until the resource is deleted. +While this maintenance can be achieved using command-line tools, the Node Maintenance Operator offers a declarative approach to achieve this by using a custom resource. When such a resource exists for a node, the operator cordons and drains the node until the resource is deleted. diff --git a/observability/monitoring/accessing-third-party-monitoring-apis.adoc b/observability/monitoring/accessing-third-party-monitoring-apis.adoc index a3cbb8028f..f1f9190f88 100644 --- a/observability/monitoring/accessing-third-party-monitoring-apis.adoc +++ b/observability/monitoring/accessing-third-party-monitoring-apis.adoc @@ -6,7 +6,7 @@ include::_attributes/common-attributes.adoc[] toc::[] -In {product-title}, you can access web service APIs for some monitoring components from the command line interface (CLI). +In {product-title}, you can access web service APIs for some monitoring components from the command-line interface (CLI). 
[IMPORTANT] ==== diff --git a/observability/network_observability/netobserv_cli/netobserv-cli-reference.adoc b/observability/network_observability/netobserv_cli/netobserv-cli-reference.adoc index 937200e527..84a9b93b10 100644 --- a/observability/network_observability/netobserv_cli/netobserv-cli-reference.adoc +++ b/observability/network_observability/netobserv_cli/netobserv-cli-reference.adoc @@ -6,6 +6,6 @@ include::_attributes/common-attributes.adoc[] toc::[] -The Network Observability CLI (`oc netobserv`) has most features and filtering options that are available for the Network Observability Operator. You can pass command line arguments to enable features or filtering options. +The Network Observability CLI (`oc netobserv`) has most features and filtering options that are available for the Network Observability Operator. You can pass command-line arguments to enable features or filtering options. include::modules/network-observability-netobserv-cli-reference.adoc[leveloffset=+1] \ No newline at end of file diff --git a/rest_api/monitoring_apis/alertmanager-monitoring-coreos-com-v1.adoc b/rest_api/monitoring_apis/alertmanager-monitoring-coreos-com-v1.adoc index 666c5cf376..c9e7097fb8 100644 --- a/rest_api/monitoring_apis/alertmanager-monitoring-coreos-com-v1.adoc +++ b/rest_api/monitoring_apis/alertmanager-monitoring-coreos-com-v1.adoc @@ -10516,7 +10516,7 @@ Required:: Description:: + -- -Defines the web command line flags when starting Alertmanager. +Defines the web command-line flags when starting Alertmanager. 
-- Type:: diff --git a/rosa_architecture/rosa-understanding.adoc b/rosa_architecture/rosa-understanding.adoc index c159dfb2fa..bca9f8f36c 100644 --- a/rosa_architecture/rosa-understanding.adoc +++ b/rosa_architecture/rosa-understanding.adoc @@ -5,7 +5,7 @@ include::_attributes/attributes-openshift-dedicated.adoc[] :context: rosa-understanding toc::[] -Learn about {product-title} (ROSA), interacting with ROSA by using {cluster-manager-first} and command line interface (CLI) tools, consumption experience, and integration with Amazon Web Services (AWS) services. +Learn about {product-title} (ROSA), interacting with ROSA by using {cluster-manager-first} and command-line interface (CLI) tools, consumption experience, and integration with Amazon Web Services (AWS) services. [id="rosa-understanding-about_{context}"] == About ROSA diff --git a/rosa_hcp/rosa-hcp-deleting-cluster.adoc b/rosa_hcp/rosa-hcp-deleting-cluster.adoc index 80ea903d27..5f3518d0f3 100644 --- a/rosa_hcp/rosa-hcp-deleting-cluster.adoc +++ b/rosa_hcp/rosa-hcp-deleting-cluster.adoc @@ -6,7 +6,7 @@ include::_attributes/attributes-openshift-dedicated.adoc[] toc::[] -If you want to delete a {hcp-title-first} cluster, you can use either the {cluster-manager-first} or the ROSA command line interface (CLI) (`rosa`). After deleting your cluster, you can also delete the AWS Identity and Access Management (IAM) resources that are used by the cluster. +If you want to delete a {hcp-title-first} cluster, you can use either the {cluster-manager-first} or the ROSA command-line interface (CLI) (`rosa`). After deleting your cluster, you can also delete the AWS Identity and Access Management (IAM) resources that are used by the cluster. 
include::modules/rosa-hcp-deleting-cluster.adoc[leveloffset=+1] diff --git a/security/cert_manager_operator/cert-manager-operator-release-notes.adoc b/security/cert_manager_operator/cert-manager-operator-release-notes.adoc index edf9f139e5..6c9d2f7463 100644 --- a/security/cert_manager_operator/cert-manager-operator-release-notes.adoc +++ b/security/cert_manager_operator/cert-manager-operator-release-notes.adoc @@ -211,7 +211,7 @@ For more information, see xref:../../security/cert_manager_operator/cert-manager * With this release, the scope of the {cert-manager-operator}, which was previously limited to the {product-title} on AMD64 architecture, has now been expanded to include support for managing certificates on {product-title} running on {ibm-z-name} (`s390x`), {ibm-power-name} (`ppc64le`) and ARM64 architectures. -* With this release, you can use DNS over HTTPS (DoH) for performing the self-checks during the ACME DNS-01 challenge verification. The DNS self-check method can be controlled by using the command line flags, `--dns01-recursive-nameservers-only` and `--dns01-recursive-nameservers`. +* With this release, you can use DNS over HTTPS (DoH) for performing the self-checks during the ACME DNS-01 challenge verification. The DNS self-check method can be controlled by using the command-line flags, `--dns01-recursive-nameservers-only` and `--dns01-recursive-nameservers`. For more information, see xref:../../security/cert_manager_operator/cert-manager-customizing-api-fields.html#cert-manager-override-arguments_cert-manager-customizing-api-fields[Customizing cert-manager by overriding arguments from the cert-manager Operator API]. 
[id="cert-manager-operator-1-13-0-CVEs"] diff --git a/serverless/functions/serverless-functions-yaml.adoc b/serverless/functions/serverless-functions-yaml.adoc index d69fd0ebf8..78be8e8a2c 100644 --- a/serverless/functions/serverless-functions-yaml.adoc +++ b/serverless/functions/serverless-functions-yaml.adoc @@ -6,7 +6,7 @@ include::_attributes/common-attributes.adoc[] toc::[] -The `func.yaml` file contains the configuration for your function project. Values specified in `func.yaml` are used when you execute a `kn func` command. For example, when you run the `kn func build` command, the value in the `build` field is used. In some cases, you can override these values with command line flags or environment variables. +The `func.yaml` file contains the configuration for your function project. Values specified in `func.yaml` are used when you execute a `kn func` command. For example, when you run the `kn func build` command, the value in the `build` field is used. In some cases, you can override these values with command-line flags or environment variables. include::modules/serverless-functions-func-yaml-fields.adoc[leveloffset=+1] include::modules/serverless-functions-func-yaml-environment-variables.adoc[leveloffset=+1] diff --git a/service_mesh/v1x/preparing-ossm-installation.adoc b/service_mesh/v1x/preparing-ossm-installation.adoc index 56cd0ddf93..e446c35353 100644 --- a/service_mesh/v1x/preparing-ossm-installation.adoc +++ b/service_mesh/v1x/preparing-ossm-installation.adoc @@ -29,7 +29,7 @@ If you are installing {SMProductName} on a xref:../../installing/overview/instal + endif::[] -* Install the version of the {product-title} command line utility (the `oc` client tool) that matches your {product-title} version and add it to your path. +* Install the version of the {product-title} command-line utility (the `oc` client tool) that matches your {product-title} version and add it to your path. 
ifdef::openshift-enterprise[] ** If you are using {product-title} {product-version}, see xref:../../cli_reference/openshift_cli/getting-started-cli.adoc#cli-about-cli_cli-developer-commands[About the OpenShift CLI]. diff --git a/service_mesh/v2x/preparing-ossm-installation.adoc b/service_mesh/v2x/preparing-ossm-installation.adoc index dc0a67c1b9..0f4df720e2 100644 --- a/service_mesh/v2x/preparing-ossm-installation.adoc +++ b/service_mesh/v2x/preparing-ossm-installation.adoc @@ -23,7 +23,7 @@ ifdef::openshift-enterprise[] ** xref:../../installing/installing_ibm_power/installing-ibm-power.adoc#installing-ibm-power[Install {product-title} {product-version} on {ibm-power-name}] endif::[] -* Install the version of the {product-title} command line utility (the `oc` client tool) that matches your {product-title} version and add it to your path. +* Install the version of the {product-title} command-line utility (the `oc` client tool) that matches your {product-title} version and add it to your path. ifdef::openshift-enterprise[] ** If you are using {product-title} {product-version}, see xref:../../cli_reference/openshift_cli/getting-started-cli.adoc#cli-about-cli_cli-developer-commands[About the OpenShift CLI]. endif::[] diff --git a/virt/getting_started/virt-getting-started.adoc b/virt/getting_started/virt-getting-started.adoc index f9565a53a4..2883a3c08e 100644 --- a/virt/getting_started/virt-getting-started.adoc +++ b/virt/getting_started/virt-getting-started.adoc @@ -24,7 +24,7 @@ ifndef::openshift-rosa,openshift-dedicated[] endif::openshift-rosa,openshift-dedicated[] * xref:../../virt/install/preparing-cluster-for-virt.adoc#preparing-cluster-for-virt[Prepare your cluster for {VirtProductName}]. * xref:../../virt/install/installing-virt.adoc#virt-installing-virt-operator_installing-virt[Install the {VirtProductName} Operator]. 
-* xref:../../virt/getting_started/virt-using-the-cli-tools.adoc#installing-virtctl_virt-using-the-cli-tools[Install the `virtctl` command line interface (CLI) tool]. +* xref:../../virt/getting_started/virt-using-the-cli-tools.adoc#installing-virtctl_virt-using-the-cli-tools[Install the `virtctl` command-line interface (CLI) tool]. [discrete] diff --git a/virt/getting_started/virt-using-the-cli-tools.adoc b/virt/getting_started/virt-using-the-cli-tools.adoc index 73c1184471..3f3d29d226 100644 --- a/virt/getting_started/virt-using-the-cli-tools.adoc +++ b/virt/getting_started/virt-using-the-cli-tools.adoc @@ -7,9 +7,9 @@ include::_attributes/common-attributes.adoc[] toc::[] -You can manage {VirtProductName} resources by using the `virtctl` command line tool. +You can manage {VirtProductName} resources by using the `virtctl` command-line tool. -You can access and modify virtual machine (VM) disk images by using the link:https://libguestfs.org[`libguestfs`] command line tool. You deploy `libguestfs` by using the `virtctl libguestfs` command. +You can access and modify virtual machine (VM) disk images by using the link:https://libguestfs.org[`libguestfs`] command-line tool. You deploy `libguestfs` by using the `virtctl libguestfs` command. [id="installing-virtctl_virt-using-the-cli-tools"] == Installing virtctl diff --git a/virt/install/uninstalling-virt.adoc b/virt/install/uninstalling-virt.adoc index c54667a584..eb247304ba 100644 --- a/virt/install/uninstalling-virt.adoc +++ b/virt/install/uninstalling-virt.adoc @@ -6,7 +6,7 @@ include::_attributes/common-attributes.adoc[] toc::[] -You uninstall {VirtProductName} by using the web console or the command line interface (CLI) to delete the {VirtProductName} workloads, the Operator, and its resources. +You uninstall {VirtProductName} by using the web console or the command-line interface (CLI) to delete the {VirtProductName} workloads, the Operator, and its resources. 
[id='uninstalling-virt-web-console_{context}'] == Uninstalling {VirtProductName} by using the web console diff --git a/virt/virtual_machines/virt-accessing-vm-consoles.adoc b/virt/virtual_machines/virt-accessing-vm-consoles.adoc index 373cf1b828..25dbc6b6d1 100644 --- a/virt/virtual_machines/virt-accessing-vm-consoles.adoc +++ b/virt/virtual_machines/virt-accessing-vm-consoles.adoc @@ -16,7 +16,7 @@ You can connect to the following consoles to access running virtual machines (VM [id="vnc-console_virt-accessing-vm-consoles"] == Connecting to the VNC console -You can connect to the VNC console of a virtual machine by using the {product-title} web console or the `virtctl` command line tool. +You can connect to the VNC console of a virtual machine by using the {product-title} web console or the `virtctl` command-line tool. :context: vnc-console include::modules/virt-connecting-to-vm-console-web.adoc[leveloffset=+2] @@ -32,7 +32,7 @@ include::modules/virt-temporary-token-VNC.adoc[leveloffset=+2] [id="serial-console_virt-accessing-vm-consoles"] == Connecting to the serial console -You can connect to the serial console of a virtual machine by using the {product-title} web console or the `virtctl` command line tool. +You can connect to the serial console of a virtual machine by using the {product-title} web console or the `virtctl` command-line tool. [NOTE] ==== diff --git a/virt/virtual_machines/virt-accessing-vm-ssh.adoc b/virt/virtual_machines/virt-accessing-vm-ssh.adoc index fb37a22729..cf573dab5b 100644 --- a/virt/virtual_machines/virt-accessing-vm-ssh.adoc +++ b/virt/virtual_machines/virt-accessing-vm-ssh.adoc @@ -109,7 +109,7 @@ include::modules/virt-about-services.adoc[leveloffset=+2] [id="creating-services-ssh_virt-accessing-vm-ssh"] === Creating a service -You can create a service to expose a virtual machine (VM) by using the {product-title} web console, `virtctl` command line tool, or a YAML file. 
+You can create a service to expose a virtual machine (VM) by using the {product-title} web console, `virtctl` command-line tool, or a YAML file. include::modules/virt-enabling-load-balancer-service-web.adoc[leveloffset=+3] diff --git a/virt/virtual_machines/virt-delete-vms.adoc b/virt/virtual_machines/virt-delete-vms.adoc index f8ccd34245..9af40d4a62 100644 --- a/virt/virtual_machines/virt-delete-vms.adoc +++ b/virt/virtual_machines/virt-delete-vms.adoc @@ -6,7 +6,7 @@ include::_attributes/common-attributes.adoc[] toc::[] -You can delete a virtual machine from the web console or by using the `oc` command line interface. +You can delete a virtual machine from the web console or by using the `oc` command-line interface. include::modules/virt-delete-vm-web.adoc[leveloffset=+1] include::modules/virt-deleting-vms.adoc[leveloffset=+1] diff --git a/virt/virtual_machines/virt-exporting-vms.adoc b/virt/virtual_machines/virt-exporting-vms.adoc index 092cfac8a6..e6439af396 100644 --- a/virt/virtual_machines/virt-exporting-vms.adoc +++ b/virt/virtual_machines/virt-exporting-vms.adoc @@ -8,7 +8,7 @@ toc::[] You can export a virtual machine (VM) and its associated disks in order to import a VM into another cluster or to analyze the volume for forensic purposes. -You create a `VirtualMachineExport` custom resource (CR) by using the command line interface. +You create a `VirtualMachineExport` custom resource (CR) by using the command-line interface. Alternatively, you can use the xref:../../virt/getting_started/virt-using-the-cli-tools.adoc#vm-volume-export-commands_virt-using-the-cli-tools[`virtctl vmexport` command] to create a `VirtualMachineExport` CR and to download exported volumes. 
diff --git a/web_console/customizing-the-web-console.adoc b/web_console/customizing-the-web-console.adoc index cfd12ef574..9fa9010734 100644 --- a/web_console/customizing-the-web-console.adoc +++ b/web_console/customizing-the-web-console.adoc @@ -7,7 +7,7 @@ include::_attributes/common-attributes.adoc[] toc::[] You can customize the {product-title} web console to set a custom logo, -product name, links, notifications, and command line downloads. This is +product name, links, notifications, and command-line downloads. This is especially helpful if you need to tailor the web console to meet specific corporate or government requirements. diff --git a/web_console/web_terminal/installing-web-terminal.adoc b/web_console/web_terminal/installing-web-terminal.adoc index 23bc4bec4b..f8a954095a 100644 --- a/web_console/web_terminal/installing-web-terminal.adoc +++ b/web_console/web_terminal/installing-web-terminal.adoc @@ -37,4 +37,4 @@ You can install the web terminal by using the {web-terminal-op} listed in the {p The {web-terminal-op} installs the DevWorkspace Operator as a dependency. ==== -. After the Operator is installed, refresh your page to see the command line terminal icon (image:odc-wto-icon.png[title="web terminal icon"]) in the masthead of the console. \ No newline at end of file +. After the Operator is installed, refresh your page to see the command-line terminal icon (image:odc-wto-icon.png[title="web terminal icon"]) in the masthead of the console. \ No newline at end of file diff --git a/web_console/web_terminal/odc-using-web-terminal.adoc b/web_console/web_terminal/odc-using-web-terminal.adoc index 8691638575..e4d4c9ec9d 100644 --- a/web_console/web_terminal/odc-using-web-terminal.adoc +++ b/web_console/web_terminal/odc-using-web-terminal.adoc @@ -6,6 +6,6 @@ include::_attributes/common-attributes.adoc[] toc::[] -You can launch an embedded command line terminal instance in the web console. 
This terminal instance is preinstalled with common CLI tools for interacting with the cluster, such as `oc`, `kubectl`,`odo`, `kn`, `tkn`, `helm`, and `subctl`. It also has the context of the project you are working on and automatically logs you in using your credentials. +You can launch an embedded command-line terminal instance in the web console. This terminal instance is preinstalled with common CLI tools for interacting with the cluster, such as `oc`, `kubectl`, `odo`, `kn`, `tkn`, `helm`, and `subctl`. It also has the context of the project you are working on and automatically logs you in using your credentials. include::modules/odc-access-web-terminal.adoc[leveloffset=+1] \ No newline at end of file diff --git a/welcome/cloud-experts-rosa-hcp-sts-explained.adoc b/welcome/cloud-experts-rosa-hcp-sts-explained.adoc index 4f8bb5f4e6..17905688df 100644 --- a/welcome/cloud-experts-rosa-hcp-sts-explained.adoc +++ b/welcome/cloud-experts-rosa-hcp-sts-explained.adoc @@ -20,7 +20,7 @@ As part of {hcp-title}, Red{nbsp}Hat must be granted the necessary permissions t The STS method uses predefined roles and policies to grant temporary, least-privilege permissions to IAM roles. The credentials typically expire an hour after being requested. Once expired, they are no longer recognized by AWS and no longer have account access from API requests made with them. For more information, see the link:https://docs.aws.amazon.com/STS/latest/APIReference/welcome.html[AWS documentation]. -AWS IAM STS roles must be created for each {hcp-title} cluster. The ROSA command line interface (CLI) (`rosa`) manages the STS roles and helps you attach the ROSA-specific, AWS-managed policies to each role. The CLI provides the commands and files to create the roles, attach the AWS-managed policies, and an option to allow the CLI to automatically create the roles and attach the policies. +AWS IAM STS roles must be created for each {hcp-title} cluster.
The ROSA command-line interface (CLI) (`rosa`) manages the STS roles and helps you attach the ROSA-specific, AWS-managed policies to each role. The CLI provides the commands and files to create the roles, attach the AWS-managed policies, and an option to allow the CLI to automatically create the roles and attach the policies. //See [insert new xref when we have one for HCP] for more information about the different `--mode` options. [id="hcp-sts-security"] diff --git a/welcome/oke_about.adoc b/welcome/oke_about.adoc index 5b98afbf23..6a8b8b3458 100644 --- a/welcome/oke_about.adoc +++ b/welcome/oke_about.adoc @@ -158,7 +158,7 @@ out of the box Kubernetes Ingress Controller. === Core user experience {oke} users have full access to Kubernetes Operators, pod deployment strategies, Helm, and {product-title} templates. {oke} users can use both the `oc` and -`kubectl` command line interfaces. {oke} also offers an administrator web-based +`kubectl` command-line interfaces. {oke} also offers an administrator web-based console that shows all aspects of the deployed container services and offers a container-as-a service experience. {oke} grants access to the Operator Life Cycle Manager that helps you control access to content on the cluster and life