diff --git a/_topic_maps/_topic_map_ms.yml b/_topic_maps/_topic_map_ms.yml index 0a66bac032..a69c86fe7d 100644 --- a/_topic_maps/_topic_map_ms.yml +++ b/_topic_maps/_topic_map_ms.yml @@ -88,8 +88,8 @@ Name: CLI tools Dir: microshift_cli_ref Distros: microshift Topics: -- Name: CLI tools overview - File: microshift-cli-overview +- Name: CLI tools introduction + File: microshift-cli-tools-introduction - Name: Installing the OpenShift CLI File: microshift-oc-cli-install - Name: Configuring the OpenShift CLI diff --git a/microshift_cli_ref/microshift-cli-overview.adoc b/microshift_cli_ref/microshift-cli-overview.adoc deleted file mode 100644 index a4520a692e..0000000000 --- a/microshift_cli_ref/microshift-cli-overview.adoc +++ /dev/null @@ -1,25 +0,0 @@ -:_content-type: ASSEMBLY -[id="microshift-cli-overview"] -= {product-title} CLI tools -include::_attributes/attributes-microshift.adoc[] -:context: cli-tools-overview - -toc::[] - -A user builds, deploys, and manages boths applications and clusters while working with {product-title}. - -{product-title} can use different command-line interface (CLI) tools that simplify these tasks by enabling users to perform various administration and development operations from the terminal. -These tools expose simple commands to manage the deployments, as well as interact with each component of the system. - -In addition to built-in `microshift` command types and Linux CLI tools, the optional OpenShift CLI (`oc`) tool with an enabled subset of commands is available for you to use if you are already familiar with {OCP} and Kubernetes. -//more info on these tools is expected in the future, hence this overview assembly - -[role="_additional-resources"] -[id="additional-resources_cli-tools-overview"] -.Additional resources - -* xref:..//microshift_cli_ref/microshift-oc-cli-install.adoc#microshift-oc-cli-install[Installing the OpenShift CLI tool for MicroShift]. - -* link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.13/html/cli_tools/openshift-cli-oc[OpenShift CLI (oc)]: A full description of `oc` as provided by the {OCP} documentation. Commands focused on multi-node deployments, projects, and developer tooling are not supported by {product-title}. - -* link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9[Red Hat Enterprise Linux (RHEL)]: The RHEL documentation for your specific use case. diff --git a/microshift_cli_ref/microshift-cli-tools-introduction.adoc b/microshift_cli_ref/microshift-cli-tools-introduction.adoc new file mode 100644 index 0000000000..d4414fe9d9 --- /dev/null +++ b/microshift_cli_ref/microshift-cli-tools-introduction.adoc @@ -0,0 +1,31 @@ +:_content-type: ASSEMBLY +[id="microshift-cli-tools"] += {product-title} CLI tools introduction +include::_attributes/attributes-microshift.adoc[] +:context: microshift-cli-tools-introduction + +toc::[] + +You can use different command-line interface (CLI) tools to build, deploy, and manage {product-title} clusters and workloads. With CLI tools, you can complete various administration and development operations from the terminal to manage deployments and interact with each component of the system. + +CLI tools available for use with {product-title} are the following: + +* Built-in `microshift` command types +* Linux CLI tools +* Kubernetes CLI (`kubectl`) +* The OpenShift CLI (`oc`) tool with an enabled subset of commands + +[NOTE] +==== +Commands for multi-node deployments, projects, and developer tooling are not supported by {product-title}. 
+==== + +[role="_additional-resources"] +[id="additional-resources_microshift-cli-tools"] +.Additional resources + +* xref:..//microshift_cli_ref/microshift-oc-cli-install.adoc#microshift-oc-cli-install[Installing the OpenShift CLI tool for MicroShift]. + +* link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.13/html/cli_tools/openshift-cli-oc[Detailed description of the OpenShift CLI (oc)]. + +* link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9[Red Hat Enterprise Linux (RHEL) documentation for specific use cases]. \ No newline at end of file diff --git a/microshift_cli_ref/microshift-cli-using-oc.adoc b/microshift_cli_ref/microshift-cli-using-oc.adoc index bf916443a1..68b8a08ae5 100644 --- a/microshift_cli_ref/microshift-cli-using-oc.adoc +++ b/microshift_cli_ref/microshift-cli-using-oc.adoc @@ -1,12 +1,12 @@ :_content-type: ASSEMBLY [id="microshift-cli-using-oc"] -= Using the `oc` tool += Using the oc tool include::_attributes/attributes-microshift.adoc[] :context: microshift-using-oc toc::[] -The optional OpenShift CLI (`oc`) tool is available for you to use if you are already familiar with {OCP} and Kubernetes. +The optional OpenShift CLI (`oc`) tool provides a subset of `oc` commands for {product-title} deployments. Using `oc` is convenient if you are familiar with {OCP} and Kubernetes. include::modules/microshift-cli-oc-about.adoc[leveloffset=+1] diff --git a/microshift_cli_ref/microshift-oc-cli-commands-list.adoc b/microshift_cli_ref/microshift-oc-cli-commands-list.adoc index ed44c30a50..65b735b52e 100644 --- a/microshift_cli_ref/microshift-oc-cli-commands-list.adoc +++ b/microshift_cli_ref/microshift-oc-cli-commands-list.adoc @@ -2,21 +2,36 @@ [id="microshift-oc-cli-commands"] = OpenShift CLI command reference include::_attributes/attributes-microshift.adoc[] -:context: cli-administrator-commands +:context: microshift-oc-cli-commands toc::[] -This reference provides descriptions and example commands for OpenShift CLI (`oc`) commands. You must have `cluster-admin` or equivalent permissions to use these commands. +Descriptions and example commands for OpenShift CLI (`oc`) commands are included in this reference document. You must have `cluster-admin` or equivalent permissions to use these commands. To list administrator commands and information about them, use the following commands: -Run `oc adm -h` to list all administrator commands or run `oc --help` to get additional details for a specific command. +* Enter the `oc adm -h` command to list all administrator commands: ++ +.Command syntax ++ +[source,terminal] +---- +$ oc adm -h +---- + +* Enter the `oc --help` command to get additional details for a specific command: ++ +.Command syntax ++ +[source,terminal] +---- +$ oc --help +---- [IMPORTANT] ==== Using `oc --help` lists details for any `oc` command. Not all `oc` commands apply to using {product-title}. 
==== -// The OCP file is auto-generated from the openshift/oc repository; MicroShift is made manually -// OpenShift CLI (oc) administrator commands -//include::modules/microshift-oc-by-example-content.adoc[leveloffset=+1] -include::modules/microshift-oc-by-example-content-gen.adoc[leveloffset=+1] +// The OCP files are auto-generated from the openshift/oc repository; use the MicroShift-specific flags to generate MicroShift command files from the same repo +include::modules/microshift-oc-by-example-content.adoc[leveloffset=+1] + include::modules/microshift-oc-adm-by-example-content.adoc[leveloffset=+1] diff --git a/microshift_cli_ref/microshift-usage-oc-kubectl.adoc b/microshift_cli_ref/microshift-usage-oc-kubectl.adoc index d7e380a370..a1016ff3ab 100644 --- a/microshift_cli_ref/microshift-usage-oc-kubectl.adoc +++ b/microshift_cli_ref/microshift-usage-oc-kubectl.adoc @@ -6,12 +6,21 @@ include::_attributes/attributes-microshift.adoc[] toc::[] -The Kubernetes command-line interface (CLI), `kubectl`, can be used to run commands against a Kubernetes cluster. Because {product-title} is a certified Kubernetes distribution, you can use the supported `kubectl` binaries that ship with {product-title}, or you can gain extended functionality by using the `oc` binary. +The Kubernetes command-line interface (CLI), `kubectl`, can be used to run commands against a Kubernetes cluster. Because {product-title} is a certified Kubernetes distribution, you can use the supported `kubectl` CLI tool that ships with {product-title}, or you can gain extended functionality by using the `oc` CLI tool. + +[id="microshift-kubectl-binary_{context}"] +== The kubectl CLI tool + +You can use the `kubectl` CLI tool to interact with Kubernetes primitives on your {product-title} cluster. You can also use existing `kubectl` workflows and scripts for new {product-title} users coming from another Kubernetes environment, or for those who prefer to use the `kubectl` CLI. + +The `kubectl` CLI tool is included in the archive if you download the `oc` CLI tool. + +For more information, read the link:https://kubernetes.io/docs/reference/kubectl/overview/[Kubernetes CLI tool documentation]. [id="microshift-oc-binary_{context}"] -== The oc binary +== The oc CLI tool -The `oc` binary offers the same capabilities as the `kubectl` binary, but it extends to natively support additional {product-title} features, including: +The `oc` CLI tool offers the same capabilities as the `kubectl` CLI tool, but it extends to natively support additional {product-title} features, including: * **Route resource** + @@ -23,7 +32,7 @@ The additional command `oc new-app`, for example, makes it easier to get new app [IMPORTANT] ==== -If you installed an earlier version of the `oc` binary, you cannot use it to complete all of the commands in {product-title} {product-version}. If you want the latest features, you must download and install the latest version of the `oc` binary corresponding to your {product-title} server version. +If you installed an earlier version of the `oc` CLI tool, you cannot use it to complete all of the commands in {product-title} {ocp-version}. If you want the latest features, you must download and install the latest version of the `oc` CLI tool corresponding to your {product-title} version. ==== Non-security API changes will involve, at minimum, two minor releases (4.1 to 4.2 to 4.3, for example) to allow older `oc` binaries to update. Using new capabilities might require newer `oc` binaries. 
A 4.3 server might have additional capabilities that a 4.2 `oc` binary cannot use and a 4.3 `oc` binary might have additional capabilities that are unsupported by a 4.2 server. @@ -51,12 +60,3 @@ image:redcircle-1.png[] Fully compatible. image:redcircle-2.png[] `oc` client might not be able to access server features. image:redcircle-3.png[] `oc` client might provide options and features that might not be compatible with the accessed server. - -[id="microshift-kubectl-binary_{context}"] -== The kubectl binary - -The `kubectl` binary is provided as a means to support existing workflows and scripts for new {product-title} users coming from a standard Kubernetes environment, or for those who prefer to use the `kubectl` CLI. Existing users of `kubectl` can continue to use the binary to interact with Kubernetes primitives, with no changes required to the {product-title} cluster. - -The `kubectl` binary is included in the archive if you download the `oc` binary. - -For more information, see the link:https://kubernetes.io/docs/reference/kubectl/overview/[kubectl documentation]. diff --git a/microshift_networking/microshift-networking.adoc b/microshift_networking/microshift-networking.adoc index d60f94ce69..29d3bc1735 100644 --- a/microshift_networking/microshift-networking.adoc +++ b/microshift_networking/microshift-networking.adoc @@ -22,12 +22,23 @@ To troubleshoot connection problems with the NodePort service, read about the kn ==== include::modules/microshift-cni.adoc[leveloffset=+1] + include::modules/microshift-configuring-ovn.adoc[leveloffset=+1] + include::modules/microshift-restart-ovnkube-master.adoc[leveloffset=+1] + //include::modules/microshift-man-config-ovs-bridge.adoc[leveloffset=+1] + include::modules/microshift-http-proxy.adoc[leveloffset=+1] + +include::modules/microshift-rpm-ostree-https.adoc[leveloffset=+1] + include::modules/microshift-cri-o-container-runtime.adoc[leveloffset=+1] + include::modules/microshift-ovs-snapshot.adoc[leveloffset=+1] -include::modules/microshift-deploying-a-load-balancer.adoc[leveloffset=+1] -include::modules/microshift-blocking-nodeport-access.adoc[leveloffset=+1] + +include::modules/microshift-deploying-a-load-balancer.adoc[leveloffset=+1] + +include::modules/microshift-blocking-nodeport-access.adoc[leveloffset=+1] + include::modules/microshift-mDNS.adoc[leveloffset=+1] diff --git a/microshift_support/microshift-etcd.adoc b/microshift_support/microshift-etcd.adoc index ba87cf1e9b..f8edbb6b69 100644 --- a/microshift_support/microshift-etcd.adoc +++ b/microshift_support/microshift-etcd.adoc @@ -13,4 +13,4 @@ toc::[] include::snippets/microshift-tech-preview-snip.adoc[leveloffset=+1] include::modules/microshift-observe-debug-etcd-server.adoc[leveloffset=+1] -include::modules/microshift-configuration.adoc[leveloffset=+1] \ No newline at end of file +include::modules/microshift-config-etcd.adoc[leveloffset=+1] \ No newline at end of file diff --git a/modules/microshift-accessing-cluster-locally.adoc b/modules/microshift-accessing-cluster-locally.adoc index a94458923a..c9126296e9 100644 --- a/modules/microshift-accessing-cluster-locally.adoc +++ b/modules/microshift-accessing-cluster-locally.adoc @@ -37,7 +37,9 @@ $ sudo cat /var/lib/microshift/resources/kubeadmin/kubeconfig > ~/.kube/config $ chmod go-r ~/.kube/config ---- -. 
Verify that {product-title} is running by entering the following command: +.Verification + +* Verify that {product-title} is running by entering the following command: + [source,terminal] ---- diff --git a/modules/microshift-accessing-cluster-open-firewall.adoc b/modules/microshift-accessing-cluster-open-firewall.adoc index 87a159cbb1..41e102916b 100644 --- a/modules/microshift-accessing-cluster-open-firewall.adoc +++ b/modules/microshift-accessing-cluster-open-firewall.adoc @@ -8,26 +8,28 @@ [id="microshift-accessing-cluster-open-firewall_{context}"] = Opening the firewall for remote access to the {product-title} cluster -Use the following procedure to open the firewall so that a remote user can access the {product-title} cluster. Your account must have cluster admin privileges. This procedure must be completed before a workstation user can access the cluster remotely. +Use the following procedure to open the firewall so that a remote user can access the {product-title} cluster. This procedure must be completed before a workstation user can access the cluster remotely. -* `user@microshift`, is the user on the {product-title} host machine and is responsible for setting up that machine so that it can be accessed by a remote user on a separate workstation. +For this procedure, `user@microshift` is the user on the {product-title} host machine and is responsible for setting up that machine so that it can be accessed by a remote user on a separate workstation. .Prerequisites * You have installed the `oc` binary. +* Your account has cluster administration privileges. + .Procedure -. As `user@microshift` on the {product-title} host, open the firewall port for the Kubernetes API server (`6443/tcp`) by running the following command: +* As `user@microshift` on the {product-title} host, open the firewall port for the Kubernetes API server (`6443/tcp`) by running the following command: + [source,terminal] ---- [user@microshift]$ sudo firewall-cmd --permanent --zone=public --add-port=6443/tcp && sudo firewall-cmd --reload ---- -.Verify {product-title} is running +.Verification -. As `user@microshift`, verify that {product-title} is running by entering the following command: +* As `user@microshift`, verify that {product-title} is running by entering the following command: + [source,terminal] ---- diff --git a/modules/microshift-accessing-cluster-remotely.adoc b/modules/microshift-accessing-cluster-remotely.adoc index 8eab627c5f..7c6fcf68cf 100644 --- a/modules/microshift-accessing-cluster-remotely.adoc +++ b/modules/microshift-accessing-cluster-remotely.adoc @@ -10,7 +10,7 @@ Use the following procedure to access the {product-title} cluster from a remote workstation by using a `kubeconfig` file. -* The `user@workstation` login is used to access the host machine remotely. The `` value in the procedure is the name of the user that `user@workstation` logs in with to the {product-title} host. +The `user@workstation` login is used to access the host machine remotely. The `` value in the procedure is the name of the user that `user@workstation` logs in with to the {product-title} host. .Prerequisites @@ -48,7 +48,9 @@ Use the following procedure to access the {product-title} cluster from a remote $ chmod go-r ~/.kube/config ---- -. 
As `user@workstation`, verify that {product-title} is running by entering the following command: +.Verification + +* As `user@workstation`, verify that {product-title} is running by entering the following command: + [source,terminal] ---- diff --git a/modules/microshift-accessing.adoc b/modules/microshift-accessing.adoc index cfc889bd89..1846875894 100644 --- a/modules/microshift-accessing.adoc +++ b/modules/microshift-accessing.adoc @@ -7,4 +7,4 @@ [id="accessing-microshift-cluster_{context}"] = How to access the {product-title} cluster -Use the procedures in this section to access the {product-title} cluster, either from the same machine running the {product-title} service or remotely from a workstation. You can use this access to observe and administrate workloads. When using these steps, choose the `kubeconfig` file that contains the host name or IP address you want to connect with and place it in the relevant directory. The {OCP} CLI tool (`oc`) is employed for cluster activities. +Use the procedures in this section to access the {product-title} cluster, either from the same machine running the {product-title} service or remotely from a workstation. You can use this access to observe and administrate workloads. When using these steps, choose the `kubeconfig` file that contains the host name or IP address you want to connect with and place it in the relevant directory. As listed in each procedure, you use the {OCP} CLI tool (`oc`) for cluster activities. diff --git a/modules/microshift-adding-repos-to-image-builder.adoc b/modules/microshift-adding-repos-to-image-builder.adoc index c40d7f0264..2799c9a1b5 100644 --- a/modules/microshift-adding-repos-to-image-builder.adoc +++ b/modules/microshift-adding-repos-to-image-builder.adoc @@ -15,7 +15,7 @@ Use the following procedure to add the {product-title} repositories to Image Bui .Procedure -. Enable the {product-title} RPM repositories on the build host by running the following command: +* Enable the {product-title} RPM repositories on the build host by running the following command: + [source,terminal,subs="attributes+"] ---- diff --git a/modules/microshift-blocking-nodeport-access.adoc b/modules/microshift-blocking-nodeport-access.adoc index dc824e9a1e..09ad9db37f 100644 --- a/modules/microshift-blocking-nodeport-access.adoc +++ b/modules/microshift-blocking-nodeport-access.adoc @@ -6,27 +6,27 @@ [id="microshift-blocking-nodeport-access_{context}"] = Blocking external access to the NodePort service on a specific host interface -OVN-Kubernetes does not restrict the host interface where a NodePort service can be accessed from outside a {product-title} node. -The following procedure explains how to block the NodePort service on a specific host interface and restrict external access. +OVN-Kubernetes does not restrict the host interface where a NodePort service can be accessed from outside a {product-title} node. The following procedure explains how to block the NodePort service on a specific host interface and restrict external access. .Prerequisites -* You need access to the cluster as a user with the cluster-admin role. +* You must have an account with root privileges. .Procedure -. Change the `NODEPORT` variable to the host port number assigned to your Kubernetes NodePort service by running the following command: + +. Change the `NODEPORT` variable to the host port number assigned to your Kubernetes NodePort service by running the following command: + [source,terminal] ---- -$ export NODEPORT=30700 +# export NODEPORT=30700 ---- -. 
Change the `INTERFACE_IP` value to the IP address from the host interface that you want to block. For example: +. Change the `INTERFACE_IP` value to the IP address from the host interface that you want to block. For example: + [source,terminal] ---- -$ export INTERFACE_IP=192.168.150.33 +# export INTERFACE_IP=192.168.150.33 ---- -. Insert a new rule in the `nat` table PREROUTING chain to drop all packets that match the destination port and ip. +. Insert a new rule in the `nat` table PREROUTING chain to drop all packets that match the destination port and IP address. For example: + [source,terminal] ---- @@ -50,9 +50,9 @@ table ip nat { + [NOTE] ==== -Note your `handle` number of the newly added rule. You need to remove the `handle` number in the following step +Note the `handle` number of the newly added rule. You need to remove the `handle` number in the following step. ==== -. Remove the custom rule with the following sample command: +. Remove the custom rule with the following sample command: + [source,terminal] ---- diff --git a/modules/microshift-cli-oc-about.adoc b/modules/microshift-cli-oc-about.adoc index 694760e9ff..192c97f6de 100644 --- a/modules/microshift-cli-oc-about.adoc +++ b/modules/microshift-cli-oc-about.adoc @@ -6,7 +6,7 @@ [id="microshift-cli-oc-about_{context}"] = About the OpenShift CLI -With the OpenShift command-line interface (CLI), the `oc` command, you can deploy and manage {product-title} projects from a terminal. The OpenShift CLI is ideal in the following situations: +With the OpenShift command-line interface (CLI), the `oc` command, you can deploy and manage {product-title} projects from a terminal. The CLI `oc` tool is ideal in the following situations: * Working directly with project source code * Scripting {product-title} operations diff --git a/modules/microshift-config-cli-manifests.adoc b/modules/microshift-config-cli-manifests.adoc index 63f1e30522..95cc498aaa 100644 --- a/modules/microshift-config-cli-manifests.adoc +++ b/modules/microshift-config-cli-manifests.adoc @@ -6,7 +6,7 @@ [id="microshift-config-cli-manifests_{context}"] = Using CLI tools and creating manifests -Configure your {product-title} using the supported command line arguments and environment variables. +Configure your {product-title} using the supported command line (CLI) arguments and environment variables. [id="microshift-config-cli-environ-vars_{context}"] == Supported command-line arguments and environment variables diff --git a/modules/microshift-configuration.adoc b/modules/microshift-config-etcd.adoc similarity index 66% rename from modules/microshift-configuration.adoc rename to modules/microshift-config-etcd.adoc index 6e276afdf6..ee11287004 100644 --- a/modules/microshift-configuration.adoc +++ b/modules/microshift-config-etcd.adoc @@ -3,8 +3,8 @@ //* microshift_support/microshift-etcd.adoc :_content-type: PROCEDURE -[id="microshift-configuration_{context}"] -= Configuring the `memoryLimitMB` value to set parameters for the {product-title} etcd server +[id="microshift-config-etcd_{context}"] += Configuring the memoryLimitMB value to set parameters for the {product-title} etcd server By default, etcd will use as much memory as necessary to handle the load on the system. In some memory constrained systems, it might be necessary to limit the amount of memory etcd is allowed to use at a given time. @@ -20,7 +20,7 @@ etcd: + [NOTE] ==== -128mb is the minimum permissible value for memoryLimitMB on {product-title}. 
Values close to the minimum value are more likely to impact etcd performance. The lower the limit, the longer it will take to respond to queries. In addition, if the limit is low or the etcd usage is high, queries will time request out. +The minimum permissible value for `memoryLimitMB` on {product-title} is 128 MB. Values close to the minimum value are more likely to impact etcd performance. The lower the limit, the longer etcd takes to respond to queries. If the limit is too low or the etcd usage is high, queries time out. ==== .Verification diff --git a/modules/microshift-config-nodeport-limits.adoc b/modules/microshift-config-nodeport-limits.adoc index 024e56fb47..9bf50baee2 100644 --- a/modules/microshift-config-nodeport-limits.adoc +++ b/modules/microshift-config-nodeport-limits.adoc @@ -6,19 +6,21 @@ [id="microshift-nodeport-range-limits_{context}"] = Extending the port range for NodePort services -The `serviceNodePortRange` setting allows the extension of the port range available to NodePort services. This option is useful when specific standard ports under the `30000-32767` need to be exposed. For example, your device needs to expose the `1883/tcp` MQ Telemetry Transport (MQTT) port on the network because some client devices cannot use a different port. +The `serviceNodePortRange` setting extends the port range available to NodePort services. This option is useful when specific standard ports under the `30000-32767` range need to be exposed. For example, if your device needs to expose the `1883/tcp` MQ Telemetry Transport (MQTT) port on the network because client devices cannot use a different port. [IMPORTANT] +==== NodePorts can overlap with system ports, causing a malfunction of the system or {product-title}. +==== -Considerations when configuring the NodePort service ranges: +Consider the following when configuring the NodePort service ranges: -* Do not create any NodePort service without an explicit `nodePort` selection. In this case, the port is assigned randomly by the `kube-apiserver`. +* Do not create any NodePort service without an explicit `nodePort` selection. When an explicit `nodePort` is not specified, the port is assigned randomly by the `kube-apiserver` and cannot be predicted. * Do not create any NodePort service for any system service port, {product-title} port, or other services you expose on your device `HostNetwork`. * Table one specifies ports to avoid when extending the port range: - ++ .Ports to avoid. [cols="2",options="header"] |=== diff --git a/modules/microshift-config-yaml.adoc b/modules/microshift-config-yaml.adoc index 47f666df30..a53845a4b4 100644 --- a/modules/microshift-config-yaml.adoc +++ b/modules/microshift-config-yaml.adoc @@ -40,7 +40,7 @@ debugging: <7> Subject Alternative Names for API server certificates. <8> Log verbosity. Valid values for this field are `Normal`, `Debug`, `Trace`, or `TraceAll`. -[NOTE] +[IMPORTANT] ==== -{product-title} only reads the configuration file on startup. Restart {product-title} after changing any configuration settings to have them take effect. +Restart {product-title} after changing any configuration settings to have them take effect. {product-title} reads the configuration file only on start. 
==== diff --git a/modules/microshift-cri-o-container-runtime.adoc b/modules/microshift-cri-o-container-runtime.adoc index 257b17ea98..f79ea297f2 100644 --- a/modules/microshift-cri-o-container-runtime.adoc +++ b/modules/microshift-cri-o-container-runtime.adoc @@ -18,13 +18,14 @@ Environment=NO_PROXY="localhost,127.0.0.1" Environment=HTTP_PROXY="http://$PROXY_USER:$PROXY_PASSWORD@$PROXY_SERVER:$PROXY_PORT/" Environment=HTTPS_PROXY="http://$PROXY_USER:$PROXY_PASSWORD@$PROXY_SERVER:$PROXY_PORT/" ---- -//Q: was this file created during installation? if not, do we need to create it? + . Reload the configuration settings: + [source, terminal] ---- $ sudo systemctl daemon-reload ---- + . Restart the CRI-O service to apply the settings: + [source, terminal] diff --git a/modules/microshift-deploying-a-load-balancer.adoc b/modules/microshift-deploying-a-load-balancer.adoc index c5202aeb67..205aca8d40 100644 --- a/modules/microshift-deploying-a-load-balancer.adoc +++ b/modules/microshift-deploying-a-load-balancer.adoc @@ -4,18 +4,18 @@ :_content-type: PROCEDURE [id="microshift-deploying-a-load-balancer_{context}"] -= Deploying a TCP load balancer on a workload += Deploying a load balancer for a workload -{product-title} offers a built-in implementation of network load balancers. The following example procedure uses the node IP address as the external IP address for the `LoadBalancer` service configuration file. +{product-title} offers a built-in implementation of network load balancers. The following example procedure uses the node IP address as the external IP address for the `LoadBalancer` service configuration file. -.Prerequisites +.Prerequisites -* You installed the OpenShift CLI (`oc`) -* You need access to the cluster as a user with the cluster-admin role. +* The OpenShift CLI (`oc`) is installed. +* You have access to the cluster as a user with the cluster administration role. * You installed a cluster on an infrastructure configured with the OVN-Kubernetes network plugin. * The `KUBECONFIG` environment variable is set. -.Procedure +.Procedure . Verify that your pods are running by running the following command: + @@ -24,8 +24,8 @@ $ oc get pods -A ---- -. Create a namespace by running the following commands: -+ +. Create the example namespace by running the following commands: ++ [source,terminal] ---- $ NAMESPACE=nginx-lb-test @@ -35,7 +35,8 @@ $ NAMESPACE=nginx-lb-test ---- $ oc create ns $NAMESPACE ---- -. The following example deploys three replicas of the test `nginx` application in your namespace. + +. The following example deploys three replicas of the test `nginx` application in your namespace: + [source,terminal] ---- @@ -117,26 +118,26 @@ EOF + [NOTE] ==== -You must ensure that the `port` parameter is a host port that is not occupied by other `LoadBalancer` services or {product-title} components +You must ensure that the `port` parameter is a host port that is not occupied by other `LoadBalancer` services or {product-title} components. ==== -. To verify that the service file exists and the external IP address is properly assigned, and the external IP is identical to the node IP, run the following command: +. 
Verify that the service file exists, that the external IP address is properly assigned, and that the external IP is identical to the node IP by running the following command: + [source,terminal] ---- $ oc get svc -n $NAMESPACE ---- + -.Example output +.Example output [source,terminal] ---- NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE nginx LoadBalancer 10.43.183.104 192.168.1.241 81:32434/TCP 2m ---- -.Verification +.Verification -* The following command forms five connections to the `nginx` application using the external IP address of the `LoadBalancer` service config. You can verify that the load balancer sends requests to all the running applications with the following command: +* The following command forms five connections to the example `nginx` application using the external IP address of the `LoadBalancer` service configuration. The result of the command is a list of those server IP addresses. Verify that the load balancer sends requests to all the running applications with the following command: + [source,terminal] ---- @@ -144,7 +145,7 @@ EXTERNAL_IP=192.168.1.241 seq 5 | xargs -Iz curl -s -I http://$EXTERNAL_IP:81 | grep X-Server-IP ---- + -Your output should contain different IP addresses, this shows that the load balancer is successfully distributing the traffic to the applications. +The output of the previous command contains different IP addresses if the load balancer is successfully distributing the traffic to the applications, for example: + .Example output [source,terminal] diff --git a/modules/microshift-firewall-allow-traffic.adoc b/modules/microshift-firewall-allow-traffic.adoc index 021a7214ed..79d1723fb7 100644 --- a/modules/microshift-firewall-allow-traffic.adoc +++ b/modules/microshift-firewall-allow-traffic.adoc @@ -6,22 +6,20 @@ [id="microshift-firewall-allow-traffic_{context}"] = Allowing network traffic through the firewall -You can allow network traffic through the firewall by first configuring the IP address range with either default or custom values, and then allow internal traffic from pods through the network gateway by inserting the DNS server. +You can allow network traffic through the firewall by configuring the IP address range and inserting the DNS server to allow internal traffic from pods through the network gateway. .Procedure -Set the default values or a custom IP address range. After setting the IP address range, allow internal traffic from the pods through the network gateway. +. Use one of the following commands to set the IP address range: -. To set the IP address range: - -.. To configure the IP address range with default values, run the following command: +.. Configure the IP address range with default values by running the following command: + [source,terminal] ---- $ sudo firewall-offline-cmd --permanent --zone=trusted --add-source=10.42.0.0/16 ---- -.. Alternatively, you can configure the IP address range with custom values by running the following command: +.. 
Configure the IP address range with custom values by running the following command: + [source,terminal] ---- diff --git a/modules/microshift-firewall-apply-settings.adoc b/modules/microshift-firewall-apply-settings.adoc index 627d34f3ad..9bcfb36f8f 100644 --- a/modules/microshift-firewall-apply-settings.adoc +++ b/modules/microshift-firewall-apply-settings.adoc @@ -9,7 +9,8 @@ To apply firewall settings, use the following one-step procedure: .Procedure -After you have finished configuring network access through the firewall, run the following command to restart the firewall and apply settings: + +* After you have finished configuring network access through the firewall, run the following command to restart the firewall and apply the settings: [source,terminal] ---- diff --git a/modules/microshift-firewall-known-issue.adoc b/modules/microshift-firewall-known-issue.adoc index 7396f0bab7..45b0a44c7e 100644 --- a/modules/microshift-firewall-known-issue.adoc +++ b/modules/microshift-firewall-known-issue.adoc @@ -7,3 +7,5 @@ = Known firewall issue * To avoid breaking traffic flows with a firewall reload or restart, execute firewall commands before starting {product-title}. The CNI driver in {product-title} makes use of iptable rules for some traffic flows, such as those using the NodePort service. The iptable rules are generated and inserted by the CNI driver, but are deleted when the firewall reloads or restarts. The absence of the iptable rules breaks traffic flows. If firewall commands have to be executed after {product-title} is running, manually restart `ovnkube-master` pod in the `openshift-ovn-kubernetes` namespace to reset the rules controlled by the CNI driver. + +//Revise and use the unused ki-cni-iptables-deleted procedure in release notes? Need to verify status for 4.14 \ No newline at end of file diff --git a/modules/microshift-firewalld-install.adoc b/modules/microshift-firewalld-install.adoc index 7a286218b6..8f21136033 100644 --- a/modules/microshift-firewalld-install.adoc +++ b/modules/microshift-firewalld-install.adoc @@ -8,6 +8,7 @@ If you are using {op-system-ostree}, firewalld should be installed. To use the service, you can simply configure it. The following procedure can be used if you do not have firewalld, but want to use it. +Install and run the `firewalld` service for {product-title} by using the following steps. .Procedure @@ -25,7 +26,7 @@ $ rpm -q firewalld $ sudo dnf install -y firewalld ---- -. To initiate the firewall, run the following command: +. To start the firewall, run the following command: + [source,terminal] ---- diff --git a/modules/microshift-greenboot-prerollback-log.adoc b/modules/microshift-greenboot-prerollback-log.adoc index b52c115f3c..da151df888 100644 --- a/modules/microshift-greenboot-prerollback-log.adoc +++ b/modules/microshift-greenboot-prerollback-log.adoc @@ -7,7 +7,7 @@ [id="microshift-greenboot-access-prerollback-check_{context}"] = Accessing prerollback health check output in the system log -You can access the output of health check scripts in the system log. For example, to check the results of a prerollback script, use the following procedure. +You can access the output of health check scripts in the system log. For example, check the results of a prerollback script using the following procedure. .Procedure @@ -19,6 +19,7 @@ $ sudo journalctl -o cat -u redboot-task-runner.service ---- .Example output of a prerollback script + [source, terminal] ---- ... 
diff --git a/modules/microshift-greenboot-systemd-journal-data.adoc b/modules/microshift-greenboot-systemd-journal-data.adoc index 388f3f13ad..a0675cc078 100644 --- a/modules/microshift-greenboot-systemd-journal-data.adoc +++ b/modules/microshift-greenboot-systemd-journal-data.adoc @@ -6,7 +6,7 @@ [id="microshift-greenboot-systemd-journal-data_{context}"] = Enabling systemd journal service data persistency -The default configuration of the `systemd` journal service stores the data in the volatile `/run/log/journal` directory. To persist system logs across system starts and restarts, enable log persistence and set limits on the maximal journal data size. The following steps allow you to enable `systemd` journal service data persistency. +The default configuration of the `systemd` journal service stores the data in the volatile `/run/log/journal` directory. To persist system logs across system starts and restarts, you must enable log persistence and set limits on the maximal journal data size. .Procedure diff --git a/modules/microshift-ki-cni-iptables-deleted.adoc b/modules/microshift-ki-cni-iptables-deleted.adoc index ecfc36f7c0..ebb47c1a9e 100644 --- a/modules/microshift-ki-cni-iptables-deleted.adoc +++ b/modules/microshift-ki-cni-iptables-deleted.adoc @@ -1,6 +1,6 @@ // Module included in the following assemblies: // -// * microshift_troubleshooting/microshift-known-issues.adoc +// * this module is unused as of the 4.13 release; it can be kept for the procedure of deleting the ovnkube master pod if the iptables flush issue with the firewall persists :_content-type: PROCEDURE [id="microshift-ki-cni-iptables-deleted_{context}"] diff --git a/modules/microshift-lvms-config-example-basic.adoc b/modules/microshift-lvms-config-example-basic.adoc index dc268b6974..3b32a967f0 100644 --- a/modules/microshift-lvms-config-example-basic.adoc +++ b/modules/microshift-lvms-config-example-basic.adoc @@ -27,7 +27,6 @@ device-classes: <2> <4> String. The group where the `device-class` creates the logical volumes. <5> Unsigned 64-bit integer. Storage capacity in GiB to be left unallocated in the volume group. Defaults to `0`. <6> Boolean. Indicates that the `device-class` is used by default. Defaults to `false`. At least one value must be entered in the YAML file values when this is set to `true`. -//Q: still true that value defaults to false? [IMPORTANT] ==== diff --git a/modules/microshift-mDNS.adoc b/modules/microshift-mDNS.adoc index 982195a4db..a4d57253a6 100644 --- a/modules/microshift-mDNS.adoc +++ b/modules/microshift-mDNS.adoc @@ -6,6 +6,6 @@ [id="microshift-mDNS_{context}"] = The multicast DNS protocol -The multicast DNS protocol (mDNS) allows name resolution and service discovery within a Local Area Network (LAN) using multicast exposed on the `5353/UDP` port. +You can use the multicast DNS protocol (mDNS) to allow name resolution and service discovery within a Local Area Network (LAN) using multicast exposed on the `5353/UDP` port. {product-title} includes an embedded mDNS server for deployment scenarios in which the authoritative DNS server cannot be reconfigured to point clients to services on {product-title}. The embedded DNS server allows `.local` domains exposed by {product-title} to be discovered by other elements on the LAN. 
diff --git a/modules/microshift-man-config-ovs-bridge.adoc b/modules/microshift-man-config-ovs-bridge.adoc index 85bc6ade19..bf6226f997 100644 --- a/modules/microshift-man-config-ovs-bridge.adoc +++ b/modules/microshift-man-config-ovs-bridge.adoc @@ -1,4 +1,5 @@ //FIXME: need updated config procedure for customers that will persist across reboots +//this module content is unused as of 4.13 //=== Manually configuring OVS bridge br-ex //.Procedure diff --git a/modules/microshift-nodeport-unreachable-workaround.adoc b/modules/microshift-nodeport-unreachable-workaround.adoc index a4cc2bd8e6..4bef2a62fc 100644 --- a/modules/microshift-nodeport-unreachable-workaround.adoc +++ b/modules/microshift-nodeport-unreachable-workaround.adoc @@ -1,6 +1,7 @@ // Module included in the following assemblies: // -// * microshift_troubleshooting/microshift-known-issues.adoc +// * module may be unused in 4.13 + :_content-type: PROCEDURE [id="microshift-nodeport-unreachable-workaround_{context}"] = Manually restarting the `ovnkube-master` pod to resume node port traffic diff --git a/modules/microshift-observe-debug-etcd-server.adoc b/modules/microshift-observe-debug-etcd-server.adoc index 113edb0202..943f7d3550 100644 --- a/modules/microshift-observe-debug-etcd-server.adoc +++ b/modules/microshift-observe-debug-etcd-server.adoc @@ -10,7 +10,7 @@ You can gather `journalctl` logs to observe and debug the etcd server logs. .Prerequisites -* You must have the {product-title} service running. +* The {product-title} service is running. .Procedure diff --git a/modules/microshift-oc-adm-by-example-content.adoc b/modules/microshift-oc-adm-by-example-content.adoc index a14056337c..670459b44c 100644 --- a/modules/microshift-oc-adm-by-example-content.adoc +++ b/modules/microshift-oc-adm-by-example-content.adoc @@ -6,7 +6,7 @@ [id="microshift-oc-cli-admin_{context}"] = OpenShift CLI (oc) administrator commands -//hand-edited for relevance to MicroShift in lieu of working PR--can replace when auto-generate is ready +//IMPORTANT: QE'd and hand-edited for relevance to MicroShift; use this version to check auto-generated files for 4.14 //== oc adm build-chain @@ -53,21 +53,21 @@ Mirror an operator-registry catalog == oc adm inspect Collect debugging data for a given resource - +//NOTE: This was hand-edited per QE in 4.13. This section is correct as is. 
.Example usage [source,bash,options="nowrap"] ---- - # Collect debugging data for the "openshift-apiserver" clusteroperator - oc adm inspect clusteroperator/openshift-apiserver + # Collect debugging data for the "microshift-apiserver" + oc adm inspect service/kubernetes - # Collect debugging data for the "openshift-apiserver" and "kube-apiserver" clusteroperators - oc adm inspect clusteroperator/openshift-apiserver clusteroperator/kube-apiserver + # Collect debugging data for the "microshift-apiserver" and "toptlvm-apiserver" + oc adm inspect service/kubernetes crd/logicalvolumes.topolvm.io - # Collect debugging data for all clusteroperators - oc adm inspect clusteroperator + # Collect debugging data for services + oc adm inspect service - # Collect debugging data for all clusteroperators and clusterversions - oc adm inspect clusteroperators,clusterversions + # Collect debugging data for all clusterversions + oc adm inspect service,crd ---- == oc adm migrate icsp diff --git a/modules/microshift-oc-by-example-content-gen.adoc b/modules/microshift-oc-by-example-content-gen.adoc deleted file mode 100644 index aae63daafd..0000000000 --- a/modules/microshift-oc-by-example-content-gen.adoc +++ /dev/null @@ -1,2371 +0,0 @@ -// NOTE: The contents of this file are auto-generated -// This template is for non-admin (not 'oc adm ...') commands -// Uses 'source,bash' for proper syntax highlighting for comments in examples - -:_content-type: REFERENCE -[id="microshift-oc-cli-developer_{context}"] -= OpenShift CLI (oc) developer commands - -//NOTE: this is the autogenerated version, one command edited out - -== oc annotate -Update the annotations on a resource - -.Example usage -[source,bash,options="nowrap"] ----- - # Update pod 'foo' with the annotation 'description' and the value 'my frontend' - # If the same annotation is set multiple times, only the last value will be applied - oc annotate pods foo description='my frontend' - - # Update a pod identified by type and name in "pod.json" - oc annotate -f pod.json description='my frontend' - - # Update pod 'foo' with the annotation 'description' and the value 'my frontend running nginx', overwriting any existing value - oc annotate --overwrite pods foo description='my frontend running nginx' - - # Update all pods in the namespace - oc annotate pods --all description='my frontend running nginx' - - # Update pod 'foo' only if the resource is unchanged from version 1 - oc annotate pods foo description='my frontend running nginx' --resource-version=1 - - # Update pod 'foo' by removing an annotation named 'description' if it exists - # Does not require the --overwrite flag - oc annotate pods foo description- ----- - - - -== oc api-resources -Print the supported API resources on the server - -.Example usage -[source,bash,options="nowrap"] ----- - # Print the supported API resources - oc api-resources - - # Print the supported API resources with more information - oc api-resources -o wide - - # Print the supported API resources sorted by a column - oc api-resources --sort-by=name - - # Print the supported namespaced resources - oc api-resources --namespaced=true - - # Print the supported non-namespaced resources - oc api-resources --namespaced=false - - # Print the supported API resources with a specific APIGroup - oc api-resources --api-group=rbac.authorization.k8s.io ----- - - - -== oc api-versions -Print the supported API versions on the server, in the form of "group/version" - -.Example usage -[source,bash,options="nowrap"] ----- - # Print the supported API 
versions - oc api-versions ----- - - - -== oc apply -Apply a configuration to a resource by file name or stdin - -.Example usage -[source,bash,options="nowrap"] ----- - # Apply the configuration in pod.json to a pod - oc apply -f ./pod.json - - # Apply resources from a directory containing kustomization.yaml - e.g. dir/kustomization.yaml - oc apply -k dir/ - - # Apply the JSON passed into stdin to a pod - cat pod.json | oc apply -f - - - # Apply the configuration from all files that end with '.json' - i.e. expand wildcard characters in file names - oc apply -f '*.json' - - # Note: --prune is still in Alpha - # Apply the configuration in manifest.yaml that matches label app=nginx and delete all other resources that are not in the file and match label app=nginx - oc apply --prune -f manifest.yaml -l app=nginx - - # Apply the configuration in manifest.yaml and delete all the other config maps that are not in the file - oc apply --prune -f manifest.yaml --all --prune-allowlist=core/v1/ConfigMap ----- - - - -== oc apply edit-last-applied -Edit latest last-applied-configuration annotations of a resource/object - -.Example usage -[source,bash,options="nowrap"] ----- - # Edit the last-applied-configuration annotations by type/name in YAML - oc apply edit-last-applied deployment/nginx - - # Edit the last-applied-configuration annotations by file in JSON - oc apply edit-last-applied -f deploy.yaml -o json ----- - - - -== oc apply set-last-applied -Set the last-applied-configuration annotation on a live object to match the contents of a file - -.Example usage -[source,bash,options="nowrap"] ----- - # Set the last-applied-configuration of a resource to match the contents of a file - oc apply set-last-applied -f deploy.yaml - - # Execute set-last-applied against each configuration file in a directory - oc apply set-last-applied -f path/ - - # Set the last-applied-configuration of a resource to match the contents of a file; will create the annotation if it does not already exist - oc apply set-last-applied -f deploy.yaml --create-annotation=true ----- - - - -== oc apply view-last-applied -View the latest last-applied-configuration annotations of a resource/object - -.Example usage -[source,bash,options="nowrap"] ----- - # View the last-applied-configuration annotations by type/name in YAML - oc apply view-last-applied deployment/nginx - - # View the last-applied-configuration annotations by file in JSON - oc apply view-last-applied -f deploy.yaml -o json ----- - - - -== oc attach -Attach to a running container - -.Example usage -[source,bash,options="nowrap"] ----- - # Get output from running pod mypod; use the 'oc.kubernetes.io/default-container' annotation - # for selecting the container to be attached or the first container in the pod will be chosen - oc attach mypod - - # Get output from ruby-container from pod mypod - oc attach mypod -c ruby-container - - # Switch to raw terminal mode; sends stdin to 'bash' in ruby-container from pod mypod - # and sends stdout/stderr from 'bash' back to the client - oc attach mypod -c ruby-container -i -t - - # Get output from the first pod of a replica set named nginx - oc attach rs/nginx ----- - - - -== oc auth can-i -Check whether an action is allowed - -.Example usage -[source,bash,options="nowrap"] ----- - # Check to see if I can create pods in any namespace - oc auth can-i create pods --all-namespaces - - # Check to see if I can list deployments in my current namespace - oc auth can-i list deployments.apps - - # Check to see if I can do everything in my 
current namespace ("*" means all) - oc auth can-i '*' '*' - - # Check to see if I can get the job named "bar" in namespace "foo" - oc auth can-i list jobs.batch/bar -n foo - - # Check to see if I can read pod logs - oc auth can-i get pods --subresource=log - - # Check to see if I can access the URL /logs/ - oc auth can-i get /logs/ - - # List all allowed actions in namespace "foo" - oc auth can-i --list --namespace=foo ----- - - - -== oc auth reconcile -Reconciles rules for RBAC role, role binding, cluster role, and cluster role binding objects - -.Example usage -[source,bash,options="nowrap"] ----- - # Reconcile RBAC resources from a file - oc auth reconcile -f my-rbac-rules.yaml ----- - -//== oc autoscale -//removed, does not apply to MicroShift - -== oc cluster-info -Display cluster information - -.Example usage -[source,bash,options="nowrap"] ----- - # Print the address of the control plane and cluster services - oc cluster-info ----- - - - -== oc cluster-info dump -Dump relevant information for debugging and diagnosis - -.Example usage -[source,bash,options="nowrap"] ----- - # Dump current cluster state to stdout - oc cluster-info dump - - # Dump current cluster state to /path/to/cluster-state - oc cluster-info dump --output-directory=/path/to/cluster-state - - # Dump all namespaces to stdout - oc cluster-info dump --all-namespaces - - # Dump a set of namespaces to /path/to/cluster-state - oc cluster-info dump --namespaces default,kube-system --output-directory=/path/to/cluster-state ----- - - - -== oc completion -Output shell completion code for the specified shell (bash, zsh, fish, or powershell) - -.Example usage -[source,bash,options="nowrap"] ----- - # Installing bash completion on macOS using homebrew - ## If running Bash 3.2 included with macOS - brew install bash-completion - ## or, if running Bash 4.1+ - brew install bash-completion@2 - ## If oc is installed via homebrew, this should start working immediately - ## If you've installed via other means, you may need add the completion to your completion directory - oc completion bash > $(brew --prefix)/etc/bash_completion.d/oc - - - # Installing bash completion on Linux - ## If bash-completion is not installed on Linux, install the 'bash-completion' package - ## via your distribution's package manager. 
- ## Load the oc completion code for bash into the current shell - source <(oc completion bash) - ## Write bash completion code to a file and source it from .bash_profile - oc completion bash > ~/.kube/completion.bash.inc - printf " - # Kubectl shell completion - source '$HOME/.kube/completion.bash.inc' - " >> $HOME/.bash_profile - source $HOME/.bash_profile - - # Load the oc completion code for zsh[1] into the current shell - source <(oc completion zsh) - # Set the oc completion code for zsh[1] to autoload on startup - oc completion zsh > "${fpath[1]}/_oc" - - - # Load the oc completion code for fish[2] into the current shell - oc completion fish | source - # To load completions for each session, execute once: - oc completion fish > ~/.config/fish/completions/oc.fish - - # Load the oc completion code for powershell into the current shell - oc completion powershell | Out-String | Invoke-Expression - # Set oc completion code for powershell to run on startup - ## Save completion code to a script and execute in the profile - oc completion powershell > $HOME\.kube\completion.ps1 - Add-Content $PROFILE "$HOME\.kube\completion.ps1" - ## Execute completion code in the profile - Add-Content $PROFILE "if (Get-Command oc -ErrorAction SilentlyContinue) { - oc completion powershell | Out-String | Invoke-Expression - }" - ## Add completion code directly to the $PROFILE script - oc completion powershell >> $PROFILE ----- - - - -== oc config current-context -Display the current-context - -.Example usage -[source,bash,options="nowrap"] ----- - # Display the current-context - oc config current-context ----- - - - -== oc config delete-cluster -Delete the specified cluster from the kubeconfig - -.Example usage -[source,bash,options="nowrap"] ----- - # Delete the minikube cluster - oc config delete-cluster minikube ----- - - - -== oc config delete-context -Delete the specified context from the kubeconfig - -.Example usage -[source,bash,options="nowrap"] ----- - # Delete the context for the minikube cluster - oc config delete-context minikube ----- - - - -== oc config delete-user -Delete the specified user from the kubeconfig - -.Example usage -[source,bash,options="nowrap"] ----- - # Delete the minikube user - oc config delete-user minikube ----- - - - -== oc config get-clusters -Display clusters defined in the kubeconfig - -.Example usage -[source,bash,options="nowrap"] ----- - # List the clusters that oc knows about - oc config get-clusters ----- - - - -== oc config get-contexts -Describe one or many contexts - -.Example usage -[source,bash,options="nowrap"] ----- - # List all the contexts in your kubeconfig file - oc config get-contexts - - # Describe one context in your kubeconfig file - oc config get-contexts my-context ----- - - - -== oc config get-users -Display users defined in the kubeconfig - -.Example usage -[source,bash,options="nowrap"] ----- - # List the users that oc knows about - oc config get-users ----- - - - -== oc config rename-context -Rename a context from the kubeconfig file - -.Example usage -[source,bash,options="nowrap"] ----- - # Rename the context 'old-name' to 'new-name' in your kubeconfig file - oc config rename-context old-name new-name ----- - - - -== oc config set -Set an individual value in a kubeconfig file - -.Example usage -[source,bash,options="nowrap"] ----- - # Set the server field on the my-cluster cluster to https://1.2.3.4 - oc config set clusters.my-cluster.server https://1.2.3.4 - - # Set the certificate-authority-data field on the my-cluster cluster - oc config 
set clusters.my-cluster.certificate-authority-data $(echo "cert_data_here" | base64 -i -) - - # Set the cluster field in the my-context context to my-cluster - oc config set contexts.my-context.cluster my-cluster - - # Set the client-key-data field in the cluster-admin user using --set-raw-bytes option - oc config set users.cluster-admin.client-key-data cert_data_here --set-raw-bytes=true ----- - - - -== oc config set-cluster -Set a cluster entry in kubeconfig - -.Example usage -[source,bash,options="nowrap"] ----- - # Set only the server field on the e2e cluster entry without touching other values - oc config set-cluster e2e --server=https://1.2.3.4 - - # Embed certificate authority data for the e2e cluster entry - oc config set-cluster e2e --embed-certs --certificate-authority=~/.kube/e2e/kubernetes.ca.crt - - # Disable cert checking for the e2e cluster entry - oc config set-cluster e2e --insecure-skip-tls-verify=true - - # Set custom TLS server name to use for validation for the e2e cluster entry - oc config set-cluster e2e --tls-server-name=my-cluster-name - - # Set proxy url for the e2e cluster entry - oc config set-cluster e2e --proxy-url=https://1.2.3.4 ----- - - - -== oc config set-context -Set a context entry in kubeconfig - -.Example usage -[source,bash,options="nowrap"] ----- - # Set the user field on the gce context entry without touching other values - oc config set-context gce --user=cluster-admin ----- - - - -== oc config set-credentials -Set a user entry in kubeconfig - -.Example usage -[source,bash,options="nowrap"] ----- - # Set only the "client-key" field on the "cluster-admin" - # entry, without touching other values - oc config set-credentials cluster-admin --client-key=~/.kube/admin.key - - # Set basic auth for the "cluster-admin" entry - oc config set-credentials cluster-admin --username=admin --password=uXFGweU9l35qcif - - # Embed client certificate data in the "cluster-admin" entry - oc config set-credentials cluster-admin --client-certificate=~/.kube/admin.crt --embed-certs=true - - # Enable the Google Compute Platform auth provider for the "cluster-admin" entry - oc config set-credentials cluster-admin --auth-provider=gcp - - # Enable the OpenID Connect auth provider for the "cluster-admin" entry with additional args - oc config set-credentials cluster-admin --auth-provider=oidc --auth-provider-arg=client-id=foo --auth-provider-arg=client-secret=bar - - # Remove the "client-secret" config value for the OpenID Connect auth provider for the "cluster-admin" entry - oc config set-credentials cluster-admin --auth-provider=oidc --auth-provider-arg=client-secret- - - # Enable new exec auth plugin for the "cluster-admin" entry - oc config set-credentials cluster-admin --exec-command=/path/to/the/executable --exec-api-version=client.authentication.k8s.io/v1beta1 - - # Define new exec auth plugin args for the "cluster-admin" entry - oc config set-credentials cluster-admin --exec-arg=arg1 --exec-arg=arg2 - - # Create or update exec auth plugin environment variables for the "cluster-admin" entry - oc config set-credentials cluster-admin --exec-env=key1=val1 --exec-env=key2=val2 - - # Remove exec auth plugin environment variables for the "cluster-admin" entry - oc config set-credentials cluster-admin --exec-env=var-to-remove- ----- - - - -== oc config unset -Unset an individual value in a kubeconfig file - -.Example usage -[source,bash,options="nowrap"] ----- - # Unset the current-context - oc config unset current-context - - # Unset namespace in foo context - oc config unset 
contexts.foo.namespace ----- - - - -== oc config use-context -Set the current-context in a kubeconfig file - -.Example usage -[source,bash,options="nowrap"] ----- - # Use the context for the minikube cluster - oc config use-context minikube ----- - - - -== oc config view -Display merged kubeconfig settings or a specified kubeconfig file - -.Example usage -[source,bash,options="nowrap"] ----- - # Show merged kubeconfig settings - oc config view - - # Show merged kubeconfig settings and raw certificate data and exposed secrets - oc config view --raw - - # Get the password for the e2e user - oc config view -o jsonpath='{.users[?(@.name == "e2e")].user.password}' ----- - - - -== oc cp -Copy files and directories to and from containers - -.Example usage -[source,bash,options="nowrap"] ----- - # !!!Important Note!!! - # Requires that the 'tar' binary is present in your container - # image. If 'tar' is not present, 'oc cp' will fail. - # - # For advanced use cases, such as symlinks, wildcard expansion or - # file mode preservation, consider using 'oc exec'. - - # Copy /tmp/foo local file to /tmp/bar in a remote pod in namespace - tar cf - /tmp/foo | oc exec -i -n -- tar xf - -C /tmp/bar - - # Copy /tmp/foo from a remote pod to /tmp/bar locally - oc exec -n -- tar cf - /tmp/foo | tar xf - -C /tmp/bar - - # Copy /tmp/foo_dir local directory to /tmp/bar_dir in a remote pod in the default namespace - oc cp /tmp/foo_dir :/tmp/bar_dir - - # Copy /tmp/foo local file to /tmp/bar in a remote pod in a specific container - oc cp /tmp/foo :/tmp/bar -c - - # Copy /tmp/foo local file to /tmp/bar in a remote pod in namespace - oc cp /tmp/foo /:/tmp/bar - - # Copy /tmp/foo from a remote pod to /tmp/bar locally - oc cp /:/tmp/foo /tmp/bar ----- - - - -== oc create -Create a resource from a file or from stdin - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a pod using the data in pod.json - oc create -f ./pod.json - - # Create a pod based on the JSON passed into stdin - cat pod.json | oc create -f - - - # Edit the data in registry.yaml in JSON then create the resource using the edited data - oc create -f registry.yaml --edit -o json ----- - - - -== oc create clusterrole -Create a cluster role - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a cluster role named "pod-reader" that allows user to perform "get", "watch" and "list" on pods - oc create clusterrole pod-reader --verb=get,list,watch --resource=pods - - # Create a cluster role named "pod-reader" with ResourceName specified - oc create clusterrole pod-reader --verb=get --resource=pods --resource-name=readablepod --resource-name=anotherpod - - # Create a cluster role named "foo" with API Group specified - oc create clusterrole foo --verb=get,list,watch --resource=rs.apps - - # Create a cluster role named "foo" with SubResource specified - oc create clusterrole foo --verb=get,list,watch --resource=pods,pods/status - - # Create a cluster role name "foo" with NonResourceURL specified - oc create clusterrole "foo" --verb=get --non-resource-url=/logs/* - - # Create a cluster role name "monitoring" with AggregationRule specified - oc create clusterrole monitoring --aggregation-rule="rbac.example.com/aggregate-to-monitoring=true" ----- - - - -== oc create clusterrolebinding -Create a cluster role binding for a particular cluster role - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a cluster role binding for user1, user2, and group1 using the cluster-admin cluster role - oc create clusterrolebinding 
cluster-admin --clusterrole=cluster-admin --user=user1 --user=user2 --group=group1 ----- - - - -== oc create configmap -Create a config map from a local file, directory or literal value - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a new config map named my-config based on folder bar - oc create configmap my-config --from-file=path/to/bar - - # Create a new config map named my-config with specified keys instead of file basenames on disk - oc create configmap my-config --from-file=key1=/path/to/bar/file1.txt --from-file=key2=/path/to/bar/file2.txt - - # Create a new config map named my-config with key1=config1 and key2=config2 - oc create configmap my-config --from-literal=key1=config1 --from-literal=key2=config2 - - # Create a new config map named my-config from the key=value pairs in the file - oc create configmap my-config --from-file=path/to/bar - - # Create a new config map named my-config from an env file - oc create configmap my-config --from-env-file=path/to/foo.env --from-env-file=path/to/bar.env ----- - - - -== oc create cronjob -Create a cron job with the specified name - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a cron job - oc create cronjob my-job --image=busybox --schedule="*/1 * * * *" - - # Create a cron job with a command - oc create cronjob my-job --image=busybox --schedule="*/1 * * * *" -- date ----- - - - -== oc create deployment -Create a deployment with the specified name - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a deployment named my-dep that runs the busybox image - oc create deployment my-dep --image=busybox - - # Create a deployment with a command - oc create deployment my-dep --image=busybox -- date - - # Create a deployment named my-dep that runs the nginx image with 3 replicas - oc create deployment my-dep --image=nginx --replicas=3 - - # Create a deployment named my-dep that runs the busybox image and expose port 5701 - oc create deployment my-dep --image=busybox --port=5701 ----- - - - -== oc create ingress -Create an ingress with the specified name - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a single ingress called 'simple' that directs requests to foo.com/bar to svc - # svc1:8080 with a tls secret "my-cert" - oc create ingress simple --rule="foo.com/bar=svc1:8080,tls=my-cert" - - # Create a catch all ingress of "/path" pointing to service svc:port and Ingress Class as "otheringress" - oc create ingress catch-all --class=otheringress --rule="/path=svc:port" - - # Create an ingress with two annotations: ingress.annotation1 and ingress.annotations2 - oc create ingress annotated --class=default --rule="foo.com/bar=svc:port" \ - --annotation ingress.annotation1=foo \ - --annotation ingress.annotation2=bla - - # Create an ingress with the same host and multiple paths - oc create ingress multipath --class=default \ - --rule="foo.com/=svc:port" \ - --rule="foo.com/admin/=svcadmin:portadmin" - - # Create an ingress with multiple hosts and the pathType as Prefix - oc create ingress ingress1 --class=default \ - --rule="foo.com/path*=svc:8080" \ - --rule="bar.com/admin*=svc2:http" - - # Create an ingress with TLS enabled using the default ingress certificate and different path types - oc create ingress ingtls --class=default \ - --rule="foo.com/=svc:https,tls" \ - --rule="foo.com/path/subpath*=othersvc:8080" - - # Create an ingress with TLS enabled using a specific secret and pathType as Prefix - oc create ingress ingsecret --class=default \ - 
--rule="foo.com/*=svc:8080,tls=secret1" - - # Create an ingress with a default backend - oc create ingress ingdefault --class=default \ - --default-backend=defaultsvc:http \ - --rule="foo.com/*=svc:8080,tls=secret1" ----- - - - -== oc create job -Create a job with the specified name - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a job - oc create job my-job --image=busybox - - # Create a job with a command - oc create job my-job --image=busybox -- date - - # Create a job from a cron job named "a-cronjob" - oc create job test-job --from=cronjob/a-cronjob ----- - - - -== oc create namespace -Create a namespace with the specified name - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a new namespace named my-namespace - oc create namespace my-namespace ----- - - - -== oc create poddisruptionbudget -Create a pod disruption budget with the specified name - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a pod disruption budget named my-pdb that will select all pods with the app=rails label - # and require at least one of them being available at any point in time - oc create poddisruptionbudget my-pdb --selector=app=rails --min-available=1 - - # Create a pod disruption budget named my-pdb that will select all pods with the app=nginx label - # and require at least half of the pods selected to be available at any point in time - oc create pdb my-pdb --selector=app=nginx --min-available=50% ----- - - - -== oc create priorityclass -Create a priority class with the specified name - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a priority class named high-priority - oc create priorityclass high-priority --value=1000 --description="high priority" - - # Create a priority class named default-priority that is considered as the global default priority - oc create priorityclass default-priority --value=1000 --global-default=true --description="default priority" - - # Create a priority class named high-priority that cannot preempt pods with lower priority - oc create priorityclass high-priority --value=1000 --description="high priority" --preemption-policy="Never" ----- - - - -== oc create quota -Create a quota with the specified name - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a new resource quota named my-quota - oc create quota my-quota --hard=cpu=1,memory=1G,pods=2,services=3,replicationcontrollers=2,resourcequotas=1,secrets=5,persistentvolumeclaims=10 - - # Create a new resource quota named best-effort - oc create quota best-effort --hard=pods=100 --scopes=BestEffort ----- - - - -== oc create role -Create a role with single rule - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a role named "pod-reader" that allows user to perform "get", "watch" and "list" on pods - oc create role pod-reader --verb=get --verb=list --verb=watch --resource=pods - - # Create a role named "pod-reader" with ResourceName specified - oc create role pod-reader --verb=get --resource=pods --resource-name=readablepod --resource-name=anotherpod - - # Create a role named "foo" with API Group specified - oc create role foo --verb=get,list,watch --resource=rs.apps - - # Create a role named "foo" with SubResource specified - oc create role foo --verb=get,list,watch --resource=pods,pods/status ----- - - - -== oc create rolebinding -Create a role binding for a particular role or cluster role - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a role binding for user1, user2, and group1 using the admin cluster 
role - oc create rolebinding admin --clusterrole=admin --user=user1 --user=user2 --group=group1 ----- - - - -== oc create route edge -Create a route that uses edge TLS termination - -.Example usage -[source,bash,options="nowrap"] ----- - # Create an edge route named "my-route" that exposes the frontend service - oc create route edge my-route --service=frontend - - # Create an edge route that exposes the frontend service and specify a path - # If the route name is omitted, the service name will be used - oc create route edge --service=frontend --path /assets ----- - - - -== oc create route passthrough -Create a route that uses passthrough TLS termination - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a passthrough route named "my-route" that exposes the frontend service - oc create route passthrough my-route --service=frontend - - # Create a passthrough route that exposes the frontend service and specify - # a host name. If the route name is omitted, the service name will be used - oc create route passthrough --service=frontend --hostname=www.example.com ----- - - - -== oc create route reencrypt -Create a route that uses reencrypt TLS termination - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a route named "my-route" that exposes the frontend service - oc create route reencrypt my-route --service=frontend --dest-ca-cert cert.cert - - # Create a reencrypt route that exposes the frontend service, letting the - # route name default to the service name and the destination CA certificate - # default to the service CA - oc create route reencrypt --service=frontend ----- - - - -== oc create secret docker-registry -Create a secret for use with a Docker registry - -.Example usage -[source,bash,options="nowrap"] ----- - # If you don't already have a .dockercfg file, you can create a dockercfg secret directly by using: - oc create secret docker-registry my-secret --docker-server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL - - # Create a new secret named my-secret from ~/.docker/config.json - oc create secret docker-registry my-secret --from-file=.dockerconfigjson=path/to/.docker/config.json ----- - - - -== oc create secret generic -Create a secret from a local file, directory, or literal value - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a new secret named my-secret with keys for each file in folder bar - oc create secret generic my-secret --from-file=path/to/bar - - # Create a new secret named my-secret with specified keys instead of names on disk - oc create secret generic my-secret --from-file=ssh-privatekey=path/to/id_rsa --from-file=ssh-publickey=path/to/id_rsa.pub - - # Create a new secret named my-secret with key1=supersecret and key2=topsecret - oc create secret generic my-secret --from-literal=key1=supersecret --from-literal=key2=topsecret - - # Create a new secret named my-secret using a combination of a file and a literal - oc create secret generic my-secret --from-file=ssh-privatekey=path/to/id_rsa --from-literal=passphrase=topsecret - - # Create a new secret named my-secret from env files - oc create secret generic my-secret --from-env-file=path/to/foo.env --from-env-file=path/to/bar.env ----- - - - -== oc create secret tls -Create a TLS secret - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a new TLS secret named tls-secret with the given key pair - oc create secret tls tls-secret --cert=path/to/tls.cert --key=path/to/tls.key ----- - - - 
-== oc create service clusterip -Create a ClusterIP service - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a new ClusterIP service named my-cs - oc create service clusterip my-cs --tcp=5678:8080 - - # Create a new ClusterIP service named my-cs (in headless mode) - oc create service clusterip my-cs --clusterip="None" ----- - - - -== oc create service externalname -Create an ExternalName service - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a new ExternalName service named my-ns - oc create service externalname my-ns --external-name bar.com ----- - - - -== oc create service loadbalancer -Create a LoadBalancer service - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a new LoadBalancer service named my-lbs - oc create service loadbalancer my-lbs --tcp=5678:8080 ----- - - - -== oc create service nodeport -Create a NodePort service - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a new NodePort service named my-ns - oc create service nodeport my-ns --tcp=5678:8080 ----- - - - -== oc create serviceaccount -Create a service account with the specified name - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a new service account named my-service-account - oc create serviceaccount my-service-account ----- - - - -== oc create token -Request a service account token - -.Example usage -[source,bash,options="nowrap"] ----- - # Request a token to authenticate to the kube-apiserver as the service account "myapp" in the current namespace - oc create token myapp - - # Request a token for a service account in a custom namespace - oc create token myapp --namespace myns - - # Request a token with a custom expiration - oc create token myapp --duration 10m - - # Request a token with a custom audience - oc create token myapp --audience https://example.com - - # Request a token bound to an instance of a Secret object - oc create token myapp --bound-object-kind Secret --bound-object-name mysecret - - # Request a token bound to an instance of a Secret object with a specific uid - oc create token myapp --bound-object-kind Secret --bound-object-name mysecret --bound-object-uid 0d4691ed-659b-4935-a832-355f77ee47cc ----- - - - -== oc debug -Launch a new instance of a pod for debugging - -.Example usage -[source,bash,options="nowrap"] ----- - # Start a shell session into a pod using the OpenShift tools image - oc debug - - # Debug a currently running deployment by creating a new pod - oc debug deploy/test - - # Debug a node as an administrator - oc debug node/master-1 - - # Launch a shell in a pod using the provided image stream tag - oc debug istag/mysql:latest -n openshift - - # Test running a job as a non-root user - oc debug job/test --as-user=1000000 - - # Debug a specific failing container by running the env command in the 'second' container - oc debug daemonset/test -c second -- /bin/env - - # See the pod that would be created to debug - oc debug mypod-9xbc -o yaml - - # Debug a resource but launch the debug pod in another namespace - # Note: Not all resources can be debugged using --to-namespace without modification. For example, - # volumes and service accounts are namespace-dependent. Add '-o yaml' to output the debug pod definition - # to disk. 
If necessary, edit the definition then run 'oc debug -f -' or run without --to-namespace - oc debug mypod-9xbc --to-namespace testns ----- - - - -== oc delete -Delete resources by file names, stdin, resources and names, or by resources and label selector - -.Example usage -[source,bash,options="nowrap"] ----- - # Delete a pod using the type and name specified in pod.json - oc delete -f ./pod.json - - # Delete resources from a directory containing kustomization.yaml - e.g. dir/kustomization.yaml - oc delete -k dir - - # Delete resources from all files that end with '.json' - i.e. expand wildcard characters in file names - oc delete -f '*.json' - - # Delete a pod based on the type and name in the JSON passed into stdin - cat pod.json | oc delete -f - - - # Delete pods and services with same names "baz" and "foo" - oc delete pod,service baz foo - - # Delete pods and services with label name=myLabel - oc delete pods,services -l name=myLabel - - # Delete a pod with minimal delay - oc delete pod foo --now - - # Force delete a pod on a dead node - oc delete pod foo --force - - # Delete all pods - oc delete pods --all ----- - - - -== oc describe -Show details of a specific resource or group of resources - -.Example usage -[source,bash,options="nowrap"] ----- - # Describe a node - oc describe nodes kubernetes-node-emt8.c.myproject.internal - - # Describe a pod - oc describe pods/nginx - - # Describe a pod identified by type and name in "pod.json" - oc describe -f pod.json - - # Describe all pods - oc describe pods - - # Describe pods by label name=myLabel - oc describe po -l name=myLabel - - # Describe all pods managed by the 'frontend' replication controller - # (rc-created pods get the name of the rc as a prefix in the pod name) - oc describe pods frontend ----- - - - -== oc diff -Diff the live version against a would-be applied version - -.Example usage -[source,bash,options="nowrap"] ----- - # Diff resources included in pod.json - oc diff -f pod.json - - # Diff file read from stdin - cat service.yaml | oc diff -f - ----- - - - -== oc edit -Edit a resource on the server - -.Example usage -[source,bash,options="nowrap"] ----- - # Edit the service named 'registry' - oc edit svc/registry - - # Use an alternative editor - KUBE_EDITOR="nano" oc edit svc/registry - - # Edit the job 'myjob' in JSON using the v1 API format - oc edit job.v1.batch/myjob -o json - - # Edit the deployment 'mydeployment' in YAML and save the modified config in its annotation - oc edit deployment/mydeployment -o yaml --save-config - - # Edit the deployment/mydeployment's status subresource - oc edit deployment mydeployment --subresource='status' ----- - - - -== oc events -List events - -.Example usage -[source,bash,options="nowrap"] ----- - # List recent events in the default namespace. - oc events - - # List recent events in all namespaces. - oc events --all-namespaces - - # List recent events for the specified pod, then wait for more events and list them as they arrive. - oc events --for pod/web-pod-13je7 --watch - - # List recent events in given format. Supported ones, apart from default, are json and yaml. 
- oc events -oyaml - - # List recent only events in given event types - oc events --types=Warning,Normal ----- - - - -== oc exec -Execute a command in a container - -.Example usage -[source,bash,options="nowrap"] ----- - # Get output from running the 'date' command from pod mypod, using the first container by default - oc exec mypod -- date - - # Get output from running the 'date' command in ruby-container from pod mypod - oc exec mypod -c ruby-container -- date - - # Switch to raw terminal mode; sends stdin to 'bash' in ruby-container from pod mypod - # and sends stdout/stderr from 'bash' back to the client - oc exec mypod -c ruby-container -i -t -- bash -il - - # List contents of /usr from the first container of pod mypod and sort by modification time - # If the command you want to execute in the pod has any flags in common (e.g. -i), - # you must use two dashes (--) to separate your command's flags/arguments - # Also note, do not surround your command and its flags/arguments with quotes - # unless that is how you would execute it normally (i.e., do ls -t /usr, not "ls -t /usr") - oc exec mypod -i -t -- ls -t /usr - - # Get output from running 'date' command from the first pod of the deployment mydeployment, using the first container by default - oc exec deploy/mydeployment -- date - - # Get output from running 'date' command from the first pod of the service myservice, using the first container by default - oc exec svc/myservice -- date ----- - - - -== oc explain -Get documentation for a resource - -.Example usage -[source,bash,options="nowrap"] ----- - # Get the documentation of the resource and its fields - oc explain pods - - # Get the documentation of a specific field of a resource - oc explain pods.spec.containers ----- - - - -== oc expose -Expose a replicated application as a service or route - -.Example usage -[source,bash,options="nowrap"] ----- - # Create a route based on service nginx. The new route will reuse nginx's labels - oc expose service nginx - - # Create a route and specify your own label and route name - oc expose service nginx -l name=myroute --name=fromdowntown - - # Create a route and specify a host name - oc expose service nginx --hostname=www.example.com - - # Create a route with a wildcard - oc expose service nginx --hostname=x.example.com --wildcard-policy=Subdomain - # This would be equivalent to *.example.com. 
NOTE: only hosts are matched by the wildcard; subdomains would not be included - - # Expose a deployment configuration as a service and use the specified port - oc expose dc ruby-hello-world --port=8080 - - # Expose a service as a route in the specified path - oc expose service nginx --path=/nginx ----- - - - -== oc extract -Extract secrets or config maps to disk - -.Example usage -[source,bash,options="nowrap"] ----- - # Extract the secret "test" to the current directory - oc extract secret/test - - # Extract the config map "nginx" to the /tmp directory - oc extract configmap/nginx --to=/tmp - - # Extract the config map "nginx" to STDOUT - oc extract configmap/nginx --to=- - - # Extract only the key "nginx.conf" from config map "nginx" to the /tmp directory - oc extract configmap/nginx --to=/tmp --keys=nginx.conf ----- - - - -== oc get -Display one or many resources - -.Example usage -[source,bash,options="nowrap"] ----- - # List all pods in ps output format - oc get pods - - # List all pods in ps output format with more information (such as node name) - oc get pods -o wide - - # List a single replication controller with specified NAME in ps output format - oc get replicationcontroller web - - # List deployments in JSON output format, in the "v1" version of the "apps" API group - oc get deployments.v1.apps -o json - - # List a single pod in JSON output format - oc get -o json pod web-pod-13je7 - - # List a pod identified by type and name specified in "pod.yaml" in JSON output format - oc get -f pod.yaml -o json - - # List resources from a directory with kustomization.yaml - e.g. dir/kustomization.yaml - oc get -k dir/ - - # Return only the phase value of the specified pod - oc get -o template pod/web-pod-13je7 --template={{.status.phase}} - - # List resource information in custom columns - oc get pod test-pod -o custom-columns=CONTAINER:.spec.containers[0].name,IMAGE:.spec.containers[0].image - - # List all replication controllers and services together in ps output format - oc get rc,services - - # List one or more resources by their type and names - oc get rc/web service/frontend pods/web-pod-13je7 - - # List status subresource for a single pod. 
- oc get pod web-pod-13je7 --subresource status ----- - - - -== oc image append -Add layers to images and push them to a registry - -.Example usage -[source,bash,options="nowrap"] ----- - # Remove the entrypoint on the mysql:latest image - oc image append --from mysql:latest --to myregistry.com/myimage:latest --image '{"Entrypoint":null}' - - # Add a new layer to the image - oc image append --from mysql:latest --to myregistry.com/myimage:latest layer.tar.gz - - # Add a new layer to the image and store the result on disk - # This results in $(pwd)/v2/mysql/blobs,manifests - oc image append --from mysql:latest --to file://mysql:local layer.tar.gz - - # Add a new layer to the image and store the result on disk in a designated directory - # This will result in $(pwd)/mysql-local/v2/mysql/blobs,manifests - oc image append --from mysql:latest --to file://mysql:local --dir mysql-local layer.tar.gz - - # Add a new layer to an image that is stored on disk (~/mysql-local/v2/image exists) - oc image append --from-dir ~/mysql-local --to myregistry.com/myimage:latest layer.tar.gz - - # Add a new layer to an image that was mirrored to the current directory on disk ($(pwd)/v2/image exists) - oc image append --from-dir v2 --to myregistry.com/myimage:latest layer.tar.gz - - # Add a new layer to a multi-architecture image for an os/arch that is different from the system's os/arch - # Note: Wildcard filter is not supported with append. Pass a single os/arch to append - oc image append --from docker.io/library/busybox:latest --filter-by-os=linux/s390x --to myregistry.com/myimage:latest layer.tar.gz ----- - - - -== oc image extract -Copy files from an image to the file system - -.Example usage -[source,bash,options="nowrap"] ----- - # Extract the busybox image into the current directory - oc image extract docker.io/library/busybox:latest - - # Extract the busybox image into a designated directory (must exist) - oc image extract docker.io/library/busybox:latest --path /:/tmp/busybox - - # Extract the busybox image into the current directory for linux/s390x platform - # Note: Wildcard filter is not supported with extract. Pass a single os/arch to extract - oc image extract docker.io/library/busybox:latest --filter-by-os=linux/s390x - - # Extract a single file from the image into the current directory - oc image extract docker.io/library/centos:7 --path /bin/bash:. - - # Extract all .repo files from the image's /etc/yum.repos.d/ folder into the current directory - oc image extract docker.io/library/centos:7 --path /etc/yum.repos.d/*.repo:. 
- - # Extract all .repo files from the image's /etc/yum.repos.d/ folder into a designated directory (must exist) - # This results in /tmp/yum.repos.d/*.repo on local system - oc image extract docker.io/library/centos:7 --path /etc/yum.repos.d/*.repo:/tmp/yum.repos.d - - # Extract an image stored on disk into the current directory ($(pwd)/v2/busybox/blobs,manifests exists) - # --confirm is required because the current directory is not empty - oc image extract file://busybox:local --confirm - - # Extract an image stored on disk in a directory other than $(pwd)/v2 into the current directory - # --confirm is required because the current directory is not empty ($(pwd)/busybox-mirror-dir/v2/busybox exists) - oc image extract file://busybox:local --dir busybox-mirror-dir --confirm - - # Extract an image stored on disk in a directory other than $(pwd)/v2 into a designated directory (must exist) - oc image extract file://busybox:local --dir busybox-mirror-dir --path /:/tmp/busybox - - # Extract the last layer in the image - oc image extract docker.io/library/centos:7[-1] - - # Extract the first three layers of the image - oc image extract docker.io/library/centos:7[:3] - - # Extract the last three layers of the image - oc image extract docker.io/library/centos:7[-3:] ----- - - - -== oc image info -Display information about an image - -.Example usage -[source,bash,options="nowrap"] ----- - # Show information about an image - oc image info quay.io/openshift/cli:latest - - # Show information about images matching a wildcard - oc image info quay.io/openshift/cli:4.* - - # Show information about a file mirrored to disk under DIR - oc image info --dir=DIR file://library/busybox:latest - - # Select which image from a multi-OS image to show - oc image info library/busybox:latest --filter-by-os=linux/arm64 ----- - - - -== oc image mirror -Mirror images from one repository to another - -.Example usage -[source,bash,options="nowrap"] ----- - # Copy image to another tag - oc image mirror myregistry.com/myimage:latest myregistry.com/myimage:stable - - # Copy image to another registry - oc image mirror myregistry.com/myimage:latest docker.io/myrepository/myimage:stable - - # Copy all tags starting with mysql to the destination repository - oc image mirror myregistry.com/myimage:mysql* docker.io/myrepository/myimage - - # Copy image to disk, creating a directory structure that can be served as a registry - oc image mirror myregistry.com/myimage:latest file://myrepository/myimage:latest - - # Copy image to S3 (pull from .s3.amazonaws.com/image:latest) - oc image mirror myregistry.com/myimage:latest s3://s3.amazonaws.com///image:latest - - # Copy image to S3 without setting a tag (pull via @) - oc image mirror myregistry.com/myimage:latest s3://s3.amazonaws.com///image - - # Copy image to multiple locations - oc image mirror myregistry.com/myimage:latest docker.io/myrepository/myimage:stable \ - docker.io/myrepository/myimage:dev - - # Copy multiple images - oc image mirror myregistry.com/myimage:latest=myregistry.com/other:test \ - myregistry.com/myimage:new=myregistry.com/other:target - - # Copy manifest list of a multi-architecture image, even if only a single image is found - oc image mirror myregistry.com/myimage:latest=myregistry.com/other:test \ - --keep-manifest-list=true - - # Copy specific os/arch manifest of a multi-architecture image - # Run 'oc image info myregistry.com/myimage:latest' to see available os/arch for multi-arch images - # Note that with multi-arch images, this results in a new manifest 
list digest that includes only - # the filtered manifests - oc image mirror myregistry.com/myimage:latest=myregistry.com/other:test \ - --filter-by-os=os/arch - - # Copy all os/arch manifests of a multi-architecture image - # Run 'oc image info myregistry.com/myimage:latest' to see list of os/arch manifests that will be mirrored - oc image mirror myregistry.com/myimage:latest=myregistry.com/other:test \ - --keep-manifest-list=true - - # Note the above command is equivalent to - oc image mirror myregistry.com/myimage:latest=myregistry.com/other:test \ - --filter-by-os=.* ----- - - - -== oc kustomize -Build a kustomization target from a directory or URL. - -.Example usage -[source,bash,options="nowrap"] ----- - # Build the current working directory - oc kustomize - - # Build some shared configuration directory - oc kustomize /home/config/production - - # Build from github - oc kustomize https://github.com/kubernetes-sigs/kustomize.git/examples/helloWorld?ref=v1.0.6 ----- - - - -== oc label -Update the labels on a resource - -.Example usage -[source,bash,options="nowrap"] ----- - # Update pod 'foo' with the label 'unhealthy' and the value 'true' - oc label pods foo unhealthy=true - - # Update pod 'foo' with the label 'status' and the value 'unhealthy', overwriting any existing value - oc label --overwrite pods foo status=unhealthy - - # Update all pods in the namespace - oc label pods --all status=unhealthy - - # Update a pod identified by the type and name in "pod.json" - oc label -f pod.json status=unhealthy - - # Update pod 'foo' only if the resource is unchanged from version 1 - oc label pods foo status=unhealthy --resource-version=1 - - # Update pod 'foo' by removing a label named 'bar' if it exists - # Does not require the --overwrite flag - oc label pods foo bar- ----- - - - -== oc logs -Print the logs for a container in a pod - -.Example usage -[source,bash,options="nowrap"] ----- - # Start streaming the logs of the most recent build of the openldap build config - oc logs -f bc/openldap - - # Start streaming the logs of the latest deployment of the mysql deployment config - oc logs -f dc/mysql - - # Get the logs of the first deployment for the mysql deployment config. 
Note that logs - # from older deployments may not exist either because the deployment was successful - # or due to deployment pruning or manual deletion of the deployment - oc logs --version=1 dc/mysql - - # Return a snapshot of ruby-container logs from pod backend - oc logs backend -c ruby-container - - # Start streaming of ruby-container logs from pod backend - oc logs -f pod/backend -c ruby-container ----- - - - -== oc observe -Observe changes to resources and react to them (experimental) - -.Example usage -[source,bash,options="nowrap"] ----- - # Observe changes to services - oc observe services - - # Observe changes to services, including the clusterIP and invoke a script for each - oc observe services --template '{ .spec.clusterIP }' -- register_dns.sh - - # Observe changes to services filtered by a label selector - oc observe namespaces -l regist-dns=true --template '{ .spec.clusterIP }' -- register_dns.sh ----- - - - -== oc patch -Update fields of a resource - -.Example usage -[source,bash,options="nowrap"] ----- - # Partially update a node using a strategic merge patch, specifying the patch as JSON - oc patch node k8s-node-1 -p '{"spec":{"unschedulable":true}}' - - # Partially update a node using a strategic merge patch, specifying the patch as YAML - oc patch node k8s-node-1 -p $'spec:\n unschedulable: true' - - # Partially update a node identified by the type and name specified in "node.json" using strategic merge patch - oc patch -f node.json -p '{"spec":{"unschedulable":true}}' - - # Update a container's image; spec.containers[*].name is required because it's a merge key - oc patch pod valid-pod -p '{"spec":{"containers":[{"name":"kubernetes-serve-hostname","image":"new image"}]}}' - - # Update a container's image using a JSON patch with positional arrays - oc patch pod valid-pod --type='json' -p='[{"op": "replace", "path": "/spec/containers/0/image", "value":"new image"}]' - - # Update a deployment's replicas through the scale subresource using a merge patch. 
- oc patch deployment nginx-deployment --subresource='scale' --type='merge' -p '{"spec":{"replicas":2}}' ----- - - - -== oc plugin list -List all visible plugin executables on a user's PATH - -.Example usage -[source,bash,options="nowrap"] ----- - # List all available plugins - oc plugin list ----- - - - -== oc policy add-role-to-user -Add a role to users or service accounts for the current project - -.Example usage -[source,bash,options="nowrap"] ----- - # Add the 'view' role to user1 for the current project - oc policy add-role-to-user view user1 - - # Add the 'edit' role to serviceaccount1 for the current project - oc policy add-role-to-user edit -z serviceaccount1 ----- - - - -== oc policy scc-review -Check which service account can create a pod - -.Example usage -[source,bash,options="nowrap"] ----- - # Check whether service accounts sa1 and sa2 can admit a pod with a template pod spec specified in my_resource.yaml - # Service Account specified in myresource.yaml file is ignored - oc policy scc-review -z sa1,sa2 -f my_resource.yaml - - # Check whether service accounts system:serviceaccount:bob:default can admit a pod with a template pod spec specified in my_resource.yaml - oc policy scc-review -z system:serviceaccount:bob:default -f my_resource.yaml - - # Check whether the service account specified in my_resource_with_sa.yaml can admit the pod - oc policy scc-review -f my_resource_with_sa.yaml - - # Check whether the default service account can admit the pod; default is taken since no service account is defined in myresource_with_no_sa.yaml - oc policy scc-review -f myresource_with_no_sa.yaml ----- - - - -== oc policy scc-subject-review -Check whether a user or a service account can create a pod - -.Example usage -[source,bash,options="nowrap"] ----- - # Check whether user bob can create a pod specified in myresource.yaml - oc policy scc-subject-review -u bob -f myresource.yaml - - # Check whether user bob who belongs to projectAdmin group can create a pod specified in myresource.yaml - oc policy scc-subject-review -u bob -g projectAdmin -f myresource.yaml - - # Check whether a service account specified in the pod template spec in myresourcewithsa.yaml can create the pod - oc policy scc-subject-review -f myresourcewithsa.yaml ----- - - - -== oc port-forward -Forward one or more local ports to a pod - -.Example usage -[source,bash,options="nowrap"] ----- - # Listen on ports 5000 and 6000 locally, forwarding data to/from ports 5000 and 6000 in the pod - oc port-forward pod/mypod 5000 6000 - - # Listen on ports 5000 and 6000 locally, forwarding data to/from ports 5000 and 6000 in a pod selected by the deployment - oc port-forward deployment/mydeployment 5000 6000 - - # Listen on port 8443 locally, forwarding to the targetPort of the service's port named "https" in a pod selected by the service - oc port-forward service/myservice 8443:https - - # Listen on port 8888 locally, forwarding to 5000 in the pod - oc port-forward pod/mypod 8888:5000 - - # Listen on port 8888 on all addresses, forwarding to 5000 in the pod - oc port-forward --address 0.0.0.0 pod/mypod 8888:5000 - - # Listen on port 8888 on localhost and selected IP, forwarding to 5000 in the pod - oc port-forward --address localhost,10.19.21.23 pod/mypod 8888:5000 - - # Listen on a random port locally, forwarding to 5000 in the pod - oc port-forward pod/mypod :5000 ----- - - - -== oc proxy -Run a proxy to the Kubernetes API server - -.Example usage -[source,bash,options="nowrap"] ----- - # To proxy all of the Kubernetes API and 
nothing else - oc proxy --api-prefix=/ - - # To proxy only part of the Kubernetes API and also some static files - # You can get pods info with 'curl localhost:8001/api/v1/pods' - oc proxy --www=/my/files --www-prefix=/static/ --api-prefix=/api/ - - # To proxy the entire Kubernetes API at a different root - # You can get pods info with 'curl localhost:8001/custom/api/v1/pods' - oc proxy --api-prefix=/custom/ - - # Run a proxy to the Kubernetes API server on port 8011, serving static content from ./local/www/ - oc proxy --port=8011 --www=./local/www/ - - # Run a proxy to the Kubernetes API server on an arbitrary local port - # The chosen port for the server will be output to stdout - oc proxy --port=0 - - # Run a proxy to the Kubernetes API server, changing the API prefix to k8s-api - # This makes e.g. the pods API available at localhost:8001/k8s-api/v1/pods/ - oc proxy --api-prefix=/k8s-api ----- - - - -== oc rollback -Revert part of an application back to a previous deployment - -.Example usage -[source,bash,options="nowrap"] ----- - # Perform a rollback to the last successfully completed deployment for a deployment config - oc rollback frontend - - # See what a rollback to version 3 will look like, but do not perform the rollback - oc rollback frontend --to-version=3 --dry-run - - # Perform a rollback to a specific deployment - oc rollback frontend-2 - - # Perform the rollback manually by piping the JSON of the new config back to oc - oc rollback frontend -o json | oc replace dc/frontend -f - - - # Print the updated deployment configuration in JSON format instead of performing the rollback - oc rollback frontend -o json ----- - - - -== oc rollout cancel -Cancel the in-progress deployment - -.Example usage -[source,bash,options="nowrap"] ----- - # Cancel the in-progress deployment based on 'nginx' - oc rollout cancel dc/nginx ----- - - - -== oc rollout history -View rollout history - -.Example usage -[source,bash,options="nowrap"] ----- - # View the rollout history of a deployment - oc rollout history dc/nginx - - # View the details of deployment revision 3 - oc rollout history dc/nginx --revision=3 ----- - - - -== oc rollout latest -Start a new rollout for a deployment config with the latest state from its triggers - -.Example usage -[source,bash,options="nowrap"] ----- - # Start a new rollout based on the latest images defined in the image change triggers - oc rollout latest dc/nginx - - # Print the rolled out deployment config - oc rollout latest dc/nginx -o json ----- - - - -== oc rollout pause -Mark the provided resource as paused - -.Example usage -[source,bash,options="nowrap"] ----- - # Mark the nginx deployment as paused. 
Any current state of - # the deployment will continue its function, new updates to the deployment will not - # have an effect as long as the deployment is paused - oc rollout pause dc/nginx ----- - - - -== oc rollout restart -Restart a resource - -.Example usage -[source,bash,options="nowrap"] ----- - # Restart a deployment - oc rollout restart deployment/nginx - - # Restart a daemon set - oc rollout restart daemonset/abc - - # Restart deployments with the app=nginx label - oc rollout restart deployment --selector=app=nginx ----- - - - -== oc rollout resume -Resume a paused resource - -.Example usage -[source,bash,options="nowrap"] ----- - # Resume an already paused deployment - oc rollout resume dc/nginx ----- - - - -== oc rollout retry -Retry the latest failed rollout - -.Example usage -[source,bash,options="nowrap"] ----- - # Retry the latest failed deployment based on 'frontend' - # The deployer pod and any hook pods are deleted for the latest failed deployment - oc rollout retry dc/frontend ----- - - - -== oc rollout status -Show the status of the rollout - -.Example usage -[source,bash,options="nowrap"] ----- - # Watch the status of the latest rollout - oc rollout status dc/nginx ----- - - - -== oc rollout undo -Undo a previous rollout - -.Example usage -[source,bash,options="nowrap"] ----- - # Roll back to the previous deployment - oc rollout undo dc/nginx - - # Roll back to deployment revision 3. The replication controller for that version must exist - oc rollout undo dc/nginx --to-revision=3 ----- - - - -== oc rsh -Start a shell session in a container - -.Example usage -[source,bash,options="nowrap"] ----- - # Open a shell session on the first container in pod 'foo' - oc rsh foo - - # Open a shell session on the first container in pod 'foo' and namespace 'bar' - # (Note that oc client specific arguments must come before the resource name and its arguments) - oc rsh -n bar foo - - # Run the command 'cat /etc/resolv.conf' inside pod 'foo' - oc rsh foo cat /etc/resolv.conf - - # See the configuration of your internal registry - oc rsh dc/docker-registry cat config.yml - - # Open a shell session on the container named 'index' inside a pod of your job - oc rsh -c index job/scheduled ----- - - - -== oc rsync -Copy files between a local file system and a pod - -.Example usage -[source,bash,options="nowrap"] ----- - # Synchronize a local directory with a pod directory - oc rsync ./local/dir/ POD:/remote/dir - - # Synchronize a pod directory with a local directory - oc rsync POD:/remote/dir/ ./local/dir ----- - - - -== oc run -Run a particular image on the cluster - -.Example usage -[source,bash,options="nowrap"] ----- - # Start a nginx pod - oc run nginx --image=nginx - - # Start a hazelcast pod and let the container expose port 5701 - oc run hazelcast --image=hazelcast/hazelcast --port=5701 - - # Start a hazelcast pod and set environment variables "DNS_DOMAIN=cluster" and "POD_NAMESPACE=default" in the container - oc run hazelcast --image=hazelcast/hazelcast --env="DNS_DOMAIN=cluster" --env="POD_NAMESPACE=default" - - # Start a hazelcast pod and set labels "app=hazelcast" and "env=prod" in the container - oc run hazelcast --image=hazelcast/hazelcast --labels="app=hazelcast,env=prod" - - # Dry run; print the corresponding API objects without creating them - oc run nginx --image=nginx --dry-run=client - - # Start a nginx pod, but overload the spec with a partial set of values parsed from JSON - oc run nginx --image=nginx --overrides='{ "apiVersion": "v1", "spec": { ... 
} }' - - # Start a busybox pod and keep it in the foreground, don't restart it if it exits - oc run -i -t busybox --image=busybox --restart=Never - - # Start the nginx pod using the default command, but use custom arguments (arg1 .. argN) for that command - oc run nginx --image=nginx -- ... - - # Start the nginx pod using a different command and custom arguments - oc run nginx --image=nginx --command -- ... ----- - - - -== oc scale -Set a new size for a deployment, replica set, or replication controller - -.Example usage -[source,bash,options="nowrap"] ----- - # Scale a replica set named 'foo' to 3 - oc scale --replicas=3 rs/foo - - # Scale a resource identified by type and name specified in "foo.yaml" to 3 - oc scale --replicas=3 -f foo.yaml - - # If the deployment named mysql's current size is 2, scale mysql to 3 - oc scale --current-replicas=2 --replicas=3 deployment/mysql - - # Scale multiple replication controllers - oc scale --replicas=5 rc/foo rc/bar rc/baz - - # Scale stateful set named 'web' to 3 - oc scale --replicas=3 statefulset/web ----- - - - -== oc secrets link -Link secrets to a service account - -.Example usage -[source,bash,options="nowrap"] ----- - # Add an image pull secret to a service account to automatically use it for pulling pod images - oc secrets link serviceaccount-name pull-secret --for=pull - - # Add an image pull secret to a service account to automatically use it for both pulling and pushing build images - oc secrets link builder builder-image-secret --for=pull,mount ----- - - - -== oc secrets unlink -Detach secrets from a service account - -.Example usage -[source,bash,options="nowrap"] ----- - # Unlink a secret currently associated with a service account - oc secrets unlink serviceaccount-name secret-name another-secret-name ... 
----- - - - -== oc set data -Update the data within a config map or secret - -.Example usage -[source,bash,options="nowrap"] ----- - # Set the 'password' key of a secret - oc set data secret/foo password=this_is_secret - - # Remove the 'password' key from a secret - oc set data secret/foo password- - - # Update the 'haproxy.conf' key of a config map from a file on disk - oc set data configmap/bar --from-file=../haproxy.conf - - # Update a secret with the contents of a directory, one key per file - oc set data secret/foo --from-file=secret-dir ----- - - - -== oc set env -Update environment variables on a pod template - -.Example usage -[source,bash,options="nowrap"] ----- - # Update deployment config 'myapp' with a new environment variable - oc set env dc/myapp STORAGE_DIR=/local - - # List the environment variables defined on a build config 'sample-build' - oc set env bc/sample-build --list - - # List the environment variables defined on all pods - oc set env pods --all --list - - # Output modified build config in YAML - oc set env bc/sample-build STORAGE_DIR=/data -o yaml - - # Update all containers in all replication controllers in the project to have ENV=prod - oc set env rc --all ENV=prod - - # Import environment from a secret - oc set env --from=secret/mysecret dc/myapp - - # Import environment from a config map with a prefix - oc set env --from=configmap/myconfigmap --prefix=MYSQL_ dc/myapp - - # Remove the environment variable ENV from container 'c1' in all deployment configs - oc set env dc --all --containers="c1" ENV- - - # Remove the environment variable ENV from a deployment config definition on disk and - # update the deployment config on the server - oc set env -f dc.json ENV- - - # Set some of the local shell environment into a deployment config on the server - oc set env | grep RAILS_ | oc env -e - dc/myapp ----- - - - -== oc set image -Update the image of a pod template - -.Example usage -[source,bash,options="nowrap"] ----- - # Set a deployment config's nginx container image to 'nginx:1.9.1', and its busybox container image to 'busybox'. - oc set image dc/nginx busybox=busybox nginx=nginx:1.9.1 - - # Set a deployment config's app container image to the image referenced by the imagestream tag 'openshift/ruby:2.3'. 
- oc set image dc/myapp app=openshift/ruby:2.3 --source=imagestreamtag - - # Update all deployments' and rc's nginx container's image to 'nginx:1.9.1' - oc set image deployments,rc nginx=nginx:1.9.1 --all - - # Update image of all containers of daemonset abc to 'nginx:1.9.1' - oc set image daemonset abc *=nginx:1.9.1 - - # Print result (in yaml format) of updating nginx container image from local file, without hitting the server - oc set image -f path/to/file.yaml nginx=nginx:1.9.1 --local -o yaml ----- - - - -== oc set image-lookup -Change how images are resolved when deploying applications - -.Example usage -[source,bash,options="nowrap"] ----- - # Print all of the image streams and whether they resolve local names - oc set image-lookup - - # Use local name lookup on image stream mysql - oc set image-lookup mysql - - # Force a deployment to use local name lookup - oc set image-lookup deploy/mysql - - # Show the current status of the deployment lookup - oc set image-lookup deploy/mysql --list - - # Disable local name lookup on image stream mysql - oc set image-lookup mysql --enabled=false - - # Set local name lookup on all image streams - oc set image-lookup --all ----- - - - -== oc set probe -Update a probe on a pod template - -.Example usage -[source,bash,options="nowrap"] ----- - # Clear both readiness and liveness probes off all containers - oc set probe dc/myapp --remove --readiness --liveness - - # Set an exec action as a liveness probe to run 'echo ok' - oc set probe dc/myapp --liveness -- echo ok - - # Set a readiness probe to try to open a TCP socket on 3306 - oc set probe rc/mysql --readiness --open-tcp=3306 - - # Set an HTTP startup probe for port 8080 and path /healthz over HTTP on the pod IP - oc set probe dc/webapp --startup --get-url=http://:8080/healthz - - # Set an HTTP readiness probe for port 8080 and path /healthz over HTTP on the pod IP - oc set probe dc/webapp --readiness --get-url=http://:8080/healthz - - # Set an HTTP readiness probe over HTTPS on 127.0.0.1 for a hostNetwork pod - oc set probe dc/router --readiness --get-url=https://127.0.0.1:1936/stats - - # Set only the initial-delay-seconds field on all deployments - oc set probe dc --all --readiness --initial-delay-seconds=30 ----- - - - -== oc set resources -Update resource requests/limits on objects with pod templates - -.Example usage -[source,bash,options="nowrap"] ----- - # Set a deployment's nginx container CPU limits to "200m" and memory to "512Mi" - oc set resources deployment nginx -c=nginx --limits=cpu=200m,memory=512Mi - - # Set the resource request and limits for all containers in nginx - oc set resources deployment nginx --limits=cpu=200m,memory=512Mi --requests=cpu=100m,memory=256Mi - - # Remove the resource requests for resources on containers in nginx - oc set resources deployment nginx --limits=cpu=0,memory=0 --requests=cpu=0,memory=0 - - # Print the result (in YAML format) of updating nginx container limits locally, without hitting the server - oc set resources -f path/to/file.yaml --limits=cpu=200m,memory=512Mi --local -o yaml ----- - - - -== oc set route-backends -Update the backends for a route - -.Example usage -[source,bash,options="nowrap"] ----- - # Print the backends on the route 'web' - oc set route-backends web - - # Set two backend services on route 'web' with 2/3rds of traffic going to 'a' - oc set route-backends web a=2 b=1 - - # Increase the traffic percentage going to b by 10%% relative to a - oc set route-backends web --adjust b=+10%% - - # Set traffic percentage going to b to 
10%% of the traffic going to a - oc set route-backends web --adjust b=10%% - - # Set weight of b to 10 - oc set route-backends web --adjust b=10 - - # Set the weight to all backends to zero - oc set route-backends web --zero ----- - - - -== oc set selector -Set the selector on a resource - -.Example usage -[source,bash,options="nowrap"] ----- - # Set the labels and selector before creating a deployment/service pair. - oc create service clusterip my-svc --clusterip="None" -o yaml --dry-run | oc set selector --local -f - 'environment=qa' -o yaml | oc create -f - - oc create deployment my-dep -o yaml --dry-run | oc label --local -f - environment=qa -o yaml | oc create -f - ----- - - - -== oc set serviceaccount -Update the service account of a resource - -.Example usage -[source,bash,options="nowrap"] ----- - # Set deployment nginx-deployment's service account to serviceaccount1 - oc set serviceaccount deployment nginx-deployment serviceaccount1 - - # Print the result (in YAML format) of updated nginx deployment with service account from a local file, without hitting the API server - oc set sa -f nginx-deployment.yaml serviceaccount1 --local --dry-run -o yaml ----- - - - -== oc set subject -Update the user, group, or service account in a role binding or cluster role binding - -.Example usage -[source,bash,options="nowrap"] ----- - # Update a cluster role binding for serviceaccount1 - oc set subject clusterrolebinding admin --serviceaccount=namespace:serviceaccount1 - - # Update a role binding for user1, user2, and group1 - oc set subject rolebinding admin --user=user1 --user=user2 --group=group1 - - # Print the result (in YAML format) of updating role binding subjects locally, without hitting the server - oc create rolebinding admin --role=admin --user=admin -o yaml --dry-run | oc set subject --local -f - --user=foo -o yaml ----- - - - -== oc set volumes -Update volumes on a pod template - -.Example usage -[source,bash,options="nowrap"] ----- - # List volumes defined on all deployment configs in the current project - oc set volume dc --all - - # Add a new empty dir volume to deployment config (dc) 'myapp' mounted under - # /var/lib/myapp - oc set volume dc/myapp --add --mount-path=/var/lib/myapp - - # Use an existing persistent volume claim (pvc) to overwrite an existing volume 'v1' - oc set volume dc/myapp --add --name=v1 -t pvc --claim-name=pvc1 --overwrite - - # Remove volume 'v1' from deployment config 'myapp' - oc set volume dc/myapp --remove --name=v1 - - # Create a new persistent volume claim that overwrites an existing volume 'v1' - oc set volume dc/myapp --add --name=v1 -t pvc --claim-size=1G --overwrite - - # Change the mount point for volume 'v1' to /data - oc set volume dc/myapp --add --name=v1 -m /data --overwrite - - # Modify the deployment config by removing volume mount "v1" from container "c1" - # (and by removing the volume "v1" if no other containers have volume mounts that reference it) - oc set volume dc/myapp --remove --name=v1 --containers=c1 - - # Add new volume based on a more complex volume source (AWS EBS, GCE PD, - # Ceph, Gluster, NFS, ISCSI, ...) 
- oc set volume dc/myapp --add -m /data --source= ----- - - - -== oc tag -Tag existing images into image streams - -.Example usage -[source,bash,options="nowrap"] ----- - # Tag the current image for the image stream 'openshift/ruby' and tag '2.0' into the image stream 'yourproject/ruby' with tag 'tip' - oc tag openshift/ruby:2.0 yourproject/ruby:tip - - # Tag a specific image - oc tag openshift/ruby@sha256:6b646fa6bf5e5e4c7fa41056c27910e679c03ebe7f93e361e6515a9da7e258cc yourproject/ruby:tip - - # Tag an external container image - oc tag --source=docker openshift/origin-control-plane:latest yourproject/ruby:tip - - # Tag an external container image and request pullthrough for it - oc tag --source=docker openshift/origin-control-plane:latest yourproject/ruby:tip --reference-policy=local - - # Tag an external container image and include the full manifest list - oc tag --source=docker openshift/origin-control-plane:latest yourproject/ruby:tip --import-mode=PreserveOriginal - - # Remove the specified spec tag from an image stream - oc tag openshift/origin-control-plane:latest -d ----- - - - -== oc version -Print the client and server version information - -.Example usage -[source,bash,options="nowrap"] ----- - # Print the OpenShift client, kube-apiserver, and openshift-apiserver version information for the current context - oc version - - # Print the OpenShift client, kube-apiserver, and openshift-apiserver version numbers for the current context - oc version --short - - # Print the OpenShift client version information for the current context - oc version --client ----- - - - -== oc wait -Experimental: Wait for a specific condition on one or many resources - -.Example usage -[source,bash,options="nowrap"] ----- - # Wait for the pod "busybox1" to contain the status condition of type "Ready" - oc wait --for=condition=Ready pod/busybox1 - - # The default value of status condition is true; you can wait for other targets after an equal delimiter (compared after Unicode simple case folding, which is a more general form of case-insensitivity): - oc wait --for=condition=Ready=false pod/busybox1 - - # Wait for the pod "busybox1" to contain the status phase to be "Running". 
- oc wait --for=jsonpath='{.status.phase}'=Running pod/busybox1 - - # Wait for the pod "busybox1" to be deleted, with a timeout of 60s, after having issued the "delete" command - oc delete pod/busybox1 - oc wait --for=delete pod/busybox1 --timeout=60s ----- - - diff --git a/modules/microshift-oc-by-example-content.adoc b/modules/microshift-oc-by-example-content.adoc index e889333c4f..aae63daafd 100644 --- a/modules/microshift-oc-by-example-content.adoc +++ b/modules/microshift-oc-by-example-content.adoc @@ -1,12 +1,80 @@ -// Module included in the following assemblies: -// -//* microshift-oc-cli-commands-list/microshift-oc-by-example-content.adoc +// NOTE: The contents of this file are auto-generated +// This template is for non-admin (not 'oc adm ...') commands +// Uses 'source,bash' for proper syntax highlighting for comments in examples :_content-type: REFERENCE -[id="microshift-oc-by-example-content_{context}"] -= Brief oc commands list for {product-title} +[id="microshift-oc-cli-developer_{context}"] += OpenShift CLI (oc) developer commands + +//NOTE: this is the autogenerated version, one command edited out + +== oc annotate +Update the annotations on a resource + +.Example usage +[source,bash,options="nowrap"] +---- + # Update pod 'foo' with the annotation 'description' and the value 'my frontend' + # If the same annotation is set multiple times, only the last value will be applied + oc annotate pods foo description='my frontend' + + # Update a pod identified by type and name in "pod.json" + oc annotate -f pod.json description='my frontend' + + # Update pod 'foo' with the annotation 'description' and the value 'my frontend running nginx', overwriting any existing value + oc annotate --overwrite pods foo description='my frontend running nginx' + + # Update all pods in the namespace + oc annotate pods --all description='my frontend running nginx' + + # Update pod 'foo' only if the resource is unchanged from version 1 + oc annotate pods foo description='my frontend running nginx' --resource-version=1 + + # Update pod 'foo' by removing an annotation named 'description' if it exists + # Does not require the --overwrite flag + oc annotate pods foo description- +---- + + + +== oc api-resources +Print the supported API resources on the server + +.Example usage +[source,bash,options="nowrap"] +---- + # Print the supported API resources + oc api-resources + + # Print the supported API resources with more information + oc api-resources -o wide + + # Print the supported API resources sorted by a column + oc api-resources --sort-by=name + + # Print the supported namespaced resources + oc api-resources --namespaced=true + + # Print the supported non-namespaced resources + oc api-resources --namespaced=false + + # Print the supported API resources with a specific APIGroup + oc api-resources --api-group=rbac.authorization.k8s.io +---- + + + +== oc api-versions +Print the supported API versions on the server, in the form of "group/version" + +.Example usage +[source,bash,options="nowrap"] +---- + # Print the supported API versions + oc api-versions +---- + -The following lists a few examples of `oc` commands you can use to administer, deploy, and observe a {product-title} node. 
== oc apply Apply a configuration to a resource by file name or stdin @@ -31,10 +99,990 @@ Apply a configuration to a resource by file name or stdin oc apply --prune -f manifest.yaml -l app=nginx # Apply the configuration in manifest.yaml and delete all the other config maps that are not in the file - oc apply --prune -f manifest.yaml --all --prune-whitelist=core/v1/ConfigMap + oc apply --prune -f manifest.yaml --all --prune-allowlist=core/v1/ConfigMap ---- -[id="oc-delete"_{context}] + + +== oc apply edit-last-applied +Edit latest last-applied-configuration annotations of a resource/object + +.Example usage +[source,bash,options="nowrap"] +---- + # Edit the last-applied-configuration annotations by type/name in YAML + oc apply edit-last-applied deployment/nginx + + # Edit the last-applied-configuration annotations by file in JSON + oc apply edit-last-applied -f deploy.yaml -o json +---- + + + +== oc apply set-last-applied +Set the last-applied-configuration annotation on a live object to match the contents of a file + +.Example usage +[source,bash,options="nowrap"] +---- + # Set the last-applied-configuration of a resource to match the contents of a file + oc apply set-last-applied -f deploy.yaml + + # Execute set-last-applied against each configuration file in a directory + oc apply set-last-applied -f path/ + + # Set the last-applied-configuration of a resource to match the contents of a file; will create the annotation if it does not already exist + oc apply set-last-applied -f deploy.yaml --create-annotation=true +---- + + + +== oc apply view-last-applied +View the latest last-applied-configuration annotations of a resource/object + +.Example usage +[source,bash,options="nowrap"] +---- + # View the last-applied-configuration annotations by type/name in YAML + oc apply view-last-applied deployment/nginx + + # View the last-applied-configuration annotations by file in JSON + oc apply view-last-applied -f deploy.yaml -o json +---- + + + +== oc attach +Attach to a running container + +.Example usage +[source,bash,options="nowrap"] +---- + # Get output from running pod mypod; use the 'oc.kubernetes.io/default-container' annotation + # for selecting the container to be attached or the first container in the pod will be chosen + oc attach mypod + + # Get output from ruby-container from pod mypod + oc attach mypod -c ruby-container + + # Switch to raw terminal mode; sends stdin to 'bash' in ruby-container from pod mypod + # and sends stdout/stderr from 'bash' back to the client + oc attach mypod -c ruby-container -i -t + + # Get output from the first pod of a replica set named nginx + oc attach rs/nginx +---- + + + +== oc auth can-i +Check whether an action is allowed + +.Example usage +[source,bash,options="nowrap"] +---- + # Check to see if I can create pods in any namespace + oc auth can-i create pods --all-namespaces + + # Check to see if I can list deployments in my current namespace + oc auth can-i list deployments.apps + + # Check to see if I can do everything in my current namespace ("*" means all) + oc auth can-i '*' '*' + + # Check to see if I can get the job named "bar" in namespace "foo" + oc auth can-i list jobs.batch/bar -n foo + + # Check to see if I can read pod logs + oc auth can-i get pods --subresource=log + + # Check to see if I can access the URL /logs/ + oc auth can-i get /logs/ + + # List all allowed actions in namespace "foo" + oc auth can-i --list --namespace=foo +---- + + + +== oc auth reconcile +Reconciles rules for RBAC role, role binding, cluster role, and cluster 
role binding objects + +.Example usage +[source,bash,options="nowrap"] +---- + # Reconcile RBAC resources from a file + oc auth reconcile -f my-rbac-rules.yaml +---- + +//== oc autoscale +//removed, does not apply to MicroShift + +== oc cluster-info +Display cluster information + +.Example usage +[source,bash,options="nowrap"] +---- + # Print the address of the control plane and cluster services + oc cluster-info +---- + + + +== oc cluster-info dump +Dump relevant information for debugging and diagnosis + +.Example usage +[source,bash,options="nowrap"] +---- + # Dump current cluster state to stdout + oc cluster-info dump + + # Dump current cluster state to /path/to/cluster-state + oc cluster-info dump --output-directory=/path/to/cluster-state + + # Dump all namespaces to stdout + oc cluster-info dump --all-namespaces + + # Dump a set of namespaces to /path/to/cluster-state + oc cluster-info dump --namespaces default,kube-system --output-directory=/path/to/cluster-state +---- + + + +== oc completion +Output shell completion code for the specified shell (bash, zsh, fish, or powershell) + +.Example usage +[source,bash,options="nowrap"] +---- + # Installing bash completion on macOS using homebrew + ## If running Bash 3.2 included with macOS + brew install bash-completion + ## or, if running Bash 4.1+ + brew install bash-completion@2 + ## If oc is installed via homebrew, this should start working immediately + ## If you've installed via other means, you may need add the completion to your completion directory + oc completion bash > $(brew --prefix)/etc/bash_completion.d/oc + + + # Installing bash completion on Linux + ## If bash-completion is not installed on Linux, install the 'bash-completion' package + ## via your distribution's package manager. + ## Load the oc completion code for bash into the current shell + source <(oc completion bash) + ## Write bash completion code to a file and source it from .bash_profile + oc completion bash > ~/.kube/completion.bash.inc + printf " + # Kubectl shell completion + source '$HOME/.kube/completion.bash.inc' + " >> $HOME/.bash_profile + source $HOME/.bash_profile + + # Load the oc completion code for zsh[1] into the current shell + source <(oc completion zsh) + # Set the oc completion code for zsh[1] to autoload on startup + oc completion zsh > "${fpath[1]}/_oc" + + + # Load the oc completion code for fish[2] into the current shell + oc completion fish | source + # To load completions for each session, execute once: + oc completion fish > ~/.config/fish/completions/oc.fish + + # Load the oc completion code for powershell into the current shell + oc completion powershell | Out-String | Invoke-Expression + # Set oc completion code for powershell to run on startup + ## Save completion code to a script and execute in the profile + oc completion powershell > $HOME\.kube\completion.ps1 + Add-Content $PROFILE "$HOME\.kube\completion.ps1" + ## Execute completion code in the profile + Add-Content $PROFILE "if (Get-Command oc -ErrorAction SilentlyContinue) { + oc completion powershell | Out-String | Invoke-Expression + }" + ## Add completion code directly to the $PROFILE script + oc completion powershell >> $PROFILE +---- + + + +== oc config current-context +Display the current-context + +.Example usage +[source,bash,options="nowrap"] +---- + # Display the current-context + oc config current-context +---- + + + +== oc config delete-cluster +Delete the specified cluster from the kubeconfig + +.Example usage +[source,bash,options="nowrap"] +---- + # Delete the 
minikube cluster + oc config delete-cluster minikube +---- + + + +== oc config delete-context +Delete the specified context from the kubeconfig + +.Example usage +[source,bash,options="nowrap"] +---- + # Delete the context for the minikube cluster + oc config delete-context minikube +---- + + + +== oc config delete-user +Delete the specified user from the kubeconfig + +.Example usage +[source,bash,options="nowrap"] +---- + # Delete the minikube user + oc config delete-user minikube +---- + + + +== oc config get-clusters +Display clusters defined in the kubeconfig + +.Example usage +[source,bash,options="nowrap"] +---- + # List the clusters that oc knows about + oc config get-clusters +---- + + + +== oc config get-contexts +Describe one or many contexts + +.Example usage +[source,bash,options="nowrap"] +---- + # List all the contexts in your kubeconfig file + oc config get-contexts + + # Describe one context in your kubeconfig file + oc config get-contexts my-context +---- + + + +== oc config get-users +Display users defined in the kubeconfig + +.Example usage +[source,bash,options="nowrap"] +---- + # List the users that oc knows about + oc config get-users +---- + + + +== oc config rename-context +Rename a context from the kubeconfig file + +.Example usage +[source,bash,options="nowrap"] +---- + # Rename the context 'old-name' to 'new-name' in your kubeconfig file + oc config rename-context old-name new-name +---- + + + +== oc config set +Set an individual value in a kubeconfig file + +.Example usage +[source,bash,options="nowrap"] +---- + # Set the server field on the my-cluster cluster to https://1.2.3.4 + oc config set clusters.my-cluster.server https://1.2.3.4 + + # Set the certificate-authority-data field on the my-cluster cluster + oc config set clusters.my-cluster.certificate-authority-data $(echo "cert_data_here" | base64 -i -) + + # Set the cluster field in the my-context context to my-cluster + oc config set contexts.my-context.cluster my-cluster + + # Set the client-key-data field in the cluster-admin user using --set-raw-bytes option + oc config set users.cluster-admin.client-key-data cert_data_here --set-raw-bytes=true +---- + + + +== oc config set-cluster +Set a cluster entry in kubeconfig + +.Example usage +[source,bash,options="nowrap"] +---- + # Set only the server field on the e2e cluster entry without touching other values + oc config set-cluster e2e --server=https://1.2.3.4 + + # Embed certificate authority data for the e2e cluster entry + oc config set-cluster e2e --embed-certs --certificate-authority=~/.kube/e2e/kubernetes.ca.crt + + # Disable cert checking for the e2e cluster entry + oc config set-cluster e2e --insecure-skip-tls-verify=true + + # Set custom TLS server name to use for validation for the e2e cluster entry + oc config set-cluster e2e --tls-server-name=my-cluster-name + + # Set proxy url for the e2e cluster entry + oc config set-cluster e2e --proxy-url=https://1.2.3.4 +---- + + + +== oc config set-context +Set a context entry in kubeconfig + +.Example usage +[source,bash,options="nowrap"] +---- + # Set the user field on the gce context entry without touching other values + oc config set-context gce --user=cluster-admin +---- + + + +== oc config set-credentials +Set a user entry in kubeconfig + +.Example usage +[source,bash,options="nowrap"] +---- + # Set only the "client-key" field on the "cluster-admin" + # entry, without touching other values + oc config set-credentials cluster-admin --client-key=~/.kube/admin.key + + # Set basic auth for the 
"cluster-admin" entry + oc config set-credentials cluster-admin --username=admin --password=uXFGweU9l35qcif + + # Embed client certificate data in the "cluster-admin" entry + oc config set-credentials cluster-admin --client-certificate=~/.kube/admin.crt --embed-certs=true + + # Enable the Google Compute Platform auth provider for the "cluster-admin" entry + oc config set-credentials cluster-admin --auth-provider=gcp + + # Enable the OpenID Connect auth provider for the "cluster-admin" entry with additional args + oc config set-credentials cluster-admin --auth-provider=oidc --auth-provider-arg=client-id=foo --auth-provider-arg=client-secret=bar + + # Remove the "client-secret" config value for the OpenID Connect auth provider for the "cluster-admin" entry + oc config set-credentials cluster-admin --auth-provider=oidc --auth-provider-arg=client-secret- + + # Enable new exec auth plugin for the "cluster-admin" entry + oc config set-credentials cluster-admin --exec-command=/path/to/the/executable --exec-api-version=client.authentication.k8s.io/v1beta1 + + # Define new exec auth plugin args for the "cluster-admin" entry + oc config set-credentials cluster-admin --exec-arg=arg1 --exec-arg=arg2 + + # Create or update exec auth plugin environment variables for the "cluster-admin" entry + oc config set-credentials cluster-admin --exec-env=key1=val1 --exec-env=key2=val2 + + # Remove exec auth plugin environment variables for the "cluster-admin" entry + oc config set-credentials cluster-admin --exec-env=var-to-remove- +---- + + + +== oc config unset +Unset an individual value in a kubeconfig file + +.Example usage +[source,bash,options="nowrap"] +---- + # Unset the current-context + oc config unset current-context + + # Unset namespace in foo context + oc config unset contexts.foo.namespace +---- + + + +== oc config use-context +Set the current-context in a kubeconfig file + +.Example usage +[source,bash,options="nowrap"] +---- + # Use the context for the minikube cluster + oc config use-context minikube +---- + + + +== oc config view +Display merged kubeconfig settings or a specified kubeconfig file + +.Example usage +[source,bash,options="nowrap"] +---- + # Show merged kubeconfig settings + oc config view + + # Show merged kubeconfig settings and raw certificate data and exposed secrets + oc config view --raw + + # Get the password for the e2e user + oc config view -o jsonpath='{.users[?(@.name == "e2e")].user.password}' +---- + + + +== oc cp +Copy files and directories to and from containers + +.Example usage +[source,bash,options="nowrap"] +---- + # !!!Important Note!!! + # Requires that the 'tar' binary is present in your container + # image. If 'tar' is not present, 'oc cp' will fail. + # + # For advanced use cases, such as symlinks, wildcard expansion or + # file mode preservation, consider using 'oc exec'. 
+ + # Copy /tmp/foo local file to /tmp/bar in a remote pod in namespace <some-namespace> + tar cf - /tmp/foo | oc exec -i -n <some-namespace> <some-pod> -- tar xf - -C /tmp/bar + + # Copy /tmp/foo from a remote pod to /tmp/bar locally + oc exec -n <some-namespace> <some-pod> -- tar cf - /tmp/foo | tar xf - -C /tmp/bar + + # Copy /tmp/foo_dir local directory to /tmp/bar_dir in a remote pod in the default namespace + oc cp /tmp/foo_dir <some-pod>:/tmp/bar_dir + + # Copy /tmp/foo local file to /tmp/bar in a remote pod in a specific container + oc cp /tmp/foo <some-pod>:/tmp/bar -c <specific-container> + + # Copy /tmp/foo local file to /tmp/bar in a remote pod in namespace <some-namespace> + oc cp /tmp/foo <some-namespace>/<some-pod>:/tmp/bar + + # Copy /tmp/foo from a remote pod to /tmp/bar locally + oc cp <some-namespace>/<some-pod>:/tmp/foo /tmp/bar +---- + + + +== oc create +Create a resource from a file or from stdin + +.Example usage +[source,bash,options="nowrap"] +---- + # Create a pod using the data in pod.json + oc create -f ./pod.json + + # Create a pod based on the JSON passed into stdin + cat pod.json | oc create -f - + + # Edit the data in registry.yaml in JSON then create the resource using the edited data + oc create -f registry.yaml --edit -o json +---- + + + +== oc create clusterrole +Create a cluster role + +.Example usage +[source,bash,options="nowrap"] +---- + # Create a cluster role named "pod-reader" that allows user to perform "get", "watch" and "list" on pods + oc create clusterrole pod-reader --verb=get,list,watch --resource=pods + + # Create a cluster role named "pod-reader" with ResourceName specified + oc create clusterrole pod-reader --verb=get --resource=pods --resource-name=readablepod --resource-name=anotherpod + + # Create a cluster role named "foo" with API Group specified + oc create clusterrole foo --verb=get,list,watch --resource=rs.apps + + # Create a cluster role named "foo" with SubResource specified + oc create clusterrole foo --verb=get,list,watch --resource=pods,pods/status + + # Create a cluster role named "foo" with NonResourceURL specified + oc create clusterrole "foo" --verb=get --non-resource-url=/logs/* + + # Create a cluster role named "monitoring" with AggregationRule specified + oc create clusterrole monitoring --aggregation-rule="rbac.example.com/aggregate-to-monitoring=true" +---- + + + +== oc create clusterrolebinding +Create a cluster role binding for a particular cluster role + +.Example usage +[source,bash,options="nowrap"] +---- + # Create a cluster role binding for user1, user2, and group1 using the cluster-admin cluster role + oc create clusterrolebinding cluster-admin --clusterrole=cluster-admin --user=user1 --user=user2 --group=group1 +---- + + + +== oc create configmap +Create a config map from a local file, directory or literal value + +.Example usage +[source,bash,options="nowrap"] +---- + # Create a new config map named my-config based on folder bar + oc create configmap my-config --from-file=path/to/bar + + # Create a new config map named my-config with specified keys instead of file basenames on disk + oc create configmap my-config --from-file=key1=/path/to/bar/file1.txt --from-file=key2=/path/to/bar/file2.txt + + # Create a new config map named my-config with key1=config1 and key2=config2 + oc create configmap my-config --from-literal=key1=config1 --from-literal=key2=config2 + + # Create a new config map named my-config from the key=value pairs in the file + oc create configmap my-config --from-file=path/to/bar + + # Create a new config map named my-config from an env file + oc create configmap my-config --from-env-file=path/to/foo.env --from-env-file=path/to/bar.env +---- + + + +== oc create cronjob
+Create a cron job with the specified name + +.Example usage +[source,bash,options="nowrap"] +---- + # Create a cron job + oc create cronjob my-job --image=busybox --schedule="*/1 * * * *" + + # Create a cron job with a command + oc create cronjob my-job --image=busybox --schedule="*/1 * * * *" -- date +---- + + + +== oc create deployment +Create a deployment with the specified name + +.Example usage +[source,bash,options="nowrap"] +---- + # Create a deployment named my-dep that runs the busybox image + oc create deployment my-dep --image=busybox + + # Create a deployment with a command + oc create deployment my-dep --image=busybox -- date + + # Create a deployment named my-dep that runs the nginx image with 3 replicas + oc create deployment my-dep --image=nginx --replicas=3 + + # Create a deployment named my-dep that runs the busybox image and expose port 5701 + oc create deployment my-dep --image=busybox --port=5701 +---- + + + +== oc create ingress +Create an ingress with the specified name + +.Example usage +[source,bash,options="nowrap"] +---- + # Create a single ingress called 'simple' that directs requests to foo.com/bar to svc + # svc1:8080 with a tls secret "my-cert" + oc create ingress simple --rule="foo.com/bar=svc1:8080,tls=my-cert" + + # Create a catch all ingress of "/path" pointing to service svc:port and Ingress Class as "otheringress" + oc create ingress catch-all --class=otheringress --rule="/path=svc:port" + + # Create an ingress with two annotations: ingress.annotation1 and ingress.annotations2 + oc create ingress annotated --class=default --rule="foo.com/bar=svc:port" \ + --annotation ingress.annotation1=foo \ + --annotation ingress.annotation2=bla + + # Create an ingress with the same host and multiple paths + oc create ingress multipath --class=default \ + --rule="foo.com/=svc:port" \ + --rule="foo.com/admin/=svcadmin:portadmin" + + # Create an ingress with multiple hosts and the pathType as Prefix + oc create ingress ingress1 --class=default \ + --rule="foo.com/path*=svc:8080" \ + --rule="bar.com/admin*=svc2:http" + + # Create an ingress with TLS enabled using the default ingress certificate and different path types + oc create ingress ingtls --class=default \ + --rule="foo.com/=svc:https,tls" \ + --rule="foo.com/path/subpath*=othersvc:8080" + + # Create an ingress with TLS enabled using a specific secret and pathType as Prefix + oc create ingress ingsecret --class=default \ + --rule="foo.com/*=svc:8080,tls=secret1" + + # Create an ingress with a default backend + oc create ingress ingdefault --class=default \ + --default-backend=defaultsvc:http \ + --rule="foo.com/*=svc:8080,tls=secret1" +---- + + + +== oc create job +Create a job with the specified name + +.Example usage +[source,bash,options="nowrap"] +---- + # Create a job + oc create job my-job --image=busybox + + # Create a job with a command + oc create job my-job --image=busybox -- date + + # Create a job from a cron job named "a-cronjob" + oc create job test-job --from=cronjob/a-cronjob +---- + + + +== oc create namespace +Create a namespace with the specified name + +.Example usage +[source,bash,options="nowrap"] +---- + # Create a new namespace named my-namespace + oc create namespace my-namespace +---- + + + +== oc create poddisruptionbudget +Create a pod disruption budget with the specified name + +.Example usage +[source,bash,options="nowrap"] +---- + # Create a pod disruption budget named my-pdb that will select all pods with the app=rails label + # and require at least one of them being available at 
any point in time + oc create poddisruptionbudget my-pdb --selector=app=rails --min-available=1 + + # Create a pod disruption budget named my-pdb that will select all pods with the app=nginx label + # and require at least half of the pods selected to be available at any point in time + oc create pdb my-pdb --selector=app=nginx --min-available=50% +---- + + + +== oc create priorityclass +Create a priority class with the specified name + +.Example usage +[source,bash,options="nowrap"] +---- + # Create a priority class named high-priority + oc create priorityclass high-priority --value=1000 --description="high priority" + + # Create a priority class named default-priority that is considered as the global default priority + oc create priorityclass default-priority --value=1000 --global-default=true --description="default priority" + + # Create a priority class named high-priority that cannot preempt pods with lower priority + oc create priorityclass high-priority --value=1000 --description="high priority" --preemption-policy="Never" +---- + + + +== oc create quota +Create a quota with the specified name + +.Example usage +[source,bash,options="nowrap"] +---- + # Create a new resource quota named my-quota + oc create quota my-quota --hard=cpu=1,memory=1G,pods=2,services=3,replicationcontrollers=2,resourcequotas=1,secrets=5,persistentvolumeclaims=10 + + # Create a new resource quota named best-effort + oc create quota best-effort --hard=pods=100 --scopes=BestEffort +---- + + + +== oc create role +Create a role with single rule + +.Example usage +[source,bash,options="nowrap"] +---- + # Create a role named "pod-reader" that allows user to perform "get", "watch" and "list" on pods + oc create role pod-reader --verb=get --verb=list --verb=watch --resource=pods + + # Create a role named "pod-reader" with ResourceName specified + oc create role pod-reader --verb=get --resource=pods --resource-name=readablepod --resource-name=anotherpod + + # Create a role named "foo" with API Group specified + oc create role foo --verb=get,list,watch --resource=rs.apps + + # Create a role named "foo" with SubResource specified + oc create role foo --verb=get,list,watch --resource=pods,pods/status +---- + + + +== oc create rolebinding +Create a role binding for a particular role or cluster role + +.Example usage +[source,bash,options="nowrap"] +---- + # Create a role binding for user1, user2, and group1 using the admin cluster role + oc create rolebinding admin --clusterrole=admin --user=user1 --user=user2 --group=group1 +---- + + + +== oc create route edge +Create a route that uses edge TLS termination + +.Example usage +[source,bash,options="nowrap"] +---- + # Create an edge route named "my-route" that exposes the frontend service + oc create route edge my-route --service=frontend + + # Create an edge route that exposes the frontend service and specify a path + # If the route name is omitted, the service name will be used + oc create route edge --service=frontend --path /assets +---- + + + +== oc create route passthrough +Create a route that uses passthrough TLS termination + +.Example usage +[source,bash,options="nowrap"] +---- + # Create a passthrough route named "my-route" that exposes the frontend service + oc create route passthrough my-route --service=frontend + + # Create a passthrough route that exposes the frontend service and specify + # a host name. 
If the route name is omitted, the service name will be used + oc create route passthrough --service=frontend --hostname=www.example.com +---- + + + +== oc create route reencrypt +Create a route that uses reencrypt TLS termination + +.Example usage +[source,bash,options="nowrap"] +---- + # Create a route named "my-route" that exposes the frontend service + oc create route reencrypt my-route --service=frontend --dest-ca-cert cert.cert + + # Create a reencrypt route that exposes the frontend service, letting the + # route name default to the service name and the destination CA certificate + # default to the service CA + oc create route reencrypt --service=frontend +---- + + + +== oc create secret docker-registry +Create a secret for use with a Docker registry + +.Example usage +[source,bash,options="nowrap"] +---- + # If you don't already have a .dockercfg file, you can create a dockercfg secret directly by using: + oc create secret docker-registry my-secret --docker-server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL + + # Create a new secret named my-secret from ~/.docker/config.json + oc create secret docker-registry my-secret --from-file=.dockerconfigjson=path/to/.docker/config.json +---- + + + +== oc create secret generic +Create a secret from a local file, directory, or literal value + +.Example usage +[source,bash,options="nowrap"] +---- + # Create a new secret named my-secret with keys for each file in folder bar + oc create secret generic my-secret --from-file=path/to/bar + + # Create a new secret named my-secret with specified keys instead of names on disk + oc create secret generic my-secret --from-file=ssh-privatekey=path/to/id_rsa --from-file=ssh-publickey=path/to/id_rsa.pub + + # Create a new secret named my-secret with key1=supersecret and key2=topsecret + oc create secret generic my-secret --from-literal=key1=supersecret --from-literal=key2=topsecret + + # Create a new secret named my-secret using a combination of a file and a literal + oc create secret generic my-secret --from-file=ssh-privatekey=path/to/id_rsa --from-literal=passphrase=topsecret + + # Create a new secret named my-secret from env files + oc create secret generic my-secret --from-env-file=path/to/foo.env --from-env-file=path/to/bar.env +---- + + + +== oc create secret tls +Create a TLS secret + +.Example usage +[source,bash,options="nowrap"] +---- + # Create a new TLS secret named tls-secret with the given key pair + oc create secret tls tls-secret --cert=path/to/tls.cert --key=path/to/tls.key +---- + + + +== oc create service clusterip +Create a ClusterIP service + +.Example usage +[source,bash,options="nowrap"] +---- + # Create a new ClusterIP service named my-cs + oc create service clusterip my-cs --tcp=5678:8080 + + # Create a new ClusterIP service named my-cs (in headless mode) + oc create service clusterip my-cs --clusterip="None" +---- + + + +== oc create service externalname +Create an ExternalName service + +.Example usage +[source,bash,options="nowrap"] +---- + # Create a new ExternalName service named my-ns + oc create service externalname my-ns --external-name bar.com +---- + + + +== oc create service loadbalancer +Create a LoadBalancer service + +.Example usage +[source,bash,options="nowrap"] +---- + # Create a new LoadBalancer service named my-lbs + oc create service loadbalancer my-lbs --tcp=5678:8080 +---- + + + +== oc create service nodeport +Create a NodePort service + +.Example usage +[source,bash,options="nowrap"] +---- + 
# Create a new NodePort service named my-ns + oc create service nodeport my-ns --tcp=5678:8080 +---- + + + +== oc create serviceaccount +Create a service account with the specified name + +.Example usage +[source,bash,options="nowrap"] +---- + # Create a new service account named my-service-account + oc create serviceaccount my-service-account +---- + + + +== oc create token +Request a service account token + +.Example usage +[source,bash,options="nowrap"] +---- + # Request a token to authenticate to the kube-apiserver as the service account "myapp" in the current namespace + oc create token myapp + + # Request a token for a service account in a custom namespace + oc create token myapp --namespace myns + + # Request a token with a custom expiration + oc create token myapp --duration 10m + + # Request a token with a custom audience + oc create token myapp --audience https://example.com + + # Request a token bound to an instance of a Secret object + oc create token myapp --bound-object-kind Secret --bound-object-name mysecret + + # Request a token bound to an instance of a Secret object with a specific uid + oc create token myapp --bound-object-kind Secret --bound-object-name mysecret --bound-object-uid 0d4691ed-659b-4935-a832-355f77ee47cc +---- + + + +== oc debug +Launch a new instance of a pod for debugging + +.Example usage +[source,bash,options="nowrap"] +---- + # Start a shell session into a pod using the OpenShift tools image + oc debug + + # Debug a currently running deployment by creating a new pod + oc debug deploy/test + + # Debug a node as an administrator + oc debug node/master-1 + + # Launch a shell in a pod using the provided image stream tag + oc debug istag/mysql:latest -n openshift + + # Test running a job as a non-root user + oc debug job/test --as-user=1000000 + + # Debug a specific failing container by running the env command in the 'second' container + oc debug daemonset/test -c second -- /bin/env + + # See the pod that would be created to debug + oc debug mypod-9xbc -o yaml + + # Debug a resource but launch the debug pod in another namespace + # Note: Not all resources can be debugged using --to-namespace without modification. For example, + # volumes and service accounts are namespace-dependent. Add '-o yaml' to output the debug pod definition + # to disk. 
If necessary, edit the definition then run 'oc debug -f -' or run without --to-namespace + oc debug mypod-9xbc --to-namespace testns +---- + + + == oc delete Delete resources by file names, stdin, resources and names, or by resources and label selector @@ -69,7 +1117,195 @@ Delete resources by file names, stdin, resources and names, or by resources and oc delete pods --all ---- -[id="oc-get"_{context}] + + +== oc describe +Show details of a specific resource or group of resources + +.Example usage +[source,bash,options="nowrap"] +---- + # Describe a node + oc describe nodes kubernetes-node-emt8.c.myproject.internal + + # Describe a pod + oc describe pods/nginx + + # Describe a pod identified by type and name in "pod.json" + oc describe -f pod.json + + # Describe all pods + oc describe pods + + # Describe pods by label name=myLabel + oc describe po -l name=myLabel + + # Describe all pods managed by the 'frontend' replication controller + # (rc-created pods get the name of the rc as a prefix in the pod name) + oc describe pods frontend +---- + + + +== oc diff +Diff the live version against a would-be applied version + +.Example usage +[source,bash,options="nowrap"] +---- + # Diff resources included in pod.json + oc diff -f pod.json + + # Diff file read from stdin + cat service.yaml | oc diff -f - +---- + + + +== oc edit +Edit a resource on the server + +.Example usage +[source,bash,options="nowrap"] +---- + # Edit the service named 'registry' + oc edit svc/registry + + # Use an alternative editor + KUBE_EDITOR="nano" oc edit svc/registry + + # Edit the job 'myjob' in JSON using the v1 API format + oc edit job.v1.batch/myjob -o json + + # Edit the deployment 'mydeployment' in YAML and save the modified config in its annotation + oc edit deployment/mydeployment -o yaml --save-config + + # Edit the deployment/mydeployment's status subresource + oc edit deployment mydeployment --subresource='status' +---- + + + +== oc events +List events + +.Example usage +[source,bash,options="nowrap"] +---- + # List recent events in the default namespace. + oc events + + # List recent events in all namespaces. + oc events --all-namespaces + + # List recent events for the specified pod, then wait for more events and list them as they arrive. + oc events --for pod/web-pod-13je7 --watch + + # List recent events in given format. Supported ones, apart from default, are json and yaml. + oc events -oyaml + + # List recent only events in given event types + oc events --types=Warning,Normal +---- + + + +== oc exec +Execute a command in a container + +.Example usage +[source,bash,options="nowrap"] +---- + # Get output from running the 'date' command from pod mypod, using the first container by default + oc exec mypod -- date + + # Get output from running the 'date' command in ruby-container from pod mypod + oc exec mypod -c ruby-container -- date + + # Switch to raw terminal mode; sends stdin to 'bash' in ruby-container from pod mypod + # and sends stdout/stderr from 'bash' back to the client + oc exec mypod -c ruby-container -i -t -- bash -il + + # List contents of /usr from the first container of pod mypod and sort by modification time + # If the command you want to execute in the pod has any flags in common (e.g. 
-i), + # you must use two dashes (--) to separate your command's flags/arguments + # Also note, do not surround your command and its flags/arguments with quotes + # unless that is how you would execute it normally (i.e., do ls -t /usr, not "ls -t /usr") + oc exec mypod -i -t -- ls -t /usr + + # Get output from running 'date' command from the first pod of the deployment mydeployment, using the first container by default + oc exec deploy/mydeployment -- date + + # Get output from running 'date' command from the first pod of the service myservice, using the first container by default + oc exec svc/myservice -- date +---- + + + +== oc explain +Get documentation for a resource + +.Example usage +[source,bash,options="nowrap"] +---- + # Get the documentation of the resource and its fields + oc explain pods + + # Get the documentation of a specific field of a resource + oc explain pods.spec.containers +---- + + + +== oc expose +Expose a replicated application as a service or route + +.Example usage +[source,bash,options="nowrap"] +---- + # Create a route based on service nginx. The new route will reuse nginx's labels + oc expose service nginx + + # Create a route and specify your own label and route name + oc expose service nginx -l name=myroute --name=fromdowntown + + # Create a route and specify a host name + oc expose service nginx --hostname=www.example.com + + # Create a route with a wildcard + oc expose service nginx --hostname=x.example.com --wildcard-policy=Subdomain + # This would be equivalent to *.example.com. NOTE: only hosts are matched by the wildcard; subdomains would not be included + + # Expose a deployment configuration as a service and use the specified port + oc expose dc ruby-hello-world --port=8080 + + # Expose a service as a route in the specified path + oc expose service nginx --path=/nginx +---- + + + +== oc extract +Extract secrets or config maps to disk + +.Example usage +[source,bash,options="nowrap"] +---- + # Extract the secret "test" to the current directory + oc extract secret/test + + # Extract the config map "nginx" to the /tmp directory + oc extract configmap/nginx --to=/tmp + + # Extract the config map "nginx" to STDOUT + oc extract configmap/nginx --to=- + + # Extract only the key "nginx.conf" from config map "nginx" to the /tmp directory + oc extract configmap/nginx --to=/tmp --keys=nginx.conf +---- + + + == oc get Display one or many resources @@ -94,7 +1330,7 @@ Display one or many resources # List a pod identified by type and name specified in "pod.yaml" in JSON output format oc get -f pod.yaml -o json - # List resources from a directory with kustomization.yaml - e.g. dir/kustomization. + # List resources from a directory with kustomization.yaml - e.g. dir/kustomization.yaml oc get -k dir/ # Return only the phase value of the specified pod @@ -112,3 +1348,1024 @@ Display one or many resources # List status subresource for a single pod. 
oc get pod web-pod-13je7 --subresource status ---- + + + +== oc image append +Add layers to images and push them to a registry + +.Example usage +[source,bash,options="nowrap"] +---- + # Remove the entrypoint on the mysql:latest image + oc image append --from mysql:latest --to myregistry.com/myimage:latest --image '{"Entrypoint":null}' + + # Add a new layer to the image + oc image append --from mysql:latest --to myregistry.com/myimage:latest layer.tar.gz + + # Add a new layer to the image and store the result on disk + # This results in $(pwd)/v2/mysql/blobs,manifests + oc image append --from mysql:latest --to file://mysql:local layer.tar.gz + + # Add a new layer to the image and store the result on disk in a designated directory + # This will result in $(pwd)/mysql-local/v2/mysql/blobs,manifests + oc image append --from mysql:latest --to file://mysql:local --dir mysql-local layer.tar.gz + + # Add a new layer to an image that is stored on disk (~/mysql-local/v2/image exists) + oc image append --from-dir ~/mysql-local --to myregistry.com/myimage:latest layer.tar.gz + + # Add a new layer to an image that was mirrored to the current directory on disk ($(pwd)/v2/image exists) + oc image append --from-dir v2 --to myregistry.com/myimage:latest layer.tar.gz + + # Add a new layer to a multi-architecture image for an os/arch that is different from the system's os/arch + # Note: Wildcard filter is not supported with append. Pass a single os/arch to append + oc image append --from docker.io/library/busybox:latest --filter-by-os=linux/s390x --to myregistry.com/myimage:latest layer.tar.gz +---- + + + +== oc image extract +Copy files from an image to the file system + +.Example usage +[source,bash,options="nowrap"] +---- + # Extract the busybox image into the current directory + oc image extract docker.io/library/busybox:latest + + # Extract the busybox image into a designated directory (must exist) + oc image extract docker.io/library/busybox:latest --path /:/tmp/busybox + + # Extract the busybox image into the current directory for linux/s390x platform + # Note: Wildcard filter is not supported with extract. Pass a single os/arch to extract + oc image extract docker.io/library/busybox:latest --filter-by-os=linux/s390x + + # Extract a single file from the image into the current directory + oc image extract docker.io/library/centos:7 --path /bin/bash:. + + # Extract all .repo files from the image's /etc/yum.repos.d/ folder into the current directory + oc image extract docker.io/library/centos:7 --path /etc/yum.repos.d/*.repo:. 
+ + # Extract all .repo files from the image's /etc/yum.repos.d/ folder into a designated directory (must exist) + # This results in /tmp/yum.repos.d/*.repo on local system + oc image extract docker.io/library/centos:7 --path /etc/yum.repos.d/*.repo:/tmp/yum.repos.d + + # Extract an image stored on disk into the current directory ($(pwd)/v2/busybox/blobs,manifests exists) + # --confirm is required because the current directory is not empty + oc image extract file://busybox:local --confirm + + # Extract an image stored on disk in a directory other than $(pwd)/v2 into the current directory + # --confirm is required because the current directory is not empty ($(pwd)/busybox-mirror-dir/v2/busybox exists) + oc image extract file://busybox:local --dir busybox-mirror-dir --confirm + + # Extract an image stored on disk in a directory other than $(pwd)/v2 into a designated directory (must exist) + oc image extract file://busybox:local --dir busybox-mirror-dir --path /:/tmp/busybox + + # Extract the last layer in the image + oc image extract docker.io/library/centos:7[-1] + + # Extract the first three layers of the image + oc image extract docker.io/library/centos:7[:3] + + # Extract the last three layers of the image + oc image extract docker.io/library/centos:7[-3:] +---- + + + +== oc image info +Display information about an image + +.Example usage +[source,bash,options="nowrap"] +---- + # Show information about an image + oc image info quay.io/openshift/cli:latest + + # Show information about images matching a wildcard + oc image info quay.io/openshift/cli:4.* + + # Show information about a file mirrored to disk under DIR + oc image info --dir=DIR file://library/busybox:latest + + # Select which image from a multi-OS image to show + oc image info library/busybox:latest --filter-by-os=linux/arm64 +---- + + + +== oc image mirror +Mirror images from one repository to another + +.Example usage +[source,bash,options="nowrap"] +---- + # Copy image to another tag + oc image mirror myregistry.com/myimage:latest myregistry.com/myimage:stable + + # Copy image to another registry + oc image mirror myregistry.com/myimage:latest docker.io/myrepository/myimage:stable + + # Copy all tags starting with mysql to the destination repository + oc image mirror myregistry.com/myimage:mysql* docker.io/myrepository/myimage + + # Copy image to disk, creating a directory structure that can be served as a registry + oc image mirror myregistry.com/myimage:latest file://myrepository/myimage:latest + + # Copy image to S3 (pull from <bucket>.s3.amazonaws.com/image:latest) + oc image mirror myregistry.com/myimage:latest s3://s3.amazonaws.com/<region>/<bucket>/image:latest + + # Copy image to S3 without setting a tag (pull via @<digest>) + oc image mirror myregistry.com/myimage:latest s3://s3.amazonaws.com/<region>/<bucket>/image + + # Copy image to multiple locations + oc image mirror myregistry.com/myimage:latest docker.io/myrepository/myimage:stable \ + docker.io/myrepository/myimage:dev + + # Copy multiple images + oc image mirror myregistry.com/myimage:latest=myregistry.com/other:test \ + myregistry.com/myimage:new=myregistry.com/other:target + + # Copy manifest list of a multi-architecture image, even if only a single image is found + oc image mirror myregistry.com/myimage:latest=myregistry.com/other:test \ + --keep-manifest-list=true + + # Copy specific os/arch manifest of a multi-architecture image + # Run 'oc image info myregistry.com/myimage:latest' to see available os/arch for multi-arch images + # Note that with multi-arch images, this results in a new manifest
list digest that includes only + # the filtered manifests + oc image mirror myregistry.com/myimage:latest=myregistry.com/other:test \ + --filter-by-os=os/arch + + # Copy all os/arch manifests of a multi-architecture image + # Run 'oc image info myregistry.com/myimage:latest' to see list of os/arch manifests that will be mirrored + oc image mirror myregistry.com/myimage:latest=myregistry.com/other:test \ + --keep-manifest-list=true + + # Note the above command is equivalent to + oc image mirror myregistry.com/myimage:latest=myregistry.com/other:test \ + --filter-by-os=.* +---- + + + +== oc kustomize +Build a kustomization target from a directory or URL. + +.Example usage +[source,bash,options="nowrap"] +---- + # Build the current working directory + oc kustomize + + # Build some shared configuration directory + oc kustomize /home/config/production + + # Build from github + oc kustomize https://github.com/kubernetes-sigs/kustomize.git/examples/helloWorld?ref=v1.0.6 +---- + + + +== oc label +Update the labels on a resource + +.Example usage +[source,bash,options="nowrap"] +---- + # Update pod 'foo' with the label 'unhealthy' and the value 'true' + oc label pods foo unhealthy=true + + # Update pod 'foo' with the label 'status' and the value 'unhealthy', overwriting any existing value + oc label --overwrite pods foo status=unhealthy + + # Update all pods in the namespace + oc label pods --all status=unhealthy + + # Update a pod identified by the type and name in "pod.json" + oc label -f pod.json status=unhealthy + + # Update pod 'foo' only if the resource is unchanged from version 1 + oc label pods foo status=unhealthy --resource-version=1 + + # Update pod 'foo' by removing a label named 'bar' if it exists + # Does not require the --overwrite flag + oc label pods foo bar- +---- + + + +== oc logs +Print the logs for a container in a pod + +.Example usage +[source,bash,options="nowrap"] +---- + # Start streaming the logs of the most recent build of the openldap build config + oc logs -f bc/openldap + + # Start streaming the logs of the latest deployment of the mysql deployment config + oc logs -f dc/mysql + + # Get the logs of the first deployment for the mysql deployment config. 
Note that logs + # from older deployments may not exist either because the deployment was successful + # or due to deployment pruning or manual deletion of the deployment + oc logs --version=1 dc/mysql + + # Return a snapshot of ruby-container logs from pod backend + oc logs backend -c ruby-container + + # Start streaming of ruby-container logs from pod backend + oc logs -f pod/backend -c ruby-container +---- + + + +== oc observe +Observe changes to resources and react to them (experimental) + +.Example usage +[source,bash,options="nowrap"] +---- + # Observe changes to services + oc observe services + + # Observe changes to services, including the clusterIP and invoke a script for each + oc observe services --template '{ .spec.clusterIP }' -- register_dns.sh + + # Observe changes to services filtered by a label selector + oc observe namespaces -l regist-dns=true --template '{ .spec.clusterIP }' -- register_dns.sh +---- + + + +== oc patch +Update fields of a resource + +.Example usage +[source,bash,options="nowrap"] +---- + # Partially update a node using a strategic merge patch, specifying the patch as JSON + oc patch node k8s-node-1 -p '{"spec":{"unschedulable":true}}' + + # Partially update a node using a strategic merge patch, specifying the patch as YAML + oc patch node k8s-node-1 -p $'spec:\n unschedulable: true' + + # Partially update a node identified by the type and name specified in "node.json" using strategic merge patch + oc patch -f node.json -p '{"spec":{"unschedulable":true}}' + + # Update a container's image; spec.containers[*].name is required because it's a merge key + oc patch pod valid-pod -p '{"spec":{"containers":[{"name":"kubernetes-serve-hostname","image":"new image"}]}}' + + # Update a container's image using a JSON patch with positional arrays + oc patch pod valid-pod --type='json' -p='[{"op": "replace", "path": "/spec/containers/0/image", "value":"new image"}]' + + # Update a deployment's replicas through the scale subresource using a merge patch. 
+ oc patch deployment nginx-deployment --subresource='scale' --type='merge' -p '{"spec":{"replicas":2}}' +---- + + + +== oc plugin list +List all visible plugin executables on a user's PATH + +.Example usage +[source,bash,options="nowrap"] +---- + # List all available plugins + oc plugin list +---- + + + +== oc policy add-role-to-user +Add a role to users or service accounts for the current project + +.Example usage +[source,bash,options="nowrap"] +---- + # Add the 'view' role to user1 for the current project + oc policy add-role-to-user view user1 + + # Add the 'edit' role to serviceaccount1 for the current project + oc policy add-role-to-user edit -z serviceaccount1 +---- + + + +== oc policy scc-review +Check which service account can create a pod + +.Example usage +[source,bash,options="nowrap"] +---- + # Check whether service accounts sa1 and sa2 can admit a pod with a template pod spec specified in my_resource.yaml + # Service Account specified in myresource.yaml file is ignored + oc policy scc-review -z sa1,sa2 -f my_resource.yaml + + # Check whether service accounts system:serviceaccount:bob:default can admit a pod with a template pod spec specified in my_resource.yaml + oc policy scc-review -z system:serviceaccount:bob:default -f my_resource.yaml + + # Check whether the service account specified in my_resource_with_sa.yaml can admit the pod + oc policy scc-review -f my_resource_with_sa.yaml + + # Check whether the default service account can admit the pod; default is taken since no service account is defined in myresource_with_no_sa.yaml + oc policy scc-review -f myresource_with_no_sa.yaml +---- + + + +== oc policy scc-subject-review +Check whether a user or a service account can create a pod + +.Example usage +[source,bash,options="nowrap"] +---- + # Check whether user bob can create a pod specified in myresource.yaml + oc policy scc-subject-review -u bob -f myresource.yaml + + # Check whether user bob who belongs to projectAdmin group can create a pod specified in myresource.yaml + oc policy scc-subject-review -u bob -g projectAdmin -f myresource.yaml + + # Check whether a service account specified in the pod template spec in myresourcewithsa.yaml can create the pod + oc policy scc-subject-review -f myresourcewithsa.yaml +---- + + + +== oc port-forward +Forward one or more local ports to a pod + +.Example usage +[source,bash,options="nowrap"] +---- + # Listen on ports 5000 and 6000 locally, forwarding data to/from ports 5000 and 6000 in the pod + oc port-forward pod/mypod 5000 6000 + + # Listen on ports 5000 and 6000 locally, forwarding data to/from ports 5000 and 6000 in a pod selected by the deployment + oc port-forward deployment/mydeployment 5000 6000 + + # Listen on port 8443 locally, forwarding to the targetPort of the service's port named "https" in a pod selected by the service + oc port-forward service/myservice 8443:https + + # Listen on port 8888 locally, forwarding to 5000 in the pod + oc port-forward pod/mypod 8888:5000 + + # Listen on port 8888 on all addresses, forwarding to 5000 in the pod + oc port-forward --address 0.0.0.0 pod/mypod 8888:5000 + + # Listen on port 8888 on localhost and selected IP, forwarding to 5000 in the pod + oc port-forward --address localhost,10.19.21.23 pod/mypod 8888:5000 + + # Listen on a random port locally, forwarding to 5000 in the pod + oc port-forward pod/mypod :5000 +---- + + + +== oc proxy +Run a proxy to the Kubernetes API server + +.Example usage +[source,bash,options="nowrap"] +---- + # To proxy all of the Kubernetes API and 
nothing else + oc proxy --api-prefix=/ + + # To proxy only part of the Kubernetes API and also some static files + # You can get pods info with 'curl localhost:8001/api/v1/pods' + oc proxy --www=/my/files --www-prefix=/static/ --api-prefix=/api/ + + # To proxy the entire Kubernetes API at a different root + # You can get pods info with 'curl localhost:8001/custom/api/v1/pods' + oc proxy --api-prefix=/custom/ + + # Run a proxy to the Kubernetes API server on port 8011, serving static content from ./local/www/ + oc proxy --port=8011 --www=./local/www/ + + # Run a proxy to the Kubernetes API server on an arbitrary local port + # The chosen port for the server will be output to stdout + oc proxy --port=0 + + # Run a proxy to the Kubernetes API server, changing the API prefix to k8s-api + # This makes e.g. the pods API available at localhost:8001/k8s-api/v1/pods/ + oc proxy --api-prefix=/k8s-api +---- + + + +== oc rollback +Revert part of an application back to a previous deployment + +.Example usage +[source,bash,options="nowrap"] +---- + # Perform a rollback to the last successfully completed deployment for a deployment config + oc rollback frontend + + # See what a rollback to version 3 will look like, but do not perform the rollback + oc rollback frontend --to-version=3 --dry-run + + # Perform a rollback to a specific deployment + oc rollback frontend-2 + + # Perform the rollback manually by piping the JSON of the new config back to oc + oc rollback frontend -o json | oc replace dc/frontend -f - + + # Print the updated deployment configuration in JSON format instead of performing the rollback + oc rollback frontend -o json +---- + + + +== oc rollout cancel +Cancel the in-progress deployment + +.Example usage +[source,bash,options="nowrap"] +---- + # Cancel the in-progress deployment based on 'nginx' + oc rollout cancel dc/nginx +---- + + + +== oc rollout history +View rollout history + +.Example usage +[source,bash,options="nowrap"] +---- + # View the rollout history of a deployment + oc rollout history dc/nginx + + # View the details of deployment revision 3 + oc rollout history dc/nginx --revision=3 +---- + + + +== oc rollout latest +Start a new rollout for a deployment config with the latest state from its triggers + +.Example usage +[source,bash,options="nowrap"] +---- + # Start a new rollout based on the latest images defined in the image change triggers + oc rollout latest dc/nginx + + # Print the rolled out deployment config + oc rollout latest dc/nginx -o json +---- + + + +== oc rollout pause +Mark the provided resource as paused + +.Example usage +[source,bash,options="nowrap"] +---- + # Mark the nginx deployment as paused. 
Any current state of + # the deployment will continue its function, new updates to the deployment will not + # have an effect as long as the deployment is paused + oc rollout pause dc/nginx +---- + + + +== oc rollout restart +Restart a resource + +.Example usage +[source,bash,options="nowrap"] +---- + # Restart a deployment + oc rollout restart deployment/nginx + + # Restart a daemon set + oc rollout restart daemonset/abc + + # Restart deployments with the app=nginx label + oc rollout restart deployment --selector=app=nginx +---- + + + +== oc rollout resume +Resume a paused resource + +.Example usage +[source,bash,options="nowrap"] +---- + # Resume an already paused deployment + oc rollout resume dc/nginx +---- + + + +== oc rollout retry +Retry the latest failed rollout + +.Example usage +[source,bash,options="nowrap"] +---- + # Retry the latest failed deployment based on 'frontend' + # The deployer pod and any hook pods are deleted for the latest failed deployment + oc rollout retry dc/frontend +---- + + + +== oc rollout status +Show the status of the rollout + +.Example usage +[source,bash,options="nowrap"] +---- + # Watch the status of the latest rollout + oc rollout status dc/nginx +---- + + + +== oc rollout undo +Undo a previous rollout + +.Example usage +[source,bash,options="nowrap"] +---- + # Roll back to the previous deployment + oc rollout undo dc/nginx + + # Roll back to deployment revision 3. The replication controller for that version must exist + oc rollout undo dc/nginx --to-revision=3 +---- + + + +== oc rsh +Start a shell session in a container + +.Example usage +[source,bash,options="nowrap"] +---- + # Open a shell session on the first container in pod 'foo' + oc rsh foo + + # Open a shell session on the first container in pod 'foo' and namespace 'bar' + # (Note that oc client specific arguments must come before the resource name and its arguments) + oc rsh -n bar foo + + # Run the command 'cat /etc/resolv.conf' inside pod 'foo' + oc rsh foo cat /etc/resolv.conf + + # See the configuration of your internal registry + oc rsh dc/docker-registry cat config.yml + + # Open a shell session on the container named 'index' inside a pod of your job + oc rsh -c index job/sheduled +---- + + + +== oc rsync +Copy files between a local file system and a pod + +.Example usage +[source,bash,options="nowrap"] +---- + # Synchronize a local directory with a pod directory + oc rsync ./local/dir/ POD:/remote/dir + + # Synchronize a pod directory with a local directory + oc rsync POD:/remote/dir/ ./local/dir +---- + + + +== oc run +Run a particular image on the cluster + +.Example usage +[source,bash,options="nowrap"] +---- + # Start a nginx pod + oc run nginx --image=nginx + + # Start a hazelcast pod and let the container expose port 5701 + oc run hazelcast --image=hazelcast/hazelcast --port=5701 + + # Start a hazelcast pod and set environment variables "DNS_DOMAIN=cluster" and "POD_NAMESPACE=default" in the container + oc run hazelcast --image=hazelcast/hazelcast --env="DNS_DOMAIN=cluster" --env="POD_NAMESPACE=default" + + # Start a hazelcast pod and set labels "app=hazelcast" and "env=prod" in the container + oc run hazelcast --image=hazelcast/hazelcast --labels="app=hazelcast,env=prod" + + # Dry run; print the corresponding API objects without creating them + oc run nginx --image=nginx --dry-run=client + + # Start a nginx pod, but overload the spec with a partial set of values parsed from JSON + oc run nginx --image=nginx --overrides='{ "apiVersion": "v1", "spec": { ... 
} }' + + # Start a busybox pod and keep it in the foreground, don't restart it if it exits + oc run -i -t busybox --image=busybox --restart=Never + + # Start the nginx pod using the default command, but use custom arguments (arg1 .. argN) for that command + oc run nginx --image=nginx -- ... + + # Start the nginx pod using a different command and custom arguments + oc run nginx --image=nginx --command -- ... +---- + + + +== oc scale +Set a new size for a deployment, replica set, or replication controller + +.Example usage +[source,bash,options="nowrap"] +---- + # Scale a replica set named 'foo' to 3 + oc scale --replicas=3 rs/foo + + # Scale a resource identified by type and name specified in "foo.yaml" to 3 + oc scale --replicas=3 -f foo.yaml + + # If the deployment named mysql's current size is 2, scale mysql to 3 + oc scale --current-replicas=2 --replicas=3 deployment/mysql + + # Scale multiple replication controllers + oc scale --replicas=5 rc/foo rc/bar rc/baz + + # Scale stateful set named 'web' to 3 + oc scale --replicas=3 statefulset/web +---- + + + +== oc secrets link +Link secrets to a service account + +.Example usage +[source,bash,options="nowrap"] +---- + # Add an image pull secret to a service account to automatically use it for pulling pod images + oc secrets link serviceaccount-name pull-secret --for=pull + + # Add an image pull secret to a service account to automatically use it for both pulling and pushing build images + oc secrets link builder builder-image-secret --for=pull,mount +---- + + + +== oc secrets unlink +Detach secrets from a service account + +.Example usage +[source,bash,options="nowrap"] +---- + # Unlink a secret currently associated with a service account + oc secrets unlink serviceaccount-name secret-name another-secret-name ... 
+---- + + + +== oc set data +Update the data within a config map or secret + +.Example usage +[source,bash,options="nowrap"] +---- + # Set the 'password' key of a secret + oc set data secret/foo password=this_is_secret + + # Remove the 'password' key from a secret + oc set data secret/foo password- + + # Update the 'haproxy.conf' key of a config map from a file on disk + oc set data configmap/bar --from-file=../haproxy.conf + + # Update a secret with the contents of a directory, one key per file + oc set data secret/foo --from-file=secret-dir +---- + + + +== oc set env +Update environment variables on a pod template + +.Example usage +[source,bash,options="nowrap"] +---- + # Update deployment config 'myapp' with a new environment variable + oc set env dc/myapp STORAGE_DIR=/local + + # List the environment variables defined on a build config 'sample-build' + oc set env bc/sample-build --list + + # List the environment variables defined on all pods + oc set env pods --all --list + + # Output modified build config in YAML + oc set env bc/sample-build STORAGE_DIR=/data -o yaml + + # Update all containers in all replication controllers in the project to have ENV=prod + oc set env rc --all ENV=prod + + # Import environment from a secret + oc set env --from=secret/mysecret dc/myapp + + # Import environment from a config map with a prefix + oc set env --from=configmap/myconfigmap --prefix=MYSQL_ dc/myapp + + # Remove the environment variable ENV from container 'c1' in all deployment configs + oc set env dc --all --containers="c1" ENV- + + # Remove the environment variable ENV from a deployment config definition on disk and + # update the deployment config on the server + oc set env -f dc.json ENV- + + # Set some of the local shell environment into a deployment config on the server + env | grep RAILS_ | oc set env -e - dc/myapp +---- + + + +== oc set image +Update the image of a pod template + +.Example usage +[source,bash,options="nowrap"] +---- + # Set a deployment config's nginx container image to 'nginx:1.9.1', and its busybox container image to 'busybox'. + oc set image dc/nginx busybox=busybox nginx=nginx:1.9.1 + + # Set a deployment config's app container image to the image referenced by the imagestream tag 'openshift/ruby:2.3'.
+ oc set image dc/myapp app=openshift/ruby:2.3 --source=imagestreamtag + + # Update all deployments' and rc's nginx container's image to 'nginx:1.9.1' + oc set image deployments,rc nginx=nginx:1.9.1 --all + + # Update image of all containers of daemonset abc to 'nginx:1.9.1' + oc set image daemonset abc *=nginx:1.9.1 + + # Print result (in YAML format) of updating nginx container image from local file, without hitting the server + oc set image -f path/to/file.yaml nginx=nginx:1.9.1 --local -o yaml +---- + + + +== oc set image-lookup +Change how images are resolved when deploying applications + +.Example usage +[source,bash,options="nowrap"] +---- + # Print all of the image streams and whether they resolve local names + oc set image-lookup + + # Use local name lookup on image stream mysql + oc set image-lookup mysql + + # Force a deployment to use local name lookup + oc set image-lookup deploy/mysql + + # Show the current status of the deployment lookup + oc set image-lookup deploy/mysql --list + + # Disable local name lookup on image stream mysql + oc set image-lookup mysql --enabled=false + + # Set local name lookup on all image streams + oc set image-lookup --all +---- + + + +== oc set probe +Update a probe on a pod template + +.Example usage +[source,bash,options="nowrap"] +---- + # Clear both readiness and liveness probes off all containers + oc set probe dc/myapp --remove --readiness --liveness + + # Set an exec action as a liveness probe to run 'echo ok' + oc set probe dc/myapp --liveness -- echo ok + + # Set a readiness probe to try to open a TCP socket on 3306 + oc set probe rc/mysql --readiness --open-tcp=3306 + + # Set an HTTP startup probe for port 8080 and path /healthz over HTTP on the pod IP + oc set probe dc/webapp --startup --get-url=http://:8080/healthz + + # Set an HTTP readiness probe for port 8080 and path /healthz over HTTP on the pod IP + oc set probe dc/webapp --readiness --get-url=http://:8080/healthz + + # Set an HTTP readiness probe over HTTPS on 127.0.0.1 for a hostNetwork pod + oc set probe dc/router --readiness --get-url=https://127.0.0.1:1936/stats + + # Set only the initial-delay-seconds field on all deployments + oc set probe dc --all --readiness --initial-delay-seconds=30 +---- + + + +== oc set resources +Update resource requests/limits on objects with pod templates + +.Example usage +[source,bash,options="nowrap"] +---- + # Set a deployment's nginx container CPU limits to "200m" and memory to "512Mi" + oc set resources deployment nginx -c=nginx --limits=cpu=200m,memory=512Mi + + # Set the resource request and limits for all containers in nginx + oc set resources deployment nginx --limits=cpu=200m,memory=512Mi --requests=cpu=100m,memory=256Mi + + # Remove the resource requests for resources on containers in nginx + oc set resources deployment nginx --limits=cpu=0,memory=0 --requests=cpu=0,memory=0 + + # Print the result (in YAML format) of updating nginx container limits locally, without hitting the server + oc set resources -f path/to/file.yaml --limits=cpu=200m,memory=512Mi --local -o yaml +---- + + + +== oc set route-backends +Update the backends for a route + +.Example usage +[source,bash,options="nowrap"] +---- + # Print the backends on the route 'web' + oc set route-backends web + + # Set two backend services on route 'web' with 2/3rds of traffic going to 'a' + oc set route-backends web a=2 b=1 + + # Increase the traffic percentage going to b by 10% relative to a + oc set route-backends web --adjust b=+10% + + # Set traffic percentage going to b to
10% of the traffic going to a + oc set route-backends web --adjust b=10% + + # Set weight of b to 10 + oc set route-backends web --adjust b=10 + + # Set the weight of all backends to zero + oc set route-backends web --zero +---- + + + +== oc set selector +Set the selector on a resource + +.Example usage +[source,bash,options="nowrap"] +---- + # Set the labels and selector before creating a deployment/service pair. + oc create service clusterip my-svc --clusterip="None" -o yaml --dry-run | oc set selector --local -f - 'environment=qa' -o yaml | oc create -f - + oc create deployment my-dep -o yaml --dry-run | oc label --local -f - environment=qa -o yaml | oc create -f - +---- + + + +== oc set serviceaccount +Update the service account of a resource + +.Example usage +[source,bash,options="nowrap"] +---- + # Set deployment nginx-deployment's service account to serviceaccount1 + oc set serviceaccount deployment nginx-deployment serviceaccount1 + + # Print the result (in YAML format) of updated nginx deployment with service account from a local file, without hitting the API server + oc set sa -f nginx-deployment.yaml serviceaccount1 --local --dry-run -o yaml +---- + + + +== oc set subject +Update the user, group, or service account in a role binding or cluster role binding + +.Example usage +[source,bash,options="nowrap"] +---- + # Update a cluster role binding for serviceaccount1 + oc set subject clusterrolebinding admin --serviceaccount=namespace:serviceaccount1 + + # Update a role binding for user1, user2, and group1 + oc set subject rolebinding admin --user=user1 --user=user2 --group=group1 + + # Print the result (in YAML format) of updating role binding subjects locally, without hitting the server + oc create rolebinding admin --role=admin --user=admin -o yaml --dry-run | oc set subject --local -f - --user=foo -o yaml +---- + + + +== oc set volumes +Update volumes on a pod template + +.Example usage +[source,bash,options="nowrap"] +---- + # List volumes defined on all deployment configs in the current project + oc set volume dc --all + + # Add a new empty dir volume to deployment config (dc) 'myapp' mounted under + # /var/lib/myapp + oc set volume dc/myapp --add --mount-path=/var/lib/myapp + + # Use an existing persistent volume claim (pvc) to overwrite an existing volume 'v1' + oc set volume dc/myapp --add --name=v1 -t pvc --claim-name=pvc1 --overwrite + + # Remove volume 'v1' from deployment config 'myapp' + oc set volume dc/myapp --remove --name=v1 + + # Create a new persistent volume claim that overwrites an existing volume 'v1' + oc set volume dc/myapp --add --name=v1 -t pvc --claim-size=1G --overwrite + + # Change the mount point for volume 'v1' to /data + oc set volume dc/myapp --add --name=v1 -m /data --overwrite + + # Modify the deployment config by removing volume mount "v1" from container "c1" + # (and by removing the volume "v1" if no other containers have volume mounts that reference it) + oc set volume dc/myapp --remove --name=v1 --containers=c1 + + # Add new volume based on a more complex volume source (AWS EBS, GCE PD, + # Ceph, Gluster, NFS, ISCSI, ...)
+ oc set volume dc/myapp --add -m /data --source= +---- + + + +== oc tag +Tag existing images into image streams + +.Example usage +[source,bash,options="nowrap"] +---- + # Tag the current image for the image stream 'openshift/ruby' and tag '2.0' into the image stream 'yourproject/ruby' with tag 'tip' + oc tag openshift/ruby:2.0 yourproject/ruby:tip + + # Tag a specific image + oc tag openshift/ruby@sha256:6b646fa6bf5e5e4c7fa41056c27910e679c03ebe7f93e361e6515a9da7e258cc yourproject/ruby:tip + + # Tag an external container image + oc tag --source=docker openshift/origin-control-plane:latest yourproject/ruby:tip + + # Tag an external container image and request pullthrough for it + oc tag --source=docker openshift/origin-control-plane:latest yourproject/ruby:tip --reference-policy=local + + # Tag an external container image and include the full manifest list + oc tag --source=docker openshift/origin-control-plane:latest yourproject/ruby:tip --import-mode=PreserveOriginal + + # Remove the specified spec tag from an image stream + oc tag openshift/origin-control-plane:latest -d +---- + + + +== oc version +Print the client and server version information + +.Example usage +[source,bash,options="nowrap"] +---- + # Print the OpenShift client, kube-apiserver, and openshift-apiserver version information for the current context + oc version + + # Print the OpenShift client, kube-apiserver, and openshift-apiserver version numbers for the current context + oc version --short + + # Print the OpenShift client version information for the current context + oc version --client +---- + + + +== oc wait +Experimental: Wait for a specific condition on one or many resources + +.Example usage +[source,bash,options="nowrap"] +---- + # Wait for the pod "busybox1" to contain the status condition of type "Ready" + oc wait --for=condition=Ready pod/busybox1 + + # The default value of status condition is true; you can wait for other targets after an equal delimiter (compared after Unicode simple case folding, which is a more general form of case-insensitivity): + oc wait --for=condition=Ready=false pod/busybox1 + + # Wait for the pod "busybox1" to reach the status phase "Running". + oc wait --for=jsonpath='{.status.phase}'=Running pod/busybox1 + + # Wait for the pod "busybox1" to be deleted, with a timeout of 60s, after having issued the "delete" command + oc delete pod/busybox1 + oc wait --for=delete pod/busybox1 --timeout=60s +---- + + diff --git a/modules/microshift-preparing-for-image-building.adoc b/modules/microshift-preparing-for-image-building.adoc index 303ea962ed..a63aaa3a34 100644 --- a/modules/microshift-preparing-for-image-building.adoc +++ b/modules/microshift-preparing-for-image-building.adoc @@ -8,11 +8,10 @@ Read link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html/composing_installing_and_managing_rhel_for_edge_images[Composing, installing, and managing RHEL for Edge images]. -//[IMPORTANT] -//==== -//{product-title} deployments have only been tested with {op-system-ostree-first} {op-system-version}. Other versions of {op-system} are not recommended. -//==== -//Can update this note as needed, but this is no longer correct for 4.13 +[IMPORTANT] +==== +{product-title} {ocp-version} deployments have only been tested with {op-system-ostree-first} {op-system-version}. Other versions of {op-system} are not supported.
+==== To build an {op-system-ostree-first} {op-system-version} image for a given CPU architecture, you need a {op-system} {op-system-version} build host of the same CPU architecture that meets the link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html/composing_installing_and_managing_rhel_for_edge_images/setting-up-image-builder_composing-installing-managing-rhel-for-edge-images#edge-image-builder-system-requirements_setting-up-image-builder[Image Builder system requirements]. diff --git a/modules/microshift-provisioning-ostree.adoc b/modules/microshift-provisioning-ostree.adoc index 8cf9f1bed0..4a099afbda 100644 --- a/modules/microshift-provisioning-ostree.adoc +++ b/modules/microshift-provisioning-ostree.adoc @@ -28,7 +28,7 @@ If you are using a Kickstart such as the {op-system-ostree} Installer (ISO) imag .. Detailed instructions about how to create a user. .. How to fetch and deploy the {op-system-ostree} image. -For more information, see "Additional resources." +For more information, read "Additional resources." .Procedure diff --git a/modules/microshift-rpm-ostree.adoc b/modules/microshift-rpm-ostree-https.adoc similarity index 77% rename from modules/microshift-rpm-ostree.adoc rename to modules/microshift-rpm-ostree-https.adoc index 4822054204..3a0c47e36a 100644 --- a/modules/microshift-rpm-ostree.adoc +++ b/modules/microshift-rpm-ostree-https.adoc @@ -3,10 +3,10 @@ // * microshift_networking/microshift-networking.adoc :_content-type: PROCEDURE -[id="microshift-rpm-ostree-package-system_{context}"] -= RPM-OStree image and package system +[id="microshift-rpm-ostree-https_{context}"] += Using the RPM-OStree HTTP(S) proxy -To use the HTTP(S) proxy in rpm-ostree, set the `http_proxy environment` variable for the `rpm-ostreed` service. +To use the HTTP(S) proxy in RPM-OStree, set the `http_proxy` environment variable for the `rpm-ostreed` service. .Procedure @@ -31,4 +31,4 @@ $ sudo systemctl daemon-reload ---- $ sudo systemctl restart rpm-ostreed.service ---- -//Q: Instructions for how to test that the proxy works by booting the image, verifying that MicroShift starts, and that their application is accessible? +//Q: Instructions for how to test that the proxy works by booting the image, verifying that MicroShift starts, and that the application is accessible? diff --git a/modules/microshift-troubleshooting-nodeport.adoc b/modules/microshift-troubleshooting-nodeport.adoc index 0fa734283f..9c7b955363 100644 --- a/modules/microshift-troubleshooting-nodeport.adoc +++ b/modules/microshift-troubleshooting-nodeport.adoc @@ -1,6 +1,6 @@ // Module included in the following assemblies: // -// * microshift_troubleshooting/microshift-known-issues.adoc +// * module may be unused in 4.13 :_content-type: PROCEDURE [id="microshift-troubleshooting-nodeport_{context}"]