diff --git a/modules/oc-adm-by-example-content.adoc b/modules/oc-adm-by-example-content.adoc index 36cb34afd1..3c3fa936f2 100644 --- a/modules/oc-adm-by-example-content.adoc +++ b/modules/oc-adm-by-example-content.adoc @@ -16,10 +16,10 @@ Output the inputs and dependencies of your builds ---- # Build the dependency tree for the 'latest' tag in oc adm build-chain - + # Build the dependency tree for the 'v2' tag in dot format and visualize it via the dot utility oc adm build-chain :v2 -o dot | dot -T svg -o deps.svg - + # Build the dependency tree across all namespaces for the specified image stream tag found in the 'test' namespace oc adm build-chain -n test --all ---- @@ -34,21 +34,21 @@ Mirror an operator-registry catalog ---- # Mirror an operator-registry image and its contents to a registry oc adm catalog mirror quay.io/my/image:latest myregistry.com - + # Mirror an operator-registry image and its contents to a particular namespace in a registry oc adm catalog mirror quay.io/my/image:latest myregistry.com/my-namespace - + # Mirror to an airgapped registry by first mirroring to files oc adm catalog mirror quay.io/my/image:latest file:///local/index oc adm catalog mirror file:///local/index/my/image:latest my-airgapped-registry.com - + # Configure a cluster to use a mirrored registry oc apply -f manifests/imageDigestMirrorSet.yaml - + # Edit the mirroring mappings and mirror with "oc image mirror" manually oc adm catalog mirror --manifests-only quay.io/my/image:latest myregistry.com oc image mirror -f manifests/mapping.txt - + # Delete all ImageDigestMirrorSets generated by oc adm catalog mirror oc delete imagedigestmirrorset -l operators.openshift.org/catalog=true ---- @@ -82,6 +82,14 @@ Deny a certificate signing request == oc adm copy-to-node Copies specified files to the node. 
+.Example usage +[source,bash,options="nowrap"] +---- + # copy a new bootstrap kubeconfig file to node-0 + oc adm copy-to-node --copy=new-bootstrap-kubeconfig=/etc/kubernetes/kubeconfig node/node-0 +---- + + == oc adm cordon Mark node as unschedulable @@ -151,7 +159,7 @@ Drain node in preparation for maintenance ---- # Drain node "foo", even if there are pods not managed by a replication controller, replica set, job, daemon set, or stateful set on it oc adm drain foo --force - + # As above, but abort if there are pods not managed by a replication controller, replica set, job, daemon set, or stateful set, and use a grace period of 15 minutes oc adm drain foo --grace-period=900 ---- @@ -178,10 +186,10 @@ Create a new group ---- # Add a group with no users oc adm groups new my-group - + # Add a group with two users oc adm groups new my-group user1 user2 - + # Add a group with one user and shorter output oc adm groups new my-group user1 -o name ---- @@ -196,13 +204,13 @@ Remove old OpenShift groups referencing missing records from an external provide ---- # Prune all orphaned groups oc adm groups prune --sync-config=/path/to/ldap-sync-config.yaml --confirm - + # Prune all orphaned groups except the ones from the denylist file oc adm groups prune --blacklist=/path/to/denylist.txt --sync-config=/path/to/ldap-sync-config.yaml --confirm - + # Prune all orphaned groups from a list of specific groups specified in an allowlist file oc adm groups prune --whitelist=/path/to/allowlist.txt --sync-config=/path/to/ldap-sync-config.yaml --confirm - + # Prune all orphaned groups from a list of specific groups specified in a list oc adm groups prune groups/group_name groups/other_name --sync-config=/path/to/ldap-sync-config.yaml --confirm ---- @@ -229,16 +237,16 @@ Sync OpenShift groups with records from an external provider ---- # Sync all groups with an LDAP server oc adm groups sync --sync-config=/path/to/ldap-sync-config.yaml --confirm - + # Sync all groups except the ones from the blacklist file with an LDAP server oc adm groups sync --blacklist=/path/to/blacklist.txt --sync-config=/path/to/ldap-sync-config.yaml --confirm - + # Sync specific groups specified in an allowlist file with an LDAP server oc adm groups sync --whitelist=/path/to/allowlist.txt --sync-config=/path/to/sync-config.yaml --confirm - + # Sync all OpenShift groups that have been synced previously with an LDAP server oc adm groups sync --type=openshift --sync-config=/path/to/ldap-sync-config.yaml --confirm - + # Sync specific OpenShift groups if they have been synced previously with an LDAP server oc adm groups sync groups/group1 groups/group2 groups/group3 --sync-config=/path/to/sync-config.yaml --confirm ---- @@ -253,13 +261,13 @@ Collect debugging data for a given resource ---- # Collect debugging data for the "openshift-apiserver" clusteroperator oc adm inspect clusteroperator/openshift-apiserver - + # Collect debugging data for the "openshift-apiserver" and "kube-apiserver" clusteroperators oc adm inspect clusteroperator/openshift-apiserver clusteroperator/kube-apiserver - + # Collect debugging data for all clusteroperators oc adm inspect clusteroperator - + # Collect debugging data for all clusteroperators and clusterversions oc adm inspect clusteroperators,clusterversions ---- @@ -286,7 +294,7 @@ Update template instances to point to the latest group-version-kinds ---- # Perform a dry-run of updating all objects oc adm migrate template-instances - + # To actually perform the update, the confirm flag must be appended oc adm migrate 
template-instances --confirm ---- @@ -301,19 +309,19 @@ Launch a new instance of a pod for gathering debug information ---- # Gather information using the default plug-in image and command, writing into ./must-gather.local. oc adm must-gather - + # Gather information with a specific local folder to copy to oc adm must-gather --dest-dir=/local/directory - + # Gather audit information oc adm must-gather -- /usr/bin/gather_audit_logs - + # Gather information using multiple plug-in images oc adm must-gather --image=quay.io/kubevirt/must-gather --image=quay.io/openshift/origin-must-gather - + # Gather information using a specific image stream plug-in oc adm must-gather --image-stream=openshift/must-gather:latest - + # Gather information using a specific image, command, and pod directory oc adm must-gather --image=my/image:tag --source-dir=/pod/directory -- myspecial-command.sh ---- @@ -340,10 +348,10 @@ Display and filter node logs ---- # Show kubelet logs from all masters oc adm node-logs --role master -u kubelet - + # See what logs are available in masters in /var/log oc adm node-logs --role master --path=/ - + # Display cron log file from all masters oc adm node-logs --role master --path=cron ---- @@ -365,16 +373,39 @@ Watch platform certificates. == oc adm ocp-certificates regenerate-leaf Regenerate client and serving certificates of an OpenShift cluster +.Example usage +[source,bash,options="nowrap"] +---- + # Regenerate a leaf certificate contained in a particular secret. + oc adm ocp-certificates regenerate-leaf -n openshift-config-managed secret/kube-controller-manager-client-cert-key +---- + == oc adm ocp-certificates regenerate-machine-config-server-serving-cert Regenerate the machine config operator certificates in an OpenShift cluster +.Example usage +[source,bash,options="nowrap"] +---- + # Regenerate the MCO certs without modifying user-data secrets + oc adm ocp-certificates regenerate-machine-config-server-serving-cert --update-ignition=false + + # Update the user-data secrets to use new MCS certs + oc adm ocp-certificates update-ignition-ca-bundle-for-machine-config-server +---- + == oc adm ocp-certificates regenerate-top-level Regenerate the top level certificates in an OpenShift cluster +.Example usage +[source,bash,options="nowrap"] +---- + # Regenerate the signing certificate contained in a particular secret. 
+ oc adm ocp-certificates regenerate-top-level -n openshift-kube-apiserver-operator secret/loadbalancer-serving-signer-key +---- @@ -384,6 +415,9 @@ Remove old CAs from ConfigMaps representing platform trust bundles in an OpenShi .Example usage [source,bash,options="nowrap"] ---- + # Remove a trust bundle contained in a particular config map + oc adm ocp-certificates remove-old-trust -n openshift-config-managed configmaps/kube-apiserver-aggregator-client-ca --created-before 2023-06-05T14:44:06Z + # Remove only CA certificates created before a certain date from all trust bundles oc adm ocp-certificates remove-old-trust configmaps -A --all --created-before 2023-06-05T14:44:06Z ---- @@ -397,10 +431,10 @@ Update user-data secrets in an OpenShift cluster to use updated MCO certfs [source,bash,options="nowrap"] ---- # Regenerate the MCO certs without modifying user-data secrets - oc adm certificates regenerate-machine-config-server-serving-cert --update-ignition=false - + oc adm ocp-certificates regenerate-machine-config-server-serving-cert --update-ignition=false + # Update the user-data secrets to use new MCS certs - oc adm certificates update-ignition-ca-bundle-for-machine-config-server + oc adm ocp-certificates update-ignition-ca-bundle-for-machine-config-server ---- @@ -413,7 +447,7 @@ Isolate project network ---- # Provide isolation for project p1 oc adm pod-network isolate-projects - + # Allow all projects with label name=top-secret to have their own isolated project network oc adm pod-network isolate-projects --selector='name=top-secret' ---- @@ -428,7 +462,7 @@ Join project network ---- # Allow project p2 to use project p1 network oc adm pod-network join-projects --to= - + # Allow all projects with label name=top-secret to use project p1 network oc adm pod-network join-projects --to= --selector='name=top-secret' ---- @@ -443,7 +477,7 @@ Make project network global ---- # Allow project p1 to access all pods in the cluster and vice versa oc adm pod-network make-projects-global - + # Allow all projects with label name=share to access all pods in the cluster and vice versa oc adm pod-network make-projects-global --selector='name=share' ---- @@ -458,7 +492,7 @@ Add a role to users or service accounts for the current project ---- # Add the 'view' role to user1 for the current project oc adm policy add-role-to-user view user1 - + # Add the 'edit' role to serviceaccount1 for the current project oc adm policy add-role-to-user edit -z serviceaccount1 ---- @@ -485,7 +519,7 @@ Add a security context constraint to users or a service account ---- # Add the 'restricted' security context constraint to user1 and user2 oc adm policy add-scc-to-user restricted user1 user2 - + # Add the 'privileged' security context constraint to serviceaccount1 in the current namespace oc adm policy add-scc-to-user privileged -z serviceaccount1 ---- @@ -501,13 +535,13 @@ Check which service account can create a pod # Check whether service accounts sa1 and sa2 can admit a pod with a template pod spec specified in my_resource.yaml # Service Account specified in myresource.yaml file is ignored oc adm policy scc-review -z sa1,sa2 -f my_resource.yaml - + # Check whether service accounts system:serviceaccount:bob:default can admit a pod with a template pod spec specified in my_resource.yaml oc adm policy scc-review -z system:serviceaccount:bob:default -f my_resource.yaml - + # Check whether the service account specified in my_resource_with_sa.yaml can admit the pod oc adm policy scc-review -f my_resource_with_sa.yaml - + #
Check whether the default service account can admit the pod; default is taken since no service account is defined in myresource_with_no_sa.yaml oc adm policy scc-review -f myresource_with_no_sa.yaml ---- @@ -522,10 +556,10 @@ Check whether a user or a service account can create a pod ---- # Check whether user bob can create a pod specified in myresource.yaml oc adm policy scc-subject-review -u bob -f myresource.yaml - + # Check whether user bob who belongs to projectAdmin group can create a pod specified in myresource.yaml oc adm policy scc-subject-review -u bob -g projectAdmin -f myresource.yaml - + # Check whether a service account specified in the pod template spec in myresourcewithsa.yaml can create the pod oc adm policy scc-subject-review -f myresourcewithsa.yaml ---- @@ -541,7 +575,7 @@ Remove old completed and failed builds # Dry run deleting older completed and failed builds and also including # all builds whose associated build config no longer exists oc adm prune builds --orphans - + # To actually perform the prune operation, the confirm flag must be appended oc adm prune builds --orphans --confirm ---- @@ -556,7 +590,7 @@ Remove old completed and failed deployment configs ---- # Dry run deleting all but the last complete deployment for every deployment config oc adm prune deployments --keep-complete=1 - + # To actually perform the prune operation, the confirm flag must be appended oc adm prune deployments --keep-complete=1 --confirm ---- @@ -571,13 +605,13 @@ Remove old OpenShift groups referencing missing records from an external provide ---- # Prune all orphaned groups oc adm prune groups --sync-config=/path/to/ldap-sync-config.yaml --confirm - + # Prune all orphaned groups except the ones from the denylist file oc adm prune groups --blacklist=/path/to/denylist.txt --sync-config=/path/to/ldap-sync-config.yaml --confirm - + # Prune all orphaned groups from a list of specific groups specified in an allowlist file oc adm prune groups --whitelist=/path/to/allowlist.txt --sync-config=/path/to/ldap-sync-config.yaml --confirm - + # Prune all orphaned groups from a list of specific groups specified in a list oc adm prune groups groups/group_name groups/other_name --sync-config=/path/to/ldap-sync-config.yaml --confirm ---- @@ -593,26 +627,65 @@ Remove unreferenced images # See what the prune command would delete if only images and their referrers were more than an hour old # and obsoleted by 3 newer revisions under the same tag were considered oc adm prune images --keep-tag-revisions=3 --keep-younger-than=60m - + # To actually perform the prune operation, the confirm flag must be appended oc adm prune images --keep-tag-revisions=3 --keep-younger-than=60m --confirm - + # See what the prune command would delete if we are interested in removing images # exceeding currently set limit ranges ('openshift.io/Image') oc adm prune images --prune-over-size-limit - + # To actually perform the prune operation, the confirm flag must be appended oc adm prune images --prune-over-size-limit --confirm - + # Force the insecure HTTP protocol with the particular registry host name oc adm prune images --registry-url=http://registry.example.org --confirm - + # Force a secure connection with a custom certificate authority to the particular registry host name oc adm prune images --registry-url=registry.example.org --certificate-authority=/path/to/custom/ca.crt --confirm ---- +== oc adm prune renderedmachineconfigs +Prunes rendered MachineConfigs in an OpenShift cluster + +.Example usage 
+[source,bash,options="nowrap"] +---- + # See what the prune command would delete if run with no options + oc adm prune renderedmachineconfigs + + # To actually perform the prune operation, the confirm flag must be appended + oc adm prune renderedmachineconfigs --confirm + + # See what the prune command would delete if run on the worker MachineConfigPool + oc adm prune renderedmachineconfigs --pool-name=worker + + # Prunes 10 oldest rendered MachineConfigs in the cluster + oc adm prune renderedmachineconfigs --count=10 --confirm + + # Prunes 10 oldest rendered MachineConfigs in the cluster for the worker MachineConfigPool + oc adm prune renderedmachineconfigs --count=10 --pool-name=worker --confirm +---- + + + +== oc adm prune renderedmachineconfigs list +Lists rendered MachineConfigs in an OpenShift cluster + +.Example usage +[source,bash,options="nowrap"] +---- + # List all rendered MachineConfigs for the worker MachineConfigPool in the cluster + oc adm prune renderedmachineconfigs list --pool-name=worker + + # List all rendered MachineConfigs in use by the cluster's MachineConfigPools + oc adm prune renderedmachineconfigs list --in-use +---- + + + == oc adm reboot-machine-config-pool Initiate reboot of the specified MachineConfigPool. @@ -621,10 +694,10 @@ Initiate reboot of the specified MachineConfigPool. ---- # Reboot all MachineConfigPools oc adm reboot-machine-config-pool mcp/worker mcp/master - + # Reboot all MachineConfigPools that inherit from worker. This include all custom MachineConfigPools and infra. oc adm reboot-machine-config-pool mcp/worker - + # Reboot masters oc adm reboot-machine-config-pool mcp/master ---- @@ -634,17 +707,15 @@ Initiate reboot of the specified MachineConfigPool. == oc adm release extract Extract the contents of an update payload to disk -include::snippets/osd-aws-example-only.adoc[] - .Example usage [source,bash,options="nowrap"] ---- # Use git to check out the source code for the current cluster release to DIR oc adm release extract --git=DIR - + # Extract cloud credential requests for AWS oc adm release extract --credentials-requests --cloud=aws - + # Use git to check out the source code for the current cluster release to DIR from linux/s390x image # Note: Wildcard filter is not supported; pass a single os/arch to extract oc adm release extract --git=DIR quay.io/openshift-release-dev/ocp-release:4.11.2 --filter-by-os=linux/s390x @@ -660,16 +731,16 @@ Display information about a release ---- # Show information about the cluster's current release oc adm release info - + # Show the source code that comprises a release oc adm release info 4.11.2 --commit-urls - + # Show the source code difference between two releases oc adm release info 4.11.0 4.11.2 --commits - + # Show where the images referenced by the release are located oc adm release info quay.io/openshift-release-dev/ocp-release:4.11.2 --pullspecs - + # Show information about linux/s390x image # Note: Wildcard filter is not supported; pass a single os/arch to extract oc adm release info quay.io/openshift-release-dev/ocp-release:4.11.2 --filter-by-os=linux/s390x @@ -686,18 +757,18 @@ Mirror a release to a different image registry location # Perform a dry run showing what would be mirrored, including the mirror objects oc adm release mirror 4.11.0 --to myregistry.local/openshift/release \ --release-image-signature-to-dir /tmp/releases --dry-run - + # Mirror a release into the current directory oc adm release mirror 4.11.0 --to file://openshift/release \ --release-image-signature-to-dir 
/tmp/releases - + # Mirror a release to another directory in the default location oc adm release mirror 4.11.0 --to-dir /tmp/releases - + # Upload a release from the current directory to another server oc adm release mirror --from file://openshift/release --to myregistry.com/openshift/release \ --release-image-signature-to-dir /tmp/releases - + # Mirror the 4.11.0 release to repository registry.example.com and apply signatures to connected cluster oc adm release mirror --from=quay.io/openshift-release-dev/ocp-release:4.11.0-x86_64 \ --to=registry.example.com/your/repository --apply-release-image-signature @@ -713,15 +784,15 @@ Create a new OpenShift release ---- # Create a release from the latest origin images and push to a DockerHub repository oc adm release new --from-image-stream=4.11 -n origin --to-image docker.io/mycompany/myrepo:latest - + # Create a new release with updated metadata from a previous release oc adm release new --from-release registry.ci.openshift.org/origin/release:v4.11 --name 4.11.1 \ --previous 4.11.0 --metadata ... --to-image docker.io/mycompany/myrepo:latest - + # Create a new release and override a single image oc adm release new --from-release registry.ci.openshift.org/origin/release:v4.11 \ cli=docker.io/mycompany/cli:latest --to-image docker.io/mycompany/myrepo:latest - + # Run a verification pass to ensure the release can be reproduced oc adm release new --from-release registry.ci.openshift.org/origin/release:v4.11 ---- @@ -736,13 +807,13 @@ Restarts kubelet on the specified nodes ---- # Restart all the nodes, 10% at a time oc adm restart-kubelet nodes --all --directive=RemoveKubeletKubeconfig - + # Restart all the nodes, 20 nodes at a time oc adm restart-kubelet nodes --all --parallelism=20 --directive=RemoveKubeletKubeconfig - + # Restart all the nodes, 15% at a time oc adm restart-kubelet nodes --all --parallelism=15% --directive=RemoveKubeletKubeconfig - + # Restart all the masters at the same time oc adm restart-kubelet nodes -l node-role.kubernetes.io/master --parallelism=100% --directive=RemoveKubeletKubeconfig ---- @@ -758,16 +829,16 @@ Update the taints on one or more nodes # Update node 'foo' with a taint with key 'dedicated' and value 'special-user' and effect 'NoSchedule' # If a taint with that key and effect already exists, its value is replaced as specified oc adm taint nodes foo dedicated=special-user:NoSchedule - + # Remove from node 'foo' the taint with key 'dedicated' and effect 'NoSchedule' if one exists oc adm taint nodes foo dedicated:NoSchedule- - + # Remove from node 'foo' all the taints with key 'dedicated' oc adm taint nodes foo dedicated- - + # Add a taint with key 'dedicated' on nodes having label myLabel=X oc adm taint node -l myLabel=X dedicated=foo:PreferNoSchedule - + # Add to node 'foo' a taint with key 'bar' and no value oc adm taint nodes foo bar:NoSchedule ---- @@ -806,7 +877,7 @@ Display resource (CPU/memory) usage of nodes ---- # Show metrics for all nodes oc adm top node - + # Show metrics for a given node oc adm top node NODE_NAME ---- @@ -821,13 +892,13 @@ Display resource (CPU/memory) usage of pods ---- # Show metrics for all pods in the default namespace oc adm top pod - + # Show metrics for all pods in the given namespace oc adm top pod --namespace=NAMESPACE - + # Show metrics for a given pod and its containers oc adm top pod POD_NAME --containers - + # Show metrics for the pods defined by label name=myLabel oc adm top pod -l name=myLabel ---- @@ -854,7 +925,7 @@ Upgrade a cluster or adjust the upgrade channel ---- 
# View the update status and available cluster updates oc adm upgrade - + # Update to the latest version oc adm upgrade --to-latest=true ---- @@ -870,16 +941,16 @@ Verify the image identity contained in the image signature # Verify the image signature and identity using the local GPG keychain oc adm verify-image-signature sha256:c841e9b64e4579bd56c794bdd7c36e1c257110fd2404bebbb8b613e4935228c4 \ --expected-identity=registry.local:5000/foo/bar:v1 - + # Verify the image signature and identity using the local GPG keychain and save the status oc adm verify-image-signature sha256:c841e9b64e4579bd56c794bdd7c36e1c257110fd2404bebbb8b613e4935228c4 \ --expected-identity=registry.local:5000/foo/bar:v1 --save - + # Verify the image signature and identity via exposed registry route oc adm verify-image-signature sha256:c841e9b64e4579bd56c794bdd7c36e1c257110fd2404bebbb8b613e4935228c4 \ --expected-identity=registry.local:5000/foo/bar:v1 \ --registry-url=docker-registry.foo.com - + # Remove all signature verifications from the image oc adm verify-image-signature sha256:c841e9b64e4579bd56c794bdd7c36e1c257110fd2404bebbb8b613e4935228c4 --remove-all ---- @@ -894,10 +965,10 @@ Wait for nodes to reboot after running `oc adm reboot-machine-config-pool` ---- # Wait for all nodes to complete a requested reboot from 'oc adm reboot-machine-config-pool mcp/worker mcp/master' oc adm wait-for-node-reboot nodes --all - + # Wait for masters to complete a requested reboot from 'oc adm reboot-machine-config-pool mcp/master' oc adm wait-for-node-reboot nodes -l node-role.kubernetes.io/master - + # Wait for masters to complete a specific reboot oc adm wait-for-node-reboot nodes -l node-role.kubernetes.io/master --reboot-number=4 ---- @@ -912,7 +983,9 @@ wait for the platform operators to become stable ---- # Wait for all clusteroperators to become stable oc adm wait-for-stable-cluster - + # Consider operators to be stable if they report as such for 5 minutes straight oc adm wait-for-stable-cluster --minimum-stable-period 5m ---- + + diff --git a/modules/oc-by-example-content.adoc b/modules/oc-by-example-content.adoc index 8a7107d751..2b046138e8 100644 --- a/modules/oc-by-example-content.adoc +++ b/modules/oc-by-example-content.adoc @@ -17,19 +17,19 @@ Update the annotations on a resource # Update pod 'foo' with the annotation 'description' and the value 'my frontend' # If the same annotation is set multiple times, only the last value will be applied oc annotate pods foo description='my frontend' - + # Update a pod identified by type and name in "pod.json" oc annotate -f pod.json description='my frontend' - + # Update pod 'foo' with the annotation 'description' and the value 'my frontend running nginx', overwriting any existing value oc annotate --overwrite pods foo description='my frontend running nginx' - + # Update all pods in the namespace oc annotate pods --all description='my frontend running nginx' - + # Update pod 'foo' only if the resource is unchanged from version 1 oc annotate pods foo description='my frontend running nginx' --resource-version=1 - + # Update pod 'foo' by removing an annotation named 'description' if it exists # Does not require the --overwrite flag oc annotate pods foo description- @@ -45,19 +45,19 @@ Print the supported API resources on the server ---- # Print the supported API resources oc api-resources - + # Print the supported API resources with more information oc api-resources -o wide - + # Print the supported API resources sorted by a column oc api-resources --sort-by=name - + # Print the 
supported namespaced resources oc api-resources --namespaced=true - + # Print the supported non-namespaced resources oc api-resources --namespaced=false - + # Print the supported API resources with a specific APIGroup oc api-resources --api-group=rbac.authorization.k8s.io ---- @@ -84,20 +84,20 @@ Apply a configuration to a resource by file name or stdin ---- # Apply the configuration in pod.json to a pod oc apply -f ./pod.json - + # Apply resources from a directory containing kustomization.yaml - e.g. dir/kustomization.yaml oc apply -k dir/ - + # Apply the JSON passed into stdin to a pod cat pod.json | oc apply -f - - + # Apply the configuration from all files that end with '.json' oc apply -f '*.json' - + # Note: --prune is still in Alpha # Apply the configuration in manifest.yaml that matches label app=nginx and delete all other resources that are not in the file and match label app=nginx oc apply --prune -f manifest.yaml -l app=nginx - + # Apply the configuration in manifest.yaml and delete all the other config maps that are not in the file oc apply --prune -f manifest.yaml --all --prune-allowlist=core/v1/ConfigMap ---- @@ -112,7 +112,7 @@ Edit latest last-applied-configuration annotations of a resource/object ---- # Edit the last-applied-configuration annotations by type/name in YAML oc apply edit-last-applied deployment/nginx - + # Edit the last-applied-configuration annotations by file in JSON oc apply edit-last-applied -f deploy.yaml -o json ---- @@ -127,10 +127,10 @@ Set the last-applied-configuration annotation on a live object to match the cont ---- # Set the last-applied-configuration of a resource to match the contents of a file oc apply set-last-applied -f deploy.yaml - + # Execute set-last-applied against each configuration file in a directory oc apply set-last-applied -f path/ - + # Set the last-applied-configuration of a resource to match the contents of a file; will create the annotation if it does not already exist oc apply set-last-applied -f deploy.yaml --create-annotation=true ---- @@ -145,7 +145,7 @@ View the latest last-applied-configuration annotations of a resource/object ---- # View the last-applied-configuration annotations by type/name in YAML oc apply view-last-applied deployment/nginx - + # View the last-applied-configuration annotations by file in JSON oc apply view-last-applied -f deploy.yaml -o json ---- @@ -161,14 +161,14 @@ Attach to a running container # Get output from running pod mypod; use the 'oc.kubernetes.io/default-container' annotation # for selecting the container to be attached or the first container in the pod will be chosen oc attach mypod - + # Get output from ruby-container from pod mypod oc attach mypod -c ruby-container - + # Switch to raw terminal mode; sends stdin to 'bash' in ruby-container from pod mypod # and sends stdout/stderr from 'bash' back to the client oc attach mypod -c ruby-container -i -t - + # Get output from the first pod of a replica set named nginx oc attach rs/nginx ---- @@ -183,27 +183,27 @@ Check whether an action is allowed ---- # Check to see if I can create pods in any namespace oc auth can-i create pods --all-namespaces - + # Check to see if I can list deployments in my current namespace oc auth can-i list deployments.apps - + # Check to see if service account "foo" of namespace "dev" can list pods # in the namespace "prod". # You must be allowed to use impersonation for the global option "--as". 
oc auth can-i list pods --as=system:serviceaccount:dev:foo -n prod - + # Check to see if I can do everything in my current namespace ("*" means all) oc auth can-i '*' '*' - + # Check to see if I can get the job named "bar" in namespace "foo" oc auth can-i list jobs.batch/bar -n foo - + # Check to see if I can read pod logs oc auth can-i get pods --subresource=log - + # Check to see if I can access the URL /logs/ oc auth can-i get /logs/ - + # List all allowed actions in namespace "foo" oc auth can-i --list --namespace=foo ---- @@ -230,7 +230,7 @@ Experimental: Check self subject attributes ---- # Get your subject attributes. oc auth whoami - + # Get your subject attributes in JSON format. oc auth whoami -o json ---- @@ -245,7 +245,7 @@ Autoscale a deployment config, deployment, replica set, stateful set, or replica ---- # Auto scale a deployment "foo", with the number of pods between 2 and 10, no target CPU utilization specified so a default autoscaling policy will be used oc autoscale deployment foo --min=2 --max=10 - + # Auto scale a replication controller "foo", with the number of pods between 1 and 5, target CPU utilization at 80% oc autoscale rc foo --max=5 --cpu-percent=80 ---- @@ -260,16 +260,16 @@ Cancel running, pending, or new builds ---- # Cancel the build with the given name oc cancel-build ruby-build-2 - + # Cancel the named build and print the build logs oc cancel-build ruby-build-2 --dump-logs - + # Cancel the named build and create a new one with the same parameters oc cancel-build ruby-build-2 --restart - + # Cancel multiple builds oc cancel-build ruby-build-1 ruby-build-2 ruby-build-3 - + # Cancel all builds created from the 'ruby-build' build config that are in the 'new' state oc cancel-build bc/ruby-build --state=new ---- @@ -296,13 +296,13 @@ Dump relevant information for debugging and diagnosis ---- # Dump current cluster state to stdout oc cluster-info dump - + # Dump current cluster state to /path/to/cluster-state oc cluster-info dump --output-directory=/path/to/cluster-state - + # Dump all namespaces to stdout oc cluster-info dump --all-namespaces - + # Dump a set of namespaces to /path/to/cluster-state oc cluster-info dump --namespaces default,kube-system --output-directory=/path/to/cluster-state ---- @@ -323,8 +323,8 @@ Output shell completion code for the specified shell (bash, zsh, fish, or powers ## If oc is installed via homebrew, this should start working immediately ## If you've installed via other means, you may need add the completion to your completion directory oc completion bash > $(brew --prefix)/etc/bash_completion.d/oc - - + + # Installing bash completion on Linux ## If bash-completion is not installed on Linux, install the 'bash-completion' package ## via your distribution's package manager. 
@@ -337,18 +337,18 @@ Output shell completion code for the specified shell (bash, zsh, fish, or powers source '$HOME/.kube/completion.bash.inc' " >> $HOME/.bash_profile source $HOME/.bash_profile - + # Load the oc completion code for zsh[1] into the current shell source <(oc completion zsh) # Set the oc completion code for zsh[1] to autoload on startup oc completion zsh > "${fpath[1]}/_oc" - - + + # Load the oc completion code for fish[2] into the current shell oc completion fish | source # To load completions for each session, execute once: oc completion fish > ~/.config/fish/completions/oc.fish - + # Load the oc completion code for powershell into the current shell oc completion powershell | Out-String | Invoke-Expression # Set oc completion code for powershell to run on startup @@ -433,7 +433,7 @@ Describe one or many contexts ---- # List all the contexts in your kubeconfig file oc config get-contexts - + # Describe one context in your kubeconfig file oc config get-contexts my-context ---- @@ -484,10 +484,10 @@ Update the OpenShift CA bundle by contacting the apiserver. ---- # Refresh the CA bundle for the current context's cluster oc config refresh-ca-bundle - + # Refresh the CA bundle for the cluster named e2e in your kubeconfig oc config refresh-ca-bundle e2e - + # Print the CA bundle from the current OpenShift cluster's apiserver. oc config refresh-ca-bundle --dry-run ---- @@ -514,13 +514,13 @@ Set an individual value in a kubeconfig file ---- # Set the server field on the my-cluster cluster to https://1.2.3.4 oc config set clusters.my-cluster.server https://1.2.3.4 - + # Set the certificate-authority-data field on the my-cluster cluster oc config set clusters.my-cluster.certificate-authority-data $(echo "cert_data_here" | base64 -i -) - + # Set the cluster field in the my-context context to my-cluster oc config set contexts.my-context.cluster my-cluster - + # Set the client-key-data field in the cluster-admin user using --set-raw-bytes option oc config set users.cluster-admin.client-key-data cert_data_here --set-raw-bytes=true ---- @@ -535,16 +535,16 @@ Set a cluster entry in kubeconfig ---- # Set only the server field on the e2e cluster entry without touching other values oc config set-cluster e2e --server=https://1.2.3.4 - + # Embed certificate authority data for the e2e cluster entry oc config set-cluster e2e --embed-certs --certificate-authority=~/.kube/e2e/kubernetes.ca.crt - + # Disable cert checking for the e2e cluster entry oc config set-cluster e2e --insecure-skip-tls-verify=true - + # Set the custom TLS server name to use for validation for the e2e cluster entry oc config set-cluster e2e --tls-server-name=my-cluster-name - + # Set the proxy URL for the e2e cluster entry oc config set-cluster e2e --proxy-url=https://1.2.3.4 ---- @@ -572,31 +572,31 @@ Set a user entry in kubeconfig # Set only the "client-key" field on the "cluster-admin" # entry, without touching other values oc config set-credentials cluster-admin --client-key=~/.kube/admin.key - + # Set basic auth for the "cluster-admin" entry oc config set-credentials cluster-admin --username=admin --password=uXFGweU9l35qcif - + # Embed client certificate data in the "cluster-admin" entry oc config set-credentials cluster-admin --client-certificate=~/.kube/admin.crt --embed-certs=true - + # Enable the Google Compute Platform auth provider for the "cluster-admin" entry oc config set-credentials cluster-admin --auth-provider=gcp - + # Enable the OpenID Connect auth provider for the "cluster-admin" entry with additional 
arguments oc config set-credentials cluster-admin --auth-provider=oidc --auth-provider-arg=client-id=foo --auth-provider-arg=client-secret=bar - + # Remove the "client-secret" config value for the OpenID Connect auth provider for the "cluster-admin" entry oc config set-credentials cluster-admin --auth-provider=oidc --auth-provider-arg=client-secret- - + # Enable new exec auth plugin for the "cluster-admin" entry oc config set-credentials cluster-admin --exec-command=/path/to/the/executable --exec-api-version=client.authentication.k8s.io/v1beta1 - + # Define new exec auth plugin arguments for the "cluster-admin" entry oc config set-credentials cluster-admin --exec-arg=arg1 --exec-arg=arg2 - + # Create or update exec auth plugin environment variables for the "cluster-admin" entry oc config set-credentials cluster-admin --exec-env=key1=val1 --exec-env=key2=val2 - + # Remove exec auth plugin environment variables for the "cluster-admin" entry oc config set-credentials cluster-admin --exec-env=var-to-remove- ---- @@ -611,7 +611,7 @@ Unset an individual value in a kubeconfig file ---- # Unset the current-context oc config unset current-context - + # Unset namespace in foo context oc config unset contexts.foo.namespace ---- @@ -638,10 +638,10 @@ Display merged kubeconfig settings or a specified kubeconfig file ---- # Show merged kubeconfig settings oc config view - + # Show merged kubeconfig settings, raw certificate data, and exposed secrets oc config view --raw - + # Get the password for the e2e user oc config view -o jsonpath='{.users[?(@.name == "e2e")].user.password}' ---- @@ -660,22 +660,22 @@ Copy files and directories to and from containers # # For advanced use cases, such as symlinks, wildcard expansion or # file mode preservation, consider using 'oc exec'. 
- + # Copy /tmp/foo local file to /tmp/bar in a remote pod in namespace tar cf - /tmp/foo | oc exec -i -n -- tar xf - -C /tmp/bar - + # Copy /tmp/foo from a remote pod to /tmp/bar locally oc exec -n -- tar cf - /tmp/foo | tar xf - -C /tmp/bar - + # Copy /tmp/foo_dir local directory to /tmp/bar_dir in a remote pod in the default namespace oc cp /tmp/foo_dir :/tmp/bar_dir - + # Copy /tmp/foo local file to /tmp/bar in a remote pod in a specific container oc cp /tmp/foo :/tmp/bar -c - + # Copy /tmp/foo local file to /tmp/bar in a remote pod in namespace oc cp /tmp/foo /:/tmp/bar - + # Copy /tmp/foo from a remote pod to /tmp/bar locally oc cp /:/tmp/foo /tmp/bar ---- @@ -690,10 +690,10 @@ Create a resource from a file or from stdin ---- # Create a pod using the data in pod.json oc create -f ./pod.json - + # Create a pod based on the JSON passed into stdin cat pod.json | oc create -f - - + # Edit the data in registry.yaml in JSON then create the resource using the edited data oc create -f registry.yaml --edit -o json ---- @@ -732,19 +732,19 @@ Create a cluster role ---- # Create a cluster role named "pod-reader" that allows user to perform "get", "watch" and "list" on pods oc create clusterrole pod-reader --verb=get,list,watch --resource=pods - + # Create a cluster role named "pod-reader" with ResourceName specified oc create clusterrole pod-reader --verb=get --resource=pods --resource-name=readablepod --resource-name=anotherpod - + # Create a cluster role named "foo" with API Group specified oc create clusterrole foo --verb=get,list,watch --resource=rs.apps - + # Create a cluster role named "foo" with SubResource specified oc create clusterrole foo --verb=get,list,watch --resource=pods,pods/status - + # Create a cluster role name "foo" with NonResourceURL specified oc create clusterrole "foo" --verb=get --non-resource-url=/logs/* - + # Create a cluster role name "monitoring" with AggregationRule specified oc create clusterrole monitoring --aggregation-rule="rbac.example.com/aggregate-to-monitoring=true" ---- @@ -771,16 +771,16 @@ Create a config map from a local file, directory or literal value ---- # Create a new config map named my-config based on folder bar oc create configmap my-config --from-file=path/to/bar - + # Create a new config map named my-config with specified keys instead of file basenames on disk oc create configmap my-config --from-file=key1=/path/to/bar/file1.txt --from-file=key2=/path/to/bar/file2.txt - + # Create a new config map named my-config with key1=config1 and key2=config2 oc create configmap my-config --from-literal=key1=config1 --from-literal=key2=config2 - + # Create a new config map named my-config from the key=value pairs in the file oc create configmap my-config --from-file=path/to/bar - + # Create a new config map named my-config from an env file oc create configmap my-config --from-env-file=path/to/foo.env --from-env-file=path/to/bar.env ---- @@ -795,7 +795,7 @@ Create a cron job with the specified name ---- # Create a cron job oc create cronjob my-job --image=busybox --schedule="*/1 * * * *" - + # Create a cron job with a command oc create cronjob my-job --image=busybox --schedule="*/1 * * * *" -- date ---- @@ -810,13 +810,13 @@ Create a deployment with the specified name ---- # Create a deployment named my-dep that runs the busybox image oc create deployment my-dep --image=busybox - + # Create a deployment with a command oc create deployment my-dep --image=busybox -- date - + # Create a deployment named my-dep that runs the nginx image with 3 replicas oc 
create deployment my-dep --image=nginx --replicas=3 - + # Create a deployment named my-dep that runs the busybox image and expose port 5701 oc create deployment my-dep --image=busybox --port=5701 ---- @@ -880,34 +880,34 @@ Create an ingress with the specified name # Create a single ingress called 'simple' that directs requests to foo.com/bar to svc # svc1:8080 with a TLS secret "my-cert" oc create ingress simple --rule="foo.com/bar=svc1:8080,tls=my-cert" - + # Create a catch all ingress of "/path" pointing to service svc:port and Ingress Class as "otheringress" oc create ingress catch-all --class=otheringress --rule="/path=svc:port" - + # Create an ingress with two annotations: ingress.annotation1 and ingress.annotations2 oc create ingress annotated --class=default --rule="foo.com/bar=svc:port" \ --annotation ingress.annotation1=foo \ --annotation ingress.annotation2=bla - + # Create an ingress with the same host and multiple paths oc create ingress multipath --class=default \ --rule="foo.com/=svc:port" \ --rule="foo.com/admin/=svcadmin:portadmin" - + # Create an ingress with multiple hosts and the pathType as Prefix oc create ingress ingress1 --class=default \ --rule="foo.com/path*=svc:8080" \ --rule="bar.com/admin*=svc2:http" - + # Create an ingress with TLS enabled using the default ingress certificate and different path types oc create ingress ingtls --class=default \ --rule="foo.com/=svc:https,tls" \ --rule="foo.com/path/subpath*=othersvc:8080" - + # Create an ingress with TLS enabled using a specific secret and pathType as Prefix oc create ingress ingsecret --class=default \ --rule="foo.com/*=svc:8080,tls=secret1" - + # Create an ingress with a default backend oc create ingress ingdefault --class=default \ --default-backend=defaultsvc:http \ @@ -924,10 +924,10 @@ Create a job with the specified name ---- # Create a job oc create job my-job --image=busybox - + # Create a job with a command oc create job my-job --image=busybox -- date - + # Create a job from a cron job named "a-cronjob" oc create job test-job --from=cronjob/a-cronjob ---- @@ -955,7 +955,7 @@ Create a pod disruption budget with the specified name # Create a pod disruption budget named my-pdb that will select all pods with the app=rails label # and require at least one of them being available at any point in time oc create poddisruptionbudget my-pdb --selector=app=rails --min-available=1 - + # Create a pod disruption budget named my-pdb that will select all pods with the app=nginx label # and require at least half of the pods selected to be available at any point in time oc create pdb my-pdb --selector=app=nginx --min-available=50% @@ -971,10 +971,10 @@ Create a priority class with the specified name ---- # Create a priority class named high-priority oc create priorityclass high-priority --value=1000 --description="high priority" - + # Create a priority class named default-priority that is considered as the global default priority oc create priorityclass default-priority --value=1000 --global-default=true --description="default priority" - + # Create a priority class named high-priority that cannot preempt pods with lower priority oc create priorityclass high-priority --value=1000 --description="high priority" --preemption-policy="Never" ---- @@ -989,7 +989,7 @@ Create a quota with the specified name ---- # Create a new resource quota named my-quota oc create quota my-quota --hard=cpu=1,memory=1G,pods=2,services=3,replicationcontrollers=2,resourcequotas=1,secrets=5,persistentvolumeclaims=10 - + # Create a new resource 
quota named best-effort oc create quota best-effort --hard=pods=100 --scopes=BestEffort ---- @@ -1004,13 +1004,13 @@ Create a role with single rule ---- # Create a role named "pod-reader" that allows user to perform "get", "watch" and "list" on pods oc create role pod-reader --verb=get --verb=list --verb=watch --resource=pods - + # Create a role named "pod-reader" with ResourceName specified oc create role pod-reader --verb=get --resource=pods --resource-name=readablepod --resource-name=anotherpod - + # Create a role named "foo" with API Group specified oc create role foo --verb=get,list,watch --resource=rs.apps - + # Create a role named "foo" with SubResource specified oc create role foo --verb=get,list,watch --resource=pods,pods/status ---- @@ -1025,7 +1025,7 @@ Create a role binding for a particular role or cluster role ---- # Create a role binding for user1, user2, and group1 using the admin cluster role oc create rolebinding admin --clusterrole=admin --user=user1 --user=user2 --group=group1 - + # Create a role binding for serviceaccount monitoring:sa-dev using the admin role oc create rolebinding admin-binding --role=admin --serviceaccount=monitoring:sa-dev ---- @@ -1040,7 +1040,7 @@ Create a route that uses edge TLS termination ---- # Create an edge route named "my-route" that exposes the frontend service oc create route edge my-route --service=frontend - + # Create an edge route that exposes the frontend service and specify a path # If the route name is omitted, the service name will be used oc create route edge --service=frontend --path /assets @@ -1056,7 +1056,7 @@ Create a route that uses passthrough TLS termination ---- # Create a passthrough route named "my-route" that exposes the frontend service oc create route passthrough my-route --service=frontend - + # Create a passthrough route that exposes the frontend service and specify # a host name. 
If the route name is omitted, the service name will be used oc create route passthrough --service=frontend --hostname=www.example.com @@ -1072,7 +1072,7 @@ Create a route that uses reencrypt TLS termination ---- # Create a route named "my-route" that exposes the frontend service oc create route reencrypt my-route --service=frontend --dest-ca-cert cert.cert - + # Create a reencrypt route that exposes the frontend service, letting the # route name default to the service name and the destination CA certificate # default to the service CA @@ -1089,7 +1089,7 @@ Create a secret for use with a Docker registry ---- # If you do not already have a .dockercfg file, create a dockercfg secret directly oc create secret docker-registry my-secret --docker-server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL - + # Create a new secret named my-secret from ~/.docker/config.json oc create secret docker-registry my-secret --from-file=.dockerconfigjson=path/to/.docker/config.json ---- @@ -1104,16 +1104,16 @@ Create a secret from a local file, directory, or literal value ---- # Create a new secret named my-secret with keys for each file in folder bar oc create secret generic my-secret --from-file=path/to/bar - + # Create a new secret named my-secret with specified keys instead of names on disk oc create secret generic my-secret --from-file=ssh-privatekey=path/to/id_rsa --from-file=ssh-publickey=path/to/id_rsa.pub - + # Create a new secret named my-secret with key1=supersecret and key2=topsecret oc create secret generic my-secret --from-literal=key1=supersecret --from-literal=key2=topsecret - + # Create a new secret named my-secret using a combination of a file and a literal oc create secret generic my-secret --from-file=ssh-privatekey=path/to/id_rsa --from-literal=passphrase=topsecret - + # Create a new secret named my-secret from env files oc create secret generic my-secret --from-env-file=path/to/foo.env --from-env-file=path/to/bar.env ---- @@ -1140,7 +1140,7 @@ Create a ClusterIP service ---- # Create a new ClusterIP service named my-cs oc create service clusterip my-cs --tcp=5678:8080 - + # Create a new ClusterIP service named my-cs (in headless mode) oc create service clusterip my-cs --clusterip="None" ---- @@ -1203,19 +1203,19 @@ Request a service account token ---- # Request a token to authenticate to the kube-apiserver as the service account "myapp" in the current namespace oc create token myapp - + # Request a token for a service account in a custom namespace oc create token myapp --namespace myns - + # Request a token with a custom expiration oc create token myapp --duration 10m - + # Request a token with a custom audience oc create token myapp --audience https://example.com - + # Request a token bound to an instance of a Secret object oc create token myapp --bound-object-kind Secret --bound-object-name mysecret - + # Request a token bound to an instance of a Secret object with a specific UID oc create token myapp --bound-object-kind Secret --bound-object-name mysecret --bound-object-uid 0d4691ed-659b-4935-a832-355f77ee47cc ---- @@ -1254,29 +1254,29 @@ Launch a new instance of a pod for debugging ---- # Start a shell session into a pod using the OpenShift tools image oc debug - + # Debug a currently running deployment by creating a new pod oc debug deploy/test - + # Debug a node as an administrator oc debug node/master-1 - + # Debug a Windows Node # Note: the chosen image must match the Windows Server version (2019, 2022) of the Node 
oc debug node/win-worker-1 --image=mcr.microsoft.com/powershell:lts-nanoserver-ltsc2022 - + # Launch a shell in a pod using the provided image stream tag oc debug istag/mysql:latest -n openshift - + # Test running a job as a non-root user oc debug job/test --as-user=1000000 - + # Debug a specific failing container by running the env command in the 'second' container oc debug daemonset/test -c second -- /bin/env - + # See the pod that would be created to debug oc debug mypod-9xbc -o yaml - + # Debug a resource but launch the debug pod in another namespace # Note: Not all resources can be debugged using --to-namespace without modification. For example, # volumes and service accounts are namespace-dependent. Add '-o yaml' to output the debug pod definition @@ -1294,28 +1294,28 @@ Delete resources by file names, stdin, resources and names, or by resources and ---- # Delete a pod using the type and name specified in pod.json oc delete -f ./pod.json - + # Delete resources from a directory containing kustomization.yaml - e.g. dir/kustomization.yaml oc delete -k dir - + # Delete resources from all files that end with '.json' oc delete -f '*.json' - + # Delete a pod based on the type and name in the JSON passed into stdin cat pod.json | oc delete -f - - + # Delete pods and services with same names "baz" and "foo" oc delete pod,service baz foo - + # Delete pods and services with label name=myLabel oc delete pods,services -l name=myLabel - + # Delete a pod with minimal delay oc delete pod foo --now - + # Force delete a pod on a dead node oc delete pod foo --force - + # Delete all pods oc delete pods --all ---- @@ -1330,19 +1330,19 @@ Show details of a specific resource or group of resources ---- # Describe a node oc describe nodes kubernetes-node-emt8.c.myproject.internal - + # Describe a pod oc describe pods/nginx - + # Describe a pod identified by type and name in "pod.json" oc describe -f pod.json - + # Describe all pods oc describe pods - + # Describe pods by label name=myLabel oc describe pods -l name=myLabel - + # Describe all pods managed by the 'frontend' replication controller # (rc-created pods get the name of the rc as a prefix in the pod name) oc describe pods frontend @@ -1358,7 +1358,7 @@ Diff the live version against a would-be applied version ---- # Diff resources included in pod.json oc diff -f pod.json - + # Diff file read from stdin cat service.yaml | oc diff -f - ---- @@ -1373,16 +1373,16 @@ Edit a resource on the server ---- # Edit the service named 'registry' oc edit svc/registry - + # Use an alternative editor KUBE_EDITOR="nano" oc edit svc/registry - + # Edit the job 'myjob' in JSON using the v1 API format oc edit job.v1.batch/myjob -o json - + # Edit the deployment 'mydeployment' in YAML and save the modified config in its annotation oc edit deployment/mydeployment -o yaml --save-config - + # Edit the 'status' subresource for the 'mydeployment' deployment oc edit deployment mydeployment --subresource='status' ---- @@ -1397,16 +1397,16 @@ List events ---- # List recent events in the default namespace oc events - + # List recent events in all namespaces oc events --all-namespaces - + # List recent events for the specified pod, then wait for more events and list them as they arrive oc events --for pod/web-pod-13je7 --watch - + # List recent events in YAML format oc events -oyaml - + # List recent only events of type 'Warning' or 'Normal' oc events --types=Warning,Normal ---- @@ -1421,24 +1421,24 @@ Execute a command in a container ---- # Get output from running the 'date' 
command from pod mypod, using the first container by default oc exec mypod -- date - + # Get output from running the 'date' command in ruby-container from pod mypod oc exec mypod -c ruby-container -- date - + # Switch to raw terminal mode; sends stdin to 'bash' in ruby-container from pod mypod # and sends stdout/stderr from 'bash' back to the client oc exec mypod -c ruby-container -i -t -- bash -il - + # List contents of /usr from the first container of pod mypod and sort by modification time # If the command you want to execute in the pod has any flags in common (e.g. -i), # you must use two dashes (--) to separate your command's flags/arguments # Also note, do not surround your command and its flags/arguments with quotes # unless that is how you would execute it normally (i.e., do ls -t /usr, not "ls -t /usr") oc exec mypod -i -t -- ls -t /usr - + # Get output from running 'date' command from the first pod of the deployment mydeployment, using the first container by default oc exec deploy/mydeployment -- date - + # Get output from running 'date' command from the first pod of the service myservice, using the first container by default oc exec svc/myservice -- date ---- @@ -1453,16 +1453,16 @@ Get documentation for a resource ---- # Get the documentation of the resource and its fields oc explain pods - + # Get all the fields in the resource oc explain pods --recursive - + # Get the explanation for deployment in supported api versions oc explain deployments --api-version=apps/v1 - + # Get the documentation of a specific field of a resource oc explain pods.spec.containers - + # Get the documentation of resources in different format oc explain deployment --output=plaintext-openapiv2 ---- @@ -1477,20 +1477,20 @@ Expose a replicated application as a service or route ---- # Create a route based on service nginx. The new route will reuse nginx's labels oc expose service nginx - + # Create a route and specify your own label and route name oc expose service nginx -l name=myroute --name=fromdowntown - + # Create a route and specify a host name oc expose service nginx --hostname=www.example.com - + # Create a route with a wildcard oc expose service nginx --hostname=x.example.com --wildcard-policy=Subdomain # This would be equivalent to *.example.com. 
NOTE: only hosts are matched by the wildcard; subdomains would not be included - + # Expose a deployment configuration as a service and use the specified port oc expose dc ruby-hello-world --port=8080 - + # Expose a service as a route in the specified path oc expose service nginx --path=/nginx ---- @@ -1505,13 +1505,13 @@ Extract secrets or config maps to disk ---- # Extract the secret "test" to the current directory oc extract secret/test - + # Extract the config map "nginx" to the /tmp directory oc extract configmap/nginx --to=/tmp - + # Extract the config map "nginx" to STDOUT oc extract configmap/nginx --to=- - + # Extract only the key "nginx.conf" from config map "nginx" to the /tmp directory oc extract configmap/nginx --to=/tmp --keys=nginx.conf ---- @@ -1526,37 +1526,37 @@ Display one or many resources ---- # List all pods in ps output format oc get pods - + # List all pods in ps output format with more information (such as node name) oc get pods -o wide - + # List a single replication controller with specified NAME in ps output format oc get replicationcontroller web - + # List deployments in JSON output format, in the "v1" version of the "apps" API group oc get deployments.v1.apps -o json - + # List a single pod in JSON output format oc get -o json pod web-pod-13je7 - + # List a pod identified by type and name specified in "pod.yaml" in JSON output format oc get -f pod.yaml -o json - + # List resources from a directory with kustomization.yaml - e.g. dir/kustomization.yaml oc get -k dir/ - + # Return only the phase value of the specified pod oc get -o template pod/web-pod-13je7 --template={{.status.phase}} - + # List resource information in custom columns oc get pod test-pod -o custom-columns=CONTAINER:.spec.containers[0].name,IMAGE:.spec.containers[0].image - + # List all replication controllers and services together in ps output format oc get rc,services - + # List one or more resources by their type and names oc get rc/web service/frontend pods/web-pod-13je7 - + # List the 'status' subresource for a single pod oc get pod web-pod-13je7 --subresource status ---- @@ -1571,7 +1571,7 @@ Experimental: Get token from external OIDC issuer as credentials exec plugin ---- # Starts an auth code flow to the issuer url with the client id and the given extra scopes oc get-token --client-id=client-id --issuer-url=test.issuer.url --extra-scopes=email,profile - + # Starts an authe code flow to the issuer url with a different callback address. 
oc get-token --client-id=client-id --issuer-url=test.issuer.url --callback-address=127.0.0.1:8343 ---- @@ -1598,31 +1598,31 @@ Add layers to images and push them to a registry ---- # Remove the entrypoint on the mysql:latest image oc image append --from mysql:latest --to myregistry.com/myimage:latest --image '{"Entrypoint":null}' - + # Add a new layer to the image oc image append --from mysql:latest --to myregistry.com/myimage:latest layer.tar.gz - + # Add a new layer to the image and store the result on disk # This results in $(pwd)/v2/mysql/blobs,manifests oc image append --from mysql:latest --to file://mysql:local layer.tar.gz - + # Add a new layer to the image and store the result on disk in a designated directory # This will result in $(pwd)/mysql-local/v2/mysql/blobs,manifests oc image append --from mysql:latest --to file://mysql:local --dir mysql-local layer.tar.gz - + # Add a new layer to an image that is stored on disk (~/mysql-local/v2/image exists) oc image append --from-dir ~/mysql-local --to myregistry.com/myimage:latest layer.tar.gz - + # Add a new layer to an image that was mirrored to the current directory on disk ($(pwd)/v2/image exists) oc image append --from-dir v2 --to myregistry.com/myimage:latest layer.tar.gz - + # Add a new layer to a multi-architecture image for an os/arch that is different from the system's os/arch # Note: The first image in the manifest list that matches the filter will be returned when --keep-manifest-list is not specified oc image append --from docker.io/library/busybox:latest --filter-by-os=linux/s390x --to myregistry.com/myimage:latest layer.tar.gz - + # Add a new layer to a multi-architecture image for all the os/arch manifests when keep-manifest-list is specified oc image append --from docker.io/library/busybox:latest --keep-manifest-list --to myregistry.com/myimage:latest layer.tar.gz - + # Add a new layer to a multi-architecture image for all the os/arch manifests that are specified by the filter, while preserving the manifest list oc image append --from docker.io/library/busybox:latest --filter-by-os=linux/s390x --keep-manifest-list --to myregistry.com/myimage:latest layer.tar.gz ---- @@ -1637,41 +1637,41 @@ Copy files from an image to the file system ---- # Extract the busybox image into the current directory oc image extract docker.io/library/busybox:latest - + # Extract the busybox image into a designated directory (must exist) oc image extract docker.io/library/busybox:latest --path /:/tmp/busybox - + # Extract the busybox image into the current directory for linux/s390x platform # Note: Wildcard filter is not supported with extract; pass a single os/arch to extract oc image extract docker.io/library/busybox:latest --filter-by-os=linux/s390x - + # Extract a single file from the image into the current directory oc image extract docker.io/library/centos:7 --path /bin/bash:. - + # Extract all .repo files from the image's /etc/yum.repos.d/ folder into the current directory oc image extract docker.io/library/centos:7 --path /etc/yum.repos.d/*.repo:.
- + # Extract all .repo files from the image's /etc/yum.repos.d/ folder into a designated directory (must exist) # This results in /tmp/yum.repos.d/*.repo on local system oc image extract docker.io/library/centos:7 --path /etc/yum.repos.d/*.repo:/tmp/yum.repos.d - + # Extract an image stored on disk into the current directory ($(pwd)/v2/busybox/blobs,manifests exists) # --confirm is required because the current directory is not empty oc image extract file://busybox:local --confirm - + # Extract an image stored on disk in a directory other than $(pwd)/v2 into the current directory # --confirm is required because the current directory is not empty ($(pwd)/busybox-mirror-dir/v2/busybox exists) oc image extract file://busybox:local --dir busybox-mirror-dir --confirm - + # Extract an image stored on disk in a directory other than $(pwd)/v2 into a designated directory (must exist) oc image extract file://busybox:local --dir busybox-mirror-dir --path /:/tmp/busybox - + # Extract the last layer in the image oc image extract docker.io/library/centos:7[-1] - + # Extract the first three layers of the image oc image extract docker.io/library/centos:7[:3] - + # Extract the last three layers of the image oc image extract docker.io/library/centos:7[-3:] ---- @@ -1686,13 +1686,13 @@ Display information about an image ---- # Show information about an image oc image info quay.io/openshift/cli:latest - + # Show information about images matching a wildcard oc image info quay.io/openshift/cli:4.* - + # Show information about a file mirrored to disk under DIR oc image info --dir=DIR file://library/busybox:latest - + # Select which image from a multi-OS image to show oc image info library/busybox:latest --filter-by-os=linux/arm64 ---- @@ -1702,57 +1702,55 @@ Display information about an image == oc image mirror Mirror images from one repository to another -include::snippets/osd-aws-example-only.adoc[] - .Example usage [source,bash,options="nowrap"] ---- # Copy image to another tag oc image mirror myregistry.com/myimage:latest myregistry.com/myimage:stable - + # Copy image to another registry oc image mirror myregistry.com/myimage:latest docker.io/myrepository/myimage:stable - + # Copy all tags starting with mysql to the destination repository oc image mirror myregistry.com/myimage:mysql* docker.io/myrepository/myimage - + # Copy image to disk, creating a directory structure that can be served as a registry oc image mirror myregistry.com/myimage:latest file://myrepository/myimage:latest - + # Copy image to S3 (pull from .s3.amazonaws.com/image:latest) oc image mirror myregistry.com/myimage:latest s3://s3.amazonaws.com///image:latest - + # Copy image to S3 without setting a tag (pull via @) oc image mirror myregistry.com/myimage:latest s3://s3.amazonaws.com///image - + # Copy image to multiple locations oc image mirror myregistry.com/myimage:latest docker.io/myrepository/myimage:stable \ docker.io/myrepository/myimage:dev - + # Copy multiple images oc image mirror myregistry.com/myimage:latest=myregistry.com/other:test \ myregistry.com/myimage:new=myregistry.com/other:target - + # Copy manifest list of a multi-architecture image, even if only a single image is found oc image mirror myregistry.com/myimage:latest=myregistry.com/other:test \ --keep-manifest-list=true - + # Copy specific os/arch manifest of a multi-architecture image # Run 'oc image info myregistry.com/myimage:latest' to see available os/arch for multi-arch images # Note that with multi-arch images, this results in a new manifest list digest that 
includes only # the filtered manifests oc image mirror myregistry.com/myimage:latest=myregistry.com/other:test \ --filter-by-os=os/arch - + # Copy all os/arch manifests of a multi-architecture image # Run 'oc image info myregistry.com/myimage:latest' to see list of os/arch manifests that will be mirrored oc image mirror myregistry.com/myimage:latest=myregistry.com/other:test \ --keep-manifest-list=true - + # Note the above command is equivalent to oc image mirror myregistry.com/myimage:latest=myregistry.com/other:test \ --filter-by-os=.* - + # Copy specific os/arch manifest of a multi-architecture image # Run 'oc image info myregistry.com/myimage:latest' to see available os/arch for multi-arch images # Note that the target registry may reject a manifest list if the platform specific images do not all @@ -1772,22 +1770,22 @@ Import images from a container image registry ---- # Import tag latest into a new image stream oc import-image mystream --from=registry.io/repo/image:latest --confirm - + # Update imported data for tag latest in an already existing image stream oc import-image mystream - + # Update imported data for tag stable in an already existing image stream oc import-image mystream:stable - + # Update imported data for all tags in an existing image stream oc import-image mystream --all - + # Update imported data for a tag that points to a manifest list to include the full manifest list oc import-image mystream --import-mode=PreserveOriginal - + # Import all tags into a new image stream oc import-image mystream --from=registry.io/repo/image --all --confirm - + # Import all tags into a new image stream using a custom timeout oc --request-timeout=5m import-image mystream --from=registry.io/repo/image --all --confirm ---- @@ -1802,10 +1800,10 @@ Build a kustomization target from a directory or URL ---- # Build the current working directory oc kustomize - + # Build some shared configuration directory oc kustomize /home/config/production - + # Build from github oc kustomize https://github.com/kubernetes-sigs/kustomize.git/examples/helloWorld?ref=v1.0.6 ---- @@ -1820,19 +1818,19 @@ Update the labels on a resource ---- # Update pod 'foo' with the label 'unhealthy' and the value 'true' oc label pods foo unhealthy=true - + # Update pod 'foo' with the label 'status' and the value 'unhealthy', overwriting any existing value oc label --overwrite pods foo status=unhealthy - + # Update all pods in the namespace oc label pods --all status=unhealthy - + # Update a pod identified by the type and name in "pod.json" oc label -f pod.json status=unhealthy - + # Update pod 'foo' only if the resource is unchanged from version 1 oc label pods foo status=unhealthy --resource-version=1 - + # Update pod 'foo' by removing a label named 'bar' if it exists # Does not require the --overwrite flag oc label pods foo bar- @@ -1848,50 +1846,21 @@ Log in to a server ---- # Log in interactively oc login --username=myuser - + # Log in to the given server with the given certificate authority file oc login localhost:8443 --certificate-authority=/path/to/cert.crt - + # Log in to the given server with the given credentials (will not prompt interactively) oc login localhost:8443 --username=myuser --password=mypass - + # Log in to the given server through a browser oc login localhost:8443 --web --callback-port 8280 - -ifdef::openshift-dedicated,openshift-rosa[] + # Log in to the external OIDC issuer through Auth Code + PKCE by starting a local server listening port 8080 - oc login --exec-plugin=oc-oidc --issuer-url= 
--client-id= --extra-scopes=email,profile --callback-port=8080 - - # Log in with an external OIDC if the external OIDC certificate is not publically trusted - oc login --exec-plugin=oc-oidc --issuer-url= --client-id= --extra-scopes=email --callback-port=8080 --oidc-certificate-authority -endif::openshift-dedicated,openshift-rosa[] + oc login localhost:8443 --exec-plugin=oc-oidc --client-id=client-id --extra-scopes=email,profile --callback-port=8080 ---- -ifdef::openshift-dedicated,openshift-rosa[] -.Arguments -[cols="30,70"] -|=== -|Option |Definition -|`--exec-plugin` -|Specifies the type of exec plugin credentials used to authenticate the external OIDC issuer. Currently, only `oc-oidc` is supported. - -|`--issuer-url` -|Issuer URL for the external issuer. Required. - -|`--client-id` -|Client ID for the external OIDC issuer. Only supports Auth Code and PKCE. Required. - -|`--extra-scopes` -|Extra scopes for the external OIDC issuer. Optional. - -|`--callback-port` -|The port that the callback server is redirected to after authentication flow is complete. The default is any random, open port. - -|`--oidc-certificate-authority` -|Path to a certificate file for the external OIDC certificate authority. -|=== -endif::openshift-dedicated,openshift-rosa[] == oc logout End the current server session @@ -1913,18 +1882,18 @@ Print the logs for a container in a pod ---- # Start streaming the logs of the most recent build of the openldap build config oc logs -f bc/openldap - + # Start streaming the logs of the latest deployment of the mysql deployment config oc logs -f dc/mysql - + # Get the logs of the first deployment for the mysql deployment config. Note that logs # from older deployments may not exist either because the deployment was successful # or due to deployment pruning or manual deletion of the deployment oc logs --version=1 dc/mysql - + # Return a snapshot of ruby-container logs from pod backend oc logs backend -c ruby-container - + # Start streaming of ruby-container logs from pod backend oc logs -f pod/backend -c ruby-container ---- @@ -1939,47 +1908,47 @@ Create a new application ---- # List all local templates and image streams that can be used to create an app oc new-app --list - + # Create an application based on the source code in the current git repository (with a public remote) and a container image oc new-app . --image=registry/repo/langimage - + # Create an application myapp with Docker based build strategy expecting binary input oc new-app --strategy=docker --binary --name myapp - + # Create a Ruby application based on the provided [image]~[source code] combination oc new-app centos/ruby-25-centos7~https://github.com/sclorg/ruby-ex.git - + # Use the public container registry MySQL image to create an app. 
Generated artifacts will be labeled with db=mysql oc new-app mysql MYSQL_USER=user MYSQL_PASSWORD=pass MYSQL_DATABASE=testdb -l db=mysql - + # Use a MySQL image in a private registry to create an app and override application artifacts' names oc new-app --image=myregistry.com/mycompany/mysql --name=private - + # Use an image with the full manifest list to create an app and override application artifacts' names oc new-app --image=myregistry.com/mycompany/image --name=private --import-mode=PreserveOriginal - + # Create an application from a remote repository using its beta4 branch oc new-app https://github.com/openshift/ruby-hello-world#beta4 - + # Create an application based on a stored template, explicitly setting a parameter value oc new-app --template=ruby-helloworld-sample --param=MYSQL_USER=admin - + # Create an application from a remote repository and specify a context directory oc new-app https://github.com/youruser/yourgitrepo --context-dir=src/build - + # Create an application from a remote private repository and specify which existing secret to use oc new-app https://github.com/youruser/yourgitrepo --source-secret=yoursecret - + # Create an application based on a template file, explicitly setting a parameter value oc new-app --file=./example/myapp/template.json --param=MYSQL_USER=admin - + # Search all templates, image streams, and container images for the ones that match "ruby" oc new-app --search ruby - + # Search for "ruby", but only in stored templates (--template, --image-stream and --image # can be used to filter search results) oc new-app --search --template=ruby - + # Search for "ruby" in stored templates and print the output as YAML oc new-app --search --template=ruby --output=yaml ---- @@ -1995,31 +1964,31 @@ Create a new build configuration # Create a build config based on the source code in the current git repository (with a public # remote) and a container image oc new-build . 
--image=repo/langimage - + # Create a NodeJS build config based on the provided [image]~[source code] combination oc new-build centos/nodejs-8-centos7~https://github.com/sclorg/nodejs-ex.git - + # Create a build config from a remote repository using its beta2 branch oc new-build https://github.com/openshift/ruby-hello-world#beta2 - + # Create a build config using a Dockerfile specified as an argument oc new-build -D $'FROM centos:7\nRUN yum install -y httpd' - + # Create a build config from a remote repository and add custom environment variables oc new-build https://github.com/openshift/ruby-hello-world -e RACK_ENV=development - + # Create a build config from a remote private repository and specify which existing secret to use oc new-build https://github.com/youruser/yourgitrepo --source-secret=yoursecret - + # Create a build config using an image with the full manifest list to create an app and override application artifacts' names oc new-build --image=myregistry.com/mycompany/image --name=private --import-mode=PreserveOriginal - + # Create a build config from a remote repository and inject the npmrc into a build oc new-build https://github.com/openshift/ruby-hello-world --build-secret npmrc:.npmrc - + # Create a build config from a remote repository and inject environment data into a build oc new-build https://github.com/openshift/ruby-hello-world --build-config-map env:config - + # Create a build config that gets its input from a remote repository and another container image oc new-build https://github.com/openshift/ruby-hello-world --source-image=openshift/jenkins-1-centos7 --source-image-path=/var/lib/jenkins:tmp ---- @@ -2034,7 +2003,7 @@ Request a new project ---- # Create a new project with minimal information oc new-project web-team-dev - + # Create a new project with a display name and description oc new-project web-team-dev --display-name="Web Team Development" --description="Development project for the web team." 
---- @@ -2049,10 +2018,10 @@ Observe changes to resources and react to them (experimental) ---- # Observe changes to services oc observe services - + # Observe changes to services, including the clusterIP and invoke a script for each oc observe services --template '{ .spec.clusterIP }' -- register_dns.sh - + # Observe changes to services filtered by a label selector oc observe services -l regist-dns=true --template '{ .spec.clusterIP }' -- register_dns.sh ---- @@ -2067,19 +2036,19 @@ Update fields of a resource ---- # Partially update a node using a strategic merge patch, specifying the patch as JSON oc patch node k8s-node-1 -p '{"spec":{"unschedulable":true}}' - + # Partially update a node using a strategic merge patch, specifying the patch as YAML oc patch node k8s-node-1 -p $'spec:\n unschedulable: true' - + # Partially update a node identified by the type and name specified in "node.json" using strategic merge patch oc patch -f node.json -p '{"spec":{"unschedulable":true}}' - + # Update a container's image; spec.containers[*].name is required because it's a merge key oc patch pod valid-pod -p '{"spec":{"containers":[{"name":"kubernetes-serve-hostname","image":"new image"}]}}' - + # Update a container's image using a JSON patch with positional arrays oc patch pod valid-pod --type='json' -p='[{"op": "replace", "path": "/spec/containers/0/image", "value":"new image"}]' - + # Update a deployment's replicas through the 'scale' subresource using a merge patch oc patch deployment nginx-deployment --subresource='scale' --type='merge' -p '{"spec":{"replicas":2}}' ---- @@ -2106,7 +2075,7 @@ Add a role to users or service accounts for the current project ---- # Add the 'view' role to user1 for the current project oc policy add-role-to-user view user1 - + # Add the 'edit' role to serviceaccount1 for the current project oc policy add-role-to-user edit -z serviceaccount1 ---- @@ -2122,13 +2091,13 @@ Check which service account can create a pod # Check whether service accounts sa1 and sa2 can admit a pod with a template pod spec specified in my_resource.yaml # Service Account specified in myresource.yaml file is ignored oc policy scc-review -z sa1,sa2 -f my_resource.yaml - + # Check whether service accounts system:serviceaccount:bob:default can admit a pod with a template pod spec specified in my_resource.yaml oc policy scc-review -z system:serviceaccount:bob:default -f my_resource.yaml - + # Check whether the service account specified in my_resource_with_sa.yaml can admit the pod oc policy scc-review -f my_resource_with_sa.yaml - + # Check whether the default service account can admit the pod; default is taken since no service account is defined in myresource_with_no_sa.yaml oc policy scc-review -f myresource_with_no_sa.yaml ---- @@ -2143,10 +2112,10 @@ Check whether a user or a service account can create a pod ---- # Check whether user bob can create a pod specified in myresource.yaml oc policy scc-subject-review -u bob -f myresource.yaml - + # Check whether user bob who belongs to projectAdmin group can create a pod specified in myresource.yaml oc policy scc-subject-review -u bob -g projectAdmin -f myresource.yaml - + # Check whether a service account specified in the pod template spec in myresourcewithsa.yaml can create the pod oc policy scc-subject-review -f myresourcewithsa.yaml ---- @@ -2161,22 +2130,22 @@ Forward one or more local ports to a pod ---- # Listen on ports 5000 and 6000 locally, forwarding data to/from ports 5000 and 6000 in the pod oc port-forward pod/mypod 5000 6000 - + # Listen 
on ports 5000 and 6000 locally, forwarding data to/from ports 5000 and 6000 in a pod selected by the deployment oc port-forward deployment/mydeployment 5000 6000 - + # Listen on port 8443 locally, forwarding to the targetPort of the service's port named "https" in a pod selected by the service oc port-forward service/myservice 8443:https - + # Listen on port 8888 locally, forwarding to 5000 in the pod oc port-forward pod/mypod 8888:5000 - + # Listen on port 8888 on all addresses, forwarding to 5000 in the pod oc port-forward --address 0.0.0.0 pod/mypod 8888:5000 - + # Listen on port 8888 on localhost and selected IP, forwarding to 5000 in the pod oc port-forward --address localhost,10.19.21.23 pod/mypod 8888:5000 - + # Listen on a random port locally, forwarding to 5000 in the pod oc port-forward pod/mypod :5000 ---- @@ -2191,22 +2160,22 @@ Process a template into list of resources ---- # Convert the template.json file into a resource list and pass to create oc process -f template.json | oc create -f - - + # Process a file locally instead of contacting the server oc process -f template.json --local -o yaml - + # Process template while passing a user-defined label oc process -f template.json -l name=mytemplate - + # Convert a stored template into a resource list oc process foo - + # Convert a stored template into a resource list by setting/overriding parameter values oc process foo PARM1=VALUE1 PARM2=VALUE2 - + # Convert a template stored in different namespace into a resource list oc process openshift//foo - + # Convert template.json into a resource list cat template.json | oc process -f - ---- @@ -2221,7 +2190,7 @@ Switch to another project ---- # Switch to the 'myapp' project oc project myapp - + # Display the project currently in use oc project ---- @@ -2248,22 +2217,22 @@ Run a proxy to the Kubernetes API server ---- # To proxy all of the Kubernetes API and nothing else oc proxy --api-prefix=/ - + # To proxy only part of the Kubernetes API and also some static files # You can get pods info with 'curl localhost:8001/api/v1/pods' oc proxy --www=/my/files --www-prefix=/static/ --api-prefix=/api/ - + # To proxy the entire Kubernetes API at a different root # You can get pods info with 'curl localhost:8001/custom/api/v1/pods' oc proxy --api-prefix=/custom/ - + # Run a proxy to the Kubernetes API server on port 8011, serving static content from ./local/www/ oc proxy --port=8011 --www=./local/www/ - + # Run a proxy to the Kubernetes API server on an arbitrary local port # The chosen port for the server will be output to stdout oc proxy --port=0 - + # Run a proxy to the Kubernetes API server, changing the API prefix to k8s-api # This makes e.g. 
the pods API available at localhost:8001/k8s-api/v1/pods/ oc proxy --api-prefix=/k8s-api @@ -2279,7 +2248,7 @@ Log in to the integrated registry ---- # Log in to the integrated registry oc registry login - + # Log in to different registry using BASIC auth credentials oc registry login --registry quay.io/myregistry --auth-basic=USER:PASS ---- @@ -2294,13 +2263,13 @@ Replace a resource by file name or stdin ---- # Replace a pod using the data in pod.json oc replace -f ./pod.json - + # Replace a pod based on the JSON passed into stdin cat pod.json | oc replace -f - - + # Update a single-container pod's image version (tag) to v4 oc get pod mypod -o yaml | sed 's/\(image: myimage\):.*$/\1:v4/' | oc replace -f - - + # Force replace, delete and then re-create the resource oc replace --force -f ./pod.json ---- @@ -2315,16 +2284,16 @@ Revert part of an application back to a previous deployment ---- # Perform a rollback to the last successfully completed deployment for a deployment config oc rollback frontend - + # See what a rollback to version 3 will look like, but do not perform the rollback oc rollback frontend --to-version=3 --dry-run - + # Perform a rollback to a specific deployment oc rollback frontend-2 - + # Perform the rollback manually by piping the JSON of the new config back to oc oc rollback frontend -o json | oc replace dc/frontend -f - - + # Print the updated deployment configuration in JSON format instead of performing the rollback oc rollback frontend -o json ---- @@ -2351,7 +2320,7 @@ View rollout history ---- # View the rollout history of a deployment oc rollout history dc/nginx - + # View the details of deployment revision 3 oc rollout history dc/nginx --revision=3 ---- @@ -2366,7 +2335,7 @@ Start a new rollout for a deployment config with the latest state from its trigg ---- # Start a new rollout based on the latest images defined in the image change triggers oc rollout latest dc/nginx - + # Print the rolled out deployment config oc rollout latest dc/nginx -o json ---- @@ -2393,12 +2362,15 @@ Restart a resource .Example usage [source,bash,options="nowrap"] ---- + # Restart all deployments in test-namespace namespace + oc rollout restart deployment -n test-namespace + # Restart a deployment oc rollout restart deployment/nginx - + # Restart a daemon set oc rollout restart daemonset/abc - + # Restart deployments with the app=nginx label oc rollout restart deployment --selector=app=nginx ---- @@ -2450,7 +2422,7 @@ Undo a previous rollout ---- # Roll back to the previous deployment oc rollout undo dc/nginx - + # Roll back to deployment revision 3. 
The replication controller for that version must exist oc rollout undo dc/nginx --to-revision=3 ---- @@ -2465,17 +2437,17 @@ Start a shell session in a container ---- # Open a shell session on the first container in pod 'foo' oc rsh foo - + # Open a shell session on the first container in pod 'foo' and namespace 'bar' # (Note that oc client specific arguments must come before the resource name and its arguments) oc rsh -n bar foo - + # Run the command 'cat /etc/resolv.conf' inside pod 'foo' oc rsh foo cat /etc/resolv.conf - + # See the configuration of your internal registry oc rsh dc/docker-registry cat config.yml - + # Open a shell session on the container named 'index' inside a pod of your job oc rsh -c index job/scheduled ---- @@ -2490,7 +2462,7 @@ Copy files between a local file system and a pod ---- # Synchronize a local directory with a pod directory oc rsync ./local/dir/ POD:/remote/dir - + # Synchronize a pod directory with a local directory oc rsync POD:/remote/dir/ ./local/dir ---- @@ -2505,28 +2477,28 @@ Run a particular image on the cluster ---- # Start a nginx pod oc run nginx --image=nginx - + # Start a hazelcast pod and let the container expose port 5701 oc run hazelcast --image=hazelcast/hazelcast --port=5701 - + # Start a hazelcast pod and set environment variables "DNS_DOMAIN=cluster" and "POD_NAMESPACE=default" in the container oc run hazelcast --image=hazelcast/hazelcast --env="DNS_DOMAIN=cluster" --env="POD_NAMESPACE=default" - + # Start a hazelcast pod and set labels "app=hazelcast" and "env=prod" in the container oc run hazelcast --image=hazelcast/hazelcast --labels="app=hazelcast,env=prod" - + # Dry run; print the corresponding API objects without creating them oc run nginx --image=nginx --dry-run=client - + # Start a nginx pod, but overload the spec with a partial set of values parsed from JSON oc run nginx --image=nginx --overrides='{ "apiVersion": "v1", "spec": { ... } }' - + # Start a busybox pod and keep it in the foreground, don't restart it if it exits oc run -i -t busybox --image=busybox --restart=Never - + # Start the nginx pod using the default command, but use custom arguments (arg1 .. argN) for that command oc run nginx --image=nginx -- ... - + # Start the nginx pod using a different command and custom arguments oc run nginx --image=nginx --command -- ... 
---- @@ -2541,16 +2513,16 @@ Set a new size for a deployment, replica set, or replication controller ---- # Scale a replica set named 'foo' to 3 oc scale --replicas=3 rs/foo - + # Scale a resource identified by type and name specified in "foo.yaml" to 3 oc scale --replicas=3 -f foo.yaml - + # If the deployment named mysql's current size is 2, scale mysql to 3 oc scale --current-replicas=2 --replicas=3 deployment/mysql - + # Scale multiple replication controllers oc scale --replicas=5 rc/example1 rc/example2 rc/example3 - + # Scale stateful set named 'web' to 3 oc scale --replicas=3 statefulset/web ---- @@ -2565,7 +2537,7 @@ Link secrets to a service account ---- # Add an image pull secret to a service account to automatically use it for pulling pod images oc secrets link serviceaccount-name pull-secret --for=pull - + # Add an image pull secret to a service account to automatically use it for both pulling and pushing build images oc secrets link builder builder-image-secret --for=pull,mount ---- @@ -2592,10 +2564,10 @@ Update a build hook on a build config ---- # Clear post-commit hook on a build config oc set build-hook bc/mybuild --post-commit --remove - + # Set the post-commit hook to execute a test suite using a new entrypoint oc set build-hook bc/mybuild --post-commit --command -- /bin/bash -c /var/lib/test-image.sh - + # Set the post-commit hook to execute a shell script oc set build-hook bc/mybuild --post-commit --script="/var/lib/test-image.sh param1 param2 && /var/lib/done.sh" ---- @@ -2610,13 +2582,13 @@ Update a build secret on a build config ---- # Clear the push secret on a build config oc set build-secret --push --remove bc/mybuild - + # Set the pull secret on a build config oc set build-secret --pull bc/mybuild mysecret - + # Set the push and pull secret on a build config oc set build-secret --push --pull bc/mybuild mysecret - + # Set the source secret on a set of build configs matching a selector oc set build-secret --source -l app=myapp gitsecret ---- @@ -2631,13 +2603,13 @@ Update the data within a config map or secret ---- # Set the 'password' key of a secret oc set data secret/foo password=this_is_secret - + # Remove the 'password' key from a secret oc set data secret/foo password- - + # Update the 'haproxy.conf' key of a config map from a file on disk oc set data configmap/bar --from-file=../haproxy.conf - + # Update a secret with the contents of a directory, one key per file oc set data secret/foo --from-file=secret-dir ---- @@ -2652,11 +2624,11 @@ Update a deployment hook on a deployment config ---- # Clear pre and post hooks on a deployment config oc set deployment-hook dc/myapp --remove --pre --post - + # Set the pre deployment hook to execute a db migration command for an application # using the data volume from the application oc set deployment-hook dc/myapp --pre --volumes=data -- /var/lib/migrate-db.sh - + # Set a mid deployment hook along with additional environment variables oc set deployment-hook dc/myapp --mid --volumes=data -e VAR1=value1 -e VAR2=value2 -- /var/lib/prepare-deploy.sh ---- @@ -2671,32 +2643,32 @@ Update environment variables on a pod template ---- # Update deployment config 'myapp' with a new environment variable oc set env dc/myapp STORAGE_DIR=/local - + # List the environment variables defined on a build config 'sample-build' oc set env bc/sample-build --list - + # List the environment variables defined on all pods oc set env pods --all --list - + # Output modified build config in YAML oc set env bc/sample-build STORAGE_DIR=/data -o yaml - 
+ # Update all containers in all replication controllers in the project to have ENV=prod oc set env rc --all ENV=prod - + # Import environment from a secret oc set env --from=secret/mysecret dc/myapp - + # Import environment from a config map with a prefix oc set env --from=configmap/myconfigmap --prefix=MYSQL_ dc/myapp - + # Remove the environment variable ENV from container 'c1' in all deployment configs oc set env dc --all --containers="c1" ENV- - + # Remove the environment variable ENV from a deployment config definition on disk and # update the deployment config on the server oc set env -f dc.json ENV- - + # Set some of the local shell environment into a deployment config on the server env | grep RAILS_ | oc set env -e - dc/myapp ---- @@ -2711,16 +2683,16 @@ Update the image of a pod template ---- # Set a deployment config's nginx container image to 'nginx:1.9.1', and its busybox container image to 'busybox'. oc set image dc/nginx busybox=busybox nginx=nginx:1.9.1 - + # Set a deployment config's app container image to the image referenced by the imagestream tag 'openshift/ruby:2.3'. oc set image dc/myapp app=openshift/ruby:2.3 --source=imagestreamtag - + # Update all deployments' and rc's nginx container's image to 'nginx:1.9.1' oc set image deployments,rc nginx=nginx:1.9.1 --all - + # Update image of all containers of daemonset abc to 'nginx:1.9.1' oc set image daemonset abc *=nginx:1.9.1 - + # Print result (in YAML format) of updating nginx container image from local file, without hitting the server oc set image -f path/to/file.yaml nginx=nginx:1.9.1 --local -o yaml ---- @@ -2735,19 +2707,19 @@ Change how images are resolved when deploying applications ---- # Print all of the image streams and whether they resolve local names oc set image-lookup - + # Use local name lookup on image stream mysql oc set image-lookup mysql - + # Force a deployment to use local name lookup oc set image-lookup deploy/mysql - + # Show the current status of the deployment lookup oc set image-lookup deploy/mysql --list - + # Disable local name lookup on image stream mysql oc set image-lookup mysql --enabled=false - + # Set local name lookup on all image streams oc set image-lookup --all ---- @@ -2762,22 +2734,22 @@ Update a probe on a pod template ---- # Clear both readiness and liveness probes off all containers oc set probe dc/myapp --remove --readiness --liveness - + # Set an exec action as a liveness probe to run 'echo ok' oc set probe dc/myapp --liveness -- echo ok - + # Set a readiness probe to try to open a TCP socket on 3306 oc set probe rc/mysql --readiness --open-tcp=3306 - + # Set an HTTP startup probe for port 8080 and path /healthz over HTTP on the pod IP oc set probe dc/webapp --startup --get-url=http://:8080/healthz - + # Set an HTTP readiness probe for port 8080 and path /healthz over HTTP on the pod IP oc set probe dc/webapp --readiness --get-url=http://:8080/healthz - + # Set an HTTP readiness probe over HTTPS on 127.0.0.1 for a hostNetwork pod oc set probe dc/router --readiness --get-url=https://127.0.0.1:1936/stats - + # Set only the initial-delay-seconds field on all deployments oc set probe dc --all --readiness --initial-delay-seconds=30 ---- @@ -2792,13 +2764,13 @@ Update resource requests/limits on objects with pod templates ---- # Set a deployment's nginx container CPU limits to "200m" and memory to "512Mi" oc set resources deployment nginx -c=nginx --limits=cpu=200m,memory=512Mi - + # Set the resource request and limits for all containers in nginx oc set resources deployment nginx
--limits=cpu=200m,memory=512Mi --requests=cpu=100m,memory=256Mi - + # Remove the resource requests for resources on containers in nginx oc set resources deployment nginx --limits=cpu=0,memory=0 --requests=cpu=0,memory=0 - + # Print the result (in YAML format) of updating nginx container limits locally, without hitting the server oc set resources -f path/to/file.yaml --limits=cpu=200m,memory=512Mi --local -o yaml ---- @@ -2813,19 +2785,19 @@ Update the backends for a route ---- # Print the backends on the route 'web' oc set route-backends web - + # Set two backend services on route 'web' with 2/3rds of traffic going to 'a' oc set route-backends web a=2 b=1 - + # Increase the traffic percentage going to b by 10%% relative to a oc set route-backends web --adjust b=+10%% - + # Set traffic percentage going to b to 10%% of the traffic going to a oc set route-backends web --adjust b=10%% - + # Set weight of b to 10 oc set route-backends web --adjust b=10 - + # Set the weight to all backends to zero oc set route-backends web --zero ---- @@ -2853,7 +2825,7 @@ Update the service account of a resource ---- # Set deployment nginx-deployment's service account to serviceaccount1 oc set serviceaccount deployment nginx-deployment serviceaccount1 - + # Print the result (in YAML format) of updated nginx deployment with service account from a local file, without hitting the API server oc set sa -f nginx-deployment.yaml serviceaccount1 --local --dry-run -o yaml ---- @@ -2868,10 +2840,10 @@ Update the user, group, or service account in a role binding or cluster role bin ---- # Update a cluster role binding for serviceaccount1 oc set subject clusterrolebinding admin --serviceaccount=namespace:serviceaccount1 - + # Update a role binding for user1, user2, and group1 oc set subject rolebinding admin --user=user1 --user=user2 --group=group1 - + # Print the result (in YAML format) of updating role binding subjects locally, without hitting the server oc create rolebinding admin --role=admin --user=admin -o yaml --dry-run | oc set subject --local -f - --user=foo -o yaml ---- @@ -2886,26 +2858,26 @@ Update the triggers on one or more objects ---- # Print the triggers on the deployment config 'myapp' oc set triggers dc/myapp - + # Set all triggers to manual oc set triggers dc/myapp --manual - + # Enable all automatic triggers oc set triggers dc/myapp --auto - + # Reset the GitHub webhook on a build to a new, generated secret oc set triggers bc/webapp --from-github oc set triggers bc/webapp --from-webhook - + # Remove all triggers oc set triggers bc/webapp --remove-all - + # Stop triggering on config change oc set triggers dc/myapp --from-config --remove - + # Add an image trigger to a build config oc set triggers bc/webapp --from-image=namespace1/image:latest - + # Add an image trigger to a stateful set on the main container oc set triggers statefulset/db --from-image=namespace1/image:latest -c main ---- @@ -2920,27 +2892,27 @@ Update volumes on a pod template ---- # List volumes defined on all deployment configs in the current project oc set volume dc --all - + # Add a new empty dir volume to deployment config (dc) 'myapp' mounted under # /var/lib/myapp oc set volume dc/myapp --add --mount-path=/var/lib/myapp - + # Use an existing persistent volume claim (PVC) to overwrite an existing volume 'v1' oc set volume dc/myapp --add --name=v1 -t pvc --claim-name=pvc1 --overwrite - + # Remove volume 'v1' from deployment config 'myapp' oc set volume dc/myapp --remove --name=v1 - + # Create a new persistent volume claim that 
overwrites an existing volume 'v1' oc set volume dc/myapp --add --name=v1 -t pvc --claim-size=1G --overwrite - + # Change the mount point for volume 'v1' to /data oc set volume dc/myapp --add --name=v1 -m /data --overwrite - + # Modify the deployment config by removing volume mount "v1" from container "c1" # (and by removing the volume "v1" if no other containers have volume mounts that reference it) oc set volume dc/myapp --remove --name=v1 --containers=c1 - + # Add new volume based on a more complex volume source (AWS EBS, GCE PD, # Ceph, Gluster, NFS, ISCSI, ...) oc set volume dc/myapp --add -m /data --source= @@ -2956,20 +2928,20 @@ Start a new build ---- # Starts build from build config "hello-world" oc start-build hello-world - + # Starts build from a previous build "hello-world-1" oc start-build --from-build=hello-world-1 - + # Use the contents of a directory as build input oc start-build hello-world --from-dir=src/ - + # Send the contents of a Git repository to the server from tag 'v2' oc start-build hello-world --from-repo=../hello-world --commit=v2 - + # Start a new build for build config "hello-world" and watch the logs until the build # completes or fails oc start-build hello-world --follow - + # Start a new build for build config "hello-world" and wait until the build completes. It # exits with a non-zero return code if the build fails oc start-build hello-world --wait @@ -2985,10 +2957,10 @@ Show an overview of the current project ---- # See an overview of the current project oc status - + # Export the overview of the current project in an SVG file oc status -o dot | dot -T svg -o project.svg - + # See an overview of the current project including details for any identified issues oc status --suggest ---- @@ -3003,19 +2975,19 @@ Tag existing images into image streams ---- # Tag the current image for the image stream 'openshift/ruby' and tag '2.0' into the image stream 'yourproject/ruby' with tag 'tip' oc tag openshift/ruby:2.0 yourproject/ruby:tip - + # Tag a specific image oc tag openshift/ruby@sha256:6b646fa6bf5e5e4c7fa41056c27910e679c03ebe7f93e361e6515a9da7e258cc yourproject/ruby:tip - + # Tag an external container image oc tag --source=docker openshift/origin-control-plane:latest yourproject/ruby:tip - + # Tag an external container image and request pullthrough for it oc tag --source=docker openshift/origin-control-plane:latest yourproject/ruby:tip --reference-policy=local - + # Tag an external container image and include the full manifest list oc tag --source=docker openshift/origin-control-plane:latest yourproject/ruby:tip --import-mode=PreserveOriginal - + # Remove the specified spec tag from an image stream oc tag openshift/origin-control-plane:latest -d ---- @@ -3030,10 +3002,10 @@ Print the client and server version information ---- # Print the OpenShift client, kube-apiserver, and openshift-apiserver version information for the current context oc version - + # Print the OpenShift client, kube-apiserver, and openshift-apiserver version numbers for the current context in JSON format oc version --output json - + # Print the OpenShift client version information for the current context oc version --client ---- @@ -3048,16 +3020,19 @@ Experimental: Wait for a specific condition on one or many resources ---- # Wait for the pod "busybox1" to contain the status condition of type "Ready" oc wait --for=condition=Ready pod/busybox1 - + # The default value of status condition is true; you can wait for other targets after an equal delimiter (compared after Unicode simple case
folding, which is a more general form of case-insensitivity) oc wait --for=condition=Ready=false pod/busybox1 - + # Wait for the pod "busybox1" to contain the status phase to be "Running" oc wait --for=jsonpath='{.status.phase}'=Running pod/busybox1 - + + # Wait for pod "busybox1" to be Ready + oc wait --for='jsonpath={.status.conditions[?(@.type=="Ready")].status}=True' pod/busybox1 + # Wait for the service "loadbalancer" to have ingress. oc wait --for=jsonpath='{.status.loadBalancer.ingress}' service/loadbalancer - + # Wait for the pod "busybox1" to be deleted, with a timeout of 60s, after having issued the "delete" command oc delete pod/busybox1 oc wait --for=delete pod/busybox1 --timeout=60s @@ -3074,3 +3049,5 @@ Return information about the current session # Display the currently authenticated user oc whoami ---- + +
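A short extension of the `oc whoami` block above may also be worth documenting. This is a hedged sketch, assuming the `-t` (print token) and `--show-server` flags of `oc whoami` are available in the target `oc` release:

[source,bash,options="nowrap"]
----
 # Display the token the current session is using, if a token is in use
 oc whoami -t

 # Display the URL of the API server for the current session
 oc whoami --show-server
----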