diff --git a/modules/microshift-oc-by-example-content-gen.adoc b/modules/microshift-oc-by-example-content-gen.adoc
index b998592cb7..aae63daafd 100644
--- a/modules/microshift-oc-by-example-content-gen.adoc
+++ b/modules/microshift-oc-by-example-content-gen.adoc
@@ -1920,7 +1920,7 @@ Start a shell session in a container
   # Run the command 'cat /etc/resolv.conf' inside pod 'foo'
   oc rsh foo cat /etc/resolv.conf
 
-  # See the configuration of your {product-registry}
+  # See the configuration of your internal registry
   oc rsh dc/docker-registry cat config.yml
 
   # Open a shell session on the container named 'index' inside a pod of your job
diff --git a/modules/oc-by-example-content.adoc b/modules/oc-by-example-content.adoc
index eb4db62ed1..21f2ef4f8b 100644
--- a/modules/oc-by-example-content.adoc
+++ b/modules/oc-by-example-content.adoc
@@ -17,19 +17,19 @@ Update the annotations on a resource
   # Update pod 'foo' with the annotation 'description' and the value 'my frontend'
   # If the same annotation is set multiple times, only the last value will be applied
   oc annotate pods foo description='my frontend'
-  
+
   # Update a pod identified by type and name in "pod.json"
   oc annotate -f pod.json description='my frontend'
-  
+
   # Update pod 'foo' with the annotation 'description' and the value 'my frontend running nginx', overwriting any existing value
   oc annotate --overwrite pods foo description='my frontend running nginx'
-  
+
   # Update all pods in the namespace
   oc annotate pods --all description='my frontend running nginx'
-  
+
   # Update pod 'foo' only if the resource is unchanged from version 1
   oc annotate pods foo description='my frontend running nginx' --resource-version=1
-  
+
   # Update pod 'foo' by removing an annotation named 'description' if it exists
   # Does not require the --overwrite flag
   oc annotate pods foo description-
@@ -45,19 +45,19 @@ Print the supported API resources on the server
 ----
   # Print the supported API resources
   oc api-resources
-  
+
   # Print the supported API resources with more information
   oc api-resources -o wide
-  
+
   # Print the supported API resources sorted by a column
   oc api-resources --sort-by=name
-  
+
   # Print the supported namespaced resources
   oc api-resources --namespaced=true
-  
+
   # Print the supported non-namespaced resources
   oc api-resources --namespaced=false
-  
+
   # Print the supported API resources with a specific APIGroup
   oc api-resources --api-group=rbac.authorization.k8s.io
 ----
@@ -84,20 +84,20 @@ Apply a configuration to a resource by file name or stdin
 ----
   # Apply the configuration in pod.json to a pod
   oc apply -f ./pod.json
-  
+
   # Apply resources from a directory containing kustomization.yaml - e.g. dir/kustomization.yaml
   oc apply -k dir/
-  
+
   # Apply the JSON passed into stdin to a pod
   cat pod.json | oc apply -f -
-  
+
   # Apply the configuration from all files that end with '.json' - i.e. expand wildcard characters in file names
   oc apply -f '*.json'
-  
+
   # Note: --prune is still in Alpha
   # Apply the configuration in manifest.yaml that matches label app=nginx and delete all other resources that are not in the file and match label app=nginx
   oc apply --prune -f manifest.yaml -l app=nginx
-  
+
   # Apply the configuration in manifest.yaml and delete all the other config maps that are not in the file
   oc apply --prune -f manifest.yaml --all --prune-allowlist=core/v1/ConfigMap
 ----
@@ -112,7 +112,7 @@ Edit latest last-applied-configuration annotations of a resource/object
 ----
   # Edit the last-applied-configuration annotations by type/name in YAML
   oc apply edit-last-applied deployment/nginx
-  
+
   # Edit the last-applied-configuration annotations by file in JSON
   oc apply edit-last-applied -f deploy.yaml -o json
 ----
@@ -127,10 +127,10 @@ Set the last-applied-configuration annotation on a live object to match the cont
 ----
   # Set the last-applied-configuration of a resource to match the contents of a file
   oc apply set-last-applied -f deploy.yaml
-  
+
   # Execute set-last-applied against each configuration file in a directory
   oc apply set-last-applied -f path/
-  
+
   # Set the last-applied-configuration of a resource to match the contents of a file; will create the annotation if it does not already exist
   oc apply set-last-applied -f deploy.yaml --create-annotation=true
 ----
@@ -145,7 +145,7 @@ View the latest last-applied-configuration annotations of a resource/object
 ----
   # View the last-applied-configuration annotations by type/name in YAML
   oc apply view-last-applied deployment/nginx
-  
+
   # View the last-applied-configuration annotations by file in JSON
   oc apply view-last-applied -f deploy.yaml -o json
 ----
@@ -161,14 +161,14 @@ Attach to a running container
   # Get output from running pod mypod; use the 'oc.kubernetes.io/default-container' annotation
   # for selecting the container to be attached or the first container in the pod will be chosen
   oc attach mypod
-  
+
   # Get output from ruby-container from pod mypod
   oc attach mypod -c ruby-container
-  
+
   # Switch to raw terminal mode; sends stdin to 'bash' in ruby-container from pod mypod
   # and sends stdout/stderr from 'bash' back to the client
   oc attach mypod -c ruby-container -i -t
-  
+
   # Get output from the first pod of a replica set named nginx
   oc attach rs/nginx
 ----
@@ -183,22 +183,22 @@ Check whether an action is allowed
 ----
   # Check to see if I can create pods in any namespace
   oc auth can-i create pods --all-namespaces
-  
+
   # Check to see if I can list deployments in my current namespace
   oc auth can-i list deployments.apps
-  
+
   # Check to see if I can do everything in my current namespace ("*" means all)
   oc auth can-i '*' '*'
-  
+
   # Check to see if I can get the job named "bar" in namespace "foo"
   oc auth can-i list jobs.batch/bar -n foo
-  
+
   # Check to see if I can read pod logs
   oc auth can-i get pods --subresource=log
-  
+
   # Check to see if I can access the URL /logs/
   oc auth can-i get /logs/
-  
+
   # List all allowed actions in namespace "foo"
   oc auth can-i --list --namespace=foo
 ----
@@ -225,7 +225,7 @@ Autoscale a deployment config, deployment, replica set, stateful set, or replica
 ----
   # Auto scale a deployment "foo", with the number of pods between 2 and 10, no target CPU utilization specified so a default autoscaling policy will be used
   oc autoscale deployment foo --min=2 --max=10
-  
+
   # Auto scale a replication controller "foo", with the number of pods between 1 and 5, target CPU utilization at 80%
   oc autoscale rc foo --max=5 --cpu-percent=80
 ----
@@
-240,16 +240,16 @@ Cancel running, pending, or new builds ---- # Cancel the build with the given name oc cancel-build ruby-build-2 - + # Cancel the named build and print the build logs oc cancel-build ruby-build-2 --dump-logs - + # Cancel the named build and create a new one with the same parameters oc cancel-build ruby-build-2 --restart - + # Cancel multiple builds oc cancel-build ruby-build-1 ruby-build-2 ruby-build-3 - + # Cancel all builds created from the 'ruby-build' build config that are in the 'new' state oc cancel-build bc/ruby-build --state=new ---- @@ -276,13 +276,13 @@ Dump relevant information for debugging and diagnosis ---- # Dump current cluster state to stdout oc cluster-info dump - + # Dump current cluster state to /path/to/cluster-state oc cluster-info dump --output-directory=/path/to/cluster-state - + # Dump all namespaces to stdout oc cluster-info dump --all-namespaces - + # Dump a set of namespaces to /path/to/cluster-state oc cluster-info dump --namespaces default,kube-system --output-directory=/path/to/cluster-state ---- @@ -303,8 +303,8 @@ Output shell completion code for the specified shell (bash, zsh, fish, or powers ## If oc is installed via homebrew, this should start working immediately ## If you've installed via other means, you may need add the completion to your completion directory oc completion bash > $(brew --prefix)/etc/bash_completion.d/oc - - + + # Installing bash completion on Linux ## If bash-completion is not installed on Linux, install the 'bash-completion' package ## via your distribution's package manager. @@ -317,18 +317,18 @@ Output shell completion code for the specified shell (bash, zsh, fish, or powers source '$HOME/.kube/completion.bash.inc' " >> $HOME/.bash_profile source $HOME/.bash_profile - + # Load the oc completion code for zsh[1] into the current shell source <(oc completion zsh) # Set the oc completion code for zsh[1] to autoload on startup oc completion zsh > "${fpath[1]}/_oc" - - + + # Load the oc completion code for fish[2] into the current shell oc completion fish | source # To load completions for each session, execute once: oc completion fish > ~/.config/fish/completions/oc.fish - + # Load the oc completion code for powershell into the current shell oc completion powershell | Out-String | Invoke-Expression # Set oc completion code for powershell to run on startup @@ -413,7 +413,7 @@ Describe one or many contexts ---- # List all the contexts in your kubeconfig file oc config get-contexts - + # Describe one context in your kubeconfig file oc config get-contexts my-context ---- @@ -452,13 +452,13 @@ Set an individual value in a kubeconfig file ---- # Set the server field on the my-cluster cluster to https://1.2.3.4 oc config set clusters.my-cluster.server https://1.2.3.4 - + # Set the certificate-authority-data field on the my-cluster cluster oc config set clusters.my-cluster.certificate-authority-data $(echo "cert_data_here" | base64 -i -) - + # Set the cluster field in the my-context context to my-cluster oc config set contexts.my-context.cluster my-cluster - + # Set the client-key-data field in the cluster-admin user using --set-raw-bytes option oc config set users.cluster-admin.client-key-data cert_data_here --set-raw-bytes=true ---- @@ -473,16 +473,16 @@ Set a cluster entry in kubeconfig ---- # Set only the server field on the e2e cluster entry without touching other values oc config set-cluster e2e --server=https://1.2.3.4 - + # Embed certificate authority data for the e2e cluster entry oc config set-cluster e2e 
--embed-certs --certificate-authority=~/.kube/e2e/kubernetes.ca.crt - + # Disable cert checking for the e2e cluster entry oc config set-cluster e2e --insecure-skip-tls-verify=true - + # Set custom TLS server name to use for validation for the e2e cluster entry oc config set-cluster e2e --tls-server-name=my-cluster-name - + # Set proxy url for the e2e cluster entry oc config set-cluster e2e --proxy-url=https://1.2.3.4 ---- @@ -510,31 +510,31 @@ Set a user entry in kubeconfig # Set only the "client-key" field on the "cluster-admin" # entry, without touching other values oc config set-credentials cluster-admin --client-key=~/.kube/admin.key - + # Set basic auth for the "cluster-admin" entry oc config set-credentials cluster-admin --username=admin --password=uXFGweU9l35qcif - + # Embed client certificate data in the "cluster-admin" entry oc config set-credentials cluster-admin --client-certificate=~/.kube/admin.crt --embed-certs=true - + # Enable the Google Compute Platform auth provider for the "cluster-admin" entry oc config set-credentials cluster-admin --auth-provider=gcp - + # Enable the OpenID Connect auth provider for the "cluster-admin" entry with additional args oc config set-credentials cluster-admin --auth-provider=oidc --auth-provider-arg=client-id=foo --auth-provider-arg=client-secret=bar - + # Remove the "client-secret" config value for the OpenID Connect auth provider for the "cluster-admin" entry oc config set-credentials cluster-admin --auth-provider=oidc --auth-provider-arg=client-secret- - + # Enable new exec auth plugin for the "cluster-admin" entry oc config set-credentials cluster-admin --exec-command=/path/to/the/executable --exec-api-version=client.authentication.k8s.io/v1beta1 - + # Define new exec auth plugin args for the "cluster-admin" entry oc config set-credentials cluster-admin --exec-arg=arg1 --exec-arg=arg2 - + # Create or update exec auth plugin environment variables for the "cluster-admin" entry oc config set-credentials cluster-admin --exec-env=key1=val1 --exec-env=key2=val2 - + # Remove exec auth plugin environment variables for the "cluster-admin" entry oc config set-credentials cluster-admin --exec-env=var-to-remove- ---- @@ -549,7 +549,7 @@ Unset an individual value in a kubeconfig file ---- # Unset the current-context oc config unset current-context - + # Unset namespace in foo context oc config unset contexts.foo.namespace ---- @@ -576,10 +576,10 @@ Display merged kubeconfig settings or a specified kubeconfig file ---- # Show merged kubeconfig settings oc config view - + # Show merged kubeconfig settings and raw certificate data and exposed secrets oc config view --raw - + # Get the password for the e2e user oc config view -o jsonpath='{.users[?(@.name == "e2e")].user.password}' ---- @@ -598,22 +598,22 @@ Copy files and directories to and from containers # # For advanced use cases, such as symlinks, wildcard expansion or # file mode preservation, consider using 'oc exec'. 
- + # Copy /tmp/foo local file to /tmp/bar in a remote pod in namespace tar cf - /tmp/foo | oc exec -i -n -- tar xf - -C /tmp/bar - + # Copy /tmp/foo from a remote pod to /tmp/bar locally oc exec -n -- tar cf - /tmp/foo | tar xf - -C /tmp/bar - + # Copy /tmp/foo_dir local directory to /tmp/bar_dir in a remote pod in the default namespace oc cp /tmp/foo_dir :/tmp/bar_dir - + # Copy /tmp/foo local file to /tmp/bar in a remote pod in a specific container oc cp /tmp/foo :/tmp/bar -c - + # Copy /tmp/foo local file to /tmp/bar in a remote pod in namespace oc cp /tmp/foo /:/tmp/bar - + # Copy /tmp/foo from a remote pod to /tmp/bar locally oc cp /:/tmp/foo /tmp/bar ---- @@ -628,10 +628,10 @@ Create a resource from a file or from stdin ---- # Create a pod using the data in pod.json oc create -f ./pod.json - + # Create a pod based on the JSON passed into stdin cat pod.json | oc create -f - - + # Edit the data in registry.yaml in JSON then create the resource using the edited data oc create -f registry.yaml --edit -o json ---- @@ -670,19 +670,19 @@ Create a cluster role ---- # Create a cluster role named "pod-reader" that allows user to perform "get", "watch" and "list" on pods oc create clusterrole pod-reader --verb=get,list,watch --resource=pods - + # Create a cluster role named "pod-reader" with ResourceName specified oc create clusterrole pod-reader --verb=get --resource=pods --resource-name=readablepod --resource-name=anotherpod - + # Create a cluster role named "foo" with API Group specified oc create clusterrole foo --verb=get,list,watch --resource=rs.apps - + # Create a cluster role named "foo" with SubResource specified oc create clusterrole foo --verb=get,list,watch --resource=pods,pods/status - + # Create a cluster role name "foo" with NonResourceURL specified oc create clusterrole "foo" --verb=get --non-resource-url=/logs/* - + # Create a cluster role name "monitoring" with AggregationRule specified oc create clusterrole monitoring --aggregation-rule="rbac.example.com/aggregate-to-monitoring=true" ---- @@ -709,16 +709,16 @@ Create a config map from a local file, directory or literal value ---- # Create a new config map named my-config based on folder bar oc create configmap my-config --from-file=path/to/bar - + # Create a new config map named my-config with specified keys instead of file basenames on disk oc create configmap my-config --from-file=key1=/path/to/bar/file1.txt --from-file=key2=/path/to/bar/file2.txt - + # Create a new config map named my-config with key1=config1 and key2=config2 oc create configmap my-config --from-literal=key1=config1 --from-literal=key2=config2 - + # Create a new config map named my-config from the key=value pairs in the file oc create configmap my-config --from-file=path/to/bar - + # Create a new config map named my-config from an env file oc create configmap my-config --from-env-file=path/to/foo.env --from-env-file=path/to/bar.env ---- @@ -733,7 +733,7 @@ Create a cron job with the specified name ---- # Create a cron job oc create cronjob my-job --image=busybox --schedule="*/1 * * * *" - + # Create a cron job with a command oc create cronjob my-job --image=busybox --schedule="*/1 * * * *" -- date ---- @@ -748,13 +748,13 @@ Create a deployment with the specified name ---- # Create a deployment named my-dep that runs the busybox image oc create deployment my-dep --image=busybox - + # Create a deployment with a command oc create deployment my-dep --image=busybox -- date - + # Create a deployment named my-dep that runs the nginx image with 3 replicas oc 
create deployment my-dep --image=nginx --replicas=3 - + # Create a deployment named my-dep that runs the busybox image and expose port 5701 oc create deployment my-dep --image=busybox --port=5701 ---- @@ -818,34 +818,34 @@ Create an ingress with the specified name # Create a single ingress called 'simple' that directs requests to foo.com/bar to svc # svc1:8080 with a tls secret "my-cert" oc create ingress simple --rule="foo.com/bar=svc1:8080,tls=my-cert" - + # Create a catch all ingress of "/path" pointing to service svc:port and Ingress Class as "otheringress" oc create ingress catch-all --class=otheringress --rule="/path=svc:port" - + # Create an ingress with two annotations: ingress.annotation1 and ingress.annotations2 oc create ingress annotated --class=default --rule="foo.com/bar=svc:port" \ --annotation ingress.annotation1=foo \ --annotation ingress.annotation2=bla - + # Create an ingress with the same host and multiple paths oc create ingress multipath --class=default \ --rule="foo.com/=svc:port" \ --rule="foo.com/admin/=svcadmin:portadmin" - + # Create an ingress with multiple hosts and the pathType as Prefix oc create ingress ingress1 --class=default \ --rule="foo.com/path*=svc:8080" \ --rule="bar.com/admin*=svc2:http" - + # Create an ingress with TLS enabled using the default ingress certificate and different path types oc create ingress ingtls --class=default \ --rule="foo.com/=svc:https,tls" \ --rule="foo.com/path/subpath*=othersvc:8080" - + # Create an ingress with TLS enabled using a specific secret and pathType as Prefix oc create ingress ingsecret --class=default \ --rule="foo.com/*=svc:8080,tls=secret1" - + # Create an ingress with a default backend oc create ingress ingdefault --class=default \ --default-backend=defaultsvc:http \ @@ -862,10 +862,10 @@ Create a job with the specified name ---- # Create a job oc create job my-job --image=busybox - + # Create a job with a command oc create job my-job --image=busybox -- date - + # Create a job from a cron job named "a-cronjob" oc create job test-job --from=cronjob/a-cronjob ---- @@ -893,7 +893,7 @@ Create a pod disruption budget with the specified name # Create a pod disruption budget named my-pdb that will select all pods with the app=rails label # and require at least one of them being available at any point in time oc create poddisruptionbudget my-pdb --selector=app=rails --min-available=1 - + # Create a pod disruption budget named my-pdb that will select all pods with the app=nginx label # and require at least half of the pods selected to be available at any point in time oc create pdb my-pdb --selector=app=nginx --min-available=50% @@ -909,10 +909,10 @@ Create a priority class with the specified name ---- # Create a priority class named high-priority oc create priorityclass high-priority --value=1000 --description="high priority" - + # Create a priority class named default-priority that is considered as the global default priority oc create priorityclass default-priority --value=1000 --global-default=true --description="default priority" - + # Create a priority class named high-priority that cannot preempt pods with lower priority oc create priorityclass high-priority --value=1000 --description="high priority" --preemption-policy="Never" ---- @@ -927,7 +927,7 @@ Create a quota with the specified name ---- # Create a new resource quota named my-quota oc create quota my-quota --hard=cpu=1,memory=1G,pods=2,services=3,replicationcontrollers=2,resourcequotas=1,secrets=5,persistentvolumeclaims=10 - + # Create a new resource 
quota named best-effort oc create quota best-effort --hard=pods=100 --scopes=BestEffort ---- @@ -942,13 +942,13 @@ Create a role with single rule ---- # Create a role named "pod-reader" that allows user to perform "get", "watch" and "list" on pods oc create role pod-reader --verb=get --verb=list --verb=watch --resource=pods - + # Create a role named "pod-reader" with ResourceName specified oc create role pod-reader --verb=get --resource=pods --resource-name=readablepod --resource-name=anotherpod - + # Create a role named "foo" with API Group specified oc create role foo --verb=get,list,watch --resource=rs.apps - + # Create a role named "foo" with SubResource specified oc create role foo --verb=get,list,watch --resource=pods,pods/status ---- @@ -975,7 +975,7 @@ Create a route that uses edge TLS termination ---- # Create an edge route named "my-route" that exposes the frontend service oc create route edge my-route --service=frontend - + # Create an edge route that exposes the frontend service and specify a path # If the route name is omitted, the service name will be used oc create route edge --service=frontend --path /assets @@ -991,7 +991,7 @@ Create a route that uses passthrough TLS termination ---- # Create a passthrough route named "my-route" that exposes the frontend service oc create route passthrough my-route --service=frontend - + # Create a passthrough route that exposes the frontend service and specify # a host name. If the route name is omitted, the service name will be used oc create route passthrough --service=frontend --hostname=www.example.com @@ -1007,7 +1007,7 @@ Create a route that uses reencrypt TLS termination ---- # Create a route named "my-route" that exposes the frontend service oc create route reencrypt my-route --service=frontend --dest-ca-cert cert.cert - + # Create a reencrypt route that exposes the frontend service, letting the # route name default to the service name and the destination CA certificate # default to the service CA @@ -1024,7 +1024,7 @@ Create a secret for use with a Docker registry ---- # If you don't already have a .dockercfg file, you can create a dockercfg secret directly by using: oc create secret docker-registry my-secret --docker-server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL - + # Create a new secret named my-secret from ~/.docker/config.json oc create secret docker-registry my-secret --from-file=.dockerconfigjson=path/to/.docker/config.json ---- @@ -1039,16 +1039,16 @@ Create a secret from a local file, directory, or literal value ---- # Create a new secret named my-secret with keys for each file in folder bar oc create secret generic my-secret --from-file=path/to/bar - + # Create a new secret named my-secret with specified keys instead of names on disk oc create secret generic my-secret --from-file=ssh-privatekey=path/to/id_rsa --from-file=ssh-publickey=path/to/id_rsa.pub - + # Create a new secret named my-secret with key1=supersecret and key2=topsecret oc create secret generic my-secret --from-literal=key1=supersecret --from-literal=key2=topsecret - + # Create a new secret named my-secret using a combination of a file and a literal oc create secret generic my-secret --from-file=ssh-privatekey=path/to/id_rsa --from-literal=passphrase=topsecret - + # Create a new secret named my-secret from env files oc create secret generic my-secret --from-env-file=path/to/foo.env --from-env-file=path/to/bar.env ---- @@ -1075,7 +1075,7 @@ Create a ClusterIP service ---- # Create a 
new ClusterIP service named my-cs oc create service clusterip my-cs --tcp=5678:8080 - + # Create a new ClusterIP service named my-cs (in headless mode) oc create service clusterip my-cs --clusterip="None" ---- @@ -1138,19 +1138,19 @@ Request a service account token ---- # Request a token to authenticate to the kube-apiserver as the service account "myapp" in the current namespace oc create token myapp - + # Request a token for a service account in a custom namespace oc create token myapp --namespace myns - + # Request a token with a custom expiration oc create token myapp --duration 10m - + # Request a token with a custom audience oc create token myapp --audience https://example.com - + # Request a token bound to an instance of a Secret object oc create token myapp --bound-object-kind Secret --bound-object-name mysecret - + # Request a token bound to an instance of a Secret object with a specific uid oc create token myapp --bound-object-kind Secret --bound-object-name mysecret --bound-object-uid 0d4691ed-659b-4935-a832-355f77ee47cc ---- @@ -1189,25 +1189,25 @@ Launch a new instance of a pod for debugging ---- # Start a shell session into a pod using the OpenShift tools image oc debug - + # Debug a currently running deployment by creating a new pod oc debug deploy/test - + # Debug a node as an administrator oc debug node/master-1 - + # Launch a shell in a pod using the provided image stream tag oc debug istag/mysql:latest -n openshift - + # Test running a job as a non-root user oc debug job/test --as-user=1000000 - + # Debug a specific failing container by running the env command in the 'second' container oc debug daemonset/test -c second -- /bin/env - + # See the pod that would be created to debug oc debug mypod-9xbc -o yaml - + # Debug a resource but launch the debug pod in another namespace # Note: Not all resources can be debugged using --to-namespace without modification. For example, # volumes and service accounts are namespace-dependent. Add '-o yaml' to output the debug pod definition @@ -1225,28 +1225,28 @@ Delete resources by file names, stdin, resources and names, or by resources and ---- # Delete a pod using the type and name specified in pod.json oc delete -f ./pod.json - + # Delete resources from a directory containing kustomization.yaml - e.g. dir/kustomization.yaml oc delete -k dir - + # Delete resources from all files that end with '.json' - i.e. 
expand wildcard characters in file names oc delete -f '*.json' - + # Delete a pod based on the type and name in the JSON passed into stdin cat pod.json | oc delete -f - - + # Delete pods and services with same names "baz" and "foo" oc delete pod,service baz foo - + # Delete pods and services with label name=myLabel oc delete pods,services -l name=myLabel - + # Delete a pod with minimal delay oc delete pod foo --now - + # Force delete a pod on a dead node oc delete pod foo --force - + # Delete all pods oc delete pods --all ---- @@ -1261,19 +1261,19 @@ Show details of a specific resource or group of resources ---- # Describe a node oc describe nodes kubernetes-node-emt8.c.myproject.internal - + # Describe a pod oc describe pods/nginx - + # Describe a pod identified by type and name in "pod.json" oc describe -f pod.json - + # Describe all pods oc describe pods - + # Describe pods by label name=myLabel oc describe po -l name=myLabel - + # Describe all pods managed by the 'frontend' replication controller # (rc-created pods get the name of the rc as a prefix in the pod name) oc describe pods frontend @@ -1289,7 +1289,7 @@ Diff the live version against a would-be applied version ---- # Diff resources included in pod.json oc diff -f pod.json - + # Diff file read from stdin cat service.yaml | oc diff -f - ---- @@ -1304,16 +1304,16 @@ Edit a resource on the server ---- # Edit the service named 'registry' oc edit svc/registry - + # Use an alternative editor KUBE_EDITOR="nano" oc edit svc/registry - + # Edit the job 'myjob' in JSON using the v1 API format oc edit job.v1.batch/myjob -o json - + # Edit the deployment 'mydeployment' in YAML and save the modified config in its annotation oc edit deployment/mydeployment -o yaml --save-config - + # Edit the deployment/mydeployment's status subresource oc edit deployment mydeployment --subresource='status' ---- @@ -1328,16 +1328,16 @@ List events ---- # List recent events in the default namespace. oc events - + # List recent events in all namespaces. oc events --all-namespaces - + # List recent events for the specified pod, then wait for more events and list them as they arrive. oc events --for pod/web-pod-13je7 --watch - + # List recent events in given format. Supported ones, apart from default, are json and yaml. oc events -oyaml - + # List recent only events in given event types oc events --types=Warning,Normal ---- @@ -1352,24 +1352,24 @@ Execute a command in a container ---- # Get output from running the 'date' command from pod mypod, using the first container by default oc exec mypod -- date - + # Get output from running the 'date' command in ruby-container from pod mypod oc exec mypod -c ruby-container -- date - + # Switch to raw terminal mode; sends stdin to 'bash' in ruby-container from pod mypod # and sends stdout/stderr from 'bash' back to the client oc exec mypod -c ruby-container -i -t -- bash -il - + # List contents of /usr from the first container of pod mypod and sort by modification time # If the command you want to execute in the pod has any flags in common (e.g. 
-i), # you must use two dashes (--) to separate your command's flags/arguments # Also note, do not surround your command and its flags/arguments with quotes # unless that is how you would execute it normally (i.e., do ls -t /usr, not "ls -t /usr") oc exec mypod -i -t -- ls -t /usr - + # Get output from running 'date' command from the first pod of the deployment mydeployment, using the first container by default oc exec deploy/mydeployment -- date - + # Get output from running 'date' command from the first pod of the service myservice, using the first container by default oc exec svc/myservice -- date ---- @@ -1384,7 +1384,7 @@ Get documentation for a resource ---- # Get the documentation of the resource and its fields oc explain pods - + # Get the documentation of a specific field of a resource oc explain pods.spec.containers ---- @@ -1399,20 +1399,20 @@ Expose a replicated application as a service or route ---- # Create a route based on service nginx. The new route will reuse nginx's labels oc expose service nginx - + # Create a route and specify your own label and route name oc expose service nginx -l name=myroute --name=fromdowntown - + # Create a route and specify a host name oc expose service nginx --hostname=www.example.com - + # Create a route with a wildcard oc expose service nginx --hostname=x.example.com --wildcard-policy=Subdomain # This would be equivalent to *.example.com. NOTE: only hosts are matched by the wildcard; subdomains would not be included - + # Expose a deployment configuration as a service and use the specified port oc expose dc ruby-hello-world --port=8080 - + # Expose a service as a route in the specified path oc expose service nginx --path=/nginx ---- @@ -1427,13 +1427,13 @@ Extract secrets or config maps to disk ---- # Extract the secret "test" to the current directory oc extract secret/test - + # Extract the config map "nginx" to the /tmp directory oc extract configmap/nginx --to=/tmp - + # Extract the config map "nginx" to STDOUT oc extract configmap/nginx --to=- - + # Extract only the key "nginx.conf" from config map "nginx" to the /tmp directory oc extract configmap/nginx --to=/tmp --keys=nginx.conf ---- @@ -1448,37 +1448,37 @@ Display one or many resources ---- # List all pods in ps output format oc get pods - + # List all pods in ps output format with more information (such as node name) oc get pods -o wide - + # List a single replication controller with specified NAME in ps output format oc get replicationcontroller web - + # List deployments in JSON output format, in the "v1" version of the "apps" API group oc get deployments.v1.apps -o json - + # List a single pod in JSON output format oc get -o json pod web-pod-13je7 - + # List a pod identified by type and name specified in "pod.yaml" in JSON output format oc get -f pod.yaml -o json - + # List resources from a directory with kustomization.yaml - e.g. dir/kustomization.yaml oc get -k dir/ - + # Return only the phase value of the specified pod oc get -o template pod/web-pod-13je7 --template={{.status.phase}} - + # List resource information in custom columns oc get pod test-pod -o custom-columns=CONTAINER:.spec.containers[0].name,IMAGE:.spec.containers[0].image - + # List all replication controllers and services together in ps output format oc get rc,services - + # List one or more resources by their type and names oc get rc/web service/frontend pods/web-pod-13je7 - + # List status subresource for a single pod. 
oc get pod web-pod-13je7 --subresource status ---- @@ -1505,24 +1505,24 @@ Add layers to images and push them to a registry ---- # Remove the entrypoint on the mysql:latest image oc image append --from mysql:latest --to myregistry.com/myimage:latest --image '{"Entrypoint":null}' - + # Add a new layer to the image oc image append --from mysql:latest --to myregistry.com/myimage:latest layer.tar.gz - + # Add a new layer to the image and store the result on disk # This results in $(pwd)/v2/mysql/blobs,manifests oc image append --from mysql:latest --to file://mysql:local layer.tar.gz - + # Add a new layer to the image and store the result on disk in a designated directory # This will result in $(pwd)/mysql-local/v2/mysql/blobs,manifests oc image append --from mysql:latest --to file://mysql:local --dir mysql-local layer.tar.gz - + # Add a new layer to an image that is stored on disk (~/mysql-local/v2/image exists) oc image append --from-dir ~/mysql-local --to myregistry.com/myimage:latest layer.tar.gz - + # Add a new layer to an image that was mirrored to the current directory on disk ($(pwd)/v2/image exists) oc image append --from-dir v2 --to myregistry.com/myimage:latest layer.tar.gz - + # Add a new layer to a multi-architecture image for an os/arch that is different from the system's os/arch # Note: Wildcard filter is not supported with append. Pass a single os/arch to append oc image append --from docker.io/library/busybox:latest --filter-by-os=linux/s390x --to myregistry.com/myimage:latest layer.tar.gz @@ -1538,41 +1538,41 @@ Copy files from an image to the file system ---- # Extract the busybox image into the current directory oc image extract docker.io/library/busybox:latest - + # Extract the busybox image into a designated directory (must exist) oc image extract docker.io/library/busybox:latest --path /:/tmp/busybox - + # Extract the busybox image into the current directory for linux/s390x platform # Note: Wildcard filter is not supported with extract. Pass a single os/arch to extract oc image extract docker.io/library/busybox:latest --filter-by-os=linux/s390x - + # Extract a single file from the image into the current directory oc image extract docker.io/library/centos:7 --path /bin/bash:. - + # Extract all .repo files from the image's /etc/yum.repos.d/ folder into the current directory oc image extract docker.io/library/centos:7 --path /etc/yum.repos.d/*.repo:. 
- + # Extract all .repo files from the image's /etc/yum.repos.d/ folder into a designated directory (must exist) # This results in /tmp/yum.repos.d/*.repo on local system oc image extract docker.io/library/centos:7 --path /etc/yum.repos.d/*.repo:/tmp/yum.repos.d - + # Extract an image stored on disk into the current directory ($(pwd)/v2/busybox/blobs,manifests exists) # --confirm is required because the current directory is not empty oc image extract file://busybox:local --confirm - + # Extract an image stored on disk in a directory other than $(pwd)/v2 into the current directory # --confirm is required because the current directory is not empty ($(pwd)/busybox-mirror-dir/v2/busybox exists) oc image extract file://busybox:local --dir busybox-mirror-dir --confirm - + # Extract an image stored on disk in a directory other than $(pwd)/v2 into a designated directory (must exist) oc image extract file://busybox:local --dir busybox-mirror-dir --path /:/tmp/busybox - + # Extract the last layer in the image oc image extract docker.io/library/centos:7[-1] - + # Extract the first three layers of the image oc image extract docker.io/library/centos:7[:3] - + # Extract the last three layers of the image oc image extract docker.io/library/centos:7[-3:] ---- @@ -1587,13 +1587,13 @@ Display information about an image ---- # Show information about an image oc image info quay.io/openshift/cli:latest - + # Show information about images matching a wildcard oc image info quay.io/openshift/cli:4.* - + # Show information about a file mirrored to disk under DIR oc image info --dir=DIR file://library/busybox:latest - + # Select which image from a multi-OS image to show oc image info library/busybox:latest --filter-by-os=linux/arm64 ---- @@ -1608,46 +1608,46 @@ Mirror images from one repository to another ---- # Copy image to another tag oc image mirror myregistry.com/myimage:latest myregistry.com/myimage:stable - + # Copy image to another registry oc image mirror myregistry.com/myimage:latest docker.io/myrepository/myimage:stable - + # Copy all tags starting with mysql to the destination repository oc image mirror myregistry.com/myimage:mysql* docker.io/myrepository/myimage - + # Copy image to disk, creating a directory structure that can be served as a registry oc image mirror myregistry.com/myimage:latest file://myrepository/myimage:latest - + # Copy image to S3 (pull from .s3.amazonaws.com/image:latest) oc image mirror myregistry.com/myimage:latest s3://s3.amazonaws.com///image:latest - + # Copy image to S3 without setting a tag (pull via @) oc image mirror myregistry.com/myimage:latest s3://s3.amazonaws.com///image - + # Copy image to multiple locations oc image mirror myregistry.com/myimage:latest docker.io/myrepository/myimage:stable \ docker.io/myrepository/myimage:dev - + # Copy multiple images oc image mirror myregistry.com/myimage:latest=myregistry.com/other:test \ myregistry.com/myimage:new=myregistry.com/other:target - + # Copy manifest list of a multi-architecture image, even if only a single image is found oc image mirror myregistry.com/myimage:latest=myregistry.com/other:test \ --keep-manifest-list=true - + # Copy specific os/arch manifest of a multi-architecture image # Run 'oc image info myregistry.com/myimage:latest' to see available os/arch for multi-arch images # Note that with multi-arch images, this results in a new manifest list digest that includes only # the filtered manifests oc image mirror myregistry.com/myimage:latest=myregistry.com/other:test \ --filter-by-os=os/arch - + # Copy all 
os/arch manifests of a multi-architecture image # Run 'oc image info myregistry.com/myimage:latest' to see list of os/arch manifests that will be mirrored oc image mirror myregistry.com/myimage:latest=myregistry.com/other:test \ --keep-manifest-list=true - + # Note the above command is equivalent to oc image mirror myregistry.com/myimage:latest=myregistry.com/other:test \ --filter-by-os=.* @@ -1663,22 +1663,22 @@ Import images from a container image registry ---- # Import tag latest into a new image stream oc import-image mystream --from=registry.io/repo/image:latest --confirm - + # Update imported data for tag latest in an already existing image stream oc import-image mystream - + # Update imported data for tag stable in an already existing image stream oc import-image mystream:stable - + # Update imported data for all tags in an existing image stream oc import-image mystream --all - + # Update imported data for a tag which points to a manifest list to include the full manifest list oc import-image mystream --import-mode=PreserveOriginal - + # Import all tags into a new image stream oc import-image mystream --from=registry.io/repo/image --all --confirm - + # Import all tags into a new image stream using a custom timeout oc --request-timeout=5m import-image mystream --from=registry.io/repo/image --all --confirm ---- @@ -1693,10 +1693,10 @@ Build a kustomization target from a directory or URL. ---- # Build the current working directory oc kustomize - + # Build some shared configuration directory oc kustomize /home/config/production - + # Build from github oc kustomize https://github.com/kubernetes-sigs/kustomize.git/examples/helloWorld?ref=v1.0.6 ---- @@ -1711,19 +1711,19 @@ Update the labels on a resource ---- # Update pod 'foo' with the label 'unhealthy' and the value 'true' oc label pods foo unhealthy=true - + # Update pod 'foo' with the label 'status' and the value 'unhealthy', overwriting any existing value oc label --overwrite pods foo status=unhealthy - + # Update all pods in the namespace oc label pods --all status=unhealthy - + # Update a pod identified by the type and name in "pod.json" oc label -f pod.json status=unhealthy - + # Update pod 'foo' only if the resource is unchanged from version 1 oc label pods foo status=unhealthy --resource-version=1 - + # Update pod 'foo' by removing a label named 'bar' if it exists # Does not require the --overwrite flag oc label pods foo bar- @@ -1739,10 +1739,10 @@ Log in to a server ---- # Log in interactively oc login --username=myuser - + # Log in to the given server with the given certificate authority file oc login localhost:8443 --certificate-authority=/path/to/cert.crt - + # Log in to the given server with the given credentials (will not prompt interactively) oc login localhost:8443 --username=myuser --password=mypass ---- @@ -1769,18 +1769,18 @@ Print the logs for a container in a pod ---- # Start streaming the logs of the most recent build of the openldap build config oc logs -f bc/openldap - + # Start streaming the logs of the latest deployment of the mysql deployment config oc logs -f dc/mysql - + # Get the logs of the first deployment for the mysql deployment config. 
Note that logs # from older deployments may not exist either because the deployment was successful # or due to deployment pruning or manual deletion of the deployment oc logs --version=1 dc/mysql - + # Return a snapshot of ruby-container logs from pod backend oc logs backend -c ruby-container - + # Start streaming of ruby-container logs from pod backend oc logs -f pod/backend -c ruby-container ---- @@ -1795,44 +1795,44 @@ Create a new application ---- # List all local templates and image streams that can be used to create an app oc new-app --list - + # Create an application based on the source code in the current git repository (with a public remote) and a container image oc new-app . --image=registry/repo/langimage - + # Create an application myapp with Docker based build strategy expecting binary input oc new-app --strategy=docker --binary --name myapp - + # Create a Ruby application based on the provided [image]~[source code] combination oc new-app centos/ruby-25-centos7~https://github.com/sclorg/ruby-ex.git - + # Use the public container registry MySQL image to create an app. Generated artifacts will be labeled with db=mysql oc new-app mysql MYSQL_USER=user MYSQL_PASSWORD=pass MYSQL_DATABASE=testdb -l db=mysql - + # Use a MySQL image in a private registry to create an app and override application artifacts' names oc new-app --image=myregistry.com/mycompany/mysql --name=private - + # Create an application from a remote repository using its beta4 branch oc new-app https://github.com/openshift/ruby-hello-world#beta4 - + # Create an application based on a stored template, explicitly setting a parameter value oc new-app --template=ruby-helloworld-sample --param=MYSQL_USER=admin - + # Create an application from a remote repository and specify a context directory oc new-app https://github.com/youruser/yourgitrepo --context-dir=src/build - + # Create an application from a remote private repository and specify which existing secret to use oc new-app https://github.com/youruser/yourgitrepo --source-secret=yoursecret - + # Create an application based on a template file, explicitly setting a parameter value oc new-app --file=./example/myapp/template.json --param=MYSQL_USER=admin - + # Search all templates, image streams, and container images for the ones that match "ruby" oc new-app --search ruby - + # Search for "ruby", but only in stored templates (--template, --image-stream and --image # can be used to filter search results) oc new-app --search --template=ruby - + # Search for "ruby" in stored templates and print the output as YAML oc new-app --search --template=ruby --output=yaml ---- @@ -1848,28 +1848,28 @@ Create a new build configuration # Create a build config based on the source code in the current git repository (with a public # remote) and a container image oc new-build . 
--image=repo/langimage - + # Create a NodeJS build config based on the provided [image]~[source code] combination oc new-build centos/nodejs-8-centos7~https://github.com/sclorg/nodejs-ex.git - + # Create a build config from a remote repository using its beta2 branch oc new-build https://github.com/openshift/ruby-hello-world#beta2 - + # Create a build config using a Dockerfile specified as an argument oc new-build -D $'FROM centos:7\nRUN yum install -y httpd' - + # Create a build config from a remote repository and add custom environment variables oc new-build https://github.com/openshift/ruby-hello-world -e RACK_ENV=development - + # Create a build config from a remote private repository and specify which existing secret to use oc new-build https://github.com/youruser/yourgitrepo --source-secret=yoursecret - + # Create a build config from a remote repository and inject the npmrc into a build oc new-build https://github.com/openshift/ruby-hello-world --build-secret npmrc:.npmrc - + # Create a build config from a remote repository and inject environment data into a build oc new-build https://github.com/openshift/ruby-hello-world --build-config-map env:config - + # Create a build config that gets its input from a remote repository and another container image oc new-build https://github.com/openshift/ruby-hello-world --source-image=openshift/jenkins-1-centos7 --source-image-path=/var/lib/jenkins:tmp ---- @@ -1884,7 +1884,7 @@ Request a new project ---- # Create a new project with minimal information oc new-project web-team-dev - + # Create a new project with a display name and description oc new-project web-team-dev --display-name="Web Team Development" --description="Development project for the web team." ---- @@ -1899,10 +1899,10 @@ Observe changes to resources and react to them (experimental) ---- # Observe changes to services oc observe services - + # Observe changes to services, including the clusterIP and invoke a script for each oc observe services --template '{ .spec.clusterIP }' -- register_dns.sh - + # Observe changes to services filtered by a label selector oc observe namespaces -l regist-dns=true --template '{ .spec.clusterIP }' -- register_dns.sh ---- @@ -1917,19 +1917,19 @@ Update fields of a resource ---- # Partially update a node using a strategic merge patch, specifying the patch as JSON oc patch node k8s-node-1 -p '{"spec":{"unschedulable":true}}' - + # Partially update a node using a strategic merge patch, specifying the patch as YAML oc patch node k8s-node-1 -p $'spec:\n unschedulable: true' - + # Partially update a node identified by the type and name specified in "node.json" using strategic merge patch oc patch -f node.json -p '{"spec":{"unschedulable":true}}' - + # Update a container's image; spec.containers[*].name is required because it's a merge key oc patch pod valid-pod -p '{"spec":{"containers":[{"name":"kubernetes-serve-hostname","image":"new image"}]}}' - + # Update a container's image using a JSON patch with positional arrays oc patch pod valid-pod --type='json' -p='[{"op": "replace", "path": "/spec/containers/0/image", "value":"new image"}]' - + # Update a deployment's replicas through the scale subresource using a merge patch. 
oc patch deployment nginx-deployment --subresource='scale' --type='merge' -p '{"spec":{"replicas":2}}' ---- @@ -1956,7 +1956,7 @@ Add a role to users or service accounts for the current project ---- # Add the 'view' role to user1 for the current project oc policy add-role-to-user view user1 - + # Add the 'edit' role to serviceaccount1 for the current project oc policy add-role-to-user edit -z serviceaccount1 ---- @@ -1972,13 +1972,13 @@ Check which service account can create a pod # Check whether service accounts sa1 and sa2 can admit a pod with a template pod spec specified in my_resource.yaml # Service Account specified in myresource.yaml file is ignored oc policy scc-review -z sa1,sa2 -f my_resource.yaml - + # Check whether service accounts system:serviceaccount:bob:default can admit a pod with a template pod spec specified in my_resource.yaml oc policy scc-review -z system:serviceaccount:bob:default -f my_resource.yaml - + # Check whether the service account specified in my_resource_with_sa.yaml can admit the pod oc policy scc-review -f my_resource_with_sa.yaml - + # Check whether the default service account can admit the pod; default is taken since no service account is defined in myresource_with_no_sa.yaml oc policy scc-review -f myresource_with_no_sa.yaml ---- @@ -1993,10 +1993,10 @@ Check whether a user or a service account can create a pod ---- # Check whether user bob can create a pod specified in myresource.yaml oc policy scc-subject-review -u bob -f myresource.yaml - + # Check whether user bob who belongs to projectAdmin group can create a pod specified in myresource.yaml oc policy scc-subject-review -u bob -g projectAdmin -f myresource.yaml - + # Check whether a service account specified in the pod template spec in myresourcewithsa.yaml can create the pod oc policy scc-subject-review -f myresourcewithsa.yaml ---- @@ -2011,22 +2011,22 @@ Forward one or more local ports to a pod ---- # Listen on ports 5000 and 6000 locally, forwarding data to/from ports 5000 and 6000 in the pod oc port-forward pod/mypod 5000 6000 - + # Listen on ports 5000 and 6000 locally, forwarding data to/from ports 5000 and 6000 in a pod selected by the deployment oc port-forward deployment/mydeployment 5000 6000 - + # Listen on port 8443 locally, forwarding to the targetPort of the service's port named "https" in a pod selected by the service oc port-forward service/myservice 8443:https - + # Listen on port 8888 locally, forwarding to 5000 in the pod oc port-forward pod/mypod 8888:5000 - + # Listen on port 8888 on all addresses, forwarding to 5000 in the pod oc port-forward --address 0.0.0.0 pod/mypod 8888:5000 - + # Listen on port 8888 on localhost and selected IP, forwarding to 5000 in the pod oc port-forward --address localhost,10.19.21.23 pod/mypod 8888:5000 - + # Listen on a random port locally, forwarding to 5000 in the pod oc port-forward pod/mypod :5000 ---- @@ -2041,22 +2041,22 @@ Process a template into list of resources ---- # Convert the template.json file into a resource list and pass to create oc process -f template.json | oc create -f - - + # Process a file locally instead of contacting the server oc process -f template.json --local -o yaml - + # Process template while passing a user-defined label oc process -f template.json -l name=mytemplate - + # Convert a stored template into a resource list oc process foo - + # Convert a stored template into a resource list by setting/overriding parameter values oc process foo PARM1=VALUE1 PARM2=VALUE2 - + # Convert a template stored in different 
namespace into a resource list oc process openshift//foo - + # Convert template.json into a resource list cat template.json | oc process -f - ---- @@ -2071,7 +2071,7 @@ Switch to another project ---- # Switch to the 'myapp' project oc project myapp - + # Display the project currently in use oc project ---- @@ -2098,22 +2098,22 @@ Run a proxy to the Kubernetes API server ---- # To proxy all of the Kubernetes API and nothing else oc proxy --api-prefix=/ - + # To proxy only part of the Kubernetes API and also some static files # You can get pods info with 'curl localhost:8001/api/v1/pods' oc proxy --www=/my/files --www-prefix=/static/ --api-prefix=/api/ - + # To proxy the entire Kubernetes API at a different root # You can get pods info with 'curl localhost:8001/custom/api/v1/pods' oc proxy --api-prefix=/custom/ - + # Run a proxy to the Kubernetes API server on port 8011, serving static content from ./local/www/ oc proxy --port=8011 --www=./local/www/ - + # Run a proxy to the Kubernetes API server on an arbitrary local port # The chosen port for the server will be output to stdout oc proxy --port=0 - + # Run a proxy to the Kubernetes API server, changing the API prefix to k8s-api # This makes e.g. the pods API available at localhost:8001/k8s-api/v1/pods/ oc proxy --api-prefix=/k8s-api @@ -2141,7 +2141,7 @@ Log in to the integrated registry ---- # Log in to the integrated registry oc registry login - + # Log in to different registry using BASIC auth credentials oc registry login --registry quay.io/myregistry --auth-basic=USER:PASS ---- @@ -2156,13 +2156,13 @@ Replace a resource by file name or stdin ---- # Replace a pod using the data in pod.json oc replace -f ./pod.json - + # Replace a pod based on the JSON passed into stdin cat pod.json | oc replace -f - - + # Update a single-container pod's image version (tag) to v4 oc get pod mypod -o yaml | sed 's/\(image: myimage\):.*$/\1:v4/' | oc replace -f - - + # Force replace, delete and then re-create the resource oc replace --force -f ./pod.json ---- @@ -2177,16 +2177,16 @@ Revert part of an application back to a previous deployment ---- # Perform a rollback to the last successfully completed deployment for a deployment config oc rollback frontend - + # See what a rollback to version 3 will look like, but do not perform the rollback oc rollback frontend --to-version=3 --dry-run - + # Perform a rollback to a specific deployment oc rollback frontend-2 - + # Perform the rollback manually by piping the JSON of the new config back to oc oc rollback frontend -o json | oc replace dc/frontend -f - - + # Print the updated deployment configuration in JSON format instead of performing the rollback oc rollback frontend -o json ---- @@ -2213,7 +2213,7 @@ View rollout history ---- # View the rollout history of a deployment oc rollout history dc/nginx - + # View the details of deployment revision 3 oc rollout history dc/nginx --revision=3 ---- @@ -2228,7 +2228,7 @@ Start a new rollout for a deployment config with the latest state from its trigg ---- # Start a new rollout based on the latest images defined in the image change triggers oc rollout latest dc/nginx - + # Print the rolled out deployment config oc rollout latest dc/nginx -o json ---- @@ -2257,10 +2257,10 @@ Restart a resource ---- # Restart a deployment oc rollout restart deployment/nginx - + # Restart a daemon set oc rollout restart daemonset/abc - + # Restart deployments with the app=nginx label oc rollout restart deployment --selector=app=nginx ---- @@ -2312,7 +2312,7 @@ Undo a previous rollout ---- 
# Roll back to the previous deployment oc rollout undo dc/nginx - + # Roll back to deployment revision 3. The replication controller for that version must exist oc rollout undo dc/nginx --to-revision=3 ---- @@ -2327,17 +2327,17 @@ Start a shell session in a container ---- # Open a shell session on the first container in pod 'foo' oc rsh foo - + # Open a shell session on the first container in pod 'foo' and namespace 'bar' # (Note that oc client specific arguments must come before the resource name and its arguments) oc rsh -n bar foo - + # Run the command 'cat /etc/resolv.conf' inside pod 'foo' oc rsh foo cat /etc/resolv.conf - - # See the configuration of your {product-registry} + + # See the configuration of your internal registry oc rsh dc/docker-registry cat config.yml - + # Open a shell session on the container named 'index' inside a pod of your job oc rsh -c index job/sheduled ---- @@ -2352,7 +2352,7 @@ Copy files between a local file system and a pod ---- # Synchronize a local directory with a pod directory oc rsync ./local/dir/ POD:/remote/dir - + # Synchronize a pod directory with a local directory oc rsync POD:/remote/dir/ ./local/dir ---- @@ -2367,28 +2367,28 @@ Run a particular image on the cluster ---- # Start a nginx pod oc run nginx --image=nginx - + # Start a hazelcast pod and let the container expose port 5701 oc run hazelcast --image=hazelcast/hazelcast --port=5701 - + # Start a hazelcast pod and set environment variables "DNS_DOMAIN=cluster" and "POD_NAMESPACE=default" in the container oc run hazelcast --image=hazelcast/hazelcast --env="DNS_DOMAIN=cluster" --env="POD_NAMESPACE=default" - + # Start a hazelcast pod and set labels "app=hazelcast" and "env=prod" in the container oc run hazelcast --image=hazelcast/hazelcast --labels="app=hazelcast,env=prod" - + # Dry run; print the corresponding API objects without creating them oc run nginx --image=nginx --dry-run=client - + # Start a nginx pod, but overload the spec with a partial set of values parsed from JSON oc run nginx --image=nginx --overrides='{ "apiVersion": "v1", "spec": { ... } }' - + # Start a busybox pod and keep it in the foreground, don't restart it if it exits oc run -i -t busybox --image=busybox --restart=Never - + # Start the nginx pod using the default command, but use custom arguments (arg1 .. argN) for that command oc run nginx --image=nginx -- ... - + # Start the nginx pod using a different command and custom arguments oc run nginx --image=nginx --command -- ... 
---- @@ -2403,16 +2403,16 @@ Set a new size for a deployment, replica set, or replication controller ---- # Scale a replica set named 'foo' to 3 oc scale --replicas=3 rs/foo - + # Scale a resource identified by type and name specified in "foo.yaml" to 3 oc scale --replicas=3 -f foo.yaml - + # If the deployment named mysql's current size is 2, scale mysql to 3 oc scale --current-replicas=2 --replicas=3 deployment/mysql - + # Scale multiple replication controllers oc scale --replicas=5 rc/foo rc/bar rc/baz - + # Scale stateful set named 'web' to 3 oc scale --replicas=3 statefulset/web ---- @@ -2427,7 +2427,7 @@ Link secrets to a service account ---- # Add an image pull secret to a service account to automatically use it for pulling pod images oc secrets link serviceaccount-name pull-secret --for=pull - + # Add an image pull secret to a service account to automatically use it for both pulling and pushing build images oc secrets link builder builder-image-secret --for=pull,mount ---- @@ -2454,10 +2454,10 @@ Update a build hook on a build config ---- # Clear post-commit hook on a build config oc set build-hook bc/mybuild --post-commit --remove - + # Set the post-commit hook to execute a test suite using a new entrypoint oc set build-hook bc/mybuild --post-commit --command -- /bin/bash -c /var/lib/test-image.sh - + # Set the post-commit hook to execute a shell script oc set build-hook bc/mybuild --post-commit --script="/var/lib/test-image.sh param1 param2 && /var/lib/done.sh" ---- @@ -2472,13 +2472,13 @@ Update a build secret on a build config ---- # Clear the push secret on a build config oc set build-secret --push --remove bc/mybuild - + # Set the pull secret on a build config oc set build-secret --pull bc/mybuild mysecret - + # Set the push and pull secret on a build config oc set build-secret --push --pull bc/mybuild mysecret - + # Set the source secret on a set of build configs matching a selector oc set build-secret --source -l app=myapp gitsecret ---- @@ -2493,13 +2493,13 @@ Update the data within a config map or secret ---- # Set the 'password' key of a secret oc set data secret/foo password=this_is_secret - + # Remove the 'password' key from a secret oc set data secret/foo password- - + # Update the 'haproxy.conf' key of a config map from a file on disk oc set data configmap/bar --from-file=../haproxy.conf - + # Update a secret with the contents of a directory, one key per file oc set data secret/foo --from-file=secret-dir ---- @@ -2514,11 +2514,11 @@ Update a deployment hook on a deployment config ---- # Clear pre and post hooks on a deployment config oc set deployment-hook dc/myapp --remove --pre --post - + # Set the pre deployment hook to execute a db migration command for an application # using the data volume from the application oc set deployment-hook dc/myapp --pre --volumes=data -- /var/lib/migrate-db.sh - + # Set a mid deployment hook along with additional environment variables oc set deployment-hook dc/myapp --mid --volumes=data -e VAR1=value1 -e VAR2=value2 -- /var/lib/prepare-deploy.sh ---- @@ -2533,32 +2533,32 @@ Update environment variables on a pod template ---- # Update deployment config 'myapp' with a new environment variable oc set env dc/myapp STORAGE_DIR=/local - + # List the environment variables defined on a build config 'sample-build' oc set env bc/sample-build --list - + # List the environment variables defined on all pods oc set env pods --all --list - + # Output modified build config in YAML oc set env bc/sample-build STORAGE_DIR=/data -o yaml - + # Update all 
containers in all replication controllers in the project to have ENV=prod oc set env rc --all ENV=prod - + # Import environment from a secret oc set env --from=secret/mysecret dc/myapp - + # Import environment from a config map with a prefix oc set env --from=configmap/myconfigmap --prefix=MYSQL_ dc/myapp - + # Remove the environment variable ENV from container 'c1' in all deployment configs oc set env dc --all --containers="c1" ENV- - + # Remove the environment variable ENV from a deployment config definition on disk and # update the deployment config on the server oc set env -f dc.json ENV- - + # Set some of the local shell environment into a deployment config on the server oc set env | grep RAILS_ | oc env -e - dc/myapp ---- @@ -2573,16 +2573,16 @@ Update the image of a pod template ---- # Set a deployment configs's nginx container image to 'nginx:1.9.1', and its busybox container image to 'busybox'. oc set image dc/nginx busybox=busybox nginx=nginx:1.9.1 - + # Set a deployment configs's app container image to the image referenced by the imagestream tag 'openshift/ruby:2.3'. oc set image dc/myapp app=openshift/ruby:2.3 --source=imagestreamtag - + # Update all deployments' and rc's nginx container's image to 'nginx:1.9.1' oc set image deployments,rc nginx=nginx:1.9.1 --all - + # Update image of all containers of daemonset abc to 'nginx:1.9.1' oc set image daemonset abc *=nginx:1.9.1 - + # Print result (in yaml format) of updating nginx container image from local file, without hitting the server oc set image -f path/to/file.yaml nginx=nginx:1.9.1 --local -o yaml ---- @@ -2597,19 +2597,19 @@ Change how images are resolved when deploying applications ---- # Print all of the image streams and whether they resolve local names oc set image-lookup - + # Use local name lookup on image stream mysql oc set image-lookup mysql - + # Force a deployment to use local name lookup oc set image-lookup deploy/mysql - + # Show the current status of the deployment lookup oc set image-lookup deploy/mysql --list - + # Disable local name lookup on image stream mysql oc set image-lookup mysql --enabled=false - + # Set local name lookup on all image streams oc set image-lookup --all ---- @@ -2624,22 +2624,22 @@ Update a probe on a pod template ---- # Clear both readiness and liveness probes off all containers oc set probe dc/myapp --remove --readiness --liveness - + # Set an exec action as a liveness probe to run 'echo ok' oc set probe dc/myapp --liveness -- echo ok - + # Set a readiness probe to try to open a TCP socket on 3306 oc set probe rc/mysql --readiness --open-tcp=3306 - + # Set an HTTP startup probe for port 8080 and path /healthz over HTTP on the pod IP oc set probe dc/webapp --startup --get-url=http://:8080/healthz - + # Set an HTTP readiness probe for port 8080 and path /healthz over HTTP on the pod IP oc set probe dc/webapp --readiness --get-url=http://:8080/healthz - + # Set an HTTP readiness probe over HTTPS on 127.0.0.1 for a hostNetwork pod oc set probe dc/router --readiness --get-url=https://127.0.0.1:1936/stats - + # Set only the initial-delay-seconds field on all deployments oc set probe dc --all --readiness --initial-delay-seconds=30 ---- @@ -2654,13 +2654,13 @@ Update resource requests/limits on objects with pod templates ---- # Set a deployments nginx container CPU limits to "200m and memory to 512Mi" oc set resources deployment nginx -c=nginx --limits=cpu=200m,memory=512Mi - + # Set the resource request and limits for all containers in nginx oc set resources deployment nginx 
--limits=cpu=200m,memory=512Mi --requests=cpu=100m,memory=256Mi - + # Remove the resource requests for resources on containers in nginx oc set resources deployment nginx --limits=cpu=0,memory=0 --requests=cpu=0,memory=0 - + # Print the result (in YAML format) of updating nginx container limits locally, without hitting the server oc set resources -f path/to/file.yaml --limits=cpu=200m,memory=512Mi --local -o yaml ---- @@ -2675,19 +2675,19 @@ Update the backends for a route ---- # Print the backends on the route 'web' oc set route-backends web - + # Set two backend services on route 'web' with 2/3rds of traffic going to 'a' oc set route-backends web a=2 b=1 - + # Increase the traffic percentage going to b by 10%% relative to a oc set route-backends web --adjust b=+10%% - + # Set traffic percentage going to b to 10%% of the traffic going to a oc set route-backends web --adjust b=10%% - + # Set weight of b to 10 oc set route-backends web --adjust b=10 - + # Set the weight to all backends to zero oc set route-backends web --zero ---- @@ -2715,7 +2715,7 @@ Update the service account of a resource ---- # Set deployment nginx-deployment's service account to serviceaccount1 oc set serviceaccount deployment nginx-deployment serviceaccount1 - + # Print the result (in YAML format) of updated nginx deployment with service account from a local file, without hitting the API server oc set sa -f nginx-deployment.yaml serviceaccount1 --local --dry-run -o yaml ---- @@ -2730,10 +2730,10 @@ Update the user, group, or service account in a role binding or cluster role bin ---- # Update a cluster role binding for serviceaccount1 oc set subject clusterrolebinding admin --serviceaccount=namespace:serviceaccount1 - + # Update a role binding for user1, user2, and group1 oc set subject rolebinding admin --user=user1 --user=user2 --group=group1 - + # Print the result (in YAML format) of updating role binding subjects locally, without hitting the server oc create rolebinding admin --role=admin --user=admin -o yaml --dry-run | oc set subject --local -f - --user=foo -o yaml ---- @@ -2748,26 +2748,26 @@ Update the triggers on one or more objects ---- # Print the triggers on the deployment config 'myapp' oc set triggers dc/myapp - + # Set all triggers to manual oc set triggers dc/myapp --manual - + # Enable all automatic triggers oc set triggers dc/myapp --auto - + # Reset the GitHub webhook on a build to a new, generated secret oc set triggers bc/webapp --from-github oc set triggers bc/webapp --from-webhook - + # Remove all triggers oc set triggers bc/webapp --remove-all - + # Stop triggering on config change oc set triggers dc/myapp --from-config --remove - + # Add an image trigger to a build config oc set triggers bc/webapp --from-image=namespace1/image:latest - + # Add an image trigger to a stateful set on the main container oc set triggers statefulset/db --from-image=namespace1/image:latest -c main ---- @@ -2782,27 +2782,27 @@ Update volumes on a pod template ---- # List volumes defined on all deployment configs in the current project oc set volume dc --all - + # Add a new empty dir volume to deployment config (dc) 'myapp' mounted under # /var/lib/myapp oc set volume dc/myapp --add --mount-path=/var/lib/myapp - + # Use an existing persistent volume claim (pvc) to overwrite an existing volume 'v1' oc set volume dc/myapp --add --name=v1 -t pvc --claim-name=pvc1 --overwrite - + # Remove volume 'v1' from deployment config 'myapp' oc set volume dc/myapp --remove --name=v1 - + # Create a new persistent volume claim that 
overwrites an existing volume 'v1' oc set volume dc/myapp --add --name=v1 -t pvc --claim-size=1G --overwrite - + # Change the mount point for volume 'v1' to /data oc set volume dc/myapp --add --name=v1 -m /data --overwrite - + # Modify the deployment config by removing volume mount "v1" from container "c1" # (and by removing the volume "v1" if no other containers have volume mounts that reference it) oc set volume dc/myapp --remove --name=v1 --containers=c1 - + # Add new volume based on a more complex volume source (AWS EBS, GCE PD, # Ceph, Gluster, NFS, ISCSI, ...) oc set volume dc/myapp --add -m /data --source= @@ -2818,20 +2818,20 @@ Start a new build ---- # Starts build from build config "hello-world" oc start-build hello-world - + # Starts build from a previous build "hello-world-1" oc start-build --from-build=hello-world-1 - + # Use the contents of a directory as build input oc start-build hello-world --from-dir=src/ - + # Send the contents of a Git repository to the server from tag 'v2' oc start-build hello-world --from-repo=../hello-world --commit=v2 - + # Start a new build for build config "hello-world" and watch the logs until the build # completes or fails oc start-build hello-world --follow - + # Start a new build for build config "hello-world" and wait until the build completes. It # exits with a non-zero return code if the build fails oc start-build hello-world --wait @@ -2847,10 +2847,10 @@ Show an overview of the current project ---- # See an overview of the current project oc status - + # Export the overview of the current project in an svg file oc status -o dot | dot -T svg -o project.svg - + # See an overview of the current project including details for any identified issues oc status --suggest ---- @@ -2865,19 +2865,19 @@ Tag existing images into image streams ---- # Tag the current image for the image stream 'openshift/ruby' and tag '2.0' into the image stream 'yourproject/ruby with tag 'tip' oc tag openshift/ruby:2.0 yourproject/ruby:tip - + # Tag a specific image oc tag openshift/ruby@sha256:6b646fa6bf5e5e4c7fa41056c27910e679c03ebe7f93e361e6515a9da7e258cc yourproject/ruby:tip - + # Tag an external container image oc tag --source=docker openshift/origin-control-plane:latest yourproject/ruby:tip - + # Tag an external container image and request pullthrough for it oc tag --source=docker openshift/origin-control-plane:latest yourproject/ruby:tip --reference-policy=local - + # Tag an external container image and include the full manifest list oc tag --source=docker openshift/origin-control-plane:latest yourproject/ruby:tip --import-mode=PreserveOriginal - + # Remove the specified spec tag from an image stream oc tag openshift/origin-control-plane:latest -d ---- @@ -2892,10 +2892,10 @@ Print the client and server version information ---- # Print the OpenShift client, kube-apiserver, and openshift-apiserver version information for the current context oc version - + # Print the OpenShift client, kube-apiserver, and openshift-apiserver version numbers for the current context oc version --short - + # Print the OpenShift client version information for the current context oc version --client ---- @@ -2910,13 +2910,13 @@ Experimental: Wait for a specific condition on one or many resources ---- # Wait for the pod "busybox1" to contain the status condition of type "Ready" oc wait --for=condition=Ready pod/busybox1 - + # The default value of status condition is true; you can wait for other targets after an equal delimiter (compared after Unicode simple case folding, which is a more 
general form of case-insensitivity): oc wait --for=condition=Ready=false pod/busybox1 - + # Wait for the pod "busybox1" to contain the status phase to be "Running". oc wait --for=jsonpath='{.status.phase}'=Running pod/busybox1 - + # Wait for the pod "busybox1" to be deleted, with a timeout of 60s, after having issued the "delete" command oc delete pod/busybox1 oc wait --for=delete pod/busybox1 --timeout=60s diff --git a/rest_api/config_apis/image-config-openshift-io-v1.adoc b/rest_api/config_apis/image-config-openshift-io-v1.adoc index f2b709cb14..fd14797f0b 100644 --- a/rest_api/config_apis/image-config-openshift-io-v1.adoc +++ b/rest_api/config_apis/image-config-openshift-io-v1.adoc @@ -11,7 +11,7 @@ toc::[] Description:: + -- -Image governs policies related to imagestream imports and runtime configuration for external registries. It allows cluster admins to configure which registries OpenShift is allowed to import images from, extra CA trust bundles for external registries, and policies to block or allow registry hostnames. When exposing OpenShift's image registry to the public, this also lets cluster admins specify the external hostname. +Image governs policies related to imagestream imports and runtime configuration for external registries. It allows cluster admins to configure which registries OpenShift is allowed to import images from, extra CA trust bundles for external registries, and policies to block or allow registry hostnames. When exposing OpenShift's image registry to the public, this also lets cluster admins specify the external hostname. Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). -- @@ -169,12 +169,12 @@ Type:: | `allowedRegistries` | `array (string)` -| allowedRegistries are the only registries permitted for image pull and push actions. All other registries are denied. +| allowedRegistries are the only registries permitted for image pull and push actions. All other registries are denied. Only one of BlockedRegistries or AllowedRegistries may be set. | `blockedRegistries` | `array (string)` -| blockedRegistries cannot be used for image pull and push actions. All other registries are permitted. +| blockedRegistries cannot be used for image pull and push actions. All other registries are permitted. Only one of BlockedRegistries or AllowedRegistries may be set. | `containerRuntimeSearchRegistries` @@ -209,7 +209,7 @@ Type:: | `internalRegistryHostname` | `string` -| internalRegistryHostname sets the hostname for the default {product-registry}. The value must be in "hostname[:port]" format. This value is set by the image registry operator which controls the {product-registry} hostname. For backward compatibility, users can still use OPENSHIFT_DEFAULT_REGISTRY environment variable but this setting overrides the environment variable. +| internalRegistryHostname sets the hostname for the default internal image registry. The value must be in "hostname[:port]" format. This value is set by the image registry operator which controls the internal registry hostname. For backward compatibility, users can still use OPENSHIFT_DEFAULT_REGISTRY environment variable but this setting overrides the environment variable. 
|=== @@ -390,7 +390,7 @@ Description:: | Parameter | Type | Description | `body` | xref:../config_apis/image-config-openshift-io-v1.adoc#image-config-openshift-io-v1[`Image`] schema -| +| |=== .HTTP responses @@ -459,7 +459,7 @@ Description:: | Parameter | Type | Description | `body` | xref:../objects/index.adoc#io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions[`DeleteOptions`] schema -| +| |=== .HTTP responses @@ -531,7 +531,7 @@ Description:: | Parameter | Type | Description | `body` | xref:../objects/index.adoc#io.k8s.apimachinery.pkg.apis.meta.v1.Patch[`Patch`] schema -| +| |=== .HTTP responses @@ -572,7 +572,7 @@ Description:: | Parameter | Type | Description | `body` | xref:../config_apis/image-config-openshift-io-v1.adoc#image-config-openshift-io-v1[`Image`] schema -| +| |=== .HTTP responses @@ -665,7 +665,7 @@ Description:: | Parameter | Type | Description | `body` | xref:../objects/index.adoc#io.k8s.apimachinery.pkg.apis.meta.v1.Patch[`Patch`] schema -| +| |=== .HTTP responses @@ -706,7 +706,7 @@ Description:: | Parameter | Type | Description | `body` | xref:../config_apis/image-config-openshift-io-v1.adoc#image-config-openshift-io-v1[`Image`] schema -| +| |=== .HTTP responses diff --git a/rest_api/workloads_apis/build-build-openshift-io-v1.adoc b/rest_api/workloads_apis/build-build-openshift-io-v1.adoc index 82637b216c..5478da985f 100644 --- a/rest_api/workloads_apis/build-build-openshift-io-v1.adoc +++ b/rest_api/workloads_apis/build-build-openshift-io-v1.adoc @@ -37,7 +37,7 @@ Type:: | `metadata` | xref:../objects/index.adoc#io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta[`ObjectMeta`] -| +| | `spec` | `object` @@ -496,7 +496,7 @@ Type:: | `images[]` | `object` -| ImageSource is used to describe build source that will be extracted from an image or used during a multi stage build. A reference of type ImageStreamTag, ImageStreamImage or DockerImage may be used. A pull secret can be specified to pull the image from an external registry or override the default service account secret if pulling from the {product-registry}. Image sources can either be used to extract content from an image and place it into the build context along with the repository source, or used directly during a multi-stage container image build to allow content to be copied without overwriting the contents of the repository source (see the 'paths' and 'as' fields). +| ImageSource is used to describe build source that will be extracted from an image or used during a multi stage build. A reference of type ImageStreamTag, ImageStreamImage or DockerImage may be used. A pull secret can be specified to pull the image from an external registry or override the default service account secret if pulling from the internal registry. Image sources can either be used to extract content from an image and place it into the build context along with the repository source, or used directly during a multi-stage container image build to allow content to be copied without overwriting the contents of the repository source (see the 'paths' and 'as' fields). | `secrets` | `array` @@ -635,7 +635,7 @@ Type:: Description:: + -- -ImageSource is used to describe build source that will be extracted from an image or used during a multi stage build. A reference of type ImageStreamTag, ImageStreamImage or DockerImage may be used. A pull secret can be specified to pull the image from an external registry or override the default service account secret if pulling from the {product-registry}. 
Image sources can either be used to extract content from an image and place it into the build context along with the repository source, or used directly during a multi-stage container image build to allow content to be copied without overwriting the contents of the repository source (see the 'paths' and 'as' fields). +ImageSource is used to describe build source that will be extracted from an image or used during a multi stage build. A reference of type ImageStreamTag, ImageStreamImage or DockerImage may be used. A pull secret can be specified to pull the image from an external registry or override the default service account secret if pulling from the internal registry. Image sources can either be used to extract content from an image and place it into the build context along with the repository source, or used directly during a multi-stage container image build to allow content to be copied without overwriting the contents of the repository source (see the 'paths' and 'as' fields). -- Type:: @@ -2433,7 +2433,7 @@ Defaults to unset | Parameter | Type | Description | `body` | xref:../objects/index.adoc#io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions[`DeleteOptions`] schema -| +| |=== .HTTP responses @@ -2533,7 +2533,7 @@ Description:: | Parameter | Type | Description | `body` | xref:../workloads_apis/build-build-openshift-io-v1.adoc#build-build-openshift-io-v1[`Build`] schema -| +| |=== .HTTP responses @@ -2678,7 +2678,7 @@ Description:: | Parameter | Type | Description | `body` | xref:../objects/index.adoc#io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions[`DeleteOptions`] schema -| +| |=== .HTTP responses @@ -2741,7 +2741,7 @@ Description:: | Parameter | Type | Description | `body` | xref:../objects/index.adoc#io.k8s.apimachinery.pkg.apis.meta.v1.Patch[`Patch`] schema -| +| |=== .HTTP responses @@ -2784,7 +2784,7 @@ Description:: | Parameter | Type | Description | `body` | xref:../workloads_apis/build-build-openshift-io-v1.adoc#build-build-openshift-io-v1[`Build`] schema -| +| |=== .HTTP responses @@ -2922,7 +2922,7 @@ Description:: | Parameter | Type | Description | `body` | xref:../workloads_apis/build-build-openshift-io-v1.adoc#build-build-openshift-io-v1[`Build`] schema -| +| |=== .HTTP responses diff --git a/rest_api/workloads_apis/buildconfig-build-openshift-io-v1.adoc b/rest_api/workloads_apis/buildconfig-build-openshift-io-v1.adoc index 18ab48374e..c76771fda5 100644 --- a/rest_api/workloads_apis/buildconfig-build-openshift-io-v1.adoc +++ b/rest_api/workloads_apis/buildconfig-build-openshift-io-v1.adoc @@ -41,7 +41,7 @@ Required:: | `metadata` | xref:../objects/index.adoc#io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta[`ObjectMeta`] -| +| | `spec` | `object` @@ -512,7 +512,7 @@ Type:: | `images[]` | `object` -| ImageSource is used to describe build source that will be extracted from an image or used during a multi stage build. A reference of type ImageStreamTag, ImageStreamImage or DockerImage may be used. A pull secret can be specified to pull the image from an external registry or override the default service account secret if pulling from the {product-registry}. Image sources can either be used to extract content from an image and place it into the build context along with the repository source, or used directly during a multi-stage container image build to allow content to be copied without overwriting the contents of the repository source (see the 'paths' and 'as' fields). 
+| ImageSource is used to describe build source that will be extracted from an image or used during a multi stage build. A reference of type ImageStreamTag, ImageStreamImage or DockerImage may be used. A pull secret can be specified to pull the image from an external registry or override the default service account secret if pulling from the internal registry. Image sources can either be used to extract content from an image and place it into the build context along with the repository source, or used directly during a multi-stage container image build to allow content to be copied without overwriting the contents of the repository source (see the 'paths' and 'as' fields). | `secrets` | `array` @@ -651,7 +651,7 @@ Type:: Description:: + -- -ImageSource is used to describe build source that will be extracted from an image or used during a multi stage build. A reference of type ImageStreamTag, ImageStreamImage or DockerImage may be used. A pull secret can be specified to pull the image from an external registry or override the default service account secret if pulling from the {product-registry}. Image sources can either be used to extract content from an image and place it into the build context along with the repository source, or used directly during a multi-stage container image build to allow content to be copied without overwriting the contents of the repository source (see the 'paths' and 'as' fields). +ImageSource is used to describe build source that will be extracted from an image or used during a multi stage build. A reference of type ImageStreamTag, ImageStreamImage or DockerImage may be used. A pull secret can be specified to pull the image from an external registry or override the default service account secret if pulling from the internal registry. Image sources can either be used to extract content from an image and place it into the build context along with the repository source, or used directly during a multi-stage container image build to allow content to be copied without overwriting the contents of the repository source (see the 'paths' and 'as' fields). -- Type:: @@ -1940,7 +1940,7 @@ Defaults to unset | Parameter | Type | Description | `body` | xref:../objects/index.adoc#io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions[`DeleteOptions`] schema -| +| |=== .HTTP responses @@ -2040,7 +2040,7 @@ Description:: | Parameter | Type | Description | `body` | xref:../workloads_apis/buildconfig-build-openshift-io-v1.adoc#buildconfig-build-openshift-io-v1[`BuildConfig`] schema -| +| |=== .HTTP responses @@ -2185,7 +2185,7 @@ Description:: | Parameter | Type | Description | `body` | xref:../objects/index.adoc#io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions[`DeleteOptions`] schema -| +| |=== .HTTP responses @@ -2248,7 +2248,7 @@ Description:: | Parameter | Type | Description | `body` | xref:../objects/index.adoc#io.k8s.apimachinery.pkg.apis.meta.v1.Patch[`Patch`] schema -| +| |=== .HTTP responses @@ -2291,7 +2291,7 @@ Description:: | Parameter | Type | Description | `body` | xref:../workloads_apis/buildconfig-build-openshift-io-v1.adoc#buildconfig-build-openshift-io-v1[`BuildConfig`] schema -| +| |=== .HTTP responses