From cbd74b6ea71cdee6797d0c2cd3695e8aa2663e30 Mon Sep 17 00:00:00 2001 From: Ashley Hardin Date: Mon, 7 Dec 2020 17:10:28 -0500 Subject: [PATCH] Style updates for the Applications section --- _topic_map.yml | 2 +- .../deployments/deployment-strategies.adoc | 30 ++----- .../managing-deployment-processes.adoc | 6 +- .../route-based-deployment-strategies.adoc | 29 ++---- .../deployments/what-deployments-are.adoc | 30 ++----- applications/idling-applications.adoc | 13 +-- .../projects/working-with-projects.adoc | 2 +- ...otas-setting-across-multiple-projects.adoc | 8 +- .../quotas/quotas-setting-per-project.adoc | 9 +- modules/application-health-about.adoc | 19 ++-- modules/application-health-configuring.adoc | 3 +- .../applications-create-using-cli-image.adoc | 23 ++--- .../applications-create-using-cli-modify.adoc | 84 +++++------------ ...ications-create-using-cli-source-code.adoc | 79 +++++----------- ...pplications-create-using-cli-template.adoc | 5 +- modules/deployments-ab-testing-lb.adoc | 89 +++++-------------- .../deployments-accessing-private-repos.adoc | 14 ++- modules/deployments-blue-green.adoc | 33 ++----- modules/deployments-canary-deployments.adoc | 11 +-- ...ployments-comparing-deploymentconfigs.adoc | 68 ++++---------- ...ployments-creating-rolling-deployment.adoc | 30 +++---- modules/deployments-custom-strategy.adoc | 27 ++---- modules/deployments-deploymentconfigs.adoc | 58 ++++-------- .../deployments-exec-cmd-in-container.adoc | 12 +-- modules/deployments-graceful-termination.adoc | 18 +--- modules/deployments-kube-deployments.adoc | 11 +-- modules/deployments-lifecycle-hooks.adoc | 29 ++---- modules/deployments-recreate-strategy.adoc | 27 ++---- modules/deployments-replicasets.adoc | 24 ++--- .../deployments-replicationcontrollers.adoc | 22 ++--- modules/deployments-retrying-deployment.adoc | 10 +-- modules/deployments-rolling-back.adoc | 17 +--- modules/deployments-rolling-strategy.adoc | 57 ++++-------- modules/deployments-running-pod-svc-acct.adoc | 9 +- modules/deployments-scaling-manually.adoc | 12 +-- modules/deployments-setting-resources.adoc | 31 ++----- modules/deployments-setting-triggers.adoc | 3 +- modules/deployments-starting-deployment.adoc | 8 +- modules/deployments-triggers.adoc | 42 +++------ modules/deployments-viewing-deployment.adoc | 10 +-- modules/deployments-viewing-logs.adoc | 10 +-- .../disabling-project-self-provisioning.adoc | 26 ++---- ...gathering-application-diagnostic-data.adoc | 4 +- modules/nodes-containers-volumes-adding.adoc | 4 +- .../nodes-containers-volumes-removing.adoc | 8 +- .../nodes-containers-volumes-updating.adoc | 2 +- .../nodes-pods-autoscaling-creating-cpu.adoc | 14 +-- ...odes-pods-autoscaling-creating-memory.adoc | 12 +-- modules/odc-connecting-components.adoc | 28 +++--- modules/odc-editing-health-checks.adoc | 2 +- modules/odc-grouping-multiple-components.adoc | 4 +- modules/odc-starting-recreate-deployment.adoc | 8 +- modules/odc-starting-rolling-deployment.adoc | 2 +- ...m-creating-etcd-cluster-from-operator.adoc | 6 +- modules/pruning-builds.adoc | 18 ++-- modules/pruning-deployments.adoc | 17 ++-- modules/pruning-hard-pruning-registry.adoc | 4 +- modules/pruning-images-manual.adoc | 82 ++++++----------- modules/pruning-images.adoc | 11 ++- modules/quotas-requiring-explicit-quota.adoc | 2 +- .../quotas-sample-resource-quotas-def.adoc | 47 ++++------ modules/quotas-selecting-projects.adoc | 14 ++- .../quotas-viewing-clusterresourcequotas.adoc | 8 +- ...resource-quota-for-extended-resources.adoc | 16 ++-- 
...deploymentconfig-apps-openshift-io-v1.adoc | 26 +++--- 65 files changed, 428 insertions(+), 961 deletions(-) diff --git a/_topic_map.yml b/_topic_map.yml index ff17dbae73..017deb8455 100644 --- a/_topic_map.yml +++ b/_topic_map.yml @@ -1173,7 +1173,7 @@ Topics: File: what-deployments-are - Name: Managing deployment processes File: managing-deployment-processes - - Name: Using DeploymentConfig strategies + - Name: Using deployment strategies File: deployment-strategies - Name: Using route-based deployment strategies File: route-based-deployment-strategies diff --git a/applications/deployments/deployment-strategies.adoc b/applications/deployments/deployment-strategies.adoc index 4236f39c77..d5ee2d1ff0 100644 --- a/applications/deployments/deployment-strategies.adoc +++ b/applications/deployments/deployment-strategies.adoc @@ -1,23 +1,15 @@ [id="deployment-strategies"] -= Using DeploymentConfig strategies += Using deployment strategies include::modules/common-attributes.adoc[] :context: deployment-strategies toc::[] -A _deployment strategy_ is a way to change or upgrade an application. The aim -is to make the change without downtime in a way that the user barely notices the -improvements. +A _deployment strategy_ is a way to change or upgrade an application. The aim is to make the change without downtime in a way that the user barely notices the improvements. -Because the end user usually accesses the application through a route handled by -a router, the deployment strategy can focus on DeploymentConfig features or -routing features. Strategies that focus on the DeploymentConfig impact all -routes that use the application. Strategies that use router features target -individual routes. +Because the end user usually accesses the application through a route handled by a router, the deployment strategy can focus on `DeploymentConfig` object features or routing features. Strategies that focus on the deployment impact all routes that use the application. Strategies that use router features target individual routes. -Many deployment strategies are supported through the DeploymentConfig, and some -additional strategies are supported through router features. DeploymentConfig -strategies are discussed in this section. +Many deployment strategies are supported through the `DeploymentConfig` object, and some additional strategies are supported through router features. Deployment strategies are discussed in this section. //// @@ -35,18 +27,12 @@ xref:../../applications/deployments/route-based-deployment-strategies.adoc#route Consider the following when choosing a deployment strategy: - Long-running connections must be handled gracefully. -- Database conversions can be complex and must be done and rolled back along with -the application. -- If the application is a hybrid of microservices and traditional components, -downtime might be required to complete the transition. +- Database conversions can be complex and must be done and rolled back along with the application. +- If the application is a hybrid of microservices and traditional components, downtime might be required to complete the transition. - You must have the infrastructure to do this. -- If you have a non-isolated test environment, you can break both new and old -versions. +- If you have a non-isolated test environment, you can break both new and old versions. -A deployment strategy uses readiness checks to determine if a new Pod is ready -for use. If a readiness check fails, the DeploymentConfig retries to run the -Pod until it times out. 
The default timeout is `10m`, a value set in -`TimeoutSeconds` in `dc.spec.strategy.*params`. +A deployment strategy uses readiness checks to determine if a new pod is ready for use. If a readiness check fails, the `DeploymentConfig` object retries to run the pod until it times out. The default timeout is `10m`, a value set in `TimeoutSeconds` in `dc.spec.strategy.*params`. include::modules/deployments-rolling-strategy.adoc[leveloffset=+1] include::modules/deployments-canary-deployments.adoc[leveloffset=+2] diff --git a/applications/deployments/managing-deployment-processes.adoc b/applications/deployments/managing-deployment-processes.adoc index 734a5bbb3c..9a14757ec5 100644 --- a/applications/deployments/managing-deployment-processes.adoc +++ b/applications/deployments/managing-deployment-processes.adoc @@ -6,11 +6,9 @@ include::modules/common-attributes.adoc[] toc::[] [id="deploymentconfig-operations"] -== Managing DeploymentConfigs +== Managing `DeploymentConfig` objects -DeploymentConfigs can be managed from the {product-title} web console's -*Workloads* page or using the `oc` CLI. The following procedures show CLI usage -unless otherwise stated. +`DeploymentConfig` objects can be managed from the {product-title} web console's *Workloads* page or using the `oc` CLI. The following procedures show CLI usage unless otherwise stated. include::modules/deployments-starting-deployment.adoc[leveloffset=+2] include::modules/deployments-viewing-deployment.adoc[leveloffset=+2] diff --git a/applications/deployments/route-based-deployment-strategies.adoc b/applications/deployments/route-based-deployment-strategies.adoc index 47fd44b0de..15fee3dade 100644 --- a/applications/deployments/route-based-deployment-strategies.adoc +++ b/applications/deployments/route-based-deployment-strategies.adoc @@ -5,11 +5,7 @@ include::modules/common-attributes.adoc[] toc::[] -Deployment strategies provide a way for the application to evolve. Some -strategies use DeploymentConfigs to make changes that are seen by users of all -routes that resolve to the application. Other advanced strategies, such as the -ones described in this section, use router features in conjunction with -DeploymentConfigs to impact specific routes. +Deployment strategies provide a way for the application to evolve. Some strategies use `DeploymentConfig` objects to make changes that are seen by users of all routes that resolve to the application. Other advanced strategies, such as the ones described in this section, use router features in conjunction with `DeploymentConfig` objects to impact specific routes. //// This link keeps breaking Travis for some reason. [NOTE] ==== See -xref:../../applications/deployments/deployment-strategies.adoc#deployment-strategies[Using DeploymentConfig strategies] +xref:../../applications/deployments/deployment-strategies.adoc#deployment-strategies[Using deployment strategies] for more on the basic strategy types. ==== //// -The most common route-based strategy is to use a _blue-green deployment_. The -new version (the blue version) is brought up for testing and evaluation, while -the users still use the stable version (the green version). When ready, the -users are switched to the blue version. If a problem arises, you can switch back -to the green version. +The most common route-based strategy is to use a _blue-green deployment_.
The new version (the blue version) is brought up for testing and evaluation, while the users still use the stable version (the green version). When ready, the users are switched to the blue version. If a problem arises, you can switch back to the green version. -A common alternative strategy is to use _A/B versions_ that are both active at -the same time and some users use one version, and some users use the other -version. This can be used for experimenting with user interface changes and -other features to get user feedback. It can also be used to verify proper -operation in a production context where problems impact a limited number of -users. +A common alternative strategy is to use _A/B versions_ that are both active at the same time and some users use one version, and some users use the other version. This can be used for experimenting with user interface changes and other features to get user feedback. It can also be used to verify proper operation in a production context where problems impact a limited number of users. -A canary deployment tests the new version but when a problem is detected it -quickly falls back to the previous version. This can be done with both of the -above strategies. +A canary deployment tests the new version but when a problem is detected it quickly falls back to the previous version. This can be done with both of the above strategies. -The route-based deployment strategies do not scale the number of Pods in the -services. To maintain desired performance characteristics the deployment -configurations might have to be scaled. +The route-based deployment strategies do not scale the number of pods in the services. To maintain desired performance characteristics the deployment configurations might have to be scaled. include::modules/deployments-proxy-shards.adoc[leveloffset=+1] include::modules/deployments-n1-compatibility.adoc[leveloffset=+1] diff --git a/applications/deployments/what-deployments-are.adoc b/applications/deployments/what-deployments-are.adoc index 4c8961e7aa..66593b5cab 100644 --- a/applications/deployments/what-deployments-are.adoc +++ b/applications/deployments/what-deployments-are.adoc @@ -1,23 +1,15 @@ [id="what-deployments-are"] -= Understanding Deployments and DeploymentConfigs += Understanding `Deployment` and `DeploymentConfig` objects include::modules/common-attributes.adoc[] :context: what-deployments-are toc::[] -_Deployments_ and _DeploymentConfigs_ in {product-title} are API objects that -provide two similar but different methods for fine-grained management over -common user applications. They are composed of the following separate API -objects: +The `Deployment` and `DeploymentConfig` API objects in {product-title} provide two similar but different methods for fine-grained management over common user applications. They are composed of the following separate API objects: -- A DeploymentConfig or a Deployment, either of which describes the desired state -of a particular component of the application as a Pod template. -- DeploymentConfigs involve one or more _ReplicationControllers_, which contain a -point-in-time record of the state of a DeploymentConfig as a Pod template. -Similarly, Deployments involve one or more _ReplicaSets_, a successor of -ReplicationControllers. -- One or more Pods, which represent an instance of a particular version of an -application. +* A `DeploymentConfig` or `Deployment` object, either of which describes the desired state of a particular component of the application as a pod template. 
+* `DeploymentConfig` objects involve one or more _replication controllers_, which contain a point-in-time record of the state of a deployment as a pod template. Similarly, `Deployment` objects involve one or more _replica sets_, a successor of replication controllers. +* One or more pods, which represent an instance of a particular version of an application. //// Update when converted: @@ -36,19 +28,13 @@ xref:../../dev_guide/pod_autoscaling.adoc#dev-guide-pod-autoscaling[autoscaling] [id="what-deployments-are-build-blocks"] == Building blocks of a deployment -Deployments and DeploymentConfigs are enabled by the use of native Kubernetes -API objects ReplicaSets and ReplicationControllers, respectively, as their -building blocks. +Deployments and deployment configs are enabled by the use of native Kubernetes API objects `ReplicaSet` and `ReplicationController`, respectively, as their building blocks. -Users do not have to manipulate ReplicationControllers, ReplicaSets, or Pods -owned by DeploymentConfigs or Deployments. The deployment systems ensures -changes are propagated appropriately. +Users do not have to manipulate replication controllers, replica sets, or pods owned by `DeploymentConfig` objects or deployments. The deployment systems ensure changes are propagated appropriately. [TIP] ==== -If the existing deployment strategies are not suited for your use case and you -must run manual steps during the lifecycle of your deployment, then -you should consider creating a Custom deployment strategy. +If the existing deployment strategies are not suited for your use case and you must run manual steps during the lifecycle of your deployment, then you should consider creating a custom deployment strategy. ==== The following sections provide further details on these objects. diff --git a/applications/idling-applications.adoc b/applications/idling-applications.adoc index a76e677dba..223a827f71 100644 --- a/applications/idling-applications.adoc +++ b/applications/idling-applications.adoc @@ -5,18 +5,11 @@ include::modules/common-attributes.adoc[] toc::[] -Cluster administrators can idle applications to reduce resource consumption. -This is useful when the cluster is deployed on a public cloud where cost is -related to resource consumption. +Cluster administrators can idle applications to reduce resource consumption. This is useful when the cluster is deployed on a public cloud where cost is related to resource consumption. -If any scalable resources are not in use, {product-title} discovers and idles -them by scaling their replicas to `0`. The next time network traffic is directed -to the resources, the resources are unidled by scaling up the replicas, and -normal operation continues. +If any scalable resources are not in use, {product-title} discovers and idles them by scaling their replicas to `0`. The next time network traffic is directed to the resources, the resources are unidled by scaling up the replicas, and normal operation continues. -Applications are made of services, as well as other scalable resources, such as -DeploymentConfigs. The action of idling an application involves idling -all associated resources. +Applications are made of services, as well as other scalable resources, such as deployment configs. The action of idling an application involves idling all associated resources. 
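As a quick, non-authoritative sketch of the flow that the included modules describe in full, an administrator typically idles everything behind a single service with the `oc idle` command; the `myapp` service name below is a placeholder, not part of this patch:

[source,terminal]
----
$ oc idle myapp
----

The command discovers the scalable resources, such as the deployment config, behind the `myapp` service and scales them to zero; the next request routed to the service unidles them again.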
include::modules/idle-idling-applications.adoc[leveloffset=+1] include::modules/idle-unidling-applications.adoc[leveloffset=+1] diff --git a/applications/projects/working-with-projects.adoc b/applications/projects/working-with-projects.adoc index 7059fc7737..4905377f8d 100644 --- a/applications/projects/working-with-projects.adoc +++ b/applications/projects/working-with-projects.adoc @@ -10,7 +10,7 @@ isolation from other communities. [NOTE] ==== -Projects starting with `openshift-` and `kube-` are xref:../../authentication/using-rbac.adoc#rbac-default-projects_using-rbac[default projects]. These projects host cluster components that run as Pods and other infrastructure components. As such, {product-title} does not allow you to create Projects starting with `openshift-` or `kube-` using the `oc new-project` command. Cluster administrators can create these Projects using the `oc adm new-project` command. +Projects starting with `openshift-` and `kube-` are xref:../../authentication/using-rbac.adoc#rbac-default-projects_using-rbac[default projects]. These projects host cluster components that run as pods and other infrastructure components. As such, {product-title} does not allow you to create projects starting with `openshift-` or `kube-` using the `oc new-project` command. Cluster administrators can create these projects using the `oc adm new-project` command. ==== include::modules/creating-a-project-using-the-web-console.adoc[leveloffset=+1] diff --git a/applications/quotas/quotas-setting-across-multiple-projects.adoc b/applications/quotas/quotas-setting-across-multiple-projects.adoc index f190c5db94..216aee8c57 100644 --- a/applications/quotas/quotas-setting-across-multiple-projects.adoc +++ b/applications/quotas/quotas-setting-across-multiple-projects.adoc @@ -5,13 +5,9 @@ include::modules/common-attributes.adoc[] toc::[] -A multi-project quota, defined by a ClusterResourceQuota object, allows quotas -to be shared across multiple projects. Resources used in each selected project -are aggregated and that aggregate is used to limit resources across all the -selected projects. +A multi-project quota, defined by a `ClusterResourceQuota` object, allows quotas to be shared across multiple projects. Resources used in each selected project are aggregated and that aggregate is used to limit resources across all the selected projects. -This guide describes how cluster administrators can set and manage resource -quotas across multiple projects. +This guide describes how cluster administrators can set and manage resource quotas across multiple projects. include::modules/quotas-selecting-projects.adoc[leveloffset=+1] include::modules/quotas-viewing-clusterresourcequotas.adoc[leveloffset=+1] diff --git a/applications/quotas/quotas-setting-per-project.adoc b/applications/quotas/quotas-setting-per-project.adoc index ca5fa77b7e..12df415ac6 100644 --- a/applications/quotas/quotas-setting-per-project.adoc +++ b/applications/quotas/quotas-setting-per-project.adoc @@ -5,14 +5,9 @@ include::modules/common-attributes.adoc[] toc::[] -A _resource quota_, defined by a ResourceQuota object, provides constraints that -limit aggregate resource consumption per project. It can limit the quantity of -objects that can be created in a project by type, as well as the total amount of -compute resources and storage that may be consumed by resources in that project. +A _resource quota_, defined by a `ResourceQuota` object, provides constraints that limit aggregate resource consumption per project. 
It can limit the quantity of objects that can be created in a project by type, as well as the total amount of compute resources and storage that might be consumed by resources in that project. -This guide describes how resource quotas work, how cluster administrators can -set and manage resource quotas on a per project basis, and how developers and -cluster administrators can view them. +This guide describes how resource quotas work, how cluster administrators can set and manage resource quotas on a per project basis, and how developers and cluster administrators can view them. include::modules/quotas-resources-managed.adoc[leveloffset=+1] include::modules/quotas-scopes.adoc[leveloffset=+1] diff --git a/modules/application-health-about.adoc b/modules/application-health-about.adoc index b23d297f0b..7b9b456ffd 100644 --- a/modules/application-health-about.adoc +++ b/modules/application-health-about.adoc @@ -12,7 +12,7 @@ You can include one or more probes in the specification for the pod that contain [NOTE] ==== -If you want to add or edit health checks in an existing pod, you must edit the pod deployment configuration or use the *Developer* perspective in the web console. You cannot use the CLI to add or edit health checks for an existing pod. +If you want to add or edit health checks in an existing pod, you must edit the pod `DeploymentConfig` object or use the *Developer* perspective in the web console. You cannot use the CLI to add or edit health checks for an existing pod. ==== Readiness probe:: @@ -34,7 +34,7 @@ A _startup probe_ indicates whether the application within a container is starte + Some applications can require additional start-up time on their first initialization. You can use a startup probe with a liveness or readiness probe to delay that probe long enough to handle lengthy start-up time using the `failureThreshold` and `periodSeconds` parameters. + -For example, you can add a startup probe, with a `failureThreshold` of 30 failures and a `periodSeconds` of 10 seconds (30 * 10s = 300s) for a maximum of 5 minutes, to a liveness probe. After the startup probe succeeds the first time, the liveness probe takes over. +For example, you can add a startup probe, with a `failureThreshold` of 30 failures and a `periodSeconds` of 10 seconds (30 * 10s = 300s) for a maximum of 5 minutes, to a liveness probe. After the startup probe succeeds the first time, the liveness probe takes over. You can configure liveness, readiness, and startup probes with any of the following types of tests: @@ -53,11 +53,11 @@ You can configure several fields to control the behavior of a probe: * `initialDelaySeconds`: The time, in seconds, after the container starts before the probe can be scheduled. The default is 0. * `periodSeconds`: The delay, in seconds, between performing probes. The default is `10`. * `timeoutSeconds`: The number of seconds of inactivity after which the probe times out and the container is assumed to have failed. The default is `1`. -* `successThreshold`: The number of times that the probe must report success after a failure in order to reset the container status to successful. The value must be `1` for a liveness probe. The default is `1`. -* `failureThreshold`: The number of times that the probe is allowed to fail. The default is 3. After the specified attempts: -** for a liveness probe, the container is restarted +* `successThreshold`: The number of times that the probe must report success after a failure in order to reset the container status to successful. 
The value must be `1` for a liveness probe. The default is `1`. +* `failureThreshold`: The number of times that the probe is allowed to fail. The default is 3. After the specified attempts: +** for a liveness probe, the container is restarted ** for a readiness probe, the pod is marked `Unready` -** for a startup probe, the container is killed and is subject to the pod's `restartPolicy` +** for a startup probe, the container is killed and is subject to the pod's `restartPolicy` + [NOTE] ==== @@ -71,7 +71,7 @@ liveness or readiness probes, as shown in the examples. [id="application-health-examples"] == Example probes -The following are samples of different probes as they would appear in an object specification. +The following are samples of different probes as they would appear in an object specification. .Sample readiness probe with a container command readiness probe in a pod spec [source,yaml] @@ -166,7 +166,7 @@ spec: command: <5> - /bin/bash - '-c' - - timeout 60 /opt/eap/bin/livenessProbe.sh + - timeout 60 /opt/eap/bin/livenessProbe.sh periodSeconds: 10 <6> successThreshold: 1 <7> failureThreshold: 3 <8> @@ -178,7 +178,7 @@ spec: <3> The liveness probe. <4> The type of probe, here a container command probe. <5> The command line to execute inside the container. -<6> How often in seconds to perform the probe. +<6> How often in seconds to perform the probe. <7> The number of number of consecutive successes needed to show success after a failure. <8> The number of times to try the probe after a failure. @@ -215,4 +215,3 @@ spec: ---- <1> The readiness probe. <2> The liveness probe. - diff --git a/modules/application-health-configuring.adoc b/modules/application-health-configuring.adoc index c670b4cad6..10a9d64756 100644 --- a/modules/application-health-configuring.adoc +++ b/modules/application-health-configuring.adoc @@ -9,7 +9,7 @@ To configure readiness, liveness, and startup probes, add one or more probes to [NOTE] ==== -If you want to add or edit health checks in an existing pod, you must edit the pod deployment configuration or use the *Developer* perspective in the web console. You cannot use the CLI to add or edit health checks for an existing pod. +If you want to add or edit health checks in an existing pod, you must edit the pod `DeploymentConfig` object or use the *Developer* perspective in the web console. You cannot use the CLI to add or edit health checks for an existing pod. ==== .Procedure @@ -127,4 +127,3 @@ Events: Normal Pulling 10s (x3 over 47s) kubelet, ci-ln-37hz77b-f76d1-wdpjv-worker-b-snzrj Pulling image "k8s.gcr.io/liveness" Normal Pulled 10s kubelet, ci-ln-37hz77b-f76d1-wdpjv-worker-b-snzrj Successfully pulled image "k8s.gcr.io/liveness" in 244.116568ms ---- - diff --git a/modules/applications-create-using-cli-image.adoc b/modules/applications-create-using-cli-image.adoc index 94fa638150..b35597546b 100644 --- a/modules/applications-create-using-cli-image.adoc +++ b/modules/applications-create-using-cli-image.adoc @@ -5,24 +5,18 @@ [id="applications-create-using-cli-image_{context}"] = Creating an application from an image -You can deploy an application from an existing image. Images can come from -imagestreams in the {product-title} server, images in a specific registry, or -images in the local Docker server. +You can deploy an application from an existing image. Images can come from image streams in the {product-title} server, images in a specific registry, or images in the local Docker server. 
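As a hedged illustration of the disambiguation flags that the next paragraph introduces, the following commands show the general shape of each invocation; the registry host, image, and image stream names are placeholders only:

[source,terminal]
----
$ oc new-app --docker-image=myregistry:5000/example/myimage
$ oc new-app --image-stream=myimagestream:mytag
----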
-The `new-app` command attempts to determine the type of image specified in the -arguments passed to it. However, you can explicitly tell `new-app` whether the -image is a container image using the `--docker-image` argument or an imagestream -using the `-i|--image-stream` argument. +The `new-app` command attempts to determine the type of image specified in the arguments passed to it. However, you can explicitly tell `new-app` whether the image is a container image using the `--docker-image` argument or an image stream using the `-i|--image-stream` argument. [NOTE] ==== -If you specify an image from your local Docker repository, you must ensure that -the same image is available to the {product-title} cluster nodes. +If you specify an image from your local Docker repository, you must ensure that the same image is available to the {product-title} cluster nodes. ==== -== DockerHub MySQL image +== Docker Hub MySQL image -Create an application from the DockerHub MySQL image, for example: +Create an application from the Docker Hub MySQL image, for example: [source,terminal] ---- @@ -31,17 +25,16 @@ $ oc new-app mysql == Image in a private registry -Create an application using an image in a private registry, specify the full -container image specification: +Create an application using an image in a private registry, specify the full container image specification: [source,terminal] ---- $ oc new-app myregistry:5000/example/myimage ---- -== Existing imagestream and optional imagestreamtag +== Existing image stream and optional image stream tag -Create an application from an existing imagestream and optional imagestreamtag: +Create an application from an existing image stream and optional image stream tag: [source,terminal] ---- diff --git a/modules/applications-create-using-cli-modify.adoc b/modules/applications-create-using-cli-modify.adoc index c6c6be3487..4c34868e0e 100644 --- a/modules/applications-create-using-cli-modify.adoc +++ b/modules/applications-create-using-cli-modify.adoc @@ -5,11 +5,7 @@ [id="applications-create-using-cli-modify_{context}"] = Modifying application creation -The `new-app` command generates {product-title} objects that build, deploy, and -run the application that is created. Normally, these objects are created in the -current project and assigned names that are derived from the input source -repositories or the input images. However, with `new-app` you can modify this -behavior. +The `new-app` command generates {product-title} objects that build, deploy, and run the application that is created. Normally, these objects are created in the current project and assigned names that are derived from the input source repositories or the input images. However, with `new-app` you can modify this behavior. .`new-app` output objects [cols="2,8",options="header"] @@ -18,42 +14,29 @@ behavior. |Object |Description |`BuildConfig` -|A `BuildConfig` is created for each source repository that is specified in the -command line. The `BuildConfig` specifies the strategy to use, the source -location, and the build output location. +|A `BuildConfig` object is created for each source repository that is specified in the command line. The `BuildConfig` object specifies the strategy to use, the source location, and the build output location. |`ImageStreams` -|For `BuildConfig`, two `ImageStreams` are usually created. One -represents the input image. With `Source` builds, this is the builder image. +|For the `BuildConfig` object, two image streams are usually created. 
One represents the input image. With source builds, this is the builder image. ifndef::openshift-online[] With `Docker` builds, this is the *FROM* image. endif::[] -The second one represents the output image. If a container image was specified -as input to `new-app`, then an imagestream is created for that image as well. +The second one represents the output image. If a container image was specified as input to `new-app`, then an image stream is created for that image as well. |`DeploymentConfig` -|A `DeploymentConfig` is created either to deploy the output of a build, or a -specified image. The `new-app` command creates `emptyDir` volumes for all Docker -volumes that are specified in containers included in the resulting -`DeploymentConfig`. +|A `DeploymentConfig` object is created either to deploy the output of a build, or a specified image. The `new-app` command creates `emptyDir` volumes for all Docker volumes that are specified in containers included in the resulting `DeploymentConfig` object . |`Service` -|The `new-app` command attempts to detect exposed ports in input images. It -uses the lowest numeric exposed port to generate a service that exposes that -port. In order to expose a different port, after `new-app` has completed, simply -use the `oc expose` command to generate additional services. +|The `new-app` command attempts to detect exposed ports in input images. It uses the lowest numeric exposed port to generate a service that exposes that port. In order to expose a different port, after `new-app` has completed, simply use the `oc expose` command to generate additional services. |Other -|Other objects can be generated when instantiating templates, according to the - template. +|Other objects can be generated when instantiating templates, according to the template. |=== == Specifying environment variables -When generating applications from a template, source, or an image, you can use -the `-e|--env` argument to pass environment variables to the application -container at run time: +When generating applications from a template, source, or an image, you can use the `-e|--env` argument to pass environment variables to the application container at run time: [source,terminal] ---- @@ -79,8 +62,7 @@ Read the variables from the file: $ oc new-app openshift/postgresql-92-centos7 --env-file=postgresql.env ---- -Additionally, environment variables can be given on standard input by using -`--env-file=-`: +Additionally, environment variables can be given on standard input by using `--env-file=-`: [source,terminal] ---- @@ -89,15 +71,12 @@ $ cat postgresql.env | oc new-app openshift/postgresql-92-centos7 --env-file=- [NOTE] ==== -Any `BuildConfig` objects created as part of `new-app` processing are not -updated with environment variables passed with the `-e|--env` or `--env-file` argument. +Any `BuildConfig` objects created as part of `new-app` processing are not updated with environment variables passed with the `-e|--env` or `--env-file` argument. 
==== == Specifying build environment variables -When generating applications from a template, source, or an image, you can use -the `--build-env` argument to pass environment variables to the build container -at run time: +When generating applications from a template, source, or an image, you can use the `--build-env` argument to pass environment variables to the build container at run time: [source,terminal] ---- @@ -121,8 +100,7 @@ Read the variables from the file: $ oc new-app openshift/ruby-23-centos7 --build-env-file=ruby.env ---- -Additionally, environment variables can be given on standard input by using -`--build-env-file=-`: +Additionally, environment variables can be given on standard input by using `--build-env-file=-`: [source,terminal] ---- @@ -131,10 +109,7 @@ $ cat ruby.env | oc new-app openshift/ruby-23-centos7 --build-env-file=- == Specifying labels -When generating applications from source, images, or templates, you -can use the `-l|--label` argument to add labels to the created objects. Labels -make it easy to collectively select, configure, and delete objects associated -with the application. +When generating applications from source, images, or templates, you can use the `-l|--label` argument to add labels to the created objects. Labels make it easy to collectively select, configure, and delete objects associated with the application. [source,terminal] ---- @@ -143,11 +118,7 @@ $ oc new-app https://github.com/openshift/ruby-hello-world -l name=hello-world == Viewing the output without creation -To see a dry-run of running the `new-app` command, you can use the `-o|--output` -argument with a `yaml` or `json` value. You can then use the output to preview -the objects that are created or redirect it to a file that you can edit. -After you are satisfied, you can use `oc create` to create the {product-title} -objects. +To see a dry-run of running the `new-app` command, you can use the `-o|--output` argument with a `yaml` or `json` value. You can then use the output to preview the objects that are created or redirect it to a file that you can edit. After you are satisfied, you can use `oc create` to create the {product-title} objects. To output `new-app` artifacts to a file, run the following: @@ -173,9 +144,7 @@ $ oc create -f myapp.yaml == Creating objects with different names -Objects created by `new-app` are normally named after the source repository, or -the image used to generate them. You can set the name of the objects produced by -adding a `--name` flag to the command: +Objects created by `new-app` are normally named after the source repository, or the image used to generate them. You can set the name of the objects produced by adding a `--name` flag to the command: [source,terminal] ---- @@ -184,8 +153,7 @@ $ oc new-app https://github.com/openshift/ruby-hello-world --name=myapp == Creating objects in a different project -Normally, `new-app` creates objects in the current project. However, you can -create objects in a different project by using the `-n|--namespace` argument: +Normally, `new-app` creates objects in the current project. However, you can create objects in a different project by using the `-n|--namespace` argument: [source,terminal] ---- @@ -194,10 +162,7 @@ $ oc new-app https://github.com/openshift/ruby-hello-world -n myproject == Creating multiple objects -The `new-app` command allows creating multiple applications specifying multiple -parameters to `new-app`. Labels specified in the command line apply to all -objects created by the single command. 
Environment variables apply to all -components created from source or images. +The `new-app` command allows creating multiple applications specifying multiple parameters to `new-app`. Labels specified in the command line apply to all objects created by the single command. Environment variables apply to all components created from source or images. To create an application from a source repository and a Docker Hub image: @@ -208,19 +173,12 @@ $ oc new-app https://github.com/openshift/ruby-hello-world mysql [NOTE] ==== -If a source code repository and a builder image are specified as separate -arguments, `new-app` uses the builder image as the builder for the source code -repository. If this is not the intent, specify the required builder image for -the source using the `~` separator. +If a source code repository and a builder image are specified as separate arguments, `new-app` uses the builder image as the builder for the source code repository. If this is not the intent, specify the required builder image for the source using the `~` separator. ==== == Grouping images and source in a single pod -The `new-app` command allows deploying multiple images together in a single pod. -In order to specify which images to group together, use the `+` separator. The -`--group` command line argument can also be used to specify the images that should -be grouped together. To group the image built from a source repository with -other images, specify its builder image in the group: +The `new-app` command allows deploying multiple images together in a single pod. In order to specify which images to group together, use the `+` separator. The `--group` command line argument can also be used to specify the images that should be grouped together. To group the image built from a source repository with other images, specify its builder image in the group: [source,terminal] ---- @@ -239,9 +197,7 @@ $ oc new-app \ == Searching for images, templates, and other inputs -To search for images, templates, and other inputs for the `oc new-app` command, -add the `--search` and `--list` flags. For example, to find all of the images or -templates that include PHP: +To search for images, templates, and other inputs for the `oc new-app` command, add the `--search` and `--list` flags. For example, to find all of the images or templates that include PHP: [source,terminal] ---- diff --git a/modules/applications-create-using-cli-source-code.adoc b/modules/applications-create-using-cli-source-code.adoc index 9a62e672c4..be5aa0393b 100644 --- a/modules/applications-create-using-cli-source-code.adoc +++ b/modules/applications-create-using-cli-source-code.adoc @@ -5,17 +5,11 @@ [id="applications-create-using-cli-source-code_{context}"] = Creating an application from source code -With the `new-app` command you can create applications from source code in a -local or remote Git repository. +With the `new-app` command you can create applications from source code in a local or remote Git repository. -The `new-app` command creates a build configuration, which itself creates a new -application image from your source code. The `new-app` command typically also -creates a deployment configuration to deploy the new image, and a service to -provide load-balanced access to the deployment running your image. +The `new-app` command creates a build configuration, which itself creates a new application image from your source code. 
The `new-app` command typically also creates a `DeploymentConfig` object to deploy the new image, and a service to provide load-balanced access to the deployment running your image. -{product-title} automatically detects whether the `Pipeline` or `Source` -build strategy should be used, and in the case of `Source` builds, -detects an appropriate language builder image. +{product-title} automatically detects whether the pipeline or source build strategy should be used, and in the case of source builds, detects an appropriate language builder image. == Local @@ -28,9 +22,7 @@ $ oc new-app / [NOTE] ==== -If you use a local Git repository, the repository must have a remote named -`origin` that points to a URL that is accessible by the {product-title} cluster. If -there is no recognized remote, running the `new-app` command will create a binary build. +If you use a local Git repository, the repository must have a remote named `origin` that points to a URL that is accessible by the {product-title} cluster. If there is no recognized remote, running the `new-app` command will create a binary build. ==== == Remote @@ -51,14 +43,10 @@ $ oc new-app https://github.com/youruser/yourprivaterepo --source-secret=yoursec [NOTE] ==== -If you use a private remote Git repository, you can use the `--source-secret` flag -to specify an existing source clone secret that will get injected into your -`BuildConfig` to access the repository. +If you use a private remote Git repository, you can use the `--source-secret` flag to specify an existing source clone secret that will get injected into your build config to access the repository. ==== -You can use a subdirectory of your source code repository by specifying a -`--context-dir` flag. To create an application from a remote Git repository and -a context subdirectory: +You can use a subdirectory of your source code repository by specifying a `--context-dir` flag. To create an application from a remote Git repository and a context subdirectory: [source,terminal] ---- @@ -66,8 +54,7 @@ $ oc new-app https://github.com/sclorg/s2i-ruby-container.git \ --context-dir=2.0/test/puma-test-app ---- -Also, when specifying a remote URL, you can specify a Git branch to use by -appending `#` to the end of the URL: +Also, when specifying a remote URL, you can specify a Git branch to use by appending `#` to the end of the URL: [source,terminal] ---- @@ -76,14 +63,9 @@ $ oc new-app https://github.com/openshift/ruby-hello-world.git#beta4 == Build strategy detection -If a `Jenkinsfile` exists in the root or specified context directory of the -source repository when creating a new application, {product-title} generates a -Pipeline build strategy. +If a Jenkins file exists in the root or specified context directory of the source repository when creating a new application, {product-title} generates a pipeline build strategy. Otherwise, it generates a source build strategy. -Otherwise, it generates a Source build strategy. - -Override the build strategy by setting the `--strategy` flag to either -`pipeline` or `source`. +Override the build strategy by setting the `--strategy` flag to either `pipeline` or `source`. [source,terminal] ---- @@ -92,17 +74,14 @@ $ oc new-app /home/user/code/myapp --strategy=docker [NOTE] ==== -The `oc` command requires that files containing build sources are available in a -remote Git repository. For all source builds, you must use `git remote -v`. +The `oc` command requires that files containing build sources are available in a remote Git repository. 
For all source builds, you must use `git remote -v`. ==== -== Language Detection +== Language detection -If you use the `Source` build strategy, `new-app` attempts to determine the -language builder to use by the presence of certain files in the root or -specified context directory of the repository: +If you use the source build strategy, `new-app` attempts to determine the language builder to use by the presence of certain files in the root or specified context directory of the repository: -.Languages Detected by `new-app` +.Languages detected by `new-app` [cols="4,8",options="header"] |=== @@ -136,27 +115,19 @@ endif::[] |`Godeps`, `main.go` |=== -After a language is detected, `new-app` searches the {product-title} server for -imagestreamtags that have a `supports` annotation matching the detected language, -or an imagestream that matches the name of the detected language. If a match is -not found, `new-app` searches the link:https://registry.hub.docker.com[Docker Hub -registry] for an image that matches the detected language based on name. +After a language is detected, `new-app` searches the {product-title} server for image stream tags that have a `supports` annotation matching the detected language, or an image stream that matches the name of the detected language. If a match is not found, `new-app` searches the link:https://registry.hub.docker.com[Docker Hub registry] for an image that matches the detected language based on name. -You can override the image the builder uses for a particular source -repository by specifying the image, either an imagestream or container -specification, and the repository with a `~` as a separator. Note that if this -is done, build strategy detection and language detection are not carried out. +You can override the image the builder uses for a particular source repository by specifying the image, either an image stream or container +specification, and the repository with a `~` as a separator. Note that if this is done, build strategy detection and language detection are not carried out. -For example, to use the `myproject/my-ruby` imagestream with the source in a -remote repository: +For example, to use the `myproject/my-ruby` imagestream with the source in a remote repository: [source,terminal] ---- $ oc new-app myproject/my-ruby~https://github.com/openshift/ruby-hello-world.git ---- -To use the `openshift/ruby-20-centos7:latest` container imagestream with -the source in a local repository: +To use the `openshift/ruby-20-centos7:latest` container image stream with the source in a local repository: [source,terminal] ---- @@ -165,17 +136,9 @@ $ oc new-app openshift/ruby-20-centos7:latest~/home/user/code/my-ruby-app [NOTE] ==== -Language detection requires the Git client to be locally installed so that your -repository can be cloned and inspected. If Git is not available, you can avoid -the language detection step by specifying the builder image to use with your -repository with the `~` syntax. +Language detection requires the Git client to be locally installed so that your repository can be cloned and inspected. If Git is not available, you can avoid the language detection step by specifying the builder image to use with your repository with the `~` syntax. -The `-i ` invocation requires that `new-app` attempt -to clone `repository` in order to determine what type of artifact it is, so this -will fail if Git is not available. 
+The `-i ` invocation requires that `new-app` attempt to clone `repository` in order to determine what type of artifact it is, so this will fail if Git is not available. -The `-i --code ` invocation requires -`new-app` clone `repository` in order to determine whether `image` should be -used as a builder for the source code, or deployed separately, as in the case of -a database image. +The `-i --code ` invocation requires `new-app` clone `repository` in order to determine whether `image` should be used as a builder for the source code, or deployed separately, as in the case of a database image. ==== diff --git a/modules/applications-create-using-cli-template.adoc b/modules/applications-create-using-cli-template.adoc index d7d6714dfc..1ec4ed1853 100644 --- a/modules/applications-create-using-cli-template.adoc +++ b/modules/applications-create-using-cli-template.adoc @@ -31,10 +31,9 @@ To create a new application by referencing a template file in your local file sy $ oc new-app -f examples/sample-app/application-template-stibuild.json ---- -== Template Parameters +== Template parameters -When creating an application based on a template, use the -`-p|--param` argument to set parameter values that are defined by the template: +When creating an application based on a template, use the `-p|--param` argument to set parameter values that are defined by the template: [source,terminal] ---- diff --git a/modules/deployments-ab-testing-lb.adoc b/modules/deployments-ab-testing-lb.adoc index e841546c7d..5f847802f5 100644 --- a/modules/deployments-ab-testing-lb.adoc +++ b/modules/deployments-ab-testing-lb.adoc @@ -5,21 +5,11 @@ [id="deployments-ab-testing-lb_{context}"] = Load balancing for A/B testing -The user sets up a route with multiple services. Each service handles a version -of the application. +The user sets up a route with multiple services. Each service handles a version of the application. -Each service is assigned a `weight` and the portion of requests to each service -is the `service_weight` divided by the `sum_of_weights`. The `weight` for each -service is distributed to the service's endpoints so that the sum of the -endpoint `weights` is the service `weight`. +Each service is assigned a `weight` and the portion of requests to each service is the `service_weight` divided by the `sum_of_weights`. The `weight` for each service is distributed to the service's endpoints so that the sum of the endpoint `weights` is the service `weight`. -The route can have up to four services. The `weight` for the service can be -between `0` and `256`. When the `weight` is `0`, the service does not participate in load-balancing -but continues to serve existing persistent connections. When the service `weight` -is not `0`, each endpoint has a minimum `weight` of `1`. Because of this, a -service with a lot of endpoints can end up with higher `weight` than desired. -In this case, reduce the number of pods to get the desired load balance -`weight`. +The route can have up to four services. The `weight` for the service can be between `0` and `256`. When the `weight` is `0`, the service does not participate in load-balancing but continues to serve existing persistent connections. When the service `weight` is not `0`, each endpoint has a minimum `weight` of `1`. Because of this, a service with a lot of endpoints can end up with higher `weight` than desired. In this case, reduce the number of pods to get the desired load balance `weight`. 
//// See the @@ -35,10 +25,7 @@ weighting.png[Visualization of Alternate Back Ends in the Web Console] To set up the A/B environment: -. Create the two applications and give them different names. Each creates a -DeploymentConfig. The applications are versions of the same program; one -is usually the current production version and the other the proposed new -version. +. Create the two applications and give them different names. Each creates a `DeploymentConfig` object. The applications are versions of the same program; one is usually the current production version and the other the proposed new version. .. Create the first application. The following example creates an application called `ab-example-a`: + [source,terminal] @@ -55,33 +42,22 @@ $ oc new-app openshift/deployment-example --name=ab-example-b + Both applications are deployed and services are created. -. Make the application available externally via a route. At this point, you can -expose either. It can be convenient to expose the current production version -first and later modify the route to add the new version. +. Make the application available externally via a route. At this point, you can expose either. It can be convenient to expose the current production version first and later modify the route to add the new version. + [source,terminal] ---- $ oc expose svc/ab-example-a ---- + -Browse to the application at `ab-example-.` to verify -that you see the desired version. +Browse to the application at `ab-example-.` to verify that you see the desired version. -. When you deploy the route, the router balances the traffic according to the -`weights` specified for the services. At this point, there is a single service -with default `weight=1` so all requests go to it. Adding the other service as an -`alternateBackends` and adjusting the `weights` brings the A/B setup to -life. This can be done by the `oc set route-backends` command or by editing the -route. +. When you deploy the route, the router balances the traffic according to the `weights` specified for the services. At this point, there is a single service with default `weight=1` so all requests go to it. Adding the other service as an `alternateBackends` and adjusting the `weights` brings the A/B setup to life. This can be done by the `oc set route-backends` command or by editing the route. + -Setting the `oc set route-backend` to `0` means the service does not participate -in load-balancing, but continues to serve existing persistent connections. +Setting the `oc set route-backend` to `0` means the service does not participate in load-balancing, but continues to serve existing persistent connections. + [NOTE] ==== -Changes to the route just change the portion of traffic to the various services. -You might have to scale the DeploymentConfigs to adjust the number of pods -to handle the anticipated loads. +Changes to the route just change the portion of traffic to the various services. You might have to scale the deployment to adjust the number of pods to handle the anticipated loads. ==== + To edit the route, run: @@ -119,7 +95,7 @@ spec: . Navigate to the *Networking* -> *Routes* page. -. Click the Actions menu {kebab} next to the Route you want to edit and select *Edit Route*. +. Click the Actions menu {kebab} next to the route you want to edit and select *Edit Route*. . Edit the YAML file. Update the `weight` to be an integer between `0` and `256` that specifies the relative weight of the target against other target reference objects. 
The value `0` suppresses requests to this back end. The default is `100`. Run `oc explain routes.spec.alternateBackends` for more information about the options. @@ -149,8 +125,7 @@ spec: .Procedure -. To manage the services and corresponding weights load balanced by the route, -use the `oc set route-backends` command: +. To manage the services and corresponding weights load balanced by the route, use the `oc set route-backends` command: + [source,terminal] ---- @@ -158,20 +133,16 @@ $ oc set route-backends ROUTENAME \ [--zero|--equal] [--adjust] SERVICE=WEIGHT[%] [...] [options] ---- + -For example, the following sets `ab-example-a` as the primary service with -`weight=198` and `ab-example-b` as the first alternate service with a -`weight=2`: +For example, the following sets `ab-example-a` as the primary service with `weight=198` and `ab-example-b` as the first alternate service with a `weight=2`: + [source,terminal] ---- $ oc set route-backends ab-example ab-example-a=198 ab-example-b=2 ---- + -This means 99% of traffic is sent to service `ab-example-a` and 1% to -service `ab-example-b`. +This means 99% of traffic is sent to service `ab-example-a` and 1% to service `ab-example-b`. + -This command does not scale the DeploymentConfigs. You might be required to do -so to have enough pods to handle the request load. +This command does not scale the deployment. You might be required to do so to have enough pods to handle the request load. . Run the command with no flags to verify the current configuration: + @@ -188,11 +159,7 @@ routes/ab-example Service ab-example-a 198 (99%) routes/ab-example Service ab-example-b 2 (1%) ---- -. To alter the weight of an individual service relative to itself or to the -primary service, use the `--adjust` flag. Specifying a percentage adjusts the -service relative to either the primary or the first alternate (if you specify -the primary). If there are other backends, their weights are kept proportional -to the changed. +. To alter the weight of an individual service relative to itself or to the primary service, use the `--adjust` flag. Specifying a percentage adjusts the service relative to either the primary or the first alternate (if you specify the primary). If there are other backends, their weights are kept proportional to the changed. + The following example alters the weight of `ab-example-a` and `ab-example-b` services: + @@ -222,22 +189,19 @@ The `--equal` flag sets the `weight` of all services to `100`: $ oc set route-backends ab-example --equal ---- + -The `--zero` flag sets the `weight` of all services to `0`. All requests then -return with a 503 error. +The `--zero` flag sets the `weight` of all services to `0`. All requests then return with a 503 error. + [NOTE] ==== Not all routers may support multiple or weighted backends. ==== - [id="deployments-ab-one-service-multi-dc_{context}"] -== One service, multiple DeploymentConfigs +== One service, multiple `DeploymentConfig` objects .Procedure -. Create a new application, adding a label `ab-example=true` that will be common -to all shards: +. Create a new application, adding a label `ab-example=true` that will be common to all shards: + [source,terminal] ---- @@ -246,18 +210,16 @@ $ oc new-app openshift/deployment-example --name=ab-example-a + The application is deployed and a service is created. This is the first shard. -. Make the application available via a route (or use the service IP directly): +. 
Make the application available via a route, or use the service IP directly: + [source,terminal] ---- $ oc expose svc/ab-example-a --name=ab-example ---- -. Browse to the application at `ab-example-.` to verify -you see the `v1` image. +. Browse to the application at `ab-example-.` to verify you see the `v1` image. -. Create a second shard based on the same source image and label as the first -shard, but with a different tagged version and unique environment variables: +. Create a second shard based on the same source image and label as the first shard, but with a different tagged version and unique environment variables: + [source,terminal] ---- @@ -266,10 +228,7 @@ $ oc new-app openshift/deployment-example:v2 \ SUBTITLE="shard B" COLOR="red" ---- -. At this point, both sets of pods are being served under the route. However, -because both browsers (by leaving a connection open) and the router (by default, -through a cookie) attempt to preserve your connection to a back-end server, -you might not see both shards being returned to you. +. At this point, both sets of pods are being served under the route. However, because both browsers (by leaving a connection open) and the router (by default, through a cookie) attempt to preserve your connection to a back-end server, you might not see both shards being returned to you. + To force your browser to one or the other shard: @@ -291,9 +250,7 @@ $ oc scale dc/ab-example-a --replicas=1; oc scale dc/ab-example-b --replicas=0 + Refresh your browser to show `v1` and `shard A` (in blue). -. If you trigger a deployment on either shard, only the pods in that shard are -affected. You can trigger a deployment by changing the `SUBTITLE` environment -variable in either DeploymentConfig: +. If you trigger a deployment on either shard, only the pods in that shard are affected. You can trigger a deployment by changing the `SUBTITLE` environment variable in either `DeploymentConfig` object: + [source,terminal] ---- diff --git a/modules/deployments-accessing-private-repos.adoc b/modules/deployments-accessing-private-repos.adoc index 0a549e2890..a11009f93f 100644 --- a/modules/deployments-accessing-private-repos.adoc +++ b/modules/deployments-accessing-private-repos.adoc @@ -3,20 +3,16 @@ // * applications/deployments/managing-deployment-processes.adoc [id="deployments-accessing-private-repos_{context}"] -= Accessing private repositories from DeploymentConfigs += Accessing private repositories from `DeploymentConfig` objects -You can add a Secret to your DeploymentConfig so that it can access images from -a private repository. This procedure shows the {product-title} web console -method. +You can add a secret to your `DeploymentConfig` object so that it can access images from a private repository. This procedure shows the {product-title} web console method. .Procedure . Create a new project. -. From the *Workloads* page, create a Secret that contains credentials for -accessing a private image repository. +. From the *Workloads* page, create a secret that contains credentials for accessing a private image repository. -. Create a DeploymentConfig. +. Create a `DeploymentConfig` object. -. On the DeploymentConfig editor page, set the *Pull Secret* and save your -changes. +. On the `DeploymentConfig` object editor page, set the *Pull Secret* and save your changes. 
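The same result can be reached from the CLI. The following is a minimal sketch, assuming illustrative values only (a secret named `private-registry`, a registry at `registry.example.com`, and placeholder credentials): the secret is created with `oc create secret docker-registry` and then referenced as a pull secret in the `DeploymentConfig` object's pod template.

[source,terminal]
----
$ oc create secret docker-registry private-registry \
    --docker-server=registry.example.com \
    --docker-username=<username> \
    --docker-password=<password> \
    --docker-email=<email>
----

[source,yaml]
----
# Illustrative pod template excerpt from a DeploymentConfig object
spec:
  template:
    spec:
      containers:
        - name: app
          image: registry.example.com/myproject/app:latest
      imagePullSecrets:
        - name: private-registry
----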
diff --git a/modules/deployments-blue-green.adoc b/modules/deployments-blue-green.adoc index 551c6b3b3c..b7c56e2cd3 100644 --- a/modules/deployments-blue-green.adoc +++ b/modules/deployments-blue-green.adoc @@ -5,38 +5,25 @@ [id="deployments-blue-green_{context}"] = Blue-green deployments -Blue-green deployments involve running two versions of an application at the -same time and moving traffic from the in-production version (the green version) -to the newer version (the blue version). You can use a Rolling strategy or -switch services in a route. +Blue-green deployments involve running two versions of an application at the same time and moving traffic from the in-production version (the green version) to the newer version (the blue version). You can use a rolling strategy or switch services in a route. -Because many applications depend on persistent data, you must have an -application that supports _N-1 compatibility_, which means it shares data and -implements live migration between the database, store, or disk by creating two -copies of the data layer. +Because many applications depend on persistent data, you must have an application that supports _N-1 compatibility_, which means it shares data and implements live migration between the database, store, or disk by creating two copies of the data layer. -Consider the data used in testing the new version. If it is the production data, -a bug in the new version can break the production version. +Consider the data used in testing the new version. If it is the production data, a bug in the new version can break the production version. [id="deployments-blue-green-setting-up_{context}"] == Setting up a blue-green deployment -Blue-green deployments use two DeploymentConfigs. Both are running, and -the one in production depends on the service the route specifies, with each -DeploymentConfig exposed to a different service. +Blue-green deployments use two `DeploymentConfig` objects. Both are running, and the one in production depends on the service the route specifies, with each `DeploymentConfig` object exposed to a different service. [NOTE] ==== -Routes are intended for web (HTTP and HTTPS) traffic, so this technique is best -suited for web applications. +Routes are intended for web (HTTP and HTTPS) traffic, so this technique is best suited for web applications. ==== -You can create a new route to the new version and test it. When ready, change -the service in the production route to point to the new service and the -new (blue) version is live. +You can create a new route to the new version and test it. When ready, change the service in the production route to point to the new service and the new (blue) version is live. -If necessary, you can roll back to the older (green) version by switching -the service back to the previous version. +If necessary, you can roll back to the older (green) version by switching the service back to the previous version. .Procedure @@ -62,8 +49,7 @@ $ oc new-app openshift/deployment-example:v2 --name=example-blue $ oc expose svc/example-green --name=bluegreen-example ---- -. Browse to the application at `example-green..` to -verify you see the `v1` image. +. Browse to the application at `example-green..` to verify you see the `v1` image. . Edit the route and change the service name to `example-blue`: + @@ -72,5 +58,4 @@ verify you see the `v1` image. $ oc patch route/bluegreen-example -p '{"spec":{"to":{"name":"example-blue"}}}' ---- -. 
To verify that the route has changed, refresh the browser until you see the -`v2` image. +. To verify that the route has changed, refresh the browser until you see the `v2` image. diff --git a/modules/deployments-canary-deployments.adoc b/modules/deployments-canary-deployments.adoc index 994da2addf..6ac3331f55 100644 --- a/modules/deployments-canary-deployments.adoc +++ b/modules/deployments-canary-deployments.adoc @@ -5,13 +5,6 @@ [id="deployments-canary-deployments_{context}"] = Canary deployments -All Rolling deployments in {product-title} are _canary deployments_; a new -version (the canary) is tested before all of the old instances are replaced. If -the readiness check never succeeds, the canary instance is removed and the -DeploymentConfig will be automatically rolled back. +All rolling deployments in {product-title} are _canary deployments_; a new version (the canary) is tested before all of the old instances are replaced. If the readiness check never succeeds, the canary instance is removed and the `DeploymentConfig` object will be automatically rolled back. -The readiness check is part of the application code and can be as sophisticated -as necessary to ensure the new instance is ready to be used. If you must -implement more complex checks of the application (such as sending real user -workloads to the new instance), consider implementing a Custom deployment or -using a blue-green deployment strategy. +The readiness check is part of the application code and can be as sophisticated as necessary to ensure the new instance is ready to be used. If you must implement more complex checks of the application (such as sending real user workloads to the new instance), consider implementing a custom deployment or using a blue-green deployment strategy. diff --git a/modules/deployments-comparing-deploymentconfigs.adoc b/modules/deployments-comparing-deploymentconfigs.adoc index d4eab62f40..a5d7c1da88 100644 --- a/modules/deployments-comparing-deploymentconfigs.adoc +++ b/modules/deployments-comparing-deploymentconfigs.adoc @@ -3,50 +3,33 @@ // * applications/deployments/what-deployments-are.adoc [id="deployments-comparing-deploymentconfigs_{context}"] -= Comparing Deployments and DeploymentConfigs += Comparing `Deployment` and `DeploymentConfig` objects -Both Kubernetes Deployments and {product-title}-provided -DeploymentConfigs are supported in {product-title}; however, it is -recommended to use Deployments unless you need a specific feature or behavior -provided by DeploymentConfigs. +Both Kubernetes `Deployment` objects and {product-title}-provided `DeploymentConfig` objects are supported in {product-title}; however, it is recommended to use `Deployment` objects unless you need a specific feature or behavior provided by `DeploymentConfig` objects. -The following sections go into more detail on the differences between the two -object types to further help you decide which type to use. +The following sections go into more detail on the differences between the two object types to further help you decide which type to use. [id="deployments-design_{context}"] == Design -One important difference between Deployments and DeploymentConfigs is the -properties of the link:https://en.wikipedia.org/wiki/CAP_theorem[CAP theorem] -that each design has chosen for the rollout process. DeploymentConfigs prefer -consistency, whereas Deployments take availability over consistency. 
+One important difference between `Deployment` and `DeploymentConfig` objects is the properties of the link:https://en.wikipedia.org/wiki/CAP_theorem[CAP theorem] that each design has chosen for the rollout process. `DeploymentConfig` objects prefer consistency, whereas `Deployment` objects take availability over consistency. -For DeploymentConfigs, if a node running a deployer pod goes down, it will -not get replaced. The process waits until the node comes back online or is -manually deleted. Manually deleting the node also deletes the corresponding pod. -This means that you can not delete the pod to unstick the rollout, as the -kubelet is responsible for deleting the associated pod. +For `DeploymentConfig` objects, if a node running a deployer pod goes down, it will not get replaced. The process waits until the node comes back online or is manually deleted. Manually deleting the node also deletes the corresponding pod. This means that you cannot delete the pod to unstick the rollout, as the kubelet is responsible for deleting the associated pod. -However, Deployments rollouts are driven from a controller manager. The -controller manager runs in high availability mode on masters and uses leader -election algorithms to value availability over consistency. During a failure it -is possible for other masters to act on the same Deployment at the same time, -but this issue will be reconciled shortly after the failure occurs. +However, deployment rollouts are driven from a controller manager. The controller manager runs in high availability mode on masters and uses leader election algorithms to value availability over consistency. During a failure it is possible for other masters to act on the same deployment at the same time, but this issue will be reconciled shortly after the failure occurs. [id="delpoymentconfigs-specific-features_{context}"] -== DeploymentConfigs-specific features +== `DeploymentConfig` object-specific features [discrete] ==== Automatic rollbacks -Currently, Deployments do not support automatically rolling back to the last -successfully deployed ReplicaSet in case of a failure. +Currently, deployments do not support automatically rolling back to the last successfully deployed replica set in case of a failure. [discrete] ==== Triggers -Deployments have an implicit `ConfigChange` trigger in that every -change in the pod template of a deployment automatically triggers a new rollout. +Deployments have an implicit config change trigger in that every change in the pod template of a deployment automatically triggers a new rollout. If you do not want new rollouts on pod template changes, pause the deployment: [source,terminal] ---- @@ -62,44 +45,27 @@ Deployments do not yet support any lifecycle hooks. [discrete] ==== Custom strategies -Deployments do not support user-specified Custom deployment -strategies yet. +Deployments do not support user-specified custom deployment strategies yet. [id="delpoyments-specific-features_{context}"] -== Deployments-specific features +== Deployment-specific features [discrete] ==== Rollover -The deployment process for Deployments is driven by a controller -loop, in contrast to DeploymentConfigs which use deployer pods for every -new rollout. This means that a Deployment can have as many active -ReplicaSets as possible, and eventually the deployment controller will scale -down all old ReplicaSets and scale up the newest one.
+The deployment process for `Deployment` objects is driven by a controller loop, in contrast to `DeploymentConfig` objects which use deployer pods for every new rollout. This means that the `Deployment` object can have as many active replica sets as possible, and eventually the deployment controller will scale down all old replica sets and scale up the newest one. -DeploymentConfigs can have at most one deployer pod running, otherwise -multiple deployers end up conflicting while trying to scale up what they think -should be the newest ReplicationController. Because of this, only two -ReplicationControllers can be active at any point in time. Ultimately, this -translates to faster rapid rollouts for Deployments. +`DeploymentConfig` objects can have at most one deployer pod running, otherwise multiple deployers end up conflicting while trying to scale up what they think should be the newest replication controller. Because of this, only two replication controllers can be active at any point in time. Ultimately, this translates to faster rollouts for `Deployment` objects. [discrete] ==== Proportional scaling -Because the Deployment controller is the sole source of truth for the sizes of -new and old ReplicaSets owned by a Deployment, it is able to scale ongoing -rollouts. Additional replicas are distributed proportionally based on the size -of each replica set. +Because the deployment controller is the sole source of truth for the sizes of new and old replica sets owned by a deployment, it is able to scale ongoing rollouts. Additional replicas are distributed proportionally based on the size of each replica set. -DeploymentConfigs cannot be scaled when a rollout is ongoing because the -DeploymentConfig controller will end up having issues with the deployer -process about the size of the new ReplicationController. +`DeploymentConfig` objects cannot be scaled when a rollout is ongoing because the `DeploymentConfig` controller will end up having issues with the deployer process about the size of the new replication controller. [discrete] ==== Pausing mid-rollout -Deployments can be paused at any point in time, meaning you can also -pause ongoing rollouts. On the other hand, you cannot pause deployer pods -currently, so if you try to pause a DeploymentConfig in the middle of a -rollout, the deployer process will not be affected and will continue until it -finishes. +Deployments can be paused at any point in time, meaning you can also pause ongoing rollouts. On the other hand, you cannot pause deployer pods +currently, so if you try to pause a `DeploymentConfig` object in the middle of a rollout, the deployer process will not be affected and will continue until it finishes. diff --git a/modules/deployments-creating-rolling-deployment.adoc index 48ba481090..e6a0162f8e 100644 --- a/modules/deployments-creating-rolling-deployment.adoc +++ b/modules/deployments-creating-rolling-deployment.adoc @@ -3,41 +3,36 @@ // * applications/deployments/deployment-strategies.adoc [id="deployments-creating-rolling-deployment_{context}"] -= Creating a Rolling deployment += Creating a rolling deployment -Rolling deployments are the default type in {product-title}. You can create a -Rolling deployment using the CLI. +Rolling deployments are the default type in {product-title}. You can create a rolling deployment using the CLI. .Procedure -. Create an application based on the example deployment images found in -link:https://hub.docker.com/r/openshift/deployment-example/[DockerHub]: +. 
Create an application based on the example deployment images found in link:https://hub.docker.com/r/openshift/deployment-example/[Docker Hub]: + [source,terminal] ---- $ oc new-app openshift/deployment-example ---- -. If you have the router installed, make the application available via a route (or -use the service IP directly) +. If you have the router installed, make the application available via a route or use the service IP directly. + [source,terminal] ---- $ oc expose svc/deployment-example ---- -. Browse to the application at `deployment-example..` to -verify you see the `v1` image. +. Browse to the application at `deployment-example..` to verify you see the `v1` image. -. Scale the DeploymentConfig up to three replicas: +. Scale the `DeploymentConfig` object up to three replicas: + [source,terminal] ---- $ oc scale dc/deployment-example --replicas=3 ---- -. Trigger a new deployment automatically by tagging a new version of the example -as the `latest` tag: +. Trigger a new deployment automatically by tagging a new version of the example as the `latest` tag: + [source,terminal] ---- @@ -46,18 +41,13 @@ $ oc tag deployment-example:v2 deployment-example:latest . In your browser, refresh the page until you see the `v2` image. -. When using the CLI, the following command shows how many pods are on version 1 -and how many are on version 2. In the web console, the pods are progressively -added to v2 and removed from v1: +. When using the CLI, the following command shows how many pods are on version 1 and how many are on version 2. In the web console, the pods are progressively added to v2 and removed from v1: + [source,terminal] ---- $ oc describe dc deployment-example ---- -During the deployment process, the new ReplicationController is incrementally -scaled up. After the new pods are marked as `ready` (by passing their readiness -check), the deployment process continues. +During the deployment process, the new replication controller is incrementally scaled up. After the new pods are marked as `ready` (by passing their readiness check), the deployment process continues. -If the pods do not become ready, the process aborts, and the DeploymentConfig -rolls back to its previous version. +If the pods do not become ready, the process aborts, and the deployment rolls back to its previous version. diff --git a/modules/deployments-custom-strategy.adoc b/modules/deployments-custom-strategy.adoc index d200c4a14d..66dcb82cc8 100644 --- a/modules/deployments-custom-strategy.adoc +++ b/modules/deployments-custom-strategy.adoc @@ -5,9 +5,9 @@ [id="deployments-custom-strategy_{context}"] = Custom strategy -The Custom strategy allows you to provide your own deployment behavior. +The custom strategy allows you to provide your own deployment behavior. -.Example Custom strategy definition +.Example custom strategy definition [source,yaml] ---- strategy: @@ -20,34 +20,25 @@ strategy: value: VALUE_1 ---- -In the above example, the `organization/strategy` container image provides the -deployment behavior. The optional `command` array overrides any `CMD` directive -specified in the image's `Dockerfile`. The optional environment variables -provided are added to the execution environment of the strategy process. +In the above example, the `organization/strategy` container image provides the deployment behavior. The optional `command` array overrides any `CMD` directive specified in the image's `Dockerfile`. The optional environment variables provided are added to the execution environment of the strategy process. 
-Additionally, {product-title} provides the following environment variables to the -deployment process: +Additionally, {product-title} provides the following environment variables to the deployment process: [cols="4,8",options="header"] |=== |Environment variable |Description .^|`OPENSHIFT_DEPLOYMENT_NAME` -|The name of the new deployment (a ReplicationController). +|The name of the new deployment, a replication controller. .^|`OPENSHIFT_DEPLOYMENT_NAMESPACE` |The name space of the new deployment. |=== -The replica count of the new deployment will initially be zero. The -responsibility of the strategy is to make the new deployment active using the +The replica count of the new deployment will initially be zero. The responsibility of the strategy is to make the new deployment active using the logic that best serves the needs of the user. -Alternatively, use `customParams` to inject the custom deployment logic into the -existing deployment strategies. Provide a custom shell script logic and call the -`openshift-deploy` binary. Users do not have to supply their custom deployer -container image; in this case, the default {product-title} deployer image is -used instead: +Alternatively, use the `customParams` object to inject the custom deployment logic into the existing deployment strategies. Provide a custom shell script logic and call the `openshift-deploy` binary. Users do not have to supply their custom deployer container image; in this case, the default {product-title} deployer image is used instead: [source,yaml] ---- @@ -82,6 +73,4 @@ Halfway there Complete ---- -If the custom deployment strategy process requires access to the {product-title} -API or the Kubernetes API the container that executes the strategy can use the -service account token available inside the container for authentication. +If the custom deployment strategy process requires access to the {product-title} API or the Kubernetes API the container that executes the strategy can use the service account token available inside the container for authentication. diff --git a/modules/deployments-deploymentconfigs.adoc b/modules/deployments-deploymentconfigs.adoc index a2cccc1476..c70d86298b 100644 --- a/modules/deployments-deploymentconfigs.adoc +++ b/modules/deployments-deploymentconfigs.adoc @@ -3,41 +3,24 @@ // * applications/deployments/what-deployments-are.adoc [id="deployments-and-deploymentconfigs_{context}"] -= DeploymentConfigs += `DeploymentConfig` objects -Building on ReplicationControllers, {product-title} adds expanded support for -the software development and deployment lifecycle with the concept of -_DeploymentConfigs_. In the simplest case, a DeploymentConfig creates a new -ReplicationController and lets it start up pods. +Building on replication controllers, {product-title} adds expanded support for the software development and deployment lifecycle with the concept of `DeploymentConfig` objects. In the simplest case, a `DeploymentConfig` object creates a new replication controller and lets it start up pods. -However, {product-title} deployments from DeploymentConfigs also provide the -ability to transition from an existing deployment of an image to a new one and -also define hooks to be run before or after creating the ReplicationController. +However, {product-title} deployments from `DeploymentConfig` objects also provide the ability to transition from an existing deployment of an image to a new one and also define hooks to be run before or after creating the replication controller. 
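Each rollout from a `DeploymentConfig` object produces a new replication controller, and the replication controllers created this way typically carry a label that names their owning `DeploymentConfig` object, so the relationship can be inspected from the CLI. A minimal sketch, assuming an illustrative `DeploymentConfig` object named `frontend`:

[source,terminal]
----
$ oc get dc frontend
$ oc get rc -l openshift.io/deployment-config.name=frontend
----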
-The DeploymentConfig deployment system provides the following capabilities: +The `DeploymentConfig` deployment system provides the following capabilities: -- A DeploymentConfig, which is a template for running applications. -- Triggers that drive automated deployments in response to events. -- User-customizable deployment strategies to transition from the previous version -to the new version. A strategy runs inside a Pod commonly referred as the -deployment process. -- A set of hooks (lifecycle hooks) for executing custom behavior in different -points during the lifecycle of a deployment. -- Versioning of your application in order to support rollbacks either manually or -automatically in case of deployment failure. -- Manual replication scaling and autoscaling. +* A `DeploymentConfig` object, which is a template for running applications. +* Triggers that drive automated deployments in response to events. +* User-customizable deployment strategies to transition from the previous version to the new version. A strategy runs inside a pod commonly referred to as the deployment process. +* A set of hooks (lifecycle hooks) for executing custom behavior in different points during the lifecycle of a deployment. +* Versioning of your application in order to support rollbacks either manually or automatically in case of deployment failure. +* Manual replication scaling and autoscaling. -When you create a DeploymentConfig, a ReplicationController is created -representing the DeploymentConfig's Pod template. If the DeploymentConfig -changes, a new ReplicationController is created with the latest Pod template, -and a deployment process runs to scale down the old ReplicationController and -scale up the new one. +When you create a `DeploymentConfig` object, a replication controller is created representing the `DeploymentConfig` object's pod template. If the deployment changes, a new replication controller is created with the latest pod template, and a deployment process runs to scale down the old replication controller and scale up the new one. -Instances of your application are automatically added and removed from both -service load balancers and routers as they are created. As long as your -application supports graceful shutdown when it receives the `TERM` signal, you -can ensure that running user connections are given a chance to complete -normally. +Instances of your application are automatically added and removed from both service load balancers and routers as they are created. As long as your application supports graceful shutdown when it receives the `TERM` signal, you can ensure that running user connections are given a chance to complete normally. The {product-title} `DeploymentConfig` object defines the following details: @@ -46,15 +29,10 @@ The {product-title} `DeploymentConfig` object defines the following details: . The strategy for transitioning between deployments. . Lifecycle hooks. -Each time a deployment is triggered, whether manually or automatically, a -deployer Pod manages the deployment (including scaling down the old -ReplicationController, scaling up the new one, and running hooks). The -deployment pod remains for an indefinite amount of time after it completes the -Deployment in order to retain its logs of the Deployment. When a deployment is -superseded by another, the previous ReplicationController is retained to enable -easy rollback if needed.
+Each time a deployment is triggered, whether manually or automatically, a deployer pod manages the deployment (including scaling down the old +replication controller, scaling up the new one, and running hooks). The deployment pod remains for an indefinite amount of time after it completes the deployment in order to retain its logs of the deployment. When a deployment is superseded by another, the previous replication controller is retained to enable easy rollback if needed. -.Example DeploymentConfig definition +.Example `DeploymentConfig` definition [source,yaml] ---- apiVersion: v1 @@ -79,6 +57,6 @@ spec: strategy: type: Rolling <3> ---- -<1> A `ConfigChange` trigger causes a new Deployment to be created any time the ReplicationController template changes. -<2> An `ImageChange` trigger causes a new Deployment to be created each time a new version of the backing image is available in the named imagestream. -<3> The default `Rolling` strategy makes a downtime-free transition between Deployments. +<1> A config change trigger causes a new deployment to be created any time the replication controller template changes. +<2> An image change trigger causes a new deployment to be created each time a new version of the backing image is available in the named image stream. +<3> The default `Rolling` strategy makes a downtime-free transition between deployments. diff --git a/modules/deployments-exec-cmd-in-container.adoc b/modules/deployments-exec-cmd-in-container.adoc index a4e9d0904e..d792bf5c0c 100644 --- a/modules/deployments-exec-cmd-in-container.adoc +++ b/modules/deployments-exec-cmd-in-container.adoc @@ -5,16 +5,11 @@ [id="deployments-exe-cmd-in-container_{context}"] = Executing commands inside a container -You can add a command to a container, which modifies the container's startup -behavior by overruling the image's `ENTRYPOINT`. This is different from a -lifecycle hook, which instead can be run once per deployment at a specified -time. +You can add a command to a container, which modifies the container's start-up behavior by overruling the image's `ENTRYPOINT`. This is different from a lifecycle hook, which instead can be run once per deployment at a specified time. .Procedure -. Add the `command` parameters to the `spec` field of the DeploymentConfig. You -can also add an `args` field, which modifies the `command` (or the `ENTRYPOINT` -if `command` does not exist). +. Add the `command` parameters to the `spec` field of the `DeploymentConfig` object. You can also add an `args` field, which modifies the `command` (or the `ENTRYPOINT` if `command` does not exist). + [source,yaml] ---- @@ -31,8 +26,7 @@ spec: - '' ---- + -For example, to execute the `java` command with the `-jar` and -`/opt/app-root/springboots2idemo.jar` arguments: +For example, to execute the `java` command with the `-jar` and `/opt/app-root/springboots2idemo.jar` arguments: + [source,yaml] ---- diff --git a/modules/deployments-graceful-termination.adoc b/modules/deployments-graceful-termination.adoc index abf4b15955..ba10afd428 100644 --- a/modules/deployments-graceful-termination.adoc +++ b/modules/deployments-graceful-termination.adoc @@ -5,19 +5,9 @@ [id="deployments-graceful-termination_{context}"] = Graceful termination -{product-title} and Kubernetes give application instances time to shut down -before removing them from load balancing rotations. However, applications must -ensure they cleanly terminate user connections as well before they exit. 
+{product-title} and Kubernetes give application instances time to shut down before removing them from load balancing rotations. However, applications must ensure they cleanly terminate user connections as well before they exit. -On shutdown, {product-title} sends a `TERM` signal to the processes in the -container. Application code, on receiving `SIGTERM`, stop accepting new -connections. This ensures that load balancers route traffic to other active -instances. The application code then waits until all open connections are closed -(or gracefully terminate individual connections at the next opportunity) before -exiting. +On shutdown, {product-title} sends a `TERM` signal to the processes in the container. Application code, on receiving `SIGTERM`, stops accepting new connections. This ensures that load balancers route traffic to other active instances. The application code then waits until all open connections are closed, or gracefully terminates individual connections at the next opportunity, before exiting. -After the graceful termination period expires, a process that has not exited is -sent the `KILL` signal, which immediately ends the process. The -`terminationGracePeriodSeconds` attribute of a Pod or Pod template controls the -graceful termination period (default 30 seconds) and may be customized per -application as necessary. +After the graceful termination period expires, a process that has not exited is sent the `KILL` signal, which immediately ends the process. The +`terminationGracePeriodSeconds` attribute of a pod or pod template controls the graceful termination period (default 30 seconds) and can be customized per application as necessary. diff --git a/modules/deployments-kube-deployments.adoc index 14662a519b..831d85a783 100644 --- a/modules/deployments-kube-deployments.adoc +++ b/modules/deployments-kube-deployments.adoc @@ -5,16 +5,11 @@ [id="deployments-kube-deployments_{context}"] = Deployments -Kubernetes provides a first-class, native API object type in {product-title} -called _Deployments_. Deployments serve as a descendant of the -{product-title}-specific DeploymentConfig. +Kubernetes provides a first-class, native API object type in {product-title} called `Deployment`. `Deployment` objects serve as a descendant of the {product-title}-specific `DeploymentConfig` object. -Like DeploymentConfigs, Deployments describe the desired state of a particular -component of an application as a Pod template. Deployments create ReplicaSets, -which orchestrate Pod lifecycles.
-For example, the following Deployment definition creates a ReplicaSet to bring -up one `hello-openshift` Pod: +For example, the following deployment definition creates a replica set to bring up one `hello-openshift` pod: .Deployment definition [source,yaml] diff --git a/modules/deployments-lifecycle-hooks.adoc b/modules/deployments-lifecycle-hooks.adoc index 58dabd91f9..2067cf1745 100644 --- a/modules/deployments-lifecycle-hooks.adoc +++ b/modules/deployments-lifecycle-hooks.adoc @@ -5,9 +5,7 @@ [id="deployments-lifecycle-hooks_{context}"] = Lifecycle hooks -The Rolling and Recreate strategies support _lifecycle hooks_, or deployment -hooks, which allow behavior to be injected into the deployment process at -predefined points within the strategy: +The rolling and recreate strategies support _lifecycle hooks_, or deployment hooks, which allow behavior to be injected into the deployment process at predefined points within the strategy: .Example `pre` lifecycle hook [source,yaml] @@ -18,8 +16,7 @@ pre: ---- <1> `execNewPod` is a pod-based lifecycle hook. -Every hook has a `failurePolicy`, which defines the action the strategy should -take when a hook failure is encountered: +Every hook has a _failure policy_, which defines the action the strategy should take when a hook failure is encountered: [cols="2,8"] |=== @@ -34,18 +31,14 @@ take when a hook failure is encountered: |Any hook failure should be ignored and the deployment should proceed. |=== -Hooks have a type-specific field that describes how to execute the hook. -Currently, pod-based hooks are the only supported hook type, specified by the -`execNewPod` field. +Hooks have a type-specific field that describes how to execute the hook. Currently, pod-based hooks are the only supported hook type, specified by the `execNewPod` field. [discrete] ==== Pod-based lifecycle hook -Pod-based lifecycle hooks execute hook code in a new pod derived from the -template in a DeploymentConfig. +Pod-based lifecycle hooks execute hook code in a new pod derived from the template in a `DeploymentConfig` object. -The following simplified example DeploymentConfig uses the Rolling strategy. -Triggers and some other minor details are omitted for brevity: +The following simplified example deployment uses the rolling strategy. Triggers and some other minor details are omitted for brevity: [source,yaml] ---- @@ -84,25 +77,21 @@ spec: <3> `env` is an optional set of environment variables for the hook container. <4> `volumes` is an optional set of volume references for the hook container. -In this example, the `pre` hook will be executed in a new pod using the -`openshift/origin-ruby-sample` image from the `helloworld` container. The hook -pod has the following properties: +In this example, the `pre` hook will be executed in a new pod using the `openshift/origin-ruby-sample` image from the `helloworld` container. The hook pod has the following properties: * The hook command is `/usr/bin/command arg1 arg2`. * The hook container has the `CUSTOM_VAR1=custom_value1` environment variable. * The hook failure policy is `Abort`, meaning the deployment process fails if the hook fails. -* The hook pod inherits the `data` volume from the DeploymentConfig pod. +* The hook pod inherits the `data` volume from the `DeploymentConfig` object pod. [id="deployments-setting-lifecycle-hooks_{context}"] == Setting lifecycle hooks -You can set lifecycle hooks, or deployment hooks, for a DeploymentConfig using -the CLI. 
+You can set lifecycle hooks, or deployment hooks, for a deployment using the CLI. .Procedure -. Use the `oc set deployment-hook` command to set the type of hook you want: -`--pre`, `--mid`, or `--post`. For example, to set a pre-deployment hook: +. Use the `oc set deployment-hook` command to set the type of hook you want: `--pre`, `--mid`, or `--post`. For example, to set a pre-deployment hook: + [source,terminal] ---- diff --git a/modules/deployments-recreate-strategy.adoc b/modules/deployments-recreate-strategy.adoc index 84d90de044..dca8222765 100644 --- a/modules/deployments-recreate-strategy.adoc +++ b/modules/deployments-recreate-strategy.adoc @@ -5,10 +5,9 @@ [id="deployments-recreate-strategy_{context}"] = Recreate strategy -The Recreate strategy has basic rollout behavior and supports lifecycle hooks -for injecting code into the deployment process. +The recreate strategy has basic rollout behavior and supports lifecycle hooks for injecting code into the deployment process. -.Example Recreate strategy definition +.Example recreate strategy definition [source,yaml] ---- strategy: @@ -22,7 +21,7 @@ strategy: <1> `recreateParams` are optional. <2> `pre`, `mid`, and `post` are lifecycle hooks. -The Recreate strategy: +The recreate strategy: . Executes any `pre` lifecycle hook. . Scales down the previous deployment to zero. @@ -32,21 +31,13 @@ The Recreate strategy: [IMPORTANT] ==== -During scale up, if the replica count of the deployment is greater than one, the -first replica of the deployment will be validated for readiness before fully -scaling up the deployment. If the validation of the first replica fails, the -deployment will be considered a failure. +During scale up, if the replica count of the deployment is greater than one, the first replica of the deployment will be validated for readiness before fully scaling up the deployment. If the validation of the first replica fails, the deployment will be considered a failure. ==== -*When to use a Recreate deployment:* +*When to use a recreate deployment:* -- When you must run migrations or other data transformations before your new code -starts. -- When you do not support having new and old versions of your application code -running at the same time. -- When you want to use a RWO volume, which is not supported being shared between -multiple replicas. +- When you must run migrations or other data transformations before your new code starts. +- When you do not support having new and old versions of your application code running at the same time. +- When you want to use a RWO volume, which is not supported being shared between multiple replicas. -A Recreate deployment incurs downtime because, for a brief period, no instances -of your application are running. However, your old code and new code do not run -at the same time. +A recreate deployment incurs downtime because, for a brief period, no instances of your application are running. However, your old code and new code do not run at the same time. diff --git a/modules/deployments-replicasets.adoc b/modules/deployments-replicasets.adoc index 4a2acccdc5..fe2bbb4820 100644 --- a/modules/deployments-replicasets.adoc +++ b/modules/deployments-replicasets.adoc @@ -3,22 +3,13 @@ // * applications/deployments/what-deployments-are.adoc [id="deployments-repliasets_{context}"] -= ReplicaSets += Replica sets -Similar to a ReplicationController, a ReplicaSet is a native Kubernetes API -object that ensures a specified number of pod replicas are running at any given -time. 
The difference between a ReplicaSet and a ReplicationController is that -a ReplicaSet supports set-based selector requirements whereas a replication -controller only supports equality-based selector requirements. +Similar to a replication controller, a `ReplicaSet` is a native Kubernetes API object that ensures a specified number of pod replicas are running at any given time. The difference between a replica set and a replication controller is that a replica set supports set-based selector requirements whereas a replication controller only supports equality-based selector requirements. [NOTE] ==== -Only use ReplicaSets if you require custom update orchestration or do not -require updates at all. Otherwise, use Deployments. ReplicaSets can be used -independently, but are used by deployments to orchestrate pod creation, -deletion, and updates. Deployments manage their ReplicaSets automatically, -provide declarative updates to pods, and do not have to manually manage the -ReplicaSets that they create. +Only use replica sets if you require custom update orchestration or do not require updates at all. Otherwise, use deployments. Replica sets can be used independently, but are used by deployments to orchestrate pod creation, deletion, and updates. Deployments manage their replica sets automatically, provide declarative updates to pods, and do not have to manually manage the replica sets that they create. ==== The following is an example `ReplicaSet` definition: @@ -51,9 +42,6 @@ spec: protocol: TCP restartPolicy: Always ---- -<1> A label query over a set of resources. The result of `matchLabels` and -`matchExpressions` are logically conjoined. -<2> Equality-based selector to specify resources with labels that match the -selector. -<3> Set-based selector to filter keys. This selects all resources with key equal -to `tier` and value equal to `frontend`. +<1> A label query over a set of resources. The result of `matchLabels` and `matchExpressions` are logically conjoined. +<2> Equality-based selector to specify resources with labels that match the selector. +<3> Set-based selector to filter keys. This selects all resources with key equal to `tier` and value equal to `frontend`. diff --git a/modules/deployments-replicationcontrollers.adoc b/modules/deployments-replicationcontrollers.adoc index 9434c4a165..eef1154585 100644 --- a/modules/deployments-replicationcontrollers.adoc +++ b/modules/deployments-replicationcontrollers.adoc @@ -3,30 +3,22 @@ // * applications/deployments/what-deployments-are.adoc [id="deployments-replicationcontrollers_{context}"] -= ReplicationControllers += Replication controllers -A ReplicationController ensures that a specified number of replicas of a pod are running at -all times. If pods exit or are deleted, the ReplicationController acts to -instantiate more up to the defined number. Likewise, if there are more running -than desired, it deletes as many as necessary to match the defined amount. +A replication controller ensures that a specified number of replicas of a pod are running at all times. If pods exit or are deleted, the replication controller acts to instantiate more up to the defined number. Likewise, if there are more running than desired, it deletes as many as necessary to match the defined amount. -A ReplicationController configuration consists of: +A replication controller configuration consists of: -* The number of replicas desired (which can be adjusted at runtime). +* The number of replicas desired, which can be adjusted at run time. 
* A `Pod` definition to use when creating a replicated pod. * A selector for identifying managed pods. -A selector is a set of labels assigned to -the pods that are managed by the ReplicationController. These labels are -included in the `Pod` definition that the ReplicationController instantiates. -The ReplicationController uses the selector to determine how many -instances of the pod are already running in order to adjust as needed. +A selector is a set of labels assigned to the pods that are managed by the replication controller. These labels are included in the `Pod` definition that the replication controller instantiates. The replication controller uses the selector to determine how many instances of the pod are already running in order to adjust as needed. -The ReplicationController does not perform auto-scaling based on load or -traffic, as it does not track either. Rather, this requires its replica +The replication controller does not perform auto-scaling based on load or traffic, as it does not track either. Rather, this requires its replica count to be adjusted by an external auto-scaler. -The following is an example definition of a ReplicationController: +The following is an example definition of a replication controller: [source,yaml] ---- diff --git a/modules/deployments-retrying-deployment.adoc b/modules/deployments-retrying-deployment.adoc index a3c74693be..acbff09d25 100644 --- a/modules/deployments-retrying-deployment.adoc +++ b/modules/deployments-retrying-deployment.adoc @@ -5,8 +5,7 @@ [id="deployments-retrying-deployment_{context}"] = Retrying a deployment -If the current revision of your DeploymentConfig failed to deploy, you can -restart the deployment process. +If the current revision of your `DeploymentConfig` object failed to deploy, you can restart the deployment process. .Procedure @@ -17,12 +16,9 @@ restart the deployment process. $ oc rollout retry dc/ ---- + -If the latest revision of it was deployed successfully, the command displays a -message and the deployment process is not be retried. +If the latest revision of it was deployed successfully, the command displays a message and the deployment process is not retried. + [NOTE] ==== -Retrying a deployment restarts the deployment process and does not create a new -deployment revision. The restarted ReplicationController has the same -configuration it had when it failed. +Retrying a deployment restarts the deployment process and does not create a new deployment revision. The restarted replication controller has the same configuration it had when it failed. ==== diff --git a/modules/deployments-rolling-back.adoc b/modules/deployments-rolling-back.adoc index 91408da870..f6f722c2ca 100644 --- a/modules/deployments-rolling-back.adoc +++ b/modules/deployments-rolling-back.adoc @@ -5,8 +5,7 @@ [id="deployments-rolling-back_{context}"] = Rolling back a deployment -Rollbacks revert an application back to a previous revision and can be -performed using the REST API, the CLI, or the web console. +Rollbacks revert an application back to a previous revision and can be performed using the REST API, the CLI, or the web console. .Procedure @@ -17,14 +16,9 @@ performed using the REST API, the CLI, or the web console. $ oc rollout undo dc/ ---- + -The DeploymentConfig's template is reverted to match the deployment -revision specified in the undo command, and a new ReplicationController is -started. If no revision is specified with `--to-revision`, then the last -successfully deployed revision is used. 
+The `DeploymentConfig` object's template is reverted to match the deployment revision specified in the undo command, and a new replication controller is started. If no revision is specified with `--to-revision`, then the last successfully deployed revision is used. -. Image change triggers on the DeploymentConfig are disabled as part of -the rollback to prevent accidentally starting a new deployment process soon after -the rollback is complete. +. Image change triggers on the `DeploymentConfig` object are disabled as part of the rollback to prevent accidentally starting a new deployment process soon after the rollback is complete. + To re-enable the image change triggers: + @@ -35,8 +29,5 @@ $ oc set triggers dc/ --auto [NOTE] ==== -DeploymentConfigs also support automatically rolling back to the last successful -revision of the configuration in case the latest deployment process fails. In -that case, the latest template that failed to deploy stays intact by the system -and it is up to users to fix their configurations. +Deployment configs also support automatically rolling back to the last successful revision of the configuration in case the latest deployment process fails. In that case, the latest template that failed to deploy stays intact by the system and it is up to users to fix their configurations. ==== diff --git a/modules/deployments-rolling-strategy.adoc index 5fcacbdb92..fcb6e019ad 100644 --- a/modules/deployments-rolling-strategy.adoc +++ b/modules/deployments-rolling-strategy.adoc @@ -5,25 +5,18 @@ [id="deployments-rolling-strategy_{context}"] = Rolling strategy -A rolling deployment slowly replaces instances of the previous version of an -application with instances of the new version of the application. The Rolling -strategy is the default deployment strategy used if no strategy is specified on -a DeploymentConfig. +A rolling deployment slowly replaces instances of the previous version of an application with instances of the new version of the application. The rolling strategy is the default deployment strategy used if no strategy is specified on a `DeploymentConfig` object. -A rolling deployment typically waits for new pods to become `ready` via a -`readiness check` before scaling down the old components. If a significant issue -occurs, the rolling deployment can be aborted. +A rolling deployment typically waits for new pods to become `ready` via a readiness check before scaling down the old components. If a significant issue occurs, the rolling deployment can be aborted. -*When to use a Rolling deployment:* +*When to use a rolling deployment:* - When you want to take no downtime during an application update. - When your application supports having old code and new code running at the same time. -A Rolling deployment means you to have both old and new versions of your code -running at the same time. This typically requires that your application handle -N-1 compatibility. +A rolling deployment means you have both old and new versions of your code running at the same time. This typically requires that your application handle N-1 compatibility. -.Example Rolling strategy definition +.Example rolling strategy definition [source,yaml] ---- strategy: @@ -37,47 +30,33 @@ strategy: pre: {} <6> post: {} ---- -<1> The time to wait between individual Pod updates. If unspecified, this value defaults to `1`. +<1> The time to wait between individual pod updates. If unspecified, this value defaults to `1`.
<2> The time to wait between polling the deployment status after update. If unspecified, this value defaults to `1`. -<3> The time to wait for a scaling event before giving up. Optional; the default is `600`. Here, _giving up_ means -automatically rolling back to the previous complete deployment. +<3> The time to wait for a scaling event before giving up. Optional; the default is `600`. Here, _giving up_ means automatically rolling back to the previous complete deployment. <4> `maxSurge` is optional and defaults to `25%` if not specified. See the information below the following procedure. <5> `maxUnavailable` is optional and defaults to `25%` if not specified. See the information below the following procedure. <6> `pre` and `post` are both lifecycle hooks. -The Rolling strategy: +The rolling strategy: . Executes any `pre` lifecycle hook. -. Scales up the new ReplicationController based on the surge count. -. Scales down the old ReplicationController based on the max unavailable count. -. Repeats this scaling until the new ReplicationController has reached the desired -replica count and the old ReplicationController has been scaled to zero. +. Scales up the new replication controller based on the surge count. +. Scales down the old replication controller based on the max unavailable count. +. Repeats this scaling until the new replication controller has reached the desired replica count and the old replication controller has been scaled to zero. . Executes any `post` lifecycle hook. [IMPORTANT] ==== -When scaling down, the Rolling strategy waits for pods to become ready so it can -decide whether further scaling would affect availability. If scaled up pods -never become ready, the deployment process will eventually time out and result in a -deployment failure. +When scaling down, the rolling strategy waits for pods to become ready so it can decide whether further scaling would affect availability. If scaled up pods never become ready, the deployment process will eventually time out and result in a deployment failure. ==== -The `maxUnavailable` parameter is the maximum number of pods that can be -unavailable during the update. The `maxSurge` parameter is the maximum number -of pods that can be scheduled above the original number of pods. Both parameters -can be set to either a percentage (e.g., `10%`) or an absolute value (e.g., -`2`). The default value for both is `25%`. +The `maxUnavailable` parameter is the maximum number of pods that can be unavailable during the update. The `maxSurge` parameter is the maximum number of pods that can be scheduled above the original number of pods. Both parameters can be set to either a percentage (e.g., `10%`) or an absolute value (e.g., `2`). The default value for both is `25%`. -These parameters allow the deployment to be tuned for availability and speed. For -example: +These parameters allow the deployment to be tuned for availability and speed. For example: -- `maxUnavailable*=0` and `maxSurge*=20%` ensures full capacity is maintained -during the update and rapid scale up. -- `maxUnavailable*=10%` and `maxSurge*=0` performs an update using no extra -capacity (an in-place update). -- `maxUnavailable*=10%` and `maxSurge*=10%` scales up and down quickly with -some potential for capacity loss. +- `maxUnavailable*=0` and `maxSurge*=20%` ensures full capacity is maintained during the update and rapid scale up. +- `maxUnavailable*=10%` and `maxSurge*=0` performs an update using no extra capacity (an in-place update). 
+- `maxUnavailable*=10%` and `maxSurge*=10%` scales up and down quickly with some potential for capacity loss. -Generally, if you want fast rollouts, use `maxSurge`. If you have to take into -account resource quota and can accept partial unavailability, use +Generally, if you want fast rollouts, use `maxSurge`. If you have to take into account resource quota and can accept partial unavailability, use `maxUnavailable`. diff --git a/modules/deployments-running-pod-svc-acct.adoc b/modules/deployments-running-pod-svc-acct.adoc index 5dc776d506..f456bded63 100644 --- a/modules/deployments-running-pod-svc-acct.adoc +++ b/modules/deployments-running-pod-svc-acct.adoc @@ -3,21 +3,20 @@ // * applications/deployments/managing-deployment-processes.adoc [id="deployments-running-pod-svc-acct_{context}"] -= Running a Pod with a different service account += Running a pod with a different service account -You can run a Pod with a service account other than the default. +You can run a pod with a service account other than the default. .Procedure -. Edit the DeploymentConfig: +. Edit the `DeploymentConfig` object: + [source,terminal] ---- $ oc edit dc/ ---- -. Add the `serviceAccount` and `serviceAccountName` parameters to the `spec` -field, and specify the service account you want to use: +. Add the `serviceAccount` and `serviceAccountName` parameters to the `spec` field, and specify the service account you want to use: + [source,yaml] ---- diff --git a/modules/deployments-scaling-manually.adoc b/modules/deployments-scaling-manually.adoc index 33a2cc950e..5d86ba4a47 100644 --- a/modules/deployments-scaling-manually.adoc +++ b/modules/deployments-scaling-manually.adoc @@ -5,24 +5,20 @@ [id="deployments-scaling-manually_{context}"] = Scaling manually -In addition to rollbacks, you can exercise fine-grained control over the number -of replicas by manually scaling them. +In addition to rollbacks, you can exercise fine-grained control over the number of replicas by manually scaling them. [NOTE] ==== -Pods can also be autoscaled using the `oc autoscale` command. +Pods can also be auto-scaled using the `oc autoscale` command. ==== .Procedure -. To manually scale a DeploymentConfig, use the `oc scale` command. For example, -the following command sets the replicas in the `frontend` DeploymentConfig to -`3`. +. To manually scale a `DeploymentConfig` object, use the `oc scale` command. For example, the following command sets the replicas in the `frontend` `DeploymentConfig` object to `3`. + [source,terminal] ---- $ oc scale dc frontend --replicas=3 ---- + -The number of replicas eventually propagates to the desired and current -state of the deployment configured by the DeploymentConfig `frontend`. +The number of replicas eventually propagates to the desired and current state of the deployment configured by the `DeploymentConfig` object `frontend`. diff --git a/modules/deployments-setting-resources.adoc b/modules/deployments-setting-resources.adoc index fc5008d009..58c0013116 100644 --- a/modules/deployments-setting-resources.adoc +++ b/modules/deployments-setting-resources.adoc @@ -7,23 +7,16 @@ [NOTE] ==== -This resource is available only if a cluster administrator has enabled the -ephemeral storage technology preview. This feature is disabled by default. +This resource is available only if a cluster administrator has enabled the ephemeral storage technology preview. This feature is disabled by default. ==== -A deployment is completed by a Pod that consumes resources (memory, CPU, and -ephemeral storage) on a node. 
By default, pods consume unbounded node resources. -However, if a project specifies default container limits, then pods consume -resources up to those limits. +A deployment is completed by a pod that consumes resources (memory, CPU, and ephemeral storage) on a node. By default, pods consume unbounded node resources. However, if a project specifies default container limits, then pods consume resources up to those limits. -You can also limit resource use by specifying resource limits as part of the -deployment strategy. Deployment resources can be used with the Recreate, -Rolling, or Custom deployment strategies. +You can also limit resource use by specifying resource limits as part of the deployment strategy. Deployment resources can be used with the recreate, rolling, or custom deployment strategies. .Procedure -. In the following example, each of `resources`, `cpu`, `memory`, and -`ephemeral-storage` is optional: +. In the following example, each of `resources`, `cpu`, `memory`, and `ephemeral-storage` is optional: + [source,yaml] ---- @@ -36,12 +29,9 @@ resources: ---- <1> `cpu` is in CPU units: `100m` represents 0.1 CPU units (100 * 1e-3). <2> `memory` is in bytes: `256Mi` represents 268435456 bytes (256 * 2 ^ 20). -<3> `ephemeral-storage` is in bytes: `1Gi` represents 1073741824 bytes (2 ^ 30). -This applies only if your cluster administrator enabled the ephemeral storage -technology preview. +<3> `ephemeral-storage` is in bytes: `1Gi` represents 1073741824 bytes (2 ^ 30). This applies only if your cluster administrator enabled the ephemeral storage technology preview. + -However, if a quota has been defined for your project, one of the following two -items is required: +However, if a quota has been defined for your project, one of the following two items is required: + -- - A `resources` section set with an explicit `requests`: @@ -55,12 +45,9 @@ items is required: memory: "256Mi" ephemeral-storage: "1Gi" ---- -<1> The `requests` object contains the list of resources that correspond to -the list of resources in the quota. +<1> The `requests` object contains the list of resources that correspond to the list of resources in the quota. -- A limit range defined in your project, where the defaults from the `LimitRange` -object apply to pods created during the deployment process. +- A limit range defined in your project, where the defaults from the `LimitRange` object apply to pods created during the deployment process. -- + -To set deployment resources, choose one of the above options. Otherwise, deploy -Pod creation fails, citing a failure to satisfy quota. +To set deployment resources, choose one of the above options. Otherwise, deploy pod creation fails, citing a failure to satisfy quota. diff --git a/modules/deployments-setting-triggers.adoc b/modules/deployments-setting-triggers.adoc index fe5ea9f8eb..b7ab501bd9 100644 --- a/modules/deployments-setting-triggers.adoc +++ b/modules/deployments-setting-triggers.adoc @@ -7,8 +7,7 @@ .Procedure -. You can set deployment triggers for a DeploymentConfig using the `oc set triggers` -command. For example, to set a `ImageChangeTrigger`, use the following command: +. You can set deployment triggers for a `DeploymentConfig` object using the `oc set triggers` command. 
For example, to set an image change trigger, use the following command:
+
[source,terminal]
----
diff --git a/modules/deployments-starting-deployment.adoc b/modules/deployments-starting-deployment.adoc
index 4051f38992..2bce3ca3df 100644
--- a/modules/deployments-starting-deployment.adoc
+++ b/modules/deployments-starting-deployment.adoc
@@ -5,12 +5,11 @@
[id="deployments-starting-a-deployment_{context}"]
= Starting a deployment
-You can start a _rollout_ to begin the deployment process of your application.
+You can start a rollout to begin the deployment process of your application.
.Procedure
-. To start a new deployment process from an existing DeploymentConfig, run the
-following command:
+. To start a new deployment process from an existing `DeploymentConfig` object, run the following command:
+
[source,terminal]
----
@@ -19,6 +18,5 @@ $ oc rollout latest dc/
+
[NOTE]
====
-If a deployment process is already in progress, the command displays a
-message and a new ReplicationController will not be deployed.
+If a deployment process is already in progress, the command displays a message and a new replication controller will not be deployed.
====
diff --git a/modules/deployments-triggers.adoc b/modules/deployments-triggers.adoc
index 344aecc46b..5140677f95 100644
--- a/modules/deployments-triggers.adoc
+++ b/modules/deployments-triggers.adoc
@@ -5,31 +5,25 @@
[id="deployments-triggers_{context}"]
= Deployment triggers
-A DeploymentConfig can contain triggers, which drive the creation of new
-deployment processes in response to events inside the cluster.
+A `DeploymentConfig` object can contain triggers, which drive the creation of new deployment processes in response to events inside the cluster.
[WARNING]
====
-If no triggers are defined on a DeploymentConfig, a `ConfigChange`
-trigger is added by default. If triggers are defined as an empty field, deployments
-must be started manually.
+If no triggers are defined on a `DeploymentConfig` object, a config change trigger is added by default. If triggers are defined as an empty field, deployments must be started manually.
====
[discrete]
[id="deployments-configchange-trigger_{context}"]
-=== ConfigChange deployment triggers
+=== Config change deployment triggers
-The `ConfigChange` trigger results in a new ReplicationController whenever
-configuration changes are detected in the Pod template of the DeploymentConfig.
+The config change trigger results in a new replication controller whenever configuration changes are detected in the pod template of the `DeploymentConfig` object.
[NOTE]
====
-If a `ConfigChange` trigger is defined on a DeploymentConfig, the first
-ReplicationController is automatically created soon after the DeploymentConfig
-itself is created and it is not paused.
+If a config change trigger is defined on a `DeploymentConfig` object, the first replication controller is automatically created soon after the `DeploymentConfig` object itself is created and it is not paused.
====
-.ConfigChange deployment trigger
+.Config change deployment trigger
[source,yaml]
----
triggers:
@@ -38,13 +32,11 @@ triggers:
[discrete]
[id="deployments-imagechange-trigger_{context}"]
-=== ImageChange deployment triggers
+=== Image change deployment triggers
-The `ImageChange` trigger results in a new ReplicationController whenever the
-content of an imagestreamtag changes (when a new version of the image is
-pushed).
+The image change trigger results in a new replication controller whenever the content of an image stream tag changes (when a new version of the image is pushed). -.ImageChange deployment trigger +.Image change deployment trigger [source,yaml] ---- triggers: @@ -58,21 +50,11 @@ triggers: containerNames: - "helloworld" ---- -<1> If the `imageChangeParams.automatic` field is set to `false`, the trigger is -disabled. +<1> If the `imageChangeParams.automatic` field is set to `false`, the trigger is disabled. - -With the above example, when the `latest` tag value of the `origin-ruby-sample` -imagestream changes and the new image value differs from the current image -specified in the DeploymentConfig's `helloworld` container, a new -ReplicationController is created using the new image for the `helloworld` -container. +With the above example, when the `latest` tag value of the `origin-ruby-sample` image stream changes and the new image value differs from the current image specified in the `DeploymentConfig` object's `helloworld` container, a new replication controller is created using the new image for the `helloworld` container. [NOTE] ==== -If an `ImageChange` trigger is defined on a DeploymentConfig (with a -`ConfigChange` trigger and `automatic=false`, or with `automatic=true`) and the -`ImageStreamTag` pointed by the `ImageChange` trigger does not exist yet, then -the initial deployment process will automatically start as soon as an image is -imported or pushed by a build to the `ImageStreamTag`. +If an image change trigger is defined on a `DeploymentConfig` object (with a config change trigger and `automatic=false`, or with `automatic=true`) and the image stream tag pointed by the image change trigger does not exist yet, the initial deployment process will automatically start as soon as an image is imported or pushed by a build to the image stream tag. ==== diff --git a/modules/deployments-viewing-deployment.adoc b/modules/deployments-viewing-deployment.adoc index 940ac23aa8..897f144050 100644 --- a/modules/deployments-viewing-deployment.adoc +++ b/modules/deployments-viewing-deployment.adoc @@ -5,14 +5,11 @@ [id="deployments-viewing-a-deployment_{context}"] = Viewing a deployment -You can view a deployment to get basic information about all the available -revisions of your application. +You can view a deployment to get basic information about all the available revisions of your application. .Procedure -. To show details about all recently created ReplicationControllers for the -provided DeploymentConfig, including any currently running deployment process, -run the following command: +. To show details about all recently created replication controllers for the provided `DeploymentConfig` object, including any currently running deployment process, run the following command: + [source,terminal] ---- @@ -26,8 +23,7 @@ $ oc rollout history dc/ $ oc rollout history dc/ --revision=1 ---- -. For more detailed information about a deployment configuration and its latest -revision, use the `oc describe` command: +. For more detailed information about a `DeploymentConfig` object and its latest revision, use the `oc describe` command: + [source,terminal] ---- diff --git a/modules/deployments-viewing-logs.adoc b/modules/deployments-viewing-logs.adoc index d3d5065ae6..7a0393b9d9 100644 --- a/modules/deployments-viewing-logs.adoc +++ b/modules/deployments-viewing-logs.adoc @@ -7,20 +7,16 @@ .Procedure -. To stream the logs of the latest revision for a given DeploymentConfig: +. 
To stream the logs of the latest revision for a given `DeploymentConfig` object: + [source,terminal] ---- $ oc logs -f dc/ ---- + -If the latest revision is running or failed, the command returns the logs of the -process that is responsible for deploying your pods. If it is successful, it -returns the logs from a Pod of your application. +If the latest revision is running or failed, the command returns the logs of the process that is responsible for deploying your pods. If it is successful, it returns the logs from a pod of your application. -. You can also view logs from older failed deployment processes, if and only if -these processes (old ReplicationControllers and their deployer pods) exist and -have not been pruned or deleted manually: +. You can also view logs from older failed deployment processes, if and only if these processes (old replication controllers and their deployer pods) exist and have not been pruned or deleted manually: + [source,terminal] ---- diff --git a/modules/disabling-project-self-provisioning.adoc b/modules/disabling-project-self-provisioning.adoc index 936dc8dfaf..c17965b91d 100644 --- a/modules/disabling-project-self-provisioning.adoc +++ b/modules/disabling-project-self-provisioning.adoc @@ -11,8 +11,7 @@ You can prevent an authenticated user group from self-provisioning new projects. . Log in as a user with `cluster-admin` privileges. -. View the `self-provisioners` cluster role binding usage by running the -following command: +. View the `self-provisioners` cluster role binding usage by running the following command: + [source,terminal] ---- @@ -36,21 +35,16 @@ Subjects: + Review the subjects in the `self-provisioners` section. -. Remove the `self-provisioner` cluster role from the group -`system:authenticated:oauth`. +. Remove the `self-provisioner` cluster role from the group `system:authenticated:oauth`. -** If the `self-provisioners` cluster role binding binds only the -`self-provisioner` role to the `system:authenticated:oauth` group, run the -following command: +** If the `self-provisioners` cluster role binding binds only the `self-provisioner` role to the `system:authenticated:oauth` group, run the following command: + [source,terminal] ---- $ oc patch clusterrolebinding.rbac self-provisioners -p '{"subjects": null}' ---- -** If the `self-provisioners` cluster role binding binds the `self-provisioner` -role to more users, groups, or service accounts than the -`system:authenticated:oauth` group, run the following command: +** If the `self-provisioners` cluster role binding binds the `self-provisioner` role to more users, groups, or service accounts than the `system:authenticated:oauth` group, run the following command: + [source,terminal] ---- @@ -59,8 +53,7 @@ $ oc adm policy \ system:authenticated:oauth ---- -. Edit the `self-provisioners` cluster role binding to prevent automatic updates -to the role. Automatic updates reset the cluster roles to the default state. +. Edit the `self-provisioners` cluster role binding to prevent automatic updates to the role. Automatic updates reset the cluster roles to the default state. ** To update the role binding using the CLI: @@ -71,9 +64,7 @@ to the role. Automatic updates reset the cluster roles to the default state. $ oc edit clusterrolebinding.rbac self-provisioners ---- -... In the displayed role binding, set the -`rbac.authorization.kubernetes.io/autoupdate` parameter value to `false`, as -shown in the following example: +... 
In the displayed role binding, set the `rbac.authorization.kubernetes.io/autoupdate` parameter value to `false`, as shown in the following example: + [source,yaml] ---- @@ -92,7 +83,7 @@ metadata: $ oc patch clusterrolebinding.rbac self-provisioners -p '{ "metadata": { "annotations": { "rbac.authorization.kubernetes.io/autoupdate": "false" } } }' ---- -. Login as an authenticated user and verify that it can no longer self-provision a project: +. Log in as an authenticated user and verify that it can no longer self-provision a project: + [source,terminal] ---- @@ -105,5 +96,4 @@ $ oc new-project test Error from server (Forbidden): You may not request a new project via this API. ---- + -Consider customizing this project request message to provide more helpful -instructions specific to your organization. +Consider customizing this project request message to provide more helpful instructions specific to your organization. diff --git a/modules/gathering-application-diagnostic-data.adoc b/modules/gathering-application-diagnostic-data.adoc index fbb8c05e1e..a40cb4377f 100644 --- a/modules/gathering-application-diagnostic-data.adoc +++ b/modules/gathering-application-diagnostic-data.adoc @@ -41,7 +41,7 @@ $ oc logs -f pod/my-app-1-akdlg $ oc exec my-app-1-akdlg -- cat /var/log/my-application.log ---- + -.. If root access is required to view an application log, you can start a debug container with root privileges and then view the log file from within the container. Start the debug container from the project's deployment configuration. Pod users typically run with non-root privileges, but running troubleshooting pods with temporary root privileges can be useful during issue investigation: +.. If root access is required to view an application log, you can start a debug container with root privileges and then view the log file from within the container. Start the debug container from the project's `DeploymentConfig` object. Pod users typically run with non-root privileges, but running troubleshooting pods with temporary root privileges can be useful during issue investigation: + [source,terminal] ---- @@ -67,7 +67,7 @@ $ oc exec -it my-app-1-akdlg /bin/bash + [NOTE] ==== -Root privileges are required to run some diagnostic binaries. In these situations you can start a debug pod with root access, based on a problematic pod's deployment configuration, by running `oc debug dc/ --as-root`. Then, you can run diagnostic binaries as root from within the debug pod. +Root privileges are required to run some diagnostic binaries. In these situations you can start a debug pod with root access, based on a problematic pod's `DeploymentConfig` object, by running `oc debug dc/ --as-root`. Then, you can run diagnostic binaries as root from within the debug pod. ==== . If diagnostic binaries are not available within a container, you can run a host's diagnostic binaries within a container's namespace by using `nsenter`. The following example runs `ip ad` within a container's namespace, using the host`s `ip` binary. diff --git a/modules/nodes-containers-volumes-adding.adoc b/modules/nodes-containers-volumes-adding.adoc index d3d09963ec..ce362312e0 100644 --- a/modules/nodes-containers-volumes-adding.adoc +++ b/modules/nodes-containers-volumes-adding.adoc @@ -75,7 +75,7 @@ values: `json`, `yaml`. 
For example: -* To add a new volume source *emptyDir* to the *registry* deployment config: +* To add a new volume source *emptyDir* to the *registry* `DeploymentConfig` object: + [source,terminal] ---- @@ -92,7 +92,7 @@ $ oc set volume rc/r1 --add --name=v1 --type=secret --secret-name='secret1' --mo * To add existing persistent volume *v1* with claim name *pvc1* to deployment configuration *_dc.json_* on disk, mount the volume on container *c1* at -*_/data_*, and update the deployment config on the server: +*_/data_*, and update the `DeploymentConfig` object on the server: + [source,terminal] ---- diff --git a/modules/nodes-containers-volumes-removing.adoc b/modules/nodes-containers-volumes-removing.adoc index b0dff53f43..c5313de1a1 100644 --- a/modules/nodes-containers-volumes-removing.adoc +++ b/modules/nodes-containers-volumes-removing.adoc @@ -35,8 +35,7 @@ $ oc set volume / --remove [options] | |`-o, --output` -|Display the modified objects instead of updating them on the server. Supported -values: `json`, `yaml`. +|Display the modified objects instead of updating them on the server. Supported values: `json`, `yaml`. | |`--output-version` @@ -46,15 +45,14 @@ values: `json`, `yaml`. For example: -* To remove a volume *v1* from the deployment config *d1*: +* To remove a volume *v1* from the `DeploymentConfig` object *d1*: + [source,terminal] ---- $ oc set volume dc/d1 --remove --name=v1 ---- -* To unmount volume *v1* from container *c1* for the deployment config *d1* and -remove the volume *v1* if it is not referenced by any containers on *d1*: +* To unmount volume *v1* from container *c1* for the `DeploymentConfig` object *d1* and remove the volume *v1* if it is not referenced by any containers on *d1*: + [source,terminal] ---- diff --git a/modules/nodes-containers-volumes-updating.adoc b/modules/nodes-containers-volumes-updating.adoc index 42a72a353a..6a8dae50f5 100644 --- a/modules/nodes-containers-volumes-updating.adoc +++ b/modules/nodes-containers-volumes-updating.adoc @@ -26,7 +26,7 @@ persistent volume claim *pvc1*: $ oc set volume rc/r1 --add --overwrite --name=v1 --type=persistentVolumeClaim --claim-name=pvc1 ---- -* To change deployment config *d1* mount point to *_/opt_* for volume *v1*: +* To change the `DeploymentConfig` object *d1* mount point to *_/opt_* for volume *v1*: + [source,terminal] ---- diff --git a/modules/nodes-pods-autoscaling-creating-cpu.adoc b/modules/nodes-pods-autoscaling-creating-cpu.adoc index c5fae5056e..e1ff2c9dcd 100644 --- a/modules/nodes-pods-autoscaling-creating-cpu.adoc +++ b/modules/nodes-pods-autoscaling-creating-cpu.adoc @@ -56,7 +56,7 @@ To create a horizontal pod autoscaler for CPU utilization: . Perform one of the following one of the following: -** To scale based on the percent of CPU utilization, create a `HorizontalPodAutoscaler` object for an existing deployment config: +** To scale based on the percent of CPU utilization, create a `HorizontalPodAutoscaler` object for an existing `DeploymentConfig` object: + [source,terminal] ---- @@ -66,7 +66,7 @@ $ oc autoscale dc/ \// <1> --cpu-percent= <4> ---- + -<1> Specify the name of the deployment config. The object must exist. +<1> Specify the name of the `DeploymentConfig` object. The object must exist. <2> Optionally, specify the minimum number of replicas when scaling down. <3> Specify the maximum number of replicas when scaling up. <4> Specify the target average CPU utilization over all the pods, represented as a percent of requested CPU. 
If not specified or negative, a default autoscaling policy is used. @@ -86,7 +86,7 @@ $ oc autoscale rc/ <1> <3> Specify the maximum number of replicas when scaling up. <4> Specify the target average CPU utilization over all the pods, represented as a percent of requested CPU. If not specified or negative, a default autoscaling policy is used. -** To scale for a specific CPU value, create a YAML file similar to the following for an existing deployment config or replication controller: +** To scale for a specific CPU value, create a YAML file similar to the following for an existing `DeploymentConfig` object or replication controller: + .. Create a YAML file similar to the following: + @@ -116,7 +116,7 @@ spec: <2> Specify a name for this horizontal pod autoscaler object. <3> Specify the API version of the object to scale: * For a replication controller, use `v1`, -* For a deployment config, use `apps.openshift.io/v1`. +* For a `DeploymentConfig` object, use `apps.openshift.io/v1`. <4> Specify the kind of object to scale, either `ReplicationController` or `DeploymentConfig`. <5> Specify the name of the object to scale. The object must exist. <6> Specify the minimum number of replicas when scaling down. @@ -147,7 +147,7 @@ NAME REFERENCE TARGETS MINPODS MAXPOD cpu-autoscale ReplicationController/example 173m/500m 1 10 1 20m ---- -For example, the following command creates a horizontal pod autoscaler that maintains between 3 and 7 replicas of the pods that are controlled by the `image-registry` deployment config in order to maintain an average CPU utilization of 75% across all pods. +For example, the following command creates a horizontal pod autoscaler that maintains between 3 and 7 replicas of the pods that are controlled by the `image-registry` `DeploymentConfig` object in order to maintain an average CPU utilization of 75% across all pods. [source,terminal] ---- @@ -192,7 +192,7 @@ status: desiredReplicas: 0 ---- -The following example shows autoscaling for the `image-registry` deployment config. The initial deployment requires 3 pods. The HPA object increased that minimum to 5 and will increase the pods up to 7 if CPU usage on the pods reaches 75%: +The following example shows autoscaling for the `image-registry` `DeploymentConfig` object. The initial deployment requires 3 pods. The HPA object increased that minimum to 5 and will increase the pods up to 7 if CPU usage on the pods reaches 75%: . View the current state of the `image-registry` deployment: + @@ -208,7 +208,7 @@ NAME REVISION DESIRED CURRENT TRIGGERED BY image-registry 1 3 3 config ---- -. Autoscale the `image-registry` deployment config: +. Autoscale the `image-registry` `DeploymentConfig` object: + [source,terminal] ---- diff --git a/modules/nodes-pods-autoscaling-creating-memory.adoc b/modules/nodes-pods-autoscaling-creating-memory.adoc index ec34205b7c..643a0b7dbb 100644 --- a/modules/nodes-pods-autoscaling-creating-memory.adoc +++ b/modules/nodes-pods-autoscaling-creating-memory.adoc @@ -7,10 +7,10 @@ = Creating a horizontal pod autoscaler object for memory utilization You can create a horizontal pod autoscaler (HPA) for an existing `DeploymentConfig` object or `ReplicationController` object -that automatically scales the pods associated with that object in order to maintain the average memory utilization you specify, +that automatically scales the pods associated with that object in order to maintain the average memory utilization you specify, either a direct value or a percentage of requested memory. 
-The HPA increases and decreases the number of replicas between the minimum and maximum numbers to maintain +The HPA increases and decreases the number of replicas between the minimum and maximum numbers to maintain the specified memory utilization across all pods. For memory utilization, you can specify the minimum and maximum number of pods and the average memory utilization @@ -74,7 +74,7 @@ To create a horizontal pod autoscaler for memory utilization: . Create a YAML file for one of the following: -** To scale for a specific memory value, create a `HorizontalPodAutoscaler` object similar to the following for an existing deployment config or replication controller: +** To scale for a specific memory value, create a `HorizontalPodAutoscaler` object similar to the following for an existing `DeploymentConfig` object or replication controller: + .Example output [source,yaml,options="nowrap"] @@ -103,7 +103,7 @@ spec: <2> Specify a name for this horizontal pod autoscaler object. <3> Specify the API version of the object to scale: * For a replication controller, use `v1`, -* For a deployment config, use `apps.openshift.io/v1`. +* For a `DeploymentConfig` object, use `apps.openshift.io/v1`. <4> Specify the kind of object to scale, either `ReplicationController` or `DeploymentConfig`. <5> Specify the name of the object to scale. The object must exist. <6> Specify the minimum number of replicas when scaling down. @@ -142,7 +142,7 @@ spec: <2> Specify a name for this horizontal pod autoscaler object. <3> Specify the API version of the object to scale: * For a replication controller, use `v1`, -* For a deployment config, use `apps.openshift.io/v1`. +* For a `DeploymentConfig` object, use `apps.openshift.io/v1`. <4> Specify the kind of object to scale, either `ReplicationController` or `DeploymentConfig`. <5> Specify the name of the object to scale. The object must exist. <6> Specify the minimum number of replicas when scaling down. @@ -150,7 +150,7 @@ spec: <8> Use the `metrics` parameter for memory utilization. <9> Specify `memory` for memory utilization. <10> Set to `Utilization`. -<11> Specify `averageUtilization` and a target average memory utilization over all the pods, +<11> Specify `averageUtilization` and a target average memory utilization over all the pods, represented as a percent of requested memory. The target pods must have memory requests configured. . Create the horizontal pod autoscaler: diff --git a/modules/odc-connecting-components.adoc b/modules/odc-connecting-components.adoc index da9b92d868..e43f5ae000 100644 --- a/modules/odc-connecting-components.adoc +++ b/modules/odc-connecting-components.adoc @@ -7,8 +7,7 @@ In addition to grouping multiple components within an application, you can also use the *Topology* view to connect components with each other. You can either use a binding connector or a visual one to connect components. -A binding connection between the components can be established only if the target node is an Operator-backed service. This is indicated by the *Create a binding connector* tool-tip which appears when you drag an arrow to such a target node. -When an application is connected to a service using a binding connector a `ServiceBindingRequest` is created. The *Service Binding Operator* controller then uses an intermediate `Secret` to inject the necessary binding data into the application `Deployment` as environment variables. After the request is successful, the application is redeployed establishing an interaction between the connected components. 
+A binding connection between the components can be established only if the target node is an Operator-backed service. This is indicated by the *Create a binding connector* tool-tip which appears when you drag an arrow to such a target node. When an application is connected to a service using a binding connector a service binding request is created. Then, the Service Binding Operator controller uses an intermediate secret to inject the necessary binding data into the application deployment as environment variables. After the request is successful, the application is redeployed establishing an interaction between the connected components. A visual connector establishes only a visual connection between the components, depicting an intent to connect. No interaction between the components is established. If the target node is not an Operator-backed service the *Create a visual connector* tool-tip is displayed when you drag an arrow to a target node. @@ -42,18 +41,11 @@ image::odc_connecting_multiple_applications.png[] [IMPORTANT] ==== -Service Binding is a Technology Preview feature only. Technology Preview features -are not supported with Red Hat production service level agreements (SLAs) and -might not be functionally complete. Red Hat does not recommend using them -in production. These features provide early access to upcoming product -features, enabling customers to test functionality and provide feedback during -the development process. +Service Binding is a Technology Preview feature only. Technology Preview features are not supported with Red Hat production service level agreements (SLAs) and might not be functionally complete. Red Hat does not recommend using them in production. These features provide early access to upcoming product features, enabling customers to test functionality and provide feedback during the development process. -For more information about the support scope of Red Hat Technology Preview -features, see https://access.redhat.com/support/offerings/techpreview/. +For more information about the support scope of Red Hat Technology Preview features, see https://access.redhat.com/support/offerings/techpreview/. ==== - [NOTE] ==== Currently, a few specific Operators like the *etcd* and the *PostgresSQL Database* Operator's service instances are bindable. @@ -61,16 +53,16 @@ Currently, a few specific Operators like the *etcd* and the *PostgresSQL Databas You can establish a binding connection with Operator-backed components. -This procedure walks through an example of creating a binding connection between a PostgreSQL Database service and a Node.js application. To create a binding connection with a service that is backed by the PostgreSQL Database Operator, you must first add the Red Hat-provided PostgreSQL Database Operator to the *OperatorHub* using a backing `OperatorSource`, and then install the Operator. +This procedure walks through an example of creating a binding connection between a PostgreSQL Database service and a Node.js application. To create a binding connection with a service that is backed by the PostgreSQL Database Operator, you must first add the Red Hat-provided PostgreSQL Database Operator to the OperatorHub using a backing Operator source, and then install the Operator. .Prerequisite * Ensure that you have created and deployed a Node.js application using the *Developer* perspective. -* Ensure that you have installed the *Service Binding Operator* from *OperatorHub*. +* Ensure that you have installed the *Service Binding Operator* from OperatorHub. 
.Procedure
-. Create a backing `OperatorSource` that adds the PostgresSQL Operator provided by Red Hat to the *OperatorHub*. A backing `OperatorSource` exposes the binding information in secrets, ConfigMaps, status, and spec attributes.
+. Create a backing Operator source that adds the PostgreSQL Operator provided by Red Hat to the OperatorHub. A backing Operator source exposes the binding information in secrets, config maps, status, and spec attributes.
.. In the *Add* view, click the *YAML* option to see the *Import YAML* screen.
-.. Add the following YAML file to apply the `OperatorSource`:
+.. Add the following YAML file to apply the Operator source:
+
[source,yaml]
----
@@ -84,8 +76,8 @@ spec:
  endpoint: https://quay.io/cnr
  registryNamespace: pmacik
----
-.. Click *Create* to create the `OperatorSource` in your cluster.
-. Install the Red Hat-provided *PostgreSQL Database* Operator:
+.. Click *Create* to create the Operator source in your cluster.
+. Install the Red Hat-provided PostgreSQL Database Operator:
.. In the *Administrator* perspective of the console, navigate to *Operators -> OperatorHub*.
.. In the *Database* category, select the *PostgreSQL Database* Operator and install it.
. Create a database (DB) instance for the application:
@@ -108,7 +100,7 @@ spec:
A DB instance is now deployed in the *Topology* view.
. In the *Topology* view, hover over the Node.js component to see a dangling arrow on the node.
-. Click and drag the arrow towards the *db-demo-postgresql* service to make a binding connection with the Node.js application. A `ServiceBindingRequest` is created and the *Service Binding Operator* controller injects the DB connection information into the application `Deployment` as environment variables. After the request is successful, the application is redeployed and the connection is established.
+. Click and drag the arrow towards the *db-demo-postgresql* service to make a binding connection with the Node.js application. A service binding request is created and the *Service Binding Operator* controller injects the DB connection information into the application deployment as environment variables. After the request is successful, the application is redeployed and the connection is established.
+
.Binding connector
image::odc-binding-connector.png[]
diff --git a/modules/odc-editing-health-checks.adoc b/modules/odc-editing-health-checks.adoc
index cd22ca2493..8b267ab469 100644
--- a/modules/odc-editing-health-checks.adoc
+++ b/modules/odc-editing-health-checks.adoc
@@ -27,6 +27,6 @@ You can use the *Topology* view to edit health checks added to your application,
.. Click *Add Liveness Probe*, to see a form containing the parameters for the probe.
.. Edit the probe parameters as required, and click the check mark at the bottom of the form. The *Liveness Probe Added* message is displayed.
. Click *Save* to save your modifications and add the additional probes to your container. You are redirected to the *Topology* view.
-. In the side panel, verify that the probes have been added by clicking on the deployed Pod under the *Pods* section.
+. In the side panel, verify that the probes have been added by clicking on the deployed pod under the *Pods* section.
. In the *Pod Details* page, click the listed container in the *Containers* section.
. In the *Container Details* page, verify that the Liveness probe - `HTTP Get 10.129.4.65:8080/` has been added to the container, in addition to the earlier existing probes.
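For reference, a liveness probe added through this flow is stored on the container definition in the workload's pod template. The following sketch is illustrative only; the actual path, port, and timing values depend on what you enter in the probe form:

[source,yaml]
----
livenessProbe:          # added under the container entry in the pod template
  httpGet:
    path: /             # endpoint that the kubelet probes
    port: 8080          # container port to probe
  periodSeconds: 10     # how often the probe runs
  failureThreshold: 3   # consecutive failures before the container restarts
----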
diff --git a/modules/odc-grouping-multiple-components.adoc b/modules/odc-grouping-multiple-components.adoc index bf05c29fd6..617266de01 100644 --- a/modules/odc-grouping-multiple-components.adoc +++ b/modules/odc-grouping-multiple-components.adoc @@ -21,7 +21,7 @@ The following procedure adds a MongoDB database service to an existing applicati .. Click *Instantiate Template* to see an automatically populated template with details for the MongoDB service, and click *Create* to create the service. . On the left navigation panel, click *Topology* to see the MongoDB service deployed in your project. -. To add the MongoDB service to the existing application group, select the *mongodb* Pod and drag it to the application; the MongoDB service is added to the existing application group. +. To add the MongoDB service to the existing application group, select the *mongodb* pod and drag it to the application; the MongoDB service is added to the existing application group. . Dragging a component and adding it to an application group automatically adds the required labels to the component. Click on the MongoDB service node to see the label `app.kubernetes.io/part-of=myapp` added to the *Labels* section in the *Overview* Panel. + .Application grouping @@ -29,7 +29,7 @@ image::odc_app_grouping_label.png[] Alternatively, you can also add the component to an application as follows: -. To add the MongoDB service to your application, click on the *mongodb* Pod to see the *Overview* panel to the right. +. To add the MongoDB service to your application, click on the *mongodb* pod to see the *Overview* panel to the right. . Click the *Actions* drop-down menu on the upper right of the panel and select *Edit Application Grouping*. . In the *Edit Application Grouping* dialog box, click the *Select an Application* drop-down list, and select the appropriate application group. . Click *Save* to see the MongoDB service added to the application group. diff --git a/modules/odc-starting-recreate-deployment.adoc b/modules/odc-starting-recreate-deployment.adoc index 28a2b1f7fd..8545329fa4 100644 --- a/modules/odc-starting-recreate-deployment.adoc +++ b/modules/odc-starting-recreate-deployment.adoc @@ -3,9 +3,9 @@ // * applications/deployments/deployment-strategies.adoc [id="odc-starting-recreate-deployment_{context}"] -= Starting a Recreate deployment using the Developer perspective += Starting a recreate deployment using the Developer perspective -You can switch the deployment strategy from the default Rolling update to a Recreate update using the *Developer* perspective in the web console. +You can switch the deployment strategy from the default rolling update to a recreate update using the *Developer* perspective in the web console. .Prerequisites * Ensure that you are in the *Developer* perspective of the web console. @@ -13,12 +13,12 @@ You can switch the deployment strategy from the default Rolling update to a Recr .Procedure -To switch to a Recreate update strategy and to upgrade an application: +To switch to a recreate update strategy and to upgrade an application: . In the *Actions* drop-down menu, select *Edit Deployment Config* to see the deployment configuration details of the application. . In the YAML editor, change the `spec.strategy.type` to `Recreate` and click *Save*. . In the *Topology* view, select the node to see the *Overview* tab in the side panel. The *Update Strategy* is now set to *Recreate*. -. 
Use the *Actions* drop-down menu to select *Start Rollout* to start an update using the Recreate strategy. The Recreate strategy first terminates pods for the older version of the application and then spins up pods for the new version. +. Use the *Actions* drop-down menu to select *Start Rollout* to start an update using the recreate strategy. The recreate strategy first terminates pods for the older version of the application and then spins up pods for the new version. + .Recreate update image::odc-recreate-update.png[] diff --git a/modules/odc-starting-rolling-deployment.adoc b/modules/odc-starting-rolling-deployment.adoc index 72eecd6bcd..369a1de673 100644 --- a/modules/odc-starting-rolling-deployment.adoc +++ b/modules/odc-starting-rolling-deployment.adoc @@ -3,7 +3,7 @@ // * applications/deployments/deployment-strategies.adoc [id="odc-starting-rolling-deployment_{context}"] -= Starting a Rolling deployment using the Developer perspective += Starting a rolling deployment using the Developer perspective .Prerequisites * Ensure that you are in the *Developer* perspective of the web console. diff --git a/modules/olm-creating-etcd-cluster-from-operator.adoc b/modules/olm-creating-etcd-cluster-from-operator.adoc index a1834bbf72..ef74eb343d 100644 --- a/modules/olm-creating-etcd-cluster-from-operator.adoc +++ b/modules/olm-creating-etcd-cluster-from-operator.adoc @@ -9,8 +9,8 @@ This procedure walks through creating a new etcd cluster using the etcd Operator .Prerequisites -- Access to an {product-title} {product-version} cluster. -- The etcd Operator already installed cluster-wide by an administrator. +* Access to an {product-title} {product-version} cluster. +* The etcd Operator already installed cluster-wide by an administrator. .Procedure @@ -36,7 +36,7 @@ As shown under *Provided APIs*, this Operator makes available three new resource .. In the *etcd Cluster* API box, click *Create New*. -.. The next screen allows you to make any modifications to the minimal starting template of an `EtcdCluster` object, such as the size of the cluster. For now, click *Create* to finalize. This triggers the Operator to start up the pods, Services, and other components of the new etcd cluster. +.. The next screen allows you to make any modifications to the minimal starting template of an `EtcdCluster` object, such as the size of the cluster. For now, click *Create* to finalize. This triggers the Operator to start up the pods, services, and other components of the new etcd cluster. . Click the *Resources* tab to see that your project now contains a number of resources created and configured automatically by the Operator. + diff --git a/modules/pruning-builds.adoc b/modules/pruning-builds.adoc index 9cccfa1f00..1edbe30703 100644 --- a/modules/pruning-builds.adoc +++ b/modules/pruning-builds.adoc @@ -5,8 +5,7 @@ [id="pruning-builds_{context}"] = Pruning builds -In order to prune builds that are no longer required by the system due to age -and status, administrators can run the following command: +In order to prune builds that are no longer required by the system due to age and status, administrators can run the following command: [source,terminal] ---- @@ -23,20 +22,16 @@ $ oc adm prune builds [] |Indicate that pruning should occur, instead of performing a dry-run. .^|`--orphans` -|Prune all builds whose Build Configuration no longer exists, status is complete, -failed, error, or canceled. +|Prune all builds whose build configuration no longer exists, status is complete, failed, error, or canceled. 
.^|`--keep-complete=`
-|Per Build Configuration, keep the last `N` builds whose status is complete (default
-`5`).
+|Per build configuration, keep the last `N` builds whose status is complete (default `5`).
.^|`--keep-failed=`
-|Per Build Configuration, keep the last `N` builds whose status is failed, error, or
-canceled (default `1`).
+|Per build configuration, keep the last `N` builds whose status is failed, error, or canceled (default `1`).
.^|`--keep-younger-than=`
-|Do not prune any object that is younger than `` relative to the
-current time (default `60m`).
+|Do not prune any object that is younger than `` relative to the current time (default `60m`).
|===
To see what a pruning operation would delete:
@@ -57,6 +52,5 @@ $ oc adm prune builds --orphans --keep-complete=5 --keep-failed=1 \
[NOTE]
====
-Developers can enable automatic build pruning by modifying their Build
-Configuration.
+Developers can enable automatic build pruning by modifying their build configuration.
====
diff --git a/modules/pruning-deployments.adoc b/modules/pruning-deployments.adoc
index 4685eaec14..3830d878b0 100644
--- a/modules/pruning-deployments.adoc
+++ b/modules/pruning-deployments.adoc
@@ -5,8 +5,7 @@
[id="pruning-deployments_{context}"]
= Pruning deployments
-In order to prune deployments that are no longer required by the system due to
-age and status, administrators can run the following command:
+In order to prune deployments that are no longer required by the system due to age and status, administrators can run the following command:
[source,terminal]
----
@@ -23,22 +22,16 @@ $ oc adm prune deployments []
|Indicate that pruning should occur, instead of performing a dry-run.
.^|`--orphans`
-|Prune all deployments that no longer have a DeploymentConfig, has status
-is `Complete` or `Failed`, and has a replica count of zero.
+|Prune all deployments that no longer have a `DeploymentConfig` object, have a status of `Complete` or `Failed`, and have a replica count of zero.
.^|`--keep-complete=`
-|Per DeploymentConfig, keep the last `N` deployments that have a status
-of `Complete` and replica count of zero. (default `5`)
+|Per the `DeploymentConfig` object, keep the last `N` deployments that have a status of `Complete` and replica count of zero. (default `5`)
.^|`--keep-failed=`
-|Per DeploymentConfig, keep the last `N` deployments that have a status
-of `Failed` and replica count of zero. (default `1`)
+|Per the `DeploymentConfig` object, keep the last `N` deployments that have a status of `Failed` and replica count of zero. (default `1`)
.^|`--keep-younger-than=`
-|Do not prune any object that is younger than `` relative to the
-current time. (default `60m`) Valid units of measurement include nanoseconds
-(`ns`), microseconds (`us`), milliseconds (`ms`), seconds (`s`), minutes (`m`),
-and hours (`h`).
+|Do not prune any object that is younger than `` relative to the current time. (default `60m`) Valid units of measurement include nanoseconds (`ns`), microseconds (`us`), milliseconds (`ms`), seconds (`s`), minutes (`m`), and hours (`h`).
|===
To see what a pruning operation would delete:
diff --git a/modules/pruning-hard-pruning-registry.adoc b/modules/pruning-hard-pruning-registry.adoc
index 0c7ec12443..1dbbcd0a80 100644
--- a/modules/pruning-hard-pruning-registry.adoc
+++ b/modules/pruning-hard-pruning-registry.adoc
@@ -99,7 +99,7 @@ $ oc adm policy add-cluster-role-to-user \
. *(Optional) Run the pruner in dry-run mode.*
+
-To see how many blobs would be removed, run the hard pruner in dry-run mode.
No changes are actually made. The following example references an image registry Pod called `image-registry-3-vhndw`:
+To see how many blobs would be removed, run the hard pruner in dry-run mode. No changes are actually made. The following example references an image registry pod called `image-registry-3-vhndw`:
+
[source,terminal]
----
@@ -132,7 +132,7 @@ Use -prune=delete to actually delete the data
. *Run the hard prune.*
+
-Execute the following command inside one running instance of a `image-registry` Pod to run the hard prune. The following example references an image registry Pod called `image-registry-3-vhndw`:
+Execute the following command inside one running instance of an `image-registry` pod to run the hard prune. The following example references an image registry pod called `image-registry-3-vhndw`:
+
[source,terminal]
----
diff --git a/modules/pruning-images-manual.adoc b/modules/pruning-images-manual.adoc
index e2283ba477..d69374f2e7 100644
--- a/modules/pruning-images-manual.adoc
+++ b/modules/pruning-images-manual.adoc
@@ -5,7 +5,7 @@
[id="pruning-images-manual_{context}"]
= Manually pruning images
-The Pruning Custom Resource enables automatic image pruning. However, administrators can manually prune images that are no longer required by the system due to age, status, or exceed limits. There are two methods to manually prune images:
+The pruning custom resource enables automatic image pruning. However, administrators can manually prune images that are no longer required by the system due to age or status, or that exceed limits. There are two methods to manually prune images:
* Running image pruning as a `Job` or `CronJob` on the cluster.
* Running the `oc adm prune images` command.
@@ -93,16 +93,11 @@ items:
$ oc adm prune images []
----
+
-Pruning images removes data from the integrated registry unless
-`--prune-registry=false` is used.
+Pruning images removes data from the integrated registry unless `--prune-registry=false` is used.
+
-Pruning images with the `--namespace` flag does not remove images, only
-imagestreams. Images are non-namespaced resources. Therefore, limiting pruning to a particular namespace makes it impossible to calculate its current usage.
+Pruning images with the `--namespace` flag does not remove images, only image streams. Images are non-namespaced resources. Therefore, limiting pruning to a particular namespace makes it impossible to calculate its current usage.
+
-By default, the integrated registry caches metadata of blobs to reduce the number
-of requests to storage, and to increase the request-processing speed.
-Pruning does not update the integrated registry cache. Images that still contain pruned layers after pruning will be broken because the pruned layers
-that have metadata in the cache will not be pushed. Therefore, you must redeploy the registry to clear the cache after pruning:
+By default, the integrated registry caches metadata of blobs to reduce the number of requests to storage, and to increase the request-processing speed. Pruning does not update the integrated registry cache. Images that still contain pruned layers after pruning will be broken because the pruned layers that have metadata in the cache will not be pushed. Therefore, you must redeploy the registry to clear the cache after pruning:
+
[source,terminal]
----
$ oc rollout restart deployment/image-registry -n openshift-image-registry
+
If the integrated registry uses a Redis cache, you must clean the database manually.
+ -If redeploying the registry after pruning is not an option, then you must -permanently disable the cache. +If redeploying the registry after pruning is not an option, then you must permanently disable the cache. + `oc adm prune images` operations require a route for your registry. Registry routes are not created by default. + @@ -161,7 +155,7 @@ This flag cannot be combined with `--keep-tag-revisions` nor .^|`--registry-url` |The address to use when contacting the registry. The command attempts to use a -cluster-internal URL determined from managed images and imagestreams. In case +cluster-internal URL determined from managed images and image streams. In case it fails (the registry cannot be resolved or reached), an alternative route that works needs to be provided using this flag. The registry host name can be prefixed by `https://` or `http://`, which enforces particular connection @@ -184,52 +178,41 @@ You can apply conditions to your manually pruned images. * To remove any image managed by {product-title}, or images with the annotation `openshift.io/image.managed`: ** Created at least `--keep-younger-than` minutes ago and are not currently referenced by any: *** Pods created less than `--keep-younger-than` minutes ago -*** Imagestreams created less than `--keep-younger-than` minutes ago +*** Image streams created less than `--keep-younger-than` minutes ago *** Running pods *** Pending pods -*** ReplicationControllers +*** Replication controllers *** Deployments -*** DeploymentConfigs -*** ReplicaSets -*** Build Configurations +*** Deployment configs +*** Replica sets +*** Build configurations *** Builds *** `--keep-tag-revisions` most recent items in `stream.status.tags[].items` ** That are exceeding the smallest limit defined in the same project and are not currently referenced by any: *** Running pods *** Pending pods -*** ReplicationControllers +*** Replication controllers *** Deployments -*** DeploymentConfigs -*** ReplicaSets -*** Build Configurations +*** Deployment configs +*** Replica sets +*** Build configurations *** Builds * There is no support for pruning from external registries. * When an image is pruned, all references to the image are removed from all -imagestreams that have a reference to the image in `status.tags`. +image streams that have a reference to the image in `status.tags`. * Image layers that are no longer referenced by any images are removed. [NOTE] ==== -The `--prune-over-size-limit` flag cannot be combined with the -`--keep-tag-revisions` flag nor the `--keep-younger-than` flags. Doing so returns +The `--prune-over-size-limit` flag cannot be combined with the `--keep-tag-revisions` flag nor the `--keep-younger-than` flags. Doing so returns information that this operation is not allowed. ==== -Separating the removal of {product-title} image API objects and image data from -the registry by using `--prune-registry=false`, followed by hard pruning the -registry, can narrow timing windows and is safer when compared to trying to -prune both through one command. However, timing windows are not completely -removed. +Separating the removal of {product-title} image API objects and image data from the registry by using `--prune-registry=false`, followed by hard pruning the registry, can narrow timing windows and is safer when compared to trying to prune both through one command. However, timing windows are not completely removed. -For example, you can still create a Pod referencing an image as pruning -identifies that image for pruning. 
You should still keep track of an API object -created during the pruning operations that might reference images so that you can -mitigate any references to deleted content. +For example, you can still create a Pod referencing an image as pruning identifies that image for pruning. You should still keep track of an API object created during the pruning operations that might reference images so that you can mitigate any references to deleted content. -Re-doing the pruning without the `--prune-registry` option or with -`--prune-registry=true` does not lead to pruning the associated storage in the image registry for images previously pruned by `--prune-registry=false`. -Any images that were pruned with `--prune-registry=false` can only be deleted from -registry storage by hard pruning the registry. +Re-doing the pruning without the `--prune-registry` option or with `--prune-registry=true` does not lead to pruning the associated storage in the image registry for images previously pruned by `--prune-registry=false`. Any images that were pruned with `--prune-registry=false` can only be deleted from registry storage by hard pruning the registry. [id="pruning-images-running-operation_{context}"] == Running the image prune operation @@ -238,8 +221,7 @@ registry storage by hard pruning the registry. . To see what a pruning operation would delete: -.. Keeping up to three tag revisions, and keeping resources (images, imagestreams, -and pods) younger than 60 minutes: +.. Keeping up to three tag revisions, and keeping resources (images, image streams, and pods) younger than 60 minutes: + [source,terminal] ---- @@ -286,10 +268,8 @@ or choosing the insecure connection when prompted. [IMPORTANT] ==== -If the registry is secured by a certificate authority different from the one -used by {product-title}, it must be specified using the -`--certificate-authority` flag. Otherwise, the `prune` command fails with an -error. +If the registry is secured by a certificate authority different from the one used by {product-title}, it must be specified using the +`--certificate-authority` flag. Otherwise, the `prune` command fails with an error. ==== [id="pruning-images-problems_{context}"] @@ -308,7 +288,7 @@ Ensure that images you want removed occur at higher positions in each tag history than your chosen tag revisions threshold. For example, consider an old and obsolete image named `sha:abz`. By running the following command in namespace `N`, where the image is tagged, the image is tagged three times in a -single imagestream named `myapp`: +single image stream named `myapp`: [source,terminal] ---- @@ -334,9 +314,7 @@ considered for pruning, the administrator must either: + [WARNING] ==== -This action removes all the tags from all the namespaces with -underlying images, unless they are younger or they are referenced by objects -younger than the specified threshold. +This action removes all the tags from all the namespaces with underlying images, unless they are younger or they are referenced by objects younger than the specified threshold. ==== * Delete all the `istags` where the position is below the revision threshold, @@ -385,16 +363,13 @@ error: error communicating with registry: [Get https://172.30.30.30:5000/healthz By default, the certificate authority data stored in the user's configuration files is used; the same is true for communication with the master API. -Use the `--certificate-authority` option to provide the right certificate -authority for the container image registry server. 
+Use the `--certificate-authority` option to provide the right certificate authority for the container image registry server. [discrete] [id="pruning-images-wrong-ca_{context}"] ==== Using the wrong certificate authority -The following error means that the certificate authority used to sign the -certificate of the secured container image registry is different from the -authority used by the client: +The following error means that the certificate authority used to sign the certificate of the secured container image registry is different from the authority used by the client: [source,terminal] ---- @@ -403,5 +378,4 @@ error: error communicating with registry: Get https://172.30.30.30:5000/: x509: Make sure to provide the right one with the flag `--certificate-authority`. -As a workaround, the `--force-insecure` flag can be added instead. However, this -is not recommended. +As a workaround, the `--force-insecure` flag can be added instead. However, this is not recommended. diff --git a/modules/pruning-images.adoc b/modules/pruning-images.adoc index a743f5203f..a02f72dc9f 100644 --- a/modules/pruning-images.adoc +++ b/modules/pruning-images.adoc @@ -5,12 +5,11 @@ [id="pruning-images_{context}"] = Automatically pruning images -Images that are no longer required by the system due to age, -status, or exceed limits are automatically pruned. Cluster administrators can configure the Pruning Custom Resource, or delete it to disable it. +Images that are no longer required by the system due to age, status, or exceed limits are automatically pruned. Cluster administrators can configure the pruning custom resource, or delete it to disable it. [NOTE] ==== -When the Pruning Custom Resource is deleted, the pruning `CronJob` and its related components should also be deleted. +When the pruning custom resource is deleted, the pruning `CronJob` and its related components should also be deleted. ==== .Prerequisites @@ -61,9 +60,9 @@ status: <3> `keepTagRevisions`: The number of revisions per tag to keep. This is an optional field, and it defaults to `3` if not set. <4> `keepYoungerThan`: Retain images younger than this duration. This is an optional field, and it defaults `60m` if not set. <5> `resources`: Standard `Pod` resource requests and limits. This is an optional field. -<6> `affinity`: Standard Pod affinity. This is an optional field. -<7> `nodeSelector`: Standard Pod node selector for the image pruner pod. This is an optional field. -<8> `tolerations`: Standard Pod tolerations. This is an optional field. +<6> `affinity`: Standard pod affinity. This is an optional field. +<7> `nodeSelector`: Standard pod node selector for the image pruner pod. This is an optional field. +<8> `tolerations`: Standard pod tolerations. This is an optional field. <9> `startingDeadlineSeconds`: Start deadline for `CronJob`. This is an optional field. <10> `successfulJobsHistoryLimit`: The maximum number of successful jobs to retain. Must be `>= 1` to ensure metrics are reported. Defaults to `3` if not set. <11> `failedJobsHistoryLimit`: The maximum number of failed jobs to retain. Must be `>= 1` to ensure metrics are reported. Defaults to `3` if not set. diff --git a/modules/quotas-requiring-explicit-quota.adoc b/modules/quotas-requiring-explicit-quota.adoc index 9a760fb8d0..31d5905445 100644 --- a/modules/quotas-requiring-explicit-quota.adoc +++ b/modules/quotas-requiring-explicit-quota.adoc @@ -88,7 +88,7 @@ $ oc get templates -n openshift-config $ oc edit template -n openshift-config ---- + -.. 
Add a resource quota definition, such as the preceding 'storage-consumption' example, into the existing template. The definition must be added before the `parameters:` section in the template. +.. Add a resource quota definition, such as the preceding `storage-consumption` example, into the existing template. The definition must be added before the `parameters:` section in the template. . If you created a project request template, reference it in the cluster's project configuration resource: .. Access the project configuration resource for editing: diff --git a/modules/quotas-sample-resource-quotas-def.adoc b/modules/quotas-sample-resource-quotas-def.adoc index 1ab993586d..1bac9aa6d7 100644 --- a/modules/quotas-sample-resource-quotas-def.adoc +++ b/modules/quotas-sample-resource-quotas-def.adoc @@ -24,7 +24,7 @@ spec: <1> The total number of `ConfigMap` objects that can exist in the project. <2> The total number of persistent volume claims (PVCs) that can exist in the project. -<3> The total number of ReplicationControllers that can exist in the project. +<3> The total number of replication controllers that can exist in the project. <4> The total number of secrets that can exist in the project. <5> The total number of services that can exist in the project. <6> The total number of services of type `LoadBalancer` that can exist in the project. @@ -40,7 +40,7 @@ spec: hard: openshift.io/imagestreams: "10" <1> ---- -<1> The total number of imagestreams that can exist in the project. +<1> The total number of image streams that can exist in the project. .`compute-resources.yaml` [source,yaml] @@ -59,20 +59,13 @@ spec: limits.memory: 2Gi <6> limits.ephemeral-storage: 4Gi <7> ---- -<1> The total number of pods in a non-terminal state that can exist in the -project. -<2> Across all pods in a non-terminal state, the sum of CPU requests cannot -exceed 1 core. -<3> Across all pods in a non-terminal state, the sum of memory requests cannot -exceed 1Gi. -<4> Across all pods in a non-terminal state, the sum of ephemeral storage requests cannot -exceed 2Gi. -<5> Across all pods in a non-terminal state, the sum of CPU limits cannot exceed -2 cores. -<6> Across all pods in a non-terminal state, the sum of memory limits cannot -exceed 2Gi. -<7> Across all pods in a non-terminal state, the sum of ephemeral storage limits cannot -exceed 4Gi. +<1> The total number of pods in a non-terminal state that can exist in the project. +<2> Across all pods in a non-terminal state, the sum of CPU requests cannot exceed 1 core. +<3> Across all pods in a non-terminal state, the sum of memory requests cannot exceed 1Gi. +<4> Across all pods in a non-terminal state, the sum of ephemeral storage requests cannot exceed 2Gi. +<5> Across all pods in a non-terminal state, the sum of CPU limits cannot exceed 2 cores. +<6> Across all pods in a non-terminal state, the sum of memory limits cannot exceed 2Gi. +<7> Across all pods in a non-terminal state, the sum of ephemeral storage limits cannot exceed 4Gi. .`besteffort.yaml` [source,yaml] @@ -87,10 +80,8 @@ spec: scopes: - BestEffort <2> ---- -<1> The total number of pods in a non-terminal state with `BestEffort` quality -of service that can exist in the project. -<2> Restricts the quota to only matching pods that have `BestEffort` quality of -service for either memory or CPU. +<1> The total number of pods in a non-terminal state with `BestEffort` quality of service that can exist in the project. 
+<2> Restricts the quota to only matching pods that have `BestEffort` quality of service for either memory or CPU. .`compute-resources-long-running.yaml` [source,yaml] @@ -109,15 +100,10 @@ spec: - NotTerminating <5> ---- <1> The total number of pods in a non-terminal state. -<2> Across all pods in a non-terminal state, the sum of CPU limits cannot exceed -this value. -<3> Across all pods in a non-terminal state, the sum of memory limits cannot exceed -this value. -<4> Across all pods in a non-terminal state, the sum of ephemeral storage limits cannot exceed -this value. -<5> Restricts the quota to only matching pods where `spec.activeDeadlineSeconds` is -set to `nil`. Build pods will fall under `NotTerminating` unless the -`RestartNever` policy is applied. +<2> Across all pods in a non-terminal state, the sum of CPU limits cannot exceed this value. +<3> Across all pods in a non-terminal state, the sum of memory limits cannot exceed this value. +<4> Across all pods in a non-terminal state, the sum of ephemeral storage limits cannot exceed this value. +<5> Restricts the quota to only matching pods where `spec.activeDeadlineSeconds` is set to `nil`. Build pods will fall under `NotTerminating` unless the `RestartNever` policy is applied. .`compute-resources-time-bound.yaml` [source,yaml] @@ -139,8 +125,7 @@ spec: <2> Across all pods in a non-terminal state, the sum of CPU limits cannot exceed this value. <3> Across all pods in a non-terminal state, the sum of memory limits cannot exceed this value. <4> Across all pods in a non-terminal state, the sum of ephemeral storage limits cannot exceed this value. -<5> Restricts the quota to only matching pods where `spec.activeDeadlineSeconds >=0`. For example, -this quota would charge for build or deployer pods, but not long running pods like a web server or database. +<5> Restricts the quota to only matching pods where `spec.activeDeadlineSeconds >=0`. For example, this quota would charge for build or deployer pods, but not long running pods like a web server or database. .`storage-consumption.yaml` [source,yaml] diff --git a/modules/quotas-selecting-projects.adoc b/modules/quotas-selecting-projects.adoc index 394d922102..785d51950a 100644 --- a/modules/quotas-selecting-projects.adoc +++ b/modules/quotas-selecting-projects.adoc @@ -5,8 +5,7 @@ [id="quotas-setting-projects_{context}"] = Selecting multiple projects during quota creation -When creating quotas, you can select multiple projects based on annotation -selection, label selection, or both. +When creating quotas, you can select multiple projects based on annotation selection, label selection, or both. .Procedure @@ -20,7 +19,7 @@ $ oc create clusterquota for-user \ --hard secrets=20 ---- + -This creates the following ClusterResourceQuota object: +This creates the following `ClusterResourceQuota` object: + [source,yaml] ---- @@ -61,9 +60,7 @@ status: <4> A per-namespace map that describes current quota usage in each selected project. <5> The aggregate usage across all selected projects. + -This multi-project quota document controls all projects requested by -`` using the default project request endpoint. You are limited to 10 -pods and 20 secrets. +This multi-project quota document controls all projects requested by `` using the default project request endpoint. You are limited to 10 pods and 20 secrets. . 
Similarly, to select projects based on labels, run this command:
+
@@ -74,11 +71,10 @@ $ oc create clusterresourcequota for-name \//<1>
    --hard=pods=10 --hard=secrets=20
----
+
-<1> Both `clusterresourcequota` and `clusterquota` are aliases of the same
-command. `for-name` is the name of the ClusterResourceQuota object.
+<1> Both `clusterresourcequota` and `clusterquota` are aliases of the same command. `for-name` is the name of the `ClusterResourceQuota` object.
<2> To select projects by label, provide a key-value pair by using the format `--project-label-selector=key=value`.
+
-This creates the following ClusterResourceQuota object definition:
+This creates the following `ClusterResourceQuota` object definition:
+
[source,yaml]
----
diff --git a/modules/quotas-viewing-clusterresourcequotas.adoc b/modules/quotas-viewing-clusterresourcequotas.adoc
index 49668cbb4b..9ae159e3bd 100644
--- a/modules/quotas-viewing-clusterresourcequotas.adoc
+++ b/modules/quotas-viewing-clusterresourcequotas.adoc
@@ -3,13 +3,9 @@
// * applications/quotas/quotas-setting-across-multiple-projects.adoc

[id="quotas-viewing-clusterresourcequotas_{context}"]
-= Viewing applicable ClusterResourceQuotas
+= Viewing applicable cluster resource quotas

-A project administrator is not allowed to create or modify the multi-project
-quota that limits his or her project, but the administrator is allowed to view the
-multi-project quota documents that are applied to his or her project. The
-project administrator can do this via the `AppliedClusterResourceQuota`
-resource.
+A project administrator is not allowed to create or modify the multi-project quota that limits their project, but they can view the multi-project quota documents that are applied to their project. The project administrator can do this through the `AppliedClusterResourceQuota` resource.

.Procedure

diff --git a/modules/setting-resource-quota-for-extended-resources.adoc b/modules/setting-resource-quota-for-extended-resources.adoc
index 405257e0a7..d8fbb8c54a 100644
--- a/modules/setting-resource-quota-for-extended-resources.adoc
+++ b/modules/setting-resource-quota-for-extended-resources.adoc
@@ -5,11 +5,7 @@
[id="setting-resource-quota-for-extended-resources_{context}"]
= Setting resource quota for extended resources

-Overcommitment of resources is not allowed for extended resources, so you must
-specify `requests` and `limits` for the same extended resource in a quota.
-Currently, only quota items with the prefix `requests.` is allowed for extended
-resources. The following is an example scenario of how to set resource quota for
-the GPU resource `nvidia.com/gpu`.
+Overcommitment of resources is not allowed for extended resources, so you must specify `requests` and `limits` for the same extended resource in a quota. Currently, only quota items with the prefix `requests.` are allowed for extended resources. The following example shows how to set a resource quota for the GPU resource `nvidia.com/gpu`.

.Procedure

@@ -83,7 +79,7 @@ Resource Used Hard
 requests.nvidia.com/gpu 0 1
----

-. Define a Pod that asks for a single GPU. The following example definition file is called `gpu-pod.yaml`:
+. Define a pod that requests a single GPU. The following example definition file is called `gpu-pod.yaml`:
+
[source,yaml]
----
@@ -111,7 +107,7 @@ spec:
      nvidia.com/gpu: 1
----

-. Create the Pod:
+. Create the pod:
+
[source,terminal]
----
@@ -149,8 +145,7 @@ Resource Used Hard
 requests.nvidia.com/gpu 1 1
----

-.
Attempt to create a second GPU pod in the `nvidia` namespace. This is -technically available on the node because it has 2 GPUs: +. Attempt to create a second GPU pod in the `nvidia` namespace. This is technically available on the node because it has 2 GPUs: + [source,terminal] ---- @@ -163,5 +158,4 @@ technically available on the node because it has 2 GPUs: Error from server (Forbidden): error when creating "gpu-pod.yaml": pods "gpu-pod-f7z2w" is forbidden: exceeded quota: gpu-quota, requested: requests.nvidia.com/gpu=1, used: requests.nvidia.com/gpu=1, limited: requests.nvidia.com/gpu=1 ---- + -This *Forbidden* error message is expected because you have a quota of 1 GPU and -this pod tried to allocate a second GPU, which exceeds its quota. +This *Forbidden* error message is expected because you have a quota of 1 GPU and this pod tried to allocate a second GPU, which exceeds its quota. diff --git a/rest_api/workloads_apis/deploymentconfig-apps-openshift-io-v1.adoc b/rest_api/workloads_apis/deploymentconfig-apps-openshift-io-v1.adoc index b9d5df6b78..f9311d8810 100644 --- a/rest_api/workloads_apis/deploymentconfig-apps-openshift-io-v1.adoc +++ b/rest_api/workloads_apis/deploymentconfig-apps-openshift-io-v1.adoc @@ -35,7 +35,7 @@ Required:: | `.metadata` | xref:../objects/index.adoc#objectmeta-meta-v1[`ObjectMeta meta/v1`] -| +| | `.spec` | `object` @@ -1214,7 +1214,7 @@ Defaults to unset | Parameter | Type | Description | `body` | xref:../objects/index.adoc#deleteoptions-meta-v1[`DeleteOptions meta/v1`] -| +| |=== .HTTP responses @@ -1311,7 +1311,7 @@ Description:: | Parameter | Type | Description | `body` | xref:../workloads_apis/deploymentconfig-apps-openshift-io-v1.adoc#deploymentconfig-apps-openshift-io-v1[`DeploymentConfig apps.openshift.io/v1`] -| +| |=== .HTTP responses @@ -1383,7 +1383,7 @@ Description:: | Parameter | Type | Description | `body` | xref:../objects/index.adoc#deleteoptions-meta-v1[`DeleteOptions meta/v1`] -| +| |=== .HTTP responses @@ -1456,7 +1456,7 @@ Description:: | Parameter | Type | Description | `body` | xref:../objects/index.adoc#patch-meta-v1[`Patch meta/v1`] -| +| |=== .HTTP responses @@ -1494,7 +1494,7 @@ Description:: | Parameter | Type | Description | `body` | xref:../workloads_apis/deploymentconfig-apps-openshift-io-v1.adoc#deploymentconfig-apps-openshift-io-v1[`DeploymentConfig apps.openshift.io/v1`] -| +| |=== .HTTP responses @@ -1646,7 +1646,7 @@ Description:: | Parameter | Type | Description | `body` | xref:../objects/index.adoc#patch-meta-v1[`Patch meta/v1`] -| +| |=== .HTTP responses @@ -1684,7 +1684,7 @@ Description:: | Parameter | Type | Description | `body` | xref:../objects/index.adoc#scale-extensions-v1beta1[`Scale extensions/v1beta1`] -| +| |=== .HTTP responses @@ -1768,7 +1768,7 @@ Description:: | Parameter | Type | Description | `body` | xref:../objects/index.adoc#patch-meta-v1[`Patch meta/v1`] -| +| |=== .HTTP responses @@ -1806,7 +1806,7 @@ Description:: | Parameter | Type | Description | `body` | xref:../workloads_apis/deploymentconfig-apps-openshift-io-v1.adoc#deploymentconfig-apps-openshift-io-v1[`DeploymentConfig apps.openshift.io/v1`] -| +| |=== .HTTP responses @@ -1865,7 +1865,7 @@ Description:: | Parameter | Type | Description | `body` | xref:../objects/index.adoc#deploymentconfigrollback-apps-openshift-io-v1[`DeploymentConfigRollback apps.openshift.io/v1`] -| +| |=== .HTTP responses @@ -1926,7 +1926,7 @@ Description:: | Parameter | Type | Description | `body` | 
xref:../objects/index.adoc#deploymentrequest-apps-openshift-io-v1[`DeploymentRequest apps.openshift.io/v1`] -| +| |=== .HTTP responses @@ -1942,5 +1942,3 @@ Description:: | 401 - Unauthorized | Empty |=== - -
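
The `instantiate` and `rollback` subresources documented in the final file above are the endpoints that the CLI drives. As a minimal sketch only, assuming a hypothetical `DeploymentConfig` named `frontend` and revision `1` (both names are illustrative and not part of this patch):

[source,terminal]
----
$ oc rollout latest dc/frontend        # posts a DeploymentRequest to the instantiate subresource
$ oc rollback frontend --to-version=1  # posts a DeploymentConfigRollback to the rollback subresource
----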