diff --git a/_topic_map.yml b/_topic_map.yml
index e9b171a62e..3be54992bc 100644
--- a/_topic_map.yml
+++ b/_topic_map.yml
@@ -217,14 +217,20 @@ Topics:
     File: creating-project-other-user
   Distros: openshift-enterprise,openshift-origin
 ---
-Name: Control Plane management
-Dir: control-plane-management
+Name: Machine management
+Dir: machine_management
 Distros: openshift-origin, openshift-enterprise
 Topics:
 - Name: Deploying machine health checks
   File: deploying-machine-health-checks
 - Name: Applying autoscaling to a cluster
   File: applying-autoscaling
+- Name: Manually scaling a MachineSet
+  File: manually-scaling-machineset
+- Name: Creating a MachineSet
+  File: creating-machineset
+- Name: Creating infrastructure MachineSets
+  File: creating-infrastructure-machinesets
 ---
 Name: Networking
 Dir: networking
diff --git a/installing-aws/installing-aws-account.adoc b/installing-aws/installing-aws-account.adoc
index f1ffb8e0fa..9a9761e976 100644
--- a/installing-aws/installing-aws-account.adoc
+++ b/installing-aws/installing-aws-account.adoc
@@ -18,6 +18,7 @@
 include::modules/installation-aws-permissions.adoc[leveloffset=+1]
 
 include::modules/installation-aws-iam-user.adoc[leveloffset=+1]
 
+include::modules/installation-aws-regions.adoc[leveloffset=+1]
 
 .Next steps
diff --git a/installing-aws/installing-customizations-cloud.adoc b/installing-aws/installing-customizations-cloud.adoc
index 8710f83c09..5ef7c0ad77 100644
--- a/installing-aws/installing-customizations-cloud.adoc
+++ b/installing-aws/installing-customizations-cloud.adoc
@@ -33,3 +33,7 @@ include::modules/installation-configuration-parameters.adoc[leveloffset=+2]
 include::modules/installation-launching-installer.adoc[leveloffset=+1]
 
 include::modules/installing-customizations-cloud.adoc[leveloffset=+1]
+
+include::modules/cli-install.adoc[leveloffset=+1]
+
+include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1]
diff --git a/installing-aws/installing-quickly-cloud.adoc b/installing-aws/installing-quickly-cloud.adoc
index b6c0c932e8..850d412f93 100644
--- a/installing-aws/installing-quickly-cloud.adoc
+++ b/installing-aws/installing-quickly-cloud.adoc
@@ -25,4 +25,8 @@
 include::modules/installation-obtaining-installer.adoc[leveloffset=+1]
 
 include::modules/installation-launching-installer.adoc[leveloffset=+1]
 
+include::modules/cli-install.adoc[leveloffset=+1]
+
+include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1]
+
 include::modules/installation-default-aws-config-yaml.adoc[leveloffset=+1]
diff --git a/control-plane-management/applying-autoscaling.adoc b/machine_management/applying-autoscaling.adoc
similarity index 72%
rename from control-plane-management/applying-autoscaling.adoc
rename to machine_management/applying-autoscaling.adoc
index 7eb3ed9e58..83d0391c8c 100644
--- a/control-plane-management/applying-autoscaling.adoc
+++ b/machine_management/applying-autoscaling.adoc
@@ -1,7 +1,7 @@
 [id='applying-autoscaling']
 = Applying autoscaling to a {product-title} cluster
 include::modules/common-attributes.adoc[]
-:context: pplying-autoscaling
+:context: applying-autoscaling
 
 toc::[]
 
@@ -21,7 +21,13 @@ include::modules/machine-autoscaler-about.adoc[leveloffset=+1]
 First, deploy the ClusterAutoscaler to manage automatic resource scaling in
 your {product-title} cluster.
 
-include::modules/cluster-autoscaler-crd.adoc[leveloffset=+2]
+[NOTE]
+====
+Because the ClusterAutoscaler is scoped to the entire cluster, you can create
+only one ClusterAutoscaler for the cluster.
+====
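+
+Because the resource is cluster-scoped, a quick way to confirm that only one
+ClusterAutoscaler exists is to list the resource without specifying a
+namespace. This is a sketch; it assumes the `clusterautoscaler` resource name
+that the ClusterAutoscaler Operator registers:
+
+[source,bash]
+----
+$ oc get clusterautoscaler
+----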
+
+include::modules/cluster-autoscaler-cr.adoc[leveloffset=+2]
 
 :FeatureName: ClusterAutoscaler
 include::modules/deploying-resource.adoc[leveloffset=+2]
@@ -35,11 +41,12 @@ cluster to manage deployments of individual machines.
 
 [NOTE]
 ====
-You must configure separate resources for each MachineSet that you want to
-autoscale.
+You must configure separate resources for each MachineSet. Remember that
+MachineSets are different in each AWS region, so consider whether you want to
+enable machine scaling in multiple regions.
 ====
 
-include::modules/machine-autoscaler-crd.adoc[leveloffset=+2]
+include::modules/machine-autoscaler-cr.adoc[leveloffset=+2]
 
 :FeatureName: MachineAutoscaler
 include::modules/deploying-resource.adoc[leveloffset=+2]
diff --git a/machine_management/creating-infrastructure-machinesets.adoc b/machine_management/creating-infrastructure-machinesets.adoc
new file mode 100644
index 0000000000..269c01e92a
--- /dev/null
+++ b/machine_management/creating-infrastructure-machinesets.adoc
@@ -0,0 +1,44 @@
+[id='creating-infrastructure-machinesets']
+= Creating infrastructure MachineSets
+include::modules/common-attributes.adoc[]
+:context: creating-infrastructure-machinesets
+
+toc::[]
+
+{nbsp}
+
+You can create a MachineSet to host only infrastructure components.
+You apply specific Kubernetes labels to these Machines and then
+update the infrastructure components to run on only those Machines. These
+infrastructure nodes are not counted toward the total number of subscriptions
+that are required to run the environment.
+
+include::modules/infrastructure-components.adoc[leveloffset=+1]
+
+include::modules/machine-api-overview.adoc[leveloffset=+1]
+
+[id='creating-infrastructure-machinesets-production']
+== Creating infrastructure MachineSets for production environments
+
+In a production deployment, deploy at least three MachineSets to hold
+infrastructure components. Both the logging aggregation solution and
+the service mesh deploy ElasticSearch, and ElasticSearch requires three
+instances that are installed on different nodes. For high availability,
+deploy these nodes to different availability zones. Because you need a
+different MachineSet for each availability zone, create at least three
+MachineSets.
+
+include::modules/machineset-creating.adoc[leveloffset=+2]
+
+[id='moving-resources-to-infrastructure-machinesets']
+== Moving resources to infrastructure MachineSets
+
+Some of the infrastructure resources are deployed in your cluster by default.
+You can move them to the infrastructure MachineSets that you created.
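+
+Each of the following moves uses the same basic pattern: you update the
+configuration that the component's Operator consumes so that the component's
+pods are scheduled onto nodes that carry the `infra` label. The stanza that
+you add typically resembles this minimal sketch; its exact location varies by
+component, as the following modules show:
+
+[source,yaml]
+----
+  nodeSelector:
+    node-role.kubernetes.io/infra: ""
+----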
+
+include::modules/infrastructure-moving-router.adoc[leveloffset=+2]
+
+include::modules/infrastructure-moving-registry.adoc[leveloffset=+2]
+
+include::modules/infrastructure-moving-monitoring.adoc[leveloffset=+2]
+
+include::modules/infrastructure-moving-logging.adoc[leveloffset=+2]
diff --git a/machine_management/creating-machineset.adoc b/machine_management/creating-machineset.adoc
new file mode 100644
index 0000000000..fc1ca1d20e
--- /dev/null
+++ b/machine_management/creating-machineset.adoc
@@ -0,0 +1,11 @@
+[id='creating-machineset']
+= Creating a MachineSet
+include::modules/common-attributes.adoc[]
+:context: creating-machineset
+
+toc::[]
+
+{nbsp}
+
+include::modules/machineset-creating.adoc[leveloffset=+1]
+
diff --git a/control-plane-management/deploying-machine-health-checks.adoc b/machine_management/deploying-machine-health-checks.adoc
similarity index 100%
rename from control-plane-management/deploying-machine-health-checks.adoc
rename to machine_management/deploying-machine-health-checks.adoc
diff --git a/control-plane-management/images b/machine_management/images
similarity index 100%
rename from control-plane-management/images
rename to machine_management/images
diff --git a/machine_management/manually-scaling-machineset.adoc b/machine_management/manually-scaling-machineset.adoc
new file mode 100644
index 0000000000..a10db7737c
--- /dev/null
+++ b/machine_management/manually-scaling-machineset.adoc
@@ -0,0 +1,11 @@
+[id='manually-scaling-machineset']
+= Manually scaling a MachineSet
+include::modules/common-attributes.adoc[]
+:context: manually-scaling-machineset
+
+toc::[]
+
+{nbsp}
+
+include::modules/machineset-manually-scaling.adoc[leveloffset=+1]
+
diff --git a/control-plane-management/modules b/machine_management/modules
similarity index 100%
rename from control-plane-management/modules
rename to machine_management/modules
diff --git a/modules/cli-install.adoc b/modules/cli-install.adoc
new file mode 100644
index 0000000000..b021cac7aa
--- /dev/null
+++ b/modules/cli-install.adoc
@@ -0,0 +1,21 @@
+// Module included in the following assemblies:
+//
+// * installing-aws/installing-customizations-cloud.adoc
+// * installing-aws/installing-quickly-cloud.adoc
+
+[id='cli-install-{context}']
+= Installing the {product-title} CLI
+
+You can download and install the `oc` command line interface (CLI).
+
+.Procedure
+
+. From the link:https://cloud.openshift.com/clusters/install[OpenShift start page],
+click *Download Command-line Tools*.
+
+. From the site that is displayed, click the link for your operating system,
+and then download the compressed file.
+
+. Extract the compressed file and place it in a directory that is on your
+`PATH`.
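+
+For example, on a Linux system the extraction and installation might look like
+the following sketch. The archive name is a placeholder; the file that you
+download depends on your operating system and the current version:
+
+[source,bash]
+----
+$ tar xvzf <oc_archive>.tar.gz
+$ sudo mv oc /usr/local/bin/
+$ oc version
+----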
+
diff --git a/modules/cli-logging-in-kubeadmin.adoc b/modules/cli-logging-in-kubeadmin.adoc
new file mode 100644
index 0000000000..5227fc7383
--- /dev/null
+++ b/modules/cli-logging-in-kubeadmin.adoc
@@ -0,0 +1,38 @@
+// Module included in the following assemblies:
+//
+// * installing-aws/installing-quickly-cloud.adoc
+// * installing-aws/installing-customizations-cloud.adoc
+
+[id='cli-logging-in-kubeadmin-{context}']
+= Logging in to the `oc` CLI with the `kubeadmin` credentials
+
+You can log in to the `oc` command line interface (CLI) by using the
+default `kubeadmin` user.
+
+.Prerequisites
+
+* Deploy a {product-title} cluster.
+* Install the `oc` CLI.
+
+.Procedure
+
+. Export the `kubeadmin` credentials:
++
+[source,bash]
+----
+$ export KUBECONFIG=<installation_directory>/auth/kubeconfig <1>
+----
+<1> `<installation_directory>` is the path to the directory that you stored
+the installation files in.
+
+. Log in to the `oc` CLI:
++
+[source,bash]
+----
+$ oc login
+----
++
+Specify `kubeadmin` as the user and provide the password that was displayed
+when the installation process completed. If you no longer have the password
+for the `kubeadmin` user, you can find it in the `.openshift_install.log` file
+in your installation directory.
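+
+To verify that the login succeeded, you can display the current user with the
+standard `oc whoami` subcommand:
+
+[source,bash]
+----
+$ oc whoami
+----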
\ No newline at end of file
diff --git a/modules/cluster-autoscaler-about.adoc b/modules/cluster-autoscaler-about.adoc
index fe28b093d5..b9457be1a1 100644
--- a/modules/cluster-autoscaler-about.adoc
+++ b/modules/cluster-autoscaler-about.adoc
@@ -1,6 +1,6 @@
 // Module included in the following assemblies:
 //
-// * control-plane-management/applying-autoscaling.adoc
+// * machine_management/applying-autoscaling.adoc
 
 [id='cluster-autoscaler-about-{context}']
 = About the ClusterAutoscaler
@@ -8,7 +8,8 @@
 The ClusterAutoscaler adjusts the size of an {product-title} cluster to meet
 its current deployment needs. It uses declarative, Kubernetes-style arguments
 to provide infrastructure management that does not rely on objects of a specific
-cloud provider.
+cloud provider. The ClusterAutoscaler has cluster scope and is not associated
+with a particular namespace.
 
 The ClusterAutoscaler increases the size of the cluster when there are pods
 that failed to schedule on any of the current nodes due to insufficient
diff --git a/modules/cluster-autoscaler-crd.adoc b/modules/cluster-autoscaler-cr.adoc
similarity index 95%
rename from modules/cluster-autoscaler-crd.adoc
rename to modules/cluster-autoscaler-cr.adoc
index 36579da93c..bc0b8fd639 100644
--- a/modules/cluster-autoscaler-crd.adoc
+++ b/modules/cluster-autoscaler-cr.adoc
@@ -1,8 +1,8 @@
 // Module included in the following assemblies:
 //
-// * control-plane-management/applying-autoscaling.adoc
+// * machine_management/applying-autoscaling.adoc
 
-[id='cluster-autoscaler-crd-{context}']
+[id='cluster-autoscaler-cr-{context}']
 = ClusterAutoscaler resource definition
 
 This `ClusterAutoscaler` resource definition shows the parameters and sample
diff --git a/modules/cluster-autoscaler-deploying.adoc b/modules/cluster-autoscaler-deploying.adoc
index 3a11b42d5f..9741fd6886 100644
--- a/modules/cluster-autoscaler-deploying.adoc
+++ b/modules/cluster-autoscaler-deploying.adoc
@@ -1,6 +1,6 @@
 // Module included in the following assemblies:
 //
-// * control-plane-management/applying-autoscaling.adoc
+// * machine_management/applying-autoscaling.adoc
 
 [id='cluster-autoscaler-deploying-{context}']
 = Deploying the ClusterAutoscaler
diff --git a/modules/deploying-resource.adoc b/modules/deploying-resource.adoc
index 50334bdd06..e7bedc51e4 100644
--- a/modules/deploying-resource.adoc
+++ b/modules/deploying-resource.adoc
@@ -4,7 +4,7 @@
 // :FeatureName: ClusterAutoscaler
 // Module included in the following assemblies:
 //
-// * control-plane-management/applying-autoscaling.adoc
+// * machine_management/applying-autoscaling.adoc
diff --git a/modules/infrastructure-components.adoc b/modules/infrastructure-components.adoc
new file mode 100644
index 0000000000..8e21ee4dd7
--- /dev/null
+++ b/modules/infrastructure-components.adoc
@@ -0,0 +1,18 @@
+// Module included in the following assemblies:
+//
+// * machine_management/creating-infrastructure-machinesets.adoc
+
+[id='infrastructure-components-{context}']
+= {product-title} infrastructure components
+
+The following {product-title} components are infrastructure components:
+
+* Kubernetes and {product-title} control plane services that run on masters
+* The default router
+* The container image registry
+* The cluster metrics collection, or monitoring service
+* Cluster aggregated logging
+* Service brokers
+
+Any node that runs any other container, pod, or component is a worker node that
+your subscription must cover.
\ No newline at end of file
diff --git a/modules/infrastructure-moving-logging.adoc b/modules/infrastructure-moving-logging.adoc
new file mode 100644
index 0000000000..3b5f660186
--- /dev/null
+++ b/modules/infrastructure-moving-logging.adoc
@@ -0,0 +1,12 @@
+// Module included in the following assemblies:
+//
+// * machine_management/creating-infrastructure-machinesets.adoc
+
+[id='infrastructure-moving-logging-{context}']
+= Moving the logging aggregation solution
+
+[IMPORTANT]
+====
+The log aggregation solution in {product-title} is not installed by default and
+cannot currently be deployed.
+====
\ No newline at end of file
diff --git a/modules/infrastructure-moving-monitoring.adoc b/modules/infrastructure-moving-monitoring.adoc
new file mode 100644
index 0000000000..1b43f35db1
--- /dev/null
+++ b/modules/infrastructure-moving-monitoring.adoc
@@ -0,0 +1,14 @@
+// Module included in the following assemblies:
+//
+// * machine_management/creating-infrastructure-machinesets.adoc
+
+[id='infrastructure-moving-monitoring-{context}']
+= Moving the monitoring solution
+
+[IMPORTANT]
+====
+The monitoring solution uses the Cluster Version Operator (CVO) to create the
+ConfigMap that the monitoring Operator uses to determine how to deploy its
+resources. Because it uses the CVO and users cannot modify the CVO, you cannot
+change where the Operator deploys resources.
+====
\ No newline at end of file
diff --git a/modules/infrastructure-moving-registry.adoc b/modules/infrastructure-moving-registry.adoc
new file mode 100644
index 0000000000..22021d7edb
--- /dev/null
+++ b/modules/infrastructure-moving-registry.adoc
@@ -0,0 +1,71 @@
+// Module included in the following assemblies:
+//
+// * machine_management/creating-infrastructure-machinesets.adoc
+
+[id='infrastructure-moving-registry-{context}']
+= Moving the default registry
+
+You can configure the registry Operator to deploy its pods to different nodes.
+
+.Prerequisites
+
+* Configure additional MachineSets in your {product-title} cluster.
+
+.Procedure
+
+. View the `config/instance` object:
++
+[source,bash]
+----
+$ oc get config/instance -o yaml
+----
++
+The output resembles the following text:
++
+[source,yaml]
+----
+apiVersion: imageregistry.operator.openshift.io/v1
+kind: Config
+metadata:
+  creationTimestamp: 2019-02-05T13:52:05Z
+  finalizers:
+  - imageregistry.operator.openshift.io/finalizer
+  generation: 1
+  name: instance
+  resourceVersion: "56174"
+  selfLink: /apis/imageregistry.operator.openshift.io/v1/configs/instance
+  uid: 36fd3724-294d-11e9-a524-12ffeee2931b
+spec:
+  httpSecret: d9a012ccd117b1e6616ceccb2c3bb66a5fed1b5e481623
+  logging: 2
+  managementState: Managed
+  proxy: {}
+  replicas: 1
+  requests:
+    read: {}
+    write: {}
+  storage:
+    s3:
+      bucket: image-registry-us-east-1-c92e88cad85b48ec8b312344dff03c82-392c
+      region: us-east-1
+status:
+...
+----
+
+. Edit the `config/instance` object:
++
+[source,bash]
+----
+$ oc edit config/instance
+----
+
+. Add the following lines of text to the `spec` section of the object:
++
+[source,yaml]
+----
+  nodeSelector:
+    node-role.kubernetes.io/infra: ""
+----
++
+After you save and exit, you can watch the registry pod move to the
+infrastructure node.
\ No newline at end of file
diff --git a/modules/infrastructure-moving-router.adoc b/modules/infrastructure-moving-router.adoc
new file mode 100644
index 0000000000..06a34bee48
--- /dev/null
+++ b/modules/infrastructure-moving-router.adoc
@@ -0,0 +1,105 @@
+// Module included in the following assemblies:
+//
+// * machine_management/creating-infrastructure-machinesets.adoc
+
+[id='infrastructure-moving-router-{context}']
+= Moving the router
+
+You can deploy the router Pod to a different MachineSet. By default, the Pod
+is deployed to a worker node.
+
+.Prerequisites
+
+* Configure additional MachineSets in your {product-title} cluster.
+
+.Procedure
+
+. View the `clusteringress` Custom Resource for the router Operator:
++
+[source,bash]
+----
+$ oc get clusteringress default -n openshift-ingress-operator -o yaml <1>
+----
+<1> The router is managed by an Operator that is named
+`openshift-ingress-operator`, and its Pod is in the `openshift-ingress-operator`
+project.
++
+The command output resembles the following text:
++
+[source,yaml]
+----
+apiVersion: ingress.openshift.io/v1alpha1
+kind: ClusterIngress
+metadata:
+  creationTimestamp: 2019-01-28T17:23:39Z
+  finalizers:
+  - ingress.openshift.io/default-cluster-ingress
+  generation: 2
+  name: default
+  namespace: openshift-ingress-operator
+  resourceVersion: "1294295"
+  selfLink: /apis/ingress.openshift.io/v1alpha1/namespaces/openshift-ingress-operator/clusteringresses/default
+  uid: 73ff7bfd-2321-11e9-8ff2-026a37856868
+spec:
+  defaultCertificateSecret: null
+  highAvailability:
+    type: Cloud
+  ingressDomain: apps.beta-190128-2.ocp4testing.openshiftdemos.com
+  namespaceSelector: null
+  nodePlacement:
+    nodeSelector:
+      matchLabels:
+        node-role.kubernetes.io/worker: "" <1>
+  replicas: 1
+  routeSelector: null
+  unsupportedExtensions: null
+status:
+  labelSelector: app=router,router=router-default
+  replicas: 1
+----
+<1> Note that the `nodeSelector` is configured to match the `worker` label.
+
+. Edit the `clusteringress` resource and change the `nodeSelector` to use the
+`infra` label:
++
+[source,bash]
+----
+$ oc edit clusteringress default -n openshift-ingress-operator -o yaml
+----
++
+Update the `nodeSelector` stanza to reference the `infra` label as shown:
++
+[source,yaml]
+----
+    nodeSelector:
+      matchLabels:
+        node-role.kubernetes.io/infra: ""
+...
+----
+
+. Confirm that the router pod is running on the `infra` node.
+.. View the list of router pods and note the node name of the running pod:
++
+[source,bash]
+----
+$ oc get pod -n openshift-ingress
+
+NAME                              READY   STATUS        RESTARTS   AGE   IP           NODE                           NOMINATED NODE
+router-default-86798b4b5d-bdlvd   1/1     Running       0          28s   10.130.2.4   ip-10-0-217-226.ec2.internal
+router-default-955d875f4-255g8    0/1     Terminating   0          19h   10.129.2.4   ip-10-0-148-172.ec2.internal
+----
++
+In this example, the running pod is on the `ip-10-0-217-226.ec2.internal` node.
+
+.. View the node status of the running pod:
++
+[source,bash]
+----
+$ oc get node <node_name> <1>
+
+NAME                           STATUS   ROLES          AGE   VERSION
+ip-10-0-217-226.ec2.internal   Ready    infra,worker   17h   v1.11.0+406fc897d8
+----
+<1> Specify the `<node_name>` that you obtained from the pod list.
++
+Because the role list includes `infra`, the pod is running on the correct node.
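++
+As a one-step alternative, you can ask for the wide pod listing, which includes
+the node that each pod runs on:
++
+[source,bash]
+----
+$ oc get pod -n openshift-ingress -o wide
+----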
\ No newline at end of file
diff --git a/modules/installation-aws-regions.adoc b/modules/installation-aws-regions.adoc
new file mode 100644
index 0000000000..97d2d3948c
--- /dev/null
+++ b/modules/installation-aws-regions.adoc
@@ -0,0 +1,24 @@
+// Module included in the following assemblies:
+//
+// * installing-aws/installing-aws-account.adoc
+
+[id='installation-aws-regions-{context}']
+= Supported AWS regions
+
+You can deploy an {product-title} cluster to the following regions:
+
+* ap-northeast-1 (Tokyo)
+* ap-northeast-2 (Seoul)
+* ap-south-1 (Mumbai)
+* ap-southeast-1 (Singapore)
+* ap-southeast-2 (Sydney)
+* ca-central-1 (Central)
+* eu-central-1 (Frankfurt)
+* eu-west-1 (Ireland)
+* eu-west-2 (London)
+* eu-west-3 (Paris)
+* sa-east-1 (São Paulo)
+* us-east-1 (N. Virginia)
+* us-east-2 (Ohio)
+* us-west-1 (N. California)
+* us-west-2 (Oregon)
\ No newline at end of file
diff --git a/modules/installation-aws-route53.adoc b/modules/installation-aws-route53.adoc
index 986ce6f08d..24ccde60dc 100644
--- a/modules/installation-aws-route53.adoc
+++ b/modules/installation-aws-route53.adoc
@@ -14,6 +14,15 @@
 cluster DNS resolution and name lookup for external connections to the cluster.
 
 . Identify your domain, or subdomain, and registrar. You can transfer an
 existing domain and registrar or obtain a new one through AWS or another source.
++
+[NOTE]
+====
+If you purchase a new domain through AWS, it takes time for the relevant DNS
+changes to propagate. For more information about purchasing domains
+through AWS, see
+link:https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/registrar.html[Registering Domain Names Using Amazon Route 53]
+in the AWS documentation.
+====
 
 . If you are using an existing domain and registrar, migrate its DNS to AWS. See
 link:https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/MigratingDNS.html[Making Amazon Route 53 the DNS Service for an Existing Domain]
@@ -31,8 +40,8 @@
 link:https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/GetInfoAboutHoste
 in the AWS documentation.
 
 . Update the registrar records for the AWS Route53 name servers that your domain
-uses. For example, if you registered your domain to a Route53 service in a
-different accounts, see the following topic in the AWS documentation:
+uses. For example, if you registered your domain to a Route53 service in a
+different account, see the following topic in the AWS documentation:
 link:https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/domain-name-servers-glue-records.html#domain-name-servers-glue-records-procedure[Adding or Changing Name Servers or Glue Records].
 
 . If you use a subdomain, follow your company's procedures to add its delegation
diff --git a/modules/installation-launching-installer.adoc b/modules/installation-launching-installer.adoc
index 5fca85a051..b7555d225b 100644
--- a/modules/installation-launching-installer.adoc
+++ b/modules/installation-launching-installer.adoc
@@ -15,7 +15,7 @@
 You can run the installation program only once, during initial installation.
 
 .Prerequisites
 
-* Configure an Amazon Web Services (AWS) account to host your cluster.
+* Configure an Amazon Web Services (AWS) account to host your cluster.
 * Obtain the {product-title} installer and the pull secret for your cluster.
 
 .Procedure
@@ -43,8 +43,8 @@ at the prompts:
 
 When the cluster deployment completes, directions for accessing your cluster,
 including a link to its web console and credentials for the `kubeadmin` user,
 display in your terminal.
-
+
 . Optionally, remove or disable the `AdministratorAccess` policy from the IAM
 account that you used to install the cluster.
-
+
 . Optionally, delete the installation media.
diff --git a/modules/installation-obtaining-installer.adoc b/modules/installation-obtaining-installer.adoc
index 628133dcea..115d8b56c1 100644
--- a/modules/installation-obtaining-installer.adoc
+++ b/modules/installation-obtaining-installer.adoc
@@ -17,11 +17,11 @@
 installation file on a local computer.
 
 .Procedure
 
-. Access the link:https://try.openshift.com[the OpenShift start page]. If you
+. Access link:https://try.openshift.com[the OpenShift start page]. If you
 have a Red Hat account, log in with your credentials. If you do not, create an
 account.
 
-. Download the installation program and place the file in the directory where
+. Download the installation program and place the file in the directory where
 you will store the installation configuration files.
 +
 [IMPORTANT]
diff --git a/modules/installation-overview.adoc b/modules/installation-overview.adoc
index 79bd18c457..b2e6ed14db 100644
--- a/modules/installation-overview.adoc
+++ b/modules/installation-overview.adoc
@@ -20,7 +20,7 @@ the underlying infrastructure for the cluster.
 ////
 In supported cloud environments, the installation program can also provision
 the underlying infrastructure for the cluster. If possible, use this feature to avoid
-having to provision and maintain the cluster infrastructure. In all other
+having to provision and maintain the cluster infrastructure. In all other
 environments, you use the installation program to generate the assets that you
 need to provision your cluster infrastructure.
 ////
@@ -29,7 +29,7 @@ provision your cluster infrastructure.
 ====
 In {product-title} version 4.0, the installation program can only provision
 infrastructure for clusters on AWS.
-//If you use another cloud provider or on-premises hardware, you must provide the infrastructure.
+//If you use another cloud provider or on-premises hardware, you must provide the infrastructure.
 ====
@@ -73,3 +73,10 @@
 The result of this bootstrapping process is a fully running {product-title}
 cluster. The cluster then downloads and configures remaining components needed
 for the day-to-day operation, including the creation of worker machines in
 supported environments.
+
+[discrete]
+== Installation scope
+
+The scope of the {product-title} 4 installation program is intentionally
+narrow. It is designed for simplicity and to ensure success. You can complete
+many more configuration tasks after installation completes.
diff --git a/modules/installation-provide-credentials.adoc b/modules/installation-provide-credentials.adoc
index 38ebf3b573..9d613452df 100644
--- a/modules/installation-provide-credentials.adoc
+++ b/modules/installation-provide-credentials.adoc
@@ -27,15 +27,24 @@
 and provide details about your account at the prompts:
 +
 [source,bash]
 ----
-$ aws configure
+$ aws configure --profile=<profile_name> <1>
 
-AWS Access Key ID [None]: accesskey <1>
-AWS Secret Access Key [None]: secretkey <1>
-Default region name [None]: us-west-2 <2>
+AWS Access Key ID [None]: accesskey <2>
+AWS Secret Access Key [None]: secretkey <2>
+Default region name [None]: us-west-2 <3>
 Default output format [None]:
 ----
-<1> Enter the access key ID and secret access key values for the user that
+<1> Enter a `<profile_name>` to store the credentials in.
+<2> Enter the access key ID and secret access key values for the user that you
 configured to run the installation program.
-<2> Enter the name of the region that you plan to deploy your cluster to.
+<3> Enter the name of the region that you plan to deploy your cluster to.
+
+. Save the AWS profile:
++
+[source,bash]
+----
+$ export AWS_PROFILE=<profile_name> <1>
+----
+<1> Enter the `<profile_name>` that you specified.
 
 . Optionally, create an SSH key to use to access machines in your cluster.
diff --git a/modules/installation-uninstall-aws.adoc b/modules/installation-uninstall-aws.adoc
index 5ccc0a8fb7..2ae4c2bff9 100644
--- a/modules/installation-uninstall-aws.adoc
+++ b/modules/installation-uninstall-aws.adoc
@@ -9,6 +9,17 @@
 You can remove a cluster that you installed on Amazon Web Services (AWS).
 
 .Procedure
 
+. Optionally, from the computer that you used to install the cluster, run the
+following command and record the UUID that it outputs:
++
+[source,bash]
+----
+$ oc get clusterversion -o jsonpath='{.spec.clusterID}{"\n"}' version
+----
++
+If not all of the cluster resources are removed from AWS, you can use this UUID
+to locate them and remove them.
+
 . From the computer that you used to install the cluster, run the following
 command:
 +
 [source,bash]
diff --git a/modules/machine-autoscaler-about.adoc b/modules/machine-autoscaler-about.adoc
index f236a69c73..5c973b6490 100644
--- a/modules/machine-autoscaler-about.adoc
+++ b/modules/machine-autoscaler-about.adoc
@@ -1,6 +1,6 @@
 // Module included in the following assemblies:
 //
-// * control-plane-management/applying-autoscaling.adoc
+// * machine_management/applying-autoscaling.adoc
 
 [id='machine-autoscaler-about-{context}']
 = About the MachineAutoscaler
diff --git a/modules/machine-autoscaler-crd.adoc b/modules/machine-autoscaler-cr.adoc
similarity index 84%
rename from modules/machine-autoscaler-crd.adoc
rename to modules/machine-autoscaler-cr.adoc
index 9ab3abdbb2..2d1a25ff15 100644
--- a/modules/machine-autoscaler-crd.adoc
+++ b/modules/machine-autoscaler-cr.adoc
@@ -1,8 +1,8 @@
 // Module included in the following assemblies:
 //
-// * control-plane-management/applying-autoscaling.adoc
+// * machine_management/applying-autoscaling.adoc
 
-[id='machine-autoscaler-crd-{context}']
+[id='machine-autoscaler-cr-{context}']
 = MachineAutoscaler resource definition
 
 This MachineAutoscaler resource definition shows the parameters and sample
@@ -26,7 +26,8 @@ spec:
 ----
 <1> Specify the `MachineAutoscaler` name. To make it easier to identify which
 MachineSet this MachineAutoscaler scales, specify or include the name of
-the MachineSet to scale.
+the MachineSet to scale. The MachineSet name takes the following form:
+`<clusterID>-<role>-<zone>`
 <2> Specify the minimum number of Machines of the specified type to deploy in
 the specified AWS zone.
 <3> Specify the maximum number of Machines of the specified type to deploy in
 the specified AWS zone.
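
For orientation, this PR surfaces only the callout changes for the module
above. A MachineAutoscaler that targets one MachineSet minimally sets the
scaling bounds and a scale target reference. The following is a sketch, not the
module's full sample; the API groups and versions are assumptions inferred from
the MachineSet definitions elsewhere in this PR:

[source,yaml]
----
apiVersion: autoscaling.openshift.io/v1beta1
kind: MachineAutoscaler
metadata:
  name: worker-us-west-1b
  namespace: openshift-cluster-api
spec:
  minReplicas: 1
  maxReplicas: 4
  scaleTargetRef:
    apiVersion: cluster.k8s.io/v1alpha1
    kind: MachineSet
    name: <clusterID>-worker-us-west-1b
----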
diff --git a/modules/machineset-cr.adoc b/modules/machineset-cr.adoc
new file mode 100644
index 0000000000..d4305d7421
--- /dev/null
+++ b/modules/machineset-cr.adoc
@@ -0,0 +1,93 @@
+// Module included in the following assemblies:
+//
+// * machine_management/creating-infrastructure-machinesets.adoc
+
+[id='machineset-cr-{context}']
+= Sample MachineSet Custom Resource
+
+[source,yaml]
+----
+apiVersion: cluster.k8s.io/v1alpha1
+kind: MachineSet
+metadata:
+  creationTimestamp: 2019-02-15T16:32:56Z
+  generation: 1
+  labels:
+    sigs.k8s.io/cluster-api-cluster: <clusterID> <1>
+    sigs.k8s.io/cluster-api-machine-role: <role> <2>
+    sigs.k8s.io/cluster-api-machine-type: <role> <2>
+  name: <clusterID>-<role>-<zone> <3>
+  namespace: openshift-cluster-api
+  resourceVersion: "9249"
+  selfLink: /apis/cluster.k8s.io/v1alpha1/namespaces/openshift-cluster-api/machinesets/<clusterID>-<role>-<zone> <3>
+  uid: 59ba0425-313f-11e9-861e-0a18047f0a28
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      sigs.k8s.io/cluster-api-cluster: <clusterID> <1>
+      sigs.k8s.io/cluster-api-machineset: <clusterID>-<role>-<zone> <3>
+  template:
+    metadata:
+      creationTimestamp: null
+      labels:
+        sigs.k8s.io/cluster-api-cluster: <clusterID> <1>
+        sigs.k8s.io/cluster-api-machine-role: <role> <2>
+        sigs.k8s.io/cluster-api-machine-type: <role> <2>
+        sigs.k8s.io/cluster-api-machineset: <clusterID>-<role>-<zone> <3>
+    spec: <4>
+      metadata:
+        creationTimestamp: null
+      providerSpec: <5>
+        value:
+          ami:
+            id: ami-0e2bcd33dfff9c73e <6>
+          apiVersion: awsproviderconfig.k8s.io/v1alpha1
+          blockDevices:
+          - ebs:
+              iops: 0
+              volumeSize: 120
+              volumeType: gp2
+            deviceIndex: 0
+          iamInstanceProfile:
+            id: <clusterID>-<role>-profile
+          instanceType: m4.large <6>
+          kind: AWSMachineProviderConfig
+          metadata:
+            creationTimestamp: null
+          placement:
+            availabilityZone: <zone>
+            region: <region>
+          publicIp: null
+          securityGroups:
+          - filters:
+            - name: tag:Name
+              values:
+              - testcluster2_worker_sg
+          subnet:
+            filters:
+            - name: tag:Name
+              values:
+              - <clusterID>-<role>-<zone>
+          tags:
+          - name: openshiftClusterID
+            value: 5a21bfc0-1c56-4400-81bb-7fd66644f871
+          - name: kubernetes.io/cluster/<clusterID> <1>
+            value: owned
+          userDataSecret:
+            name: <role>-user-data
+          versions:
+            kubelet: ""
+status:
+  availableReplicas: 1
+  fullyLabeledReplicas: 1
+  observedGeneration: 1
+  readyReplicas: 1
+  replicas: 1
+----
+<1> Specify the name of the cluster to apply the MachineSet to.
+<2> Specify the machine role, such as `infra`.
+<3> Specify the MachineSet name, which includes the cluster ID, the machine
+role, and the AWS availability zone.
+<4> Add labels for the Machines, such as the node role, to this section.
+<5> Do not modify the other values in this section.
+<6> Specify a valid AMI and instance type for your AWS region.
\ No newline at end of file
diff --git a/modules/machineset-creating.adoc b/modules/machineset-creating.adoc
new file mode 100644
index 0000000000..3517f2edfc
--- /dev/null
+++ b/modules/machineset-creating.adoc
@@ -0,0 +1,261 @@
+// Module included in the following assemblies:
+//
+// * machine_management/creating-infrastructure-machinesets.adoc
+
+[id='machineset-creating-{context}']
+= Creating a MachineSet
+
+You can create additional MachineSets. Because the MachineSet definition
+contains details that are specific to the AWS region that the cluster is
+deployed in, you copy an existing MachineSet from your cluster and modify it.
+
+.Prerequisites
+
+* Deploy an {product-title} cluster.
+* Install the `oc` command line and log in as a user with `cluster-admin`
+permission.
+
+.Procedure
+
+. View the current MachineSets:
++
+[source,bash]
+----
+$ oc get machinesets -n openshift-cluster-api
+
+NAME                         DESIRED   CURRENT   READY   AVAILABLE   AGE
+190125-3-worker-us-west-1b   2         2         2       2           3h
+190125-3-worker-us-west-1c   1         1         1       1           3h
+----
+
+. Export the source of a MachineSet to a text file:
++
+[source,bash]
+----
+$ oc get machineset <machineset_name> -n \
+     openshift-cluster-api -o yaml > <file_name>.yaml
+----
++
+In this command, `<machineset_name>` is the name of the current MachineSet that
+is in the AWS region you want to place your new MachineSet in, such
+as `190125-3-worker-us-west-1c`, and `<file_name>` is the name of your new
+MachineSet definition.
+
+. Update the `metadata` section of `<file_name>.yaml`:
++
+[source,yaml]
+----
+metadata:
+  creationTimestamp: 2019-02-15T16:32:56Z <1>
+  generation: 1 <1>
+  labels:
+    sigs.k8s.io/cluster-api-cluster: <clusterID> <2>
+    sigs.k8s.io/cluster-api-machine-role: <role> <3>
+    sigs.k8s.io/cluster-api-machine-type: <role> <3>
+  name: <clusterID>-<role>-<zone> <3> <4>
+  namespace: openshift-cluster-api
+  resourceVersion: "9249" <1>
+  selfLink: /apis/cluster.k8s.io/v1alpha1/namespaces/openshift-cluster-api/machinesets/<clusterID>-<role>-<zone> <1>
+  uid: 59ba0425-313f-11e9-861e-0a18047f0a28 <1>
+----
+<1> Remove this line.
+<2> Do not change the `<clusterID>`.
+<3> For each `<role>` instance, specify the role to use in the name of the new
+MachineSet, such as `infra`.
+<4> Ensure that the AWS availability zone is correct in each instance of the
+`<zone>` parameter.
++
+The `metadata` section resembles the following YAML:
++
+[source,yaml]
+----
+metadata:
+  labels:
+    sigs.k8s.io/cluster-api-cluster: <clusterID>
+    sigs.k8s.io/cluster-api-machine-role: <role>
+    sigs.k8s.io/cluster-api-machine-type: <role>
+  name: <clusterID>-<role>-<zone>
+  namespace: openshift-cluster-api
+----
+
+. In `<file_name>.yaml`, delete the `status` stanza:
++
+[source,yaml]
+----
+status:
+  availableReplicas: 1
+  fullyLabeledReplicas: 1
+  observedGeneration: 1
+  readyReplicas: 1
+  replicas: 1
+----
+
+. In `<file_name>.yaml`, update both instances of the
+`sigs.k8s.io/cluster-api-machineset` parameter values in the `spec` section to
+match the `name` that you defined in the `metadata` section:
++
+[source,yaml]
+----
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      sigs.k8s.io/cluster-api-cluster: <clusterID>
+      sigs.k8s.io/cluster-api-machineset: <clusterID>-<role>-<zone> <1>
+  template:
+    metadata:
+      creationTimestamp: null
+      labels:
+        sigs.k8s.io/cluster-api-cluster: <clusterID>
+        sigs.k8s.io/cluster-api-machine-role: <role>
+        sigs.k8s.io/cluster-api-machine-type: <role>
+        sigs.k8s.io/cluster-api-machineset: <clusterID>-<role>-<zone> <1>
+...
+----
+<1> Ensure that both of the `sigs.k8s.io/cluster-api-machineset` parameter
+values match the `name` that you defined in the `metadata` section.
+
+. In `<file_name>.yaml`, add the node label definition to the `spec` section.
+The label definition resembles the following stanza:
++
+[source,yaml]
+----
+    spec:
+      metadata:
+        labels:
+          node-role.kubernetes.io/<role>: "" <1>
+----
+<1> In this definition, `<role>` is the node label to add. For example, to
+add the `infra` label to the nodes, specify `node-role.kubernetes.io/infra`.
++
+The updated `spec` section resembles this example:
++
+[source,yaml]
+----
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      sigs.k8s.io/cluster-api-cluster: <clusterID>
+      sigs.k8s.io/cluster-api-machineset: <clusterID>-<role>-<zone>
+  template:
+    metadata:
+      creationTimestamp: null
+      labels:
+        sigs.k8s.io/cluster-api-cluster: <clusterID>
+        sigs.k8s.io/cluster-api-machine-role: <role>
+        sigs.k8s.io/cluster-api-machine-type: <role>
+        sigs.k8s.io/cluster-api-machineset: <clusterID>-<role>-<zone>
+    spec: <1>
+      metadata:
+        labels:
+          node-role.kubernetes.io/<role>: ""
+...
+----
+<1> Place the `spec` stanza here.
+
+. Optionally, modify the EC2 instance type and the storage volumes.
++
+[IMPORTANT]
+====
+Take care to modify only the parameters that describe the EC2 instance type
+and storage volumes. You must not change the other parameter values in the
+`providerSpec` section.
+==== ++ +[source,yaml] +---- +providerSpec: + value: + ami: + id: ami-0e2bcd33dfff9c73e <1> + apiVersion: awsproviderconfig.k8s.io/v1alpha1 + blockDevices: <2> + - ebs: + iops: 0 + volumeSize: 120 + volumeType: gp2 + deviceIndex: 0 + iamInstanceProfile: <3> + id: --profile + instanceType: m4.large <4> + kind: AWSMachineProviderConfig + metadata: + creationTimestamp: null + placement: <3> + availabilityZone: + region: + publicIp: null + securityGroups: + - filters: + - name: tag:Name + values: + - testcluster2_worker_sg + subnet: <3> + filters: + - name: tag:Name + values: + - -- + tags: + - name: openshiftClusterID + value: 5a21bfc0-1c56-4400-81bb-7fd66644f871 + - name: kubernetes.io/cluster/ + value: owned + userDataSecret: <3> + name: -user-data +---- +<1> You can specify a different valid AMI. +<2> You can customize the volume characteristics for the MachineSet. See the AWS +documentation. +<3> Do not modify this section. +<4> Specify a valid `instanceType` for the AMI that you specified. + +. Create the new `MachineSet`: ++ +[source,bash] +---- +$ oc create -f .yaml +---- + +. View the list of MachineSets: ++ +[source,bash] +---- +$ oc get machineset -n openshift-cluster-api + + +NAME DESIRED CURRENT READY AVAILABLE AGE +190125-3-worker-us-west-1b 2 2 2 2 4h +190125-3-worker-us-west-1c 1 1 1 1 4h +infrastructure-us-west-1b 1 1 4s +---- ++ +When the new MachineSet is available, the `DESIRED` and `CURRENT` values match. +If the MachineSet is not available, wait a few minutes and run the command again. + +. After the new MachineSet is available, check the machine status: ++ +[source,bash] +---- +$ oc get machine -n openshift-cluster-api +---- + +. View the new node: ++ +[source,bash] +---- +$ oc get node +---- ++ +The new node is the one with the lowest `AGE`. ip-10-0-128-138.us-west-1.compute.internal + +. Confirm that the new node has the label that you specified: ++ +[source,bash] +---- +$ oc get node --show-labels +---- ++ +Review the command output and confirm that `node-role.kubernetes.io/` +is in the `LABELS` list. + +.Next steps +If you need MachineSets in other availability zones, repeat this +process to create more MachineSets. \ No newline at end of file diff --git a/modules/machineset-manually-scaling.adoc b/modules/machineset-manually-scaling.adoc new file mode 100644 index 0000000000..800b860f6b --- /dev/null +++ b/modules/machineset-manually-scaling.adoc @@ -0,0 +1,51 @@ +// Module included in the following assemblies: +// +// * machine_management/manually-scale-machines.adoc + +[id='machineset-manually-scaling-{context}'] += Scaling a MachineSet manually + +If you need to add or remove an instance of a machine in a MachineSet, you can +manually scale the MachineSet. + +.Prerequisites + +* Install an {product-title} cluster and the `oc` command line. +* Log into `oc` as a user with . + +. View the MachineSets that are in the cluster: ++ +[source,bash] +---- +$ oc get machinesets -n openshift-cluster-api +---- ++ +The MachineSets are listed in the form of `-worker-`. + +. Scale the MachineSet: ++ +[source,bash] +---- +$ oc edit -n openshift-cluster-api +---- ++ +You can scale the MachineSet up or down. It takes several minutes for the new +machines to be available. ++ +[IMPORTANT] +==== +The default installation creates only one router. If you scale down your cluster, +the node that hosts the router pod might be removed. In this case, you temporarily +cannot access the web console and some other resources. 
++
+[IMPORTANT]
+====
+The default installation creates only one router. If you scale down your
+cluster, the node that hosts the router pod might be removed. In this case, you
+temporarily cannot access the web console and some other resources. After the
+router pod is redeployed to a different node, service is restored.
+
+You can check the status of the router pod by running:
+
+[source,bash]
+----
+$ oc get pod -n openshift-ingress
+----
+
+If there is no router pod, or if it is in the `ContainerCreating` state, wait
+a little longer.
+====
\ No newline at end of file
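
After any scale operation, you can watch the Machines converge on the new
replica count by reusing the same command that the MachineSet creation
procedure uses:

[source,bash]
----
$ oc get machine -n openshift-cluster-api
----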