
arch edits

This commit is contained in:
Kathryn Alexander
2019-05-22 10:08:05 -04:00
parent 45dbb0d020
commit 630b9a5a31
56 changed files with 908 additions and 791 deletions

View File

@@ -49,22 +49,14 @@ Distros: openshift-enterprise,openshift-origin,openshift-dedicated
Topics:
- Name: Product architecture
  File: architecture
- Name: Installation and update
  File: architecture-installation
- Name: The control plane
  File: control-plane
  Distros: openshift-enterprise,openshift-origin
  Distros: openshift-enterprise,openshift-origin,openshift-dedicated
- Name: Understanding OpenShift development
  File: understanding-development
- Name: Abstraction layers and topology
  File: architecture-topology
- Name: Installation and update
  File: architecture-installation
  Distros: openshift-enterprise,openshift-origin
- Name: The control plane
  File: control-plane
- Name: Operators in OpenShift Container Platform
  File: architecture-operators
  Distros: openshift-enterprise,openshift-origin
- Name: Available cluster customizations
  File: customizations
  Distros: openshift-enterprise,openshift-origin
---
Name: Administering a cluster
Dir: administering_a_cluster
@@ -110,6 +102,9 @@ Topics:
- Name: Installation configuration
  Dir: install_config
  Topics:
  - Name: Available cluster customizations
    File: customizations
    Distros: openshift-enterprise,openshift-origin
  - Name: Configuring your firewall
    File: configuring-firewall
---

View File

@@ -4,10 +4,6 @@ include::modules/common-attributes.adoc[]
:context: architecture-installation
toc::[]
include::modules/installation-options.adoc[leveloffset=+1]
include::modules/installation-overview.adoc[leveloffset=+1]
include::modules/update-service-overview.adoc[leveloffset=+1]
include::modules/node-management.adoc[leveloffset=+1]
include::modules/node-types.adoc[leveloffset=+1]
include::modules/update-service-overview.adoc[leveloffset=+1]

View File

@@ -1,7 +0,0 @@
[id="architecture-operators"]
= Operators in {product-title}
include::modules/common-attributes.adoc[]
:context: architecture-operators
toc::[]
include::modules/operators-overview.adoc[leveloffset=+1]

View File

@@ -1,7 +0,0 @@
[id="architecture-topology"]
= {product-title} abstraction layers and topology
include::modules/common-attributes.adoc[]
:context: architecture-topology
toc::[]
include::modules/abstraction-layers.adoc[leveloffset=+1]

View File

@@ -4,25 +4,14 @@ include::modules/common-attributes.adoc[]
:context: architecture
toc::[]
//[IMPORTANT]
//====
//This assembly is a temporary placeholder to port the valid information from
//the 3.11 collection and include specific changes for 4.0 as that information
//becomes available.
//====
include::modules/architecture-platform-introduction.adoc[leveloffset=+1]
include::modules/platform-introduction.adoc[leveloffset=+1]
include::modules/architecture-kubernetes-introduction.adoc[leveloffset=+2]
include::modules/kubernetes-introduction.adoc[leveloffset=+2]
include::modules/container-application-benefits.adoc[leveloffset=+2]
include::modules/platform-benefits.adoc[leveloffset=+2]
include::modules/architecture-overview.adoc[leveloffset=+1]
include::modules/architecture-components.adoc[leveloffset=+1]
include::modules/architecture-container-application-benefits.adoc[leveloffset=+2]
include::modules/architecture-platform-benefits.adoc[leveloffset=+2]
////
== User facing components
* Workloads (Deployments, Jobs, ReplicaSets, etc)
* Operator Lifecycle Manager
@@ -42,21 +31,6 @@ streams] - The imagestream API provides an abstraction over container images
that exist in registries. It allows workloads to reference an image indirectly,
retains a history of the images that have been referenced, and allows
notification when an image is updated with a new version.
////
== User interface
* Web Console
* CLI
* REST API
//[id="observability-architecture_{context}"]
//== Observability
//
//[IMPORTANT]
//====
//This section of the assembly is a placeholder for the Observability section,
//which will explain how monitoring, alerting, grafana, logging, and telemetry fit
//together.
//====
include::modules/telemetry-service-overview.adoc[leveloffset=+2]
include::modules/cluster-entitlements.adoc[leveloffset=+2]

View File

@@ -5,12 +5,11 @@ include::modules/common-attributes.adoc[]
toc::[]
include::modules/understanding-control-plane.adoc[leveloffset=+1]
include::modules/understanding-node-roles.adoc[leveloffset=+2]
include::modules/understanding-workers-masters.adoc[leveloffset=+2]
include::modules/defining-workers.adoc[leveloffset=+3]
include::modules/defining-masters.adoc[leveloffset=+3]
include::modules/understanding-operators.adoc[leveloffset=+2]
include::modules/understanding-cluster-version-operator.adoc[leveloffset=+3]
include::modules/understanding-machine-config-operator.adoc[leveloffset=+3]
include::modules/digging-into-machine-config.adoc[leveloffset=+3]
include::modules/looking-inside-nodes.adoc[leveloffset=+3]
include::modules/architecture-machine-roles.adoc[leveloffset=+2]
include::modules/operators-overview.adoc[leveloffset=+2]
include::modules/update-service-overview.adoc[leveloffset=+3]
include::modules/understanding-machine-config-operator.adoc[leveloffset=+3]

View File

@@ -1,17 +0,0 @@
// [id="architecture"]
// = {product-title} architecture
// include::modules/common-attributes.adoc[]
// :context: architecture-intro
// toc::[]
// include::modules/platform-introduction.adoc[leveloffset=+1]
// include::modules/container-application-benefits.adoc[leveloffset=+2]
// include::modules/kubernetes-introduction.adoc[leveloffset=+2]
// include::modules/platform-benefits.adoc[leveloffset=+2]
// For install docs
// include::modules/understanding-installation.adoc[leveloffset=+1]
// include::modules/running-simple-installation.adoc[leveloffset=+2]
// include::modules/running-modified-installation.adoc[leveloffset=+2]
// include::modules/following-installation.adoc[leveloffset=+2]
// include::modules/completing-installation.adoc[leveloffset=+2]
// End install docs

View File

@@ -1,45 +1,395 @@
[id="understanding-openshift-development"]
[id="understanding-development"]
= Understanding {product-title} development
include::modules/common-attributes.adoc[]
:context: container-development
:context: understanding-development
toc::[]
For many people, their first experience building and running containers is awesome.
To fully leverage the capability of containers when developing and running
enterprise-quality applications, ensure your environment is supported by tools
that allow containers to be:
Often knowing little or nothing about containers, many people have been able to build a containerized application in just a few minutes, push it to a registry to make it available to anyone they chose, and run it from any Linux system with a container runtime. If they were just running individual applications on their local laptop, they might feel like containers give them everything they need.
* Created as discrete microservices that can be connected to other
containerized, and non-containerized, services. For example, you might want to
join your application with a database or attach a monitoring application to it.
But as thrilling as the first experience can be, building and running a single container manually just makes you want more. To make containers a viable entity for developing and running enterprise-quality applications, they needed to be surrounded by tools that allowed them to be:
* Resilient, so if a server crashes or needs to go down for maintenance or to be
decommissioned, containers can start on another machine.
* Created as discrete microservices that could be connected together with other containerized, and non-containerized, services. For example, you might want to join your application with a database or have a monitoring application go with it.
* Automated to pick up code changes automatically and then start and deploy new
versions of themselves.
* Resilient, so if a server crashes or needs to go down for maintenance or to be decommissioned, containers can just start up on another node.
* Scaled up, or replicated, to have more instances serving clients as demand
increases and then spun down to fewer instances as demand declines.
* Automated to pick up code changes automatically, then spin up and deploy new versions of themselves.
* Run in different ways, depending on the type of application. For example, one
application might run once a month to produce a report and then exit. Another
application might need to run constantly and be highly available to clients.
* Scaled up (replicated) to have more instances serving clients as demand increases, then spun down to fewer instances as demand declines.
* Managed so you can watch the state of your application and react when
something goes wrong.
* Run in different ways, depending on the type of application. For example, one application may run once a month to produce a report, then exit. Another application might need to run all the time and be highly available to clients.
Containers' widespread acceptance, and the resulting requirements for tools and
methods to make them enterprise-ready, resulted in many options for them.
* Managed so you can watch the state of your application and react when something goes wrong.
The rest of this section explains options for
assets you can create when you build and deploy containerized Kubernetes
applications in {product-title}. It also describes which approaches you might
use for different kinds of applications and development requirements.
Containers' widespread acceptance, and the resulting hunger for tools and methods to make them enterprise-ready, led to an explosion of options to wrap up and manage containers. At a glance, it might be hard to figure out which approach to choose.
[id="developing-containerized-applications"]
== About developing containerized applications
So, where do you start? The rest of this section lays out the different kinds of assets you can create as someone building and deploying containerized Kubernetes applications in OpenShift. It also describes which approaches are most appropriate for different kinds of applications and development requirements.
== Developing containerized applications
There are many ways to approach application development with containers. The goal of this section is to step through one approach that begins with developing a single container to ultimately deploying that container as a mission-critical application for a large enterprise. Along the way, you will see the different kinds of tools, formats, and methods you can employ in this journey. From a high level, this path includes:
You can approach application development with containers in many ways, and
different approaches might be more appropriate for different situations. To
illustrate some of this variety, the series of approaches that is presented
starts with developing a single container and ultimately deploys that container
as a mission-critical application for a large enterprise. These approaches
show different tools, formats, and methods that you can employ with containerized
application development. This topic describes:
* Building a simple container and storing it in a registry
* Creating a Kubernetes manifest and saving it to a git repository
* Creating a Kubernetes manifest and saving it to a Git repository
* Making an Operator to share your application with others
Although we are illustrating a particular path from a simple container to an enterprise-ready application, along the way you will see options you have to incorporate different tools and methods, as well as reasons why you might want to choose those other options.
[id="building-simple-container"]
== Building a simple container
include::modules/building-simple-container.adoc[leveloffset=+1]
include::modules/choosing-container-build-tools.adoc[leveloffset=+2]
include::modules/choosing-base-image.adoc[leveloffset=+2]
include::modules/choosing-registry.adoc[leveloffset=+2]
include::modules/creating-kubernetes-manifest-openshift.adoc[leveloffset=+1]
include::modules/develop-for-operators.adoc[leveloffset=+1]
You have an idea for an application and you want to containerize it.
First, you require a tool for building a container, like buildah or docker,
and a file that describes what goes in your container, which is typically a
link:https://docs.docker.com/engine/reference/builder/[Dockerfile].
Next, you require a location to push the resulting container image so you can
pull it to run anywhere you want it to run. This location is a container
registry.
Some examples of each of these components are installed by default on most
Linux operating systems, except for the Dockerfile, which you provide yourself.
The following diagram displays the process of building and pushing an image:
.Create a simple containerized application and push it to a registry
image::create-push-app.png[Creating and pushing a containerized application]
If you use a computer that runs Red Hat Enterprise Linux (RHEL) as the operating
system, the process of creating a containerized application requires the
following steps:
. Install container build tools: RHEL contains a set of tools that includes
podman, buildah, and skopeo that you use to build and manage containers.
. Create a Dockerfile to combine base image and software: Information about
building your container goes into a file that is named `Dockerfile`. In that
file, you identify the base image you build from, the software packages you
install, and the software you copy into the container. You also identify
parameter values like network ports that you expose outside the container and
volumes that you mount inside the container. Put your Dockerfile and the
software you want to containerize in a directory on your RHEL system.
. Run buildah or docker build: Run the `buildah build-using-dockerfile` or
the `docker build` command to pull your chosen base image to the local system and
create a container image that is stored locally. You can also build containers
without a Dockerfile by using buildah.
. Tag and push to a registry: Add a tag to your new container image that
identifies the location of the registry in which you want to store and share
your container. Then push that image to the registry by running the
`podman push` or `docker push` command.
. Pull and run the image: From any system that has a container client tool,
such as podman or docker, run a command that identifies your new image.
For example, run the `podman run <image_name>` or `docker run <image_name>`
command. Here `<image_name>` is the name of your new container image, which
resembles `quay.io/myrepo/myapp:latest`. The registry might require credentials
to push and pull images.
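As a concrete sketch of those steps, the commands might look like the following. The Dockerfile contents, the application files, and the `quay.io/myrepo/myapp` repository are illustrative assumptions, not required values:

[source,terminal]
----
$ cat Dockerfile
FROM registry.access.redhat.com/ubi8/ubi <1>
RUN dnf install -y python3
COPY myapp.py /usr/local/bin/myapp.py
ENTRYPOINT ["python3", "/usr/local/bin/myapp.py"]

$ buildah build-using-dockerfile -t quay.io/myrepo/myapp:latest . <2>
$ podman login quay.io <3>
$ podman push quay.io/myrepo/myapp:latest
$ podman run quay.io/myrepo/myapp:latest <4>
----
<1> Build from a freely redistributable UBI base image.
<2> The `docker build -t` command works the same way.
<3> The registry might require credentials before you can push.
<4> Any machine with podman or docker can now pull and run the image.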
For more details on the process of building container images, pushing them to
registries, and running them, see
xref:../builds/custom-builds-buildah.adoc#custom-builds-buildah[Custom image builds with Buildah].
[id="container-build-tool-options"]
=== Container build tool options
While the Docker Container Engine and `docker` command are popular tools
for working with containers, on RHEL and many other Linux systems you can
instead choose a different set of container tools that includes podman, skopeo,
and buildah. You can still use Docker Container Engine tools to create
containers that will run in {product-title} and any other container platform.
Building and managing containers with buildah, podman, and skopeo results in
industry standard container images that include features tuned specifically
for ultimately deploying those containers in {product-title} or other Kubernetes
environments. These tools are daemonless and can be run without root privileges,
so there is less overhead in running them.
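For example, here is a minimal, hypothetical buildah session that builds an image without a Dockerfile at all. The httpd package, web content, and image name are placeholders:

[source,terminal]
----
$ container=$(buildah from registry.access.redhat.com/ubi8/ubi) <1>
$ buildah run "$container" -- dnf install -y httpd <2>
$ buildah copy "$container" index.html /var/www/html/index.html
$ buildah config --port 80 --entrypoint '["/usr/sbin/httpd", "-DFOREGROUND"]' "$container" <3>
$ buildah commit "$container" my-webserver <4>
----
<1> Start a working container from a base image.
<2> Modify the working container directly, with no Dockerfile.
<3> Record image metadata, such as exposed ports and the entrypoint.
<4> Commit the working container as a new local image.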
When you ultimately run your containers in {product-title}, you use the
link:https://cri-o.io/[CRI-O] container engine. CRI-O runs on every worker and
master machine in an {product-title} cluster, but CRI-O is not yet supported as
a standalone runtime outside of {product-title}.
[id="base-image-options"]
=== Base image options
The base image you choose to build your application on contains a set of
software that resembles a Linux system to your application. When you build your
own image, your software is placed into that file system and sees that file
system as though it were looking at its operating system. Choosing this base
image has major impact on how secure, efficient and upgradeable your container
is in the future.
Red Hat provides a new set of base images referred to as
link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux_atomic_host/7/html-single/getting_started_with_containers/index#using_red_hat_base_container_images_standard_and_minimal[Red Hat Universal Base Images] (UBI).
These images are based on Red Hat Enterprise Linux and are similar to base
images that Red Hat has offered in the past, with one major difference: they
are freely redistributable without a Red Hat subscription. As a result, you can
build your application on UBI images without having to worry about how they
are shared or the need to create different images for different environments.
These UBI images have standard, init, and minimal versions. You can also use the
link:https://access.redhat.com/documentation/en-us/red_hat_software_collections/3/html-single/using_red_hat_software_collections_container_images/index[Red Hat Software Collections]
images as a foundation for applications that rely on specific runtime
environments such as Node.js, Perl, or Python. Special versions of some of
these runtime base images are referred to as Source-to-Image (S2I) images. With
S2I images, you can insert your code into a base image environment that is ready
to run that code.
S2I images are available for you to use directly from the {product-title} web UI
by selecting *Catalog* -> *Developer Catalog*, as shown in the following figure:
.Choose S2I base images for apps that need specific runtimes
image::developer-catalog.png[{product-title} Developer Catalog]
[id="registry-options"]
=== Registry options
Container registries are where you store container images so you can share them
with others and make them available to the platform where they ultimately run.
You can select large, public container registries that offer free accounts or a
premium version that offers more storage and special features. You can also
install your own registry that can be exclusive to your organization or
selectively shared with others.
To get Red Hat images and certified partner images, you can draw from the
Red Hat Registry. The Red Hat Registry is represented by two locations:
`registry.access.redhat.com`, which is unauthenticated and deprecated, and
`registry.redhat.io`, which requires authentication. You can learn about the Red
Hat and partner images in the Red Hat Registry from the
link:https://registry.redhat.io/[Red Hat Container Catalog].
Besides listing Red Hat container images, it also shows extensive information
about the contents and quality of those images, including health scores that are
based on applied security updates.
Large, public registries include link:https://hub.docker.com/[Docker Hub] and
link:https://quay.io/[Quay.io]. The Quay.io registry is owned and managed by Red
Hat. Many of the components used in {product-title} are stored in Quay.io,
including container images and the Operators that are used to deploy
{product-title} itself. Quay.io also offers the means of storing other types of
content, including Helm Charts.
If you want your own, private container registry, {product-title} itself
includes a private container registry that is installed with {product-title}
and runs on its cluster. Red Hat also offers a private version of the Quay.io
registry called link:https://access.redhat.com/products/red-hat-quay[Red Hat Quay].
Red Hat Quay includes geo replication, Git build triggers, Clair image scanning,
and many other features.
All of the registries mentioned here can require credentials to download images
from those registries. Some of those credentials are presented on a cluster-wide
basis from {product-title}, while other credentials can be assigned to individuals.
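For example, to use an authenticated registry, you log in with your client tool before you pull or push. The registry shown here is only an example:

[source,terminal]
----
$ podman login registry.redhat.io <1>
$ podman pull registry.redhat.io/ubi8/ubi
$ podman logout registry.redhat.io
----
<1> You are prompted for the username and password for your account on that registry.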
[id="creating-kubernetes-manifest-openshift"]
== Creating a Kubernetes manifest for {product-title}
While the container image is the basic building block for a containerized
application, more information is required to manage and deploy that application
in a Kubernetes environment such as {product-title}. The typical next steps after
you create an image are to:
* Understand the different resources you work with in Kubernetes manifests
* Make some decisions about what kind of an application you are running
* Gather supporting components
* Create a manifest and store it in a Git repository so you can keep it
in a source versioning system, audit it, track it, promote and deploy it
to the next environment, roll it back to earlier versions if necessary, and
share it with others
[id="understanding-kubernetes-pods"]
=== About Kubernetes Pods and services
While the container image is the basic unit with docker, the basic units that
Kubernetes works with are called
link:https://kubernetes.io/docs/concepts/workloads/pods/pod-overview/[Pods].
Pods represent the next step in building out an application. A Pod can contain
one or more containers. The key is that the Pod is the single unit
that you deploy, scale, and manage.
Scalability and namespaces are probably the main items to consider when determining
what goes in a Pod. For ease of deployment, you might want to deploy a container
in a Pod and include its own logging and monitoring container in the Pod. Later,
when you run the Pod and need to scale up an additional instance, those other
containers are scaled up with it. For namespaces, containers in a Pod share the
same network interfaces, shared storage volumes, and resource limitations,
such as memory and CPU, which makes it easier to manage the contents of the Pod
as a single unit. Containers in a Pod can also communicate with each other by
using standard inter-process communications, such as System V semaphores or
POSIX shared memory.
While individual Pods represent a scalable unit in Kubernetes, a
link:https://kubernetes.io/docs/concepts/services-networking/service/[service]
provides a means of grouping together a set of Pods to create a complete, stable
application that can complete tasks such as load balancing. A service is also
more permanent than a Pod because the service remains available from the same
IP address until you delete it. When the service is in use, it is requested by
name and the {product-title} cluster resolves that name into the IP addresses
and ports where you can reach the Pods that compose the service.
By their nature, containerized applications are separated from the operating
systems where they run and, by extension, their users. Part of your Kubernetes
manifest describes how to expose the application to internal and external
networks by defining
link:https://kubernetes.io/docs/concepts/services-networking/network-policies/[network policies]
that allow fine-grained control over communication with your containerized
applications. To connect incoming requests for HTTP, HTTPS, and other services
from outside your cluster to services inside your cluster, you can use an
link:https://kubernetes.io/docs/concepts/services-networking/ingress/[Ingress]
resource.
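As a sketch, a minimal Ingress resource that routes outside HTTP traffic to a service might look like this. The host and service names are assumptions, and the `networking.k8s.io/v1` API shown is the current upstream version:

[source,yaml]
----
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: myapp
spec:
  rules:
  - host: myapp.example.com <1>
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: myapp <2>
            port:
              number: 8080
----
<1> External requests for this host name are routed into the cluster.
<2> Traffic is forwarded to the `myapp` Service on port 8080.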
If your container requires on-disk storage instead of database storage, which
might be provided through a service, you can add
link:https://kubernetes.io/docs/concepts/storage/volumes/[volumes]
to your manifests to make that storage available to your Pods. You can configure
the manifests to create persistent volumes (PVs) or dynamically create volumes that
are added to your Pod definitions.
After you define a group of Pods that compose your application, you can define
those Pods in
link:https://kubernetes.io/docs/concepts/workloads/controllers/deployment/[deployments]
and xref:../applications/deployments/what-deployments-are.adoc#what-deployments-are[deploymentconfigs].
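Putting these pieces together, a minimal manifest for a hypothetical `myapp` workload might define a Deployment and a Service like this:

[source,yaml]
----
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myapp
spec:
  replicas: 2 <1>
  selector:
    matchLabels:
      app: myapp
  template:
    metadata:
      labels:
        app: myapp
    spec:
      containers:
      - name: myapp
        image: quay.io/myrepo/myapp:latest
        ports:
        - containerPort: 8080
---
apiVersion: v1
kind: Service <2>
metadata:
  name: myapp
spec:
  selector:
    app: myapp
  ports:
  - port: 8080
    targetPort: 8080
----
<1> Run two replicas of the Pod for availability.
<2> The Service gives the Pods one stable name and IP address inside the cluster.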
[id="application-types"]
=== Application types
Next, consider how your application type influences how to run it.
Kubernetes defines different types of workloads that are appropriate for
different kinds of applications. To determine the appropriate workload for your
application, consider whether the application:
* Is meant to run to completion and be done. An example is an application that
starts up to produce a report and exits when the report is complete. The
application might not run again for a month. Suitable {product-title}
objects for these types of applications include
link:https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/[Jobs]
and link:https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/[CronJob] objects,
as sketched in the example after this list.
* Is expected to run continuously. For long-running applications, you can write a
xref:../applications/deployments/what-deployments-are.adoc#deployments-kube-deployments[Deployment]
or a xref:../applications/deployments/what-deployments-are.adoc#deployments-and-deploymentconfigs[DeploymentConfig].
* Is required to be highly available. If your application requires high
availability, then you want to size your deployment to have more than one
instance. A Deployment or DeploymentConfig can incorporate a
link:https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/[ReplicaSet]
for that type of application. With ReplicaSets, Pods run across multiple nodes
to make sure the application is always available, even if a worker goes down.
* Needs to run on every node. Some types of Kubernetes applications are intended
to run in the cluster itself on every master or worker node. DNS and monitoring
applications are examples of applications that need to run continuously on every
node. You can run this type of application as a
link:https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/[DaemonSet].
You can also run a DaemonSet on a subset of nodes, based on node labels.
* Requires life-cycle management. When you want to hand off your application so
that others can use it, consider creating an
link:https://coreos.com/operators/[Operator]. Operators let you build in
intelligence so that the application can handle tasks like backups and upgrades automatically.
Coupled with the Operator Lifecycle Manager (OLM), cluster managers can expose
Operators to selected namespaces so that users in the cluster can run them.
* Has identity or numbering requirements. For example, you might be
required to run exactly three instances of the application and to name the
instances `0`, `1`, and `2`. A
link:https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/[StatefulSet]
is suitable for this application. StatefulSets are most useful for applications
that require independent storage, such as databases and ZooKeeper clusters.
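For example, the run-to-completion case from the first item in this list might be expressed as a CronJob that produces a report on the first day of each month. The image name is a placeholder, and the `batch/v1` API version shown is the current upstream one:

[source,yaml]
----
apiVersion: batch/v1
kind: CronJob
metadata:
  name: monthly-report
spec:
  schedule: "0 0 1 * *" <1>
  jobTemplate:
    spec:
      template:
        spec:
          containers:
          - name: report
            image: quay.io/myrepo/report:latest
          restartPolicy: OnFailure <2>
----
<1> Run at midnight on the first day of every month.
<2> Restart the container only if the report run fails.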
[id="supporting-components"]
=== Available supporting components
The application you write might need supporting components, like a database or
a logging component. To fulfill that need, you might be able to obtain the
required component from the following catalogs that are available in the
{product-title} web console:
* OperatorHub, which is available in each {product-title} {product-version}
cluster. The OperatorHub makes Operators available from Red Hat,
certified Red Hat partners, and community members to the cluster operator. The
cluster operator can make those Operators available in all or selected
namespaces in the cluster, so developers can launch them and configure them
with their applications.
* Service Catalog, which offers alternatives to Operators. While deploying
Operators is the preferred method of getting packaged
applications in {product-title}, there are some reasons why you might
want to use the Service Catalog to get supporting applications for your own
application. You might want to use the Service Catalog if you are an existing
{product-title} 3 customer and are
invested in Service Catalog applications or if you already have a Cloud Foundry
environment from which you are interested in consuming brokers from other ecosystems.
* Templates, which are useful for a one-off type of application, where the
lifecycle of a component is not important after it is installed. A template provides an easy
way to get started developing a Kubernetes application with minimal overhead.
A template can be a list of resource definitions, which could be deployments,
services, routes, or other objects. If you want to change names or resources,
you can set these values as parameters in the template.
The Template Service Broker Operator is a service broker that you can use to
instantiate your own templates. You can also install templates directly from the
command line.
You can configure the supporting Operators, Service Catalog applications, and
templates to the specific needs of your development team and then make them
available in the namespaces in which your developers work. Many people add
shared templates to the `openshift` namespace because it is accessible from all
other namespaces.
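A minimal template might look like the following sketch; the object list and the `APP_NAME` parameter are illustrative only:

[source,yaml]
----
apiVersion: template.openshift.io/v1
kind: Template
metadata:
  name: myapp-template
parameters: <1>
- name: APP_NAME
  description: Name applied to all of the objects in this template
  value: myapp
objects:
- apiVersion: v1
  kind: Service
  metadata:
    name: ${APP_NAME}
  spec:
    selector:
      app: ${APP_NAME}
    ports:
    - port: 8080
----
<1> Parameters let users substitute their own names and resource values.

You can then instantiate the template from the command line, for example with `oc process -f myapp-template.yaml -p APP_NAME=demo | oc apply -f -`.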
[id="applying-manifest"]
=== Applying the manifest
Kubernetes manifests let you create a more complete picture of the components
that make up your Kubernetes applications. You write these manifests as YAML
files and deploy them by applying them to the cluster, for example, by running
the `oc apply` command.
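For example, assuming your manifests are saved in a local `manifests/` directory:

[source,terminal]
----
$ oc apply -f manifests/ <1>
$ oc get deployment,service -l app=myapp <2>
----
<1> Creates the resources, or updates them if they already exist.
<2> Verify the objects that the manifests created.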
[id="manifest-next-steps"]
=== Next steps
At this point, consider ways to automate your container development process.
Ideally, you have some sort of CI pipeline that builds the images and pushes
them to a registry. In particular, a GitOps pipeline integrates your container
development with the Git repositories that you use to store the software that
is required to build your applications.
The workflow to this point might look like this:
* Day 1: You write some YAML. You then run the `oc apply` command to apply that
YAML to the cluster and test that it works.
* Day 2: You put your YAML container configuration file into your own Git
repository. From there, people who want to install that app, or help you improve
it, can pull down the YAML and apply it to their cluster to run the app.
* Day 3: Consider writing an Operator for your application.
[id="develop-for-operators"]
== Develop for Operators
Packaging and deploying your application as an Operator might be preferred
if you intend to make your application available for others to run. As noted earlier,
Operators add a lifecycle component to your application that acknowledges that
the job of running an application is not complete as soon as it is installed.
When you create an application as an Operator, you can build in your own
knowledge of how to run and maintain the application. You can build in features
for upgrading the application, backing it up, scaling it, or keeping track of
its state. If you configure the application correctly, maintenance tasks,
like updating the Operator, can happen automatically and invisibly to the
Operator's users.
An example of a useful Operator is one that is set up to automatically back up
data at particular times. Having an Operator manage an application's backup at
set times can save a system administrator from remembering to do it.
Any application maintenance that has traditionally been completed manually,
like backing up data or rotating certificates, can be completed automatically
with an Operator.

Binary files not shown. Several images changed in this commit: three were updated (72→76 KiB, 70→74 KiB, and 46→48 KiB), four were removed (1.9 MiB, 1.7 MiB, 2.6 MiB, and 90 KiB), and one was added (97 KiB).

View File

@@ -6,13 +6,17 @@ toc::[]
You complete most of the cluster configuration and customization after you
deploy your {product-title} cluster. By default, a number of
_configuration resources_ and _Custom Resources_ are available.
_configuration resources_
//and _Custom Resources_
are available.
You modify the configuration resources to define the major components of the
cluster, such as the networking configuration and the identity provider.
////
You can also deploy Custom Resources (CRs) that are based on a number of
Custom Resource Definitions (CRDs) that are deployed on your cluster.
////
[id="configuration-resources_{context}"]
== Configuration resources
@@ -64,11 +68,12 @@ Custom Resource Definitions (CRDs) that are deployed on your cluster.
|Configure default proxies.
|Scheduler
|Configure the scheduler.
|Configure the scheduler.
|===
////
[id="default-crds_{context}"]
== Custom resources
@@ -424,3 +429,4 @@ new values. If it is deleted, it recreates automatically.
|Namespaced
|
|===
////

View File

@@ -1,25 +0,0 @@
// Module included in the following assemblies:
//
// * architecture/architecture.adoc
[id="abstraction-layers_{context}"]
= {product-title} abstraction layers
The container service provides the abstraction for packaging and creating
Linux-based, lightweight container images. Kubernetes provides the
cluster management and orchestrates containers on multiple hosts.
{product-title} adds:
- Source code management, builds, and deployments for developers
- Managing and promoting images at scale as they flow through your system
- Application management at scale
- Team and user tracking for organizing a large developer organization
- Networking infrastructure that supports the cluster
ifdef::openshift-enterprise,openshift-origin[]
.{product-title} Architecture Overview
image::../images/architecture_overview.png[{product-title} Architecture Overview]
endif::[]
The cluster uses a combination of master and worker nodes.

View File

@@ -1,24 +0,0 @@
// Module included in the following assemblies:
//
// * architecture/architecture.adoc
[id="architecture-components_{context}"]
= Components
{product-title} {product-version} consists of a number of key components making up the product
stack.
== Infrastructure
* OpenShift API server
* Kubernetes API server
* Kubernetes controller manager
* Kubernetes nodes/kubelet
* CRI-O
* Red Hat CoreOS
* Infrastructure Operators
* Networking (SDN/Router/DNS)
* Storage
* Monitoring + Telemetry
* Security
* Authorization/Authentication/OAuth
* Logging

View File

@@ -0,0 +1,54 @@
// Module included in the following assemblies:
//
// * architecture/architecture.adoc
[id="architecture-container-application-benefits_{context}"]
= The benefits of containerized applications
Using containerized applications offers many advantages over using traditional
deployment methods. Where applications were once expected to be installed on
operating systems that included all their dependencies, containers let an
application carry their dependencies with them. Creating containerized
applications offers many benefits.
[id="operating-system-benefits_{context}"]
== Operating system benefits
Containers use small, dedicated Linux operating systems without a kernel.
Their file system, networking, cgroups, process tables, and namespaces are
separate from the host Linux system, but the containers can integrate with the
hosts seamlessly when necessary. Being based on Linux allows containers to use
all the advantages that come with the open source development model of rapid
innovation.
Because each container uses a dedicated operating system, you can deploy
applications that require conflicting software dependencies on the same host.
Each container carries its own dependent software and manages its own
interfaces, such as networking and file systems, so applications never need to
compete for those assets.
[id="deployment-scaling-benefits_{context}"]
== Deployment and scaling benefits
If you employ rolling upgrades between major releases of your application, you
can continuously improve your applications without downtime and still maintain
compatibility with the current release.
You can also deploy and test a new version of an application alongside the
existing version. Deploy the new application version in addition to the current
version. If the container passes your tests, simply deploy more new containers
and remove the old ones.
Since all the software dependencies for an application are resolved within the
container itself, you can use a generic operating system on each host in your
data center. You do not need to configure a specific operating system for each
application host. When your data center needs more capacity, you can deploy
another generic host system.
Similarly, scaling containerized applications is simple. {product-title} offers
a simple, standard way of scaling any containerized service. For example, if you
build applications as a set of microservices rather than large, monolithic
applications, you can scale the individual microservices to meet
demand. This capability allows you to scale only the required services instead
of the entire application, which can allow you to meet application demands
while using minimal resources.
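For example, a single hypothetical microservice can be scaled on its own, manually or automatically, without touching the rest of the application:

[source,terminal]
----
$ oc scale deployment/myapp-frontend --replicas=5 <1>
$ oc autoscale deployment/myapp-frontend --min=2 --max=10 --cpu-percent=75 <2>
----
<1> Manually set the number of running instances of one microservice.
<2> Or create a HorizontalPodAutoscaler that adjusts the instance count with CPU load.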

View File

@@ -0,0 +1,33 @@
// Module included in the following assemblies:
//
// * architecture/architecture.adoc
[id="architecture-kubernetes-introduction_{context}"]
= About Kubernetes
Although container images and the containers that run from them are the
primary building blocks for modern application development, to run them at scale
requires a reliable and flexible distribution system. Kubernetes is the
de facto standard for orchestrating containers.
Kubernetes is an open source container orchestration engine for automating
deployment, scaling, and management of containerized applications. The general
concept of Kubernetes is fairly simple:
* Start with one or more worker nodes to run the container workloads.
* Manage the deployment of those workloads from one or more master nodes.
* Wrap containers in a deployment unit called a Pod. Using Pods provides extra
metadata with the container and offers the ability to group several containers
in a single deployment entity.
* Create special kinds of assets. For example, services are represented by a
set of Pods and a policy that defines how they are accessed. This policy
allows containers to connect to the services that they need even if they do not
have the specific IP addresses for the services. Replication controllers are
another special asset that indicates how many Pod replicas are required to run
at a time. You can use this capability to automatically scale your application
to adapt to its current demand.
In only a few years, Kubernetes has seen massive cloud and on-premise adoption.
The open source development model allows many people to extend Kubernetes
by implementing different technologies for components such as networking,
storage, and authentication.

View File

@@ -0,0 +1,92 @@
// Module included in the following assemblies:
//
// * architecture/architecture.adoc
[id="architecture-machine-roles_{context}"]
= Machine roles in {product-title}
{product-title} assigns hosts different roles. These roles define the function
of the machine within the cluster. The cluster contains definitions for the
standard master and worker role types.
[NOTE]
====
The cluster also contains the definition for the bootstrap role. Because the
bootstrap machine is used only during cluster installation, its function is
explained in the cluster installation documentation.
====
[id="defining-workers_{context}"]
== Cluster workers
In a Kubernetes cluster, the worker nodes are where the actual workloads
requested by Kubernetes users run and are managed. The worker nodes advertise
their capacity and the scheduler, which is part of the master services,
determines on which nodes to start containers and Pods. Important services run
on each worker node, including CRI-O, which is the container engine, Kubelet,
which is the service that accepts and fulfills requests for running and
stopping container workloads, and a service proxy, which manages communication
for pods across workers.
In {product-title}, MachineSets control the worker machines. Machines with
the worker role drive compute workloads that are governed by a specific machine
pool that autoscales them. Because {product-title} has the capacity to support
multiple machine types, the worker machines are classed as _compute_ machines.
In this release, the terms "worker machine" and "compute machine" are
used interchangeably because the only default type of compute machine
is the worker machine. In future versions of {product-title}, different types
of compute machines, such as infrastructure machines, might be used by default.
[id="defining-masters_{context}"]
== Cluster masters
In a Kubernetes cluster, the master nodes run services that are required to
control the Kubernetes cluster. In {product-title}, the master machines are
the control plane. They contain more
than just the Kubernetes services for managing the {product-title} cluster.
Because all of the machines with the control plane role are master machines,
the terms "master" and "control plane" are used interchangeably to describe
them. Instead of being grouped into a
MachineSet, master machines are defined by a series of standalone machine API
resources. Extra controls apply to master machines to prevent you from deleting
all master machines and breaking your cluster.
Services that fall under the Kubernetes category on the master include the
API server, etcd, controller manager server, and HAProxy services.
.Kubernetes services that run on the control plane
[options="header"]
|===
|Component |Description
|API Server
|The Kubernetes API server validates and configures the data for Pods, Services,
and replication controllers. It also provides a focal point for a cluster's shared
state.
|etcd
|etcd stores the persistent master state while other components watch etcd for
changes to bring themselves into the specified state.
//etcd can be optionally configured for high availability, typically deployed with 2n+1 peer services.
|Controller Manager Server
|The Controller Manager Server watches etcd for changes to objects such as
replication, namespace, and serviceaccount controller objects, and then uses the
API to enforce the specified state. Several such processes create a cluster with
one active leader at a time.
|===
Some of these services on the master machines run as systemd services, while
others run as static Pods.
Systemd services are appropriate for services that you need to always come up on
that particular system shortly after it starts. For master machines, those
include sshd, which allows remote login. They also include services such as:
* The CRI-O container engine (crio), which runs and
manages the containers. {product-title} {product-version} uses CRI-O instead of
the Docker Container Engine.
* Kubelet (kubelet), which accepts requests for managing containers on the
machine from master services.
CRI-O and Kubelet must run directly on the host as systemd services because
they need to be running before you can run other containers.
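For example, on a master machine you could confirm this split yourself. These commands are illustrative and assume direct access to the node:

[source,terminal]
----
$ systemctl status crio kubelet <1>
$ crictl pods --name etcd <2>
----
<1> CRI-O and the Kubelet run as systemd services on the host.
<2> Control plane components such as etcd run as static Pods under CRI-O.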

View File

@@ -1,80 +0,0 @@
// Module included in the following assemblies:
//
// * architecture/architecture.adoc
[id="architecture-overview_{context}"]
= Changes to {product-title} {product-version}
With {product-title} {product-version}, the core story remains unchanged: {product-title} offers
your developers a set of tools to evolve their applications under operational oversight
and using Kubernetes to provide application infrastructure. The key change to {product-title} {product-version} is
that the infrastructure and its management are flexible, automated, and self-managing.
A major difference between {product-title} 3 and {product-title} {product-version} is that {product-title} {product-version} uses Operators
as both the fundamental unit of the product and an option for easily deploying
and managing utilities that your apps use.
{product-title} {product-version} runs on top of a Kubernetes cluster, with data about the
objects stored in etcd, a reliable clustered key-value store. The cluster is
enhanced with standard components that are required to run your cluster, including
network, Ingress, logging, and monitoring, that run as Operators to increase the
ease and automation of installation, scaling, and maintenance.
////
The core services include:
* Operators, which run the core {product-title} services.
* REST APIs, which expose each of the core objects:
** Containers and images, which are the building blocks for deploying your
applications.
** Pods and services, which containers use to communicate with each other and
proxy connections.
** Projects and users, which provide the space and means for communities to
organize and manage their content together.
** Builds and imagestreams allow you to
build working images and react to new images.
** Deployments, which expand support for the software development and deployment
lifecycle.
** Ingress and routes, which announce your service to the world.
* Controllers, which read those REST APIs, apply changes to other objects, and
report status or write back to the object.
////
{product-title} offers a catalog of supporting application infrastructure that
includes:
* Operators, which expose APIs that automate the complete component lifecycle
and include components like databases
* Service bindings, which consume services that run outside the cluster
* Templates, which are simple instant examples
Users make calls to the REST API to change the state of the system. Controllers
use the REST API to read the user's desired state and then try to bring the
other parts of the system into sync. For example, when you request a build, the
REST APIs create a `build` object. The build controller sees that a new build has been created, and
runs a process on the cluster to perform that build. When the build completes,
the controller updates the build object via the REST API and the user sees that
their build is complete.
The controller pattern means that much of the functionality in {product-title}
is extensible. The way that builds are run and launched can be customized
independently of how images are managed, or how deployments happen. The controllers
perform the "business logic" of the system, taking user actions and
transforming them into reality. By customizing those controllers or replacing
them with your own logic, you can implement different behaviors. From a system
administration perspective, this also means that you can use the API to script common
administrative actions on a repeating schedule. Those scripts are also
controllers that watch for changes and take action. {product-title} makes the
ability to customize the cluster in this way a first-class behavior.
To make this possible, controllers use a reliable stream of changes to the
system to sync their view of the system with what users are doing. This event
stream pushes changes from etcd to the REST API and then to the controllers as
soon as changes occur so changes can efficiently ripple through the system.
However, because failures can occur at any time, the controllers
must also be able to get the latest state of the system at start up and confirm
that everything is in the right state. This resynchronization is important
because it means that even if something goes wrong, you can
restart the affected components, and the system confirms its status before it
continues. Because the controllers can always bring the system into sync, the
system eventually converges to your intent.

View File

@@ -0,0 +1,151 @@
// Module included in the following assemblies:
//
// * architecture/architecture.adoc
[id="architecture-platform-benefits_{context}"]
= {product-title} overview
////
Red Hat was one of the early contributors of Kubernetes and quickly integrated
it as the centerpiece of its {product-title} product line. Today, Red Hat
continues as one of the largest contributors to Kubernetes across a wide range
of technology areas.
////
{product-title} provides enterprise-ready enhancements to Kubernetes, including
the following enhancements:
* Hybrid cloud deployments. You can deploy {product-title} clusters to a variety
of public cloud platforms or in your data center.
* Integrated Red Hat technology. Major components in {product-title} come from
Red Hat Enterprise Linux and related Red Hat technologies. {product-title}
benefits from the intense testing and certification initiatives for Red Hat's
enterprise-quality software.
* Open source development model. Development is completed in the open, and the
source code is available from public software repositories. This open
collaboration fosters rapid innovation and development.
Although Kubernetes excels at managing your applications, it does not specify
or manage platform-level requirements or deployment processes. Powerful and
flexible platform management tools and processes are important benefits that
{product-title} {product-version} offers. The following sections describe some
unique features and benefits of {product-title}.
[id="architecture-custom-os_{context}"]
== Custom operating system
{product-title} uses {op-system-first}, a new container-oriented operating
system that combines some of the best features and functions of the CoreOS and
Red Hat Atomic Host operating systems. {op-system} is specifically designed for
running containerized applications from {product-title} and works with new tools
to provide fast installation, Operator-based management, and simplified upgrades.
{op-system} includes:
* Ignition, which is a first-boot system configuration tool for initially bringing up and
configuring {product-title} nodes.
In {product-title} {product-version}, you must use {op-system} for all control
plane machines, but you can use Red Hat Enterprise Linux (RHEL) as the operating
system for compute, or worker, machines. If you choose to use RHEL workers, you
must perform more system maintenance than if you use {op-system} for all of the
cluster machines.
[id="architecture-platform-management_{context}"]
== Simplified installation and update process
With {product-title} {product-version}, if you have an account with the right
permissions, you can deploy a production cluster in supported clouds by running
a single command and providing a few values. You can also customize your cloud
installation or install your cluster in your data center if you use a supported
platform.
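For example, on a supported cloud the installation might be as simple as the following sketch; the installer prompts for the few values it needs:

[source,terminal]
----
$ openshift-install create install-config <1>
$ openshift-install create cluster <2>
----
<1> Optional: generate and customize the installation configuration first.
<2> Deploy the cluster; the installer prompts for any values it still needs.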
For clusters that use {op-system} for all machines, updating, or
upgrading, {product-title} is a simple, highly-automated process. Because
{product-title} completely controls the systems and services that run on each
machine, including the operating system itself, from a central control plane,
upgrades are designed to become automatic events. If your cluster contains
RHEL worker machines, the control plane benefits from the streamlined update
process, but you must perform more tasks to upgrade the RHEL machines.
[id="architecture-key-features_{context}"]
== Other key features
Operators are both the fundamental unit of the {product-title} {product-version}
code base and a convenient way to deploy applications and software components
for your applications to use. By using Operators as the platform foundation,
{product-title} replaces manual upgrades of operating
systems and control plane applications. {product-title} Operators such as the
Cluster Version Operator and Machine Config Operator allow simplified,
cluster-wide management of those critical components.
Operator Lifecycle Manager (OLM) and the OperatorHub provide facilities for
storing and distributing Operators to people developing and deploying applications.
CRI-O Container Engine is the streamlined container engine that is developed in
tandem with Kubernetes releases and provides facilities for running, stopping,
and restarting containers. It fully replaces the Docker Container Engine in
{product-title} {product-version}.
The Red Hat Quay Container Registry is a Quay.io container registry that serves
most of the container images and Operators to {product-title} clusters.
Quay.io is a public registry version of Red Hat Quay that stores millions of images
and tags.
Other enhancements to Kubernetes in {product-title} include improvements in
software defined networking (SDN), authentication, log aggregation, monitoring,
and routing. {product-title} also offers a comprehensive web console and the
custom OpenShift CLI (`oc`) interface.
////
{product-title} includes the following infrastructure components:
* OpenShift API server
* Kubernetes API server
* Kubernetes controller manager
* Kubernetes nodes/kubelet
* CRI-O
* {op-system}
* Infrastructure Operators
* Networking (SDN/Router/DNS)
* Storage
* Monitoring
* Telemetry
* Security
* Authorization/Authentication/Oauth
* Logging
It also offers the following user interfaces:
* Web Console
* OpenShift CLI (`oc`)
* REST API
////
[id="architecture-overview-image_{context}"]
== {product-title} lifecycle
The following figure illustrates the basic {product-title} lifecycle:
* Creating an {product-title} cluster
* Managing the cluster
* Developing and deploying applications
* Scaling up applications
.High level {product-title} overview
image::product-workflow-overview.png[High-level {product-title} flow]
[id="architecture-3-4_{context}"]
== {product-title} 3 and 4
With {product-title} {product-version}, the core story remains unchanged:
{product-title} offers
your developers a set of tools to evolve their applications under operational oversight
and using Kubernetes to provide application infrastructure. The key change to
{product-title} {product-version} is
that the infrastructure and its management are flexible, automated, and self-managing.
A major difference between {product-title} 3 and {product-title} {product-version}
is that {product-title} {product-version} uses Operators
as both the fundamental unit of the product and an option for easily deploying
and managing utilities that your apps use.

View File

@@ -1,12 +1,19 @@
// Module included in the following assemblies:
// * architecture/architecture.adoc
[id="openshift-introduction_{context}"]
[id="architecture-platform-introduction_{context}"]
= Introduction to {product-title}
{product-title} is a platform for developing and running containerized applications. It is designed to allow both applications and the data centers that support them to expand from just a few machines and applications to thousands of machines serving millions of clients.
{product-title} is a platform for developing and running containerized
applications. It is designed to allow applications and the data centers
that support them to expand from just a few machines and applications to
thousands of machines that serve millions of clients.
With its foundation in Kubernetes, {product-title} incorporates the same technology that serves as the engine for massive telecommunications, streaming video, gaming, banking and other applications. Its implementation in open Red Hat technologies lets you extend your containerized applications beyond a single cloud to on-premise and multi-cloud environments.
With its foundation in Kubernetes, {product-title} incorporates the same
technology that serves as the engine for massive telecommunications, streaming
video, gaming, banking and other applications. Its implementation in open
Red Hat technologies lets you extend your containerized applications beyond a
single cloud to on-premise and multi-cloud environments.
// The architecture presented here is meant to give you insights into how {product-title} works. It does this by stepping you through the process of installing an {product-title} cluster, managing the cluster, and developing and deploying applications on it. Along the way, this architecture describes:

View File

@@ -1,22 +0,0 @@
// Module included in the following assemblies:
//
// * architecture/understanding-openshift-development.adoc
[id="building-simple-container_{context}"]
= Building a simple container
You have an idea for an application and you want to containerize it. All you must have to get started is a tool for building a container (buildah or docker) and a file that describes what will go into your container (typically, a https://docs.docker.com/engine/reference/builder/[Dockerfile]). Next you will want a place to push the resulting container image (a container registry) so you can pull it to run anywhere you want it to run.
Examples of each of those components come with most Linux systems, except for the Dockerfile, which you provide yourself. The following diagram shows what the process of building and pushing an image entails:
.Create a simple containerized application and push it to a registry
image::create-push-app.png[Creating and pushing a containerized application]
Using a Red Hat Enterprise Linux (RHEL) system as an example, here's what the process of creating a containerized application might look like:
* Install container build tools: RHEL contains a set of tools (podman, buildah, skopeo, and others) for building and managing containers. In particular, the buildah build-using-dockerfile command can replace the common docker build command for turning your Dockerfile and software into a container image. You can also build a container without a Dockerfile by using buildah.
* Create a Dockerfile to combine base image and software: Information about building your container goes into a file named Dockerfile. In that file, you identify the base image you build from, the software packages you install, and the software you copy into the container. You also identify settings such as the network ports that you expose outside the container and the volumes that you mount inside the container. Put your Dockerfile and the software you want to containerize in a directory on your RHEL system.
* Run buildah or docker build: Running buildah or docker build pulls your chosen base image to the local system and creates a container image that is stored locally.
* Tag and push to a registry: Add a tag to your new container image that identifies the location of the registry in which you want to store and share your container. Then push that image to the registry with podman push or docker push.
* Pull and run the image: From any system that has a container client tool, such as podman or docker, run a command that identifies your new image. For example, run podman run or docker run, followed by the name of your new container image (for example, quay.io/myrepo/myapp:latest). The registry might require credentials to push and pull images.
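For example, here is a minimal sketch of that flow. It assumes a prebuilt myapp binary in the current directory and a myrepo/myapp repository that you control on Quay.io; substitute your own image names and registry:
----
$ cat > Dockerfile <<'EOF'
FROM registry.access.redhat.com/ubi8/ubi-minimal
COPY myapp /usr/local/bin/myapp
EXPOSE 8080
CMD ["/usr/local/bin/myapp"]
EOF
$ buildah bud -t quay.io/myrepo/myapp:latest .
$ podman login quay.io
$ podman push quay.io/myrepo/myapp:latest
----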
For more details on the process of building container images, pushing them to registries, and running them, see https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux_atomic_host/7/html-single/managing_containers/index%23using_podman_to_work_with_containers[Using podman to work with containers] and https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux_atomic_host/7/html-single/managing_containers/index%23building_container_images_with_buildah[Building container images with buildah]. Along the way, you need to make some decisions about the tools and features that you use. Some of those choices are detailed here.

View File

@@ -1,16 +0,0 @@
// Module included in the following assemblies:
//
// * architecture/understanding-openshift-development.adoc
[id="choosing-base-image_{context}"]
= Choosing a base image
The base image you choose to build your application on contains a set of software that looks like a Linux system to your application. When you build your own image, your software is placed into that file system and sees that file system as though it were looking at its operating system. The base image you choose has a major impact on how secure, efficient, and upgradeable your container is in the future.
Red Hat provides a new set of base images referred to as https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux_atomic_host/7/html-single/getting_started_with_containers/index%23using_red_hat_base_container_images_standard_and_minimal[Red Hat Universal Base Images] (UBI). These RHEL-based images are similar to the base images that Red Hat has offered in the past, with one major difference: they are freely redistributable without a Red Hat subscription. As a result, you can build your application on UBI images without worrying about how they are shared, and you do not need to create different images for different environments.
These UBI images have standard, init, and minimal versions. There is also a set of https://access.redhat.com/documentation/en-us/red_hat_software_collections/3/html-single/using_red_hat_software_collections_container_images/index[Red Hat Software Collections] images that you can use as a foundation for applications that rely on specific runtime environments, such as Node.js, Perl, or Python. Special versions of some of these runtime base images are referred to as Source-to-Image (S2I) images. With S2I images, you can insert your code into a base image environment that is ready to run that code.
S2I images are available for you to use directly from the {product-title} web UI by selecting Catalog → Developer Catalog, as shown in the following figure:
.Choose S2I base images for apps that need specific runtimes
image::developer-catalog.png[{product-title} Developer Catalog]

View File

@@ -1,12 +0,0 @@
// Module included in the following assemblies:
//
// * architecture/understanding-openshift-development.adoc
[id="choosing-container-build-tools_{context}"]
= Choosing container build tools
When containers first took hold, most people used the Docker Container Engine and the docker command to work with containers. You can still use those tools to create containers that run in {product-title} and any other container platform. However, with RHEL and many other Linux systems, you can instead choose a different set of container tools that includes podman, skopeo, and buildah.
Building and managing containers with buildah, podman, and skopeo results in industry-standard container images that include features tuned specifically for deploying those containers in {product-title} or other Kubernetes environments. These tools are daemonless and can run without root privileges, so there is less overhead in running them.
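As a rough sketch of how the tools divide the work (the image names below are placeholders): buildah builds the image, podman runs it without a daemon, and skopeo inspects or copies images in remote registries:
----
$ buildah bud -t myapp:latest .
$ podman run --rm -p 8080:8080 localhost/myapp:latest
$ skopeo inspect docker://quay.io/myrepo/myapp:latest
----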
When you ultimately run your containers in {product-title}, the https://cri-o.io/[CRI-O] container engine has replaced Docker as the container engine. CRI-O runs on every worker and master node in an {product-title} cluster, but CRI-O is not yet supported as a standalone runtime outside of {product-title}.

View File

@@ -1,16 +0,0 @@
// Module included in the following assemblies:
//
// * architecture/understanding-openshift-development.adoc
[id="choosing-registry_{context}"]
= Choosing a registry
Container registries are where you store container images so you can share them with others and make them available to the platform where they ultimately run. There are large, public container registries that offer free accounts, as well as premium versions that offer more storage and special features. You can also install your own registry, which can be exclusive to your organization or selectively shared with others.
To get Red Hat images and certified partner images, you can draw from the Red Hat Registry. The Red Hat Registry is represented by two locations: registry.access.redhat.com (unauthenticated and deprecated) and registry.redhat.io (requires authentication). You can learn about the Red Hat and partner images in the Red Hat Registry from the https://access.redhat.com/containers/[Red Hat Container Catalog]. Besides listing Red Hat container images, it also shows extensive information about the contents and quality of those images, including health scores based on applied security updates.
Large, public registries include https://hub.docker.com/[Docker Hub] and https://quay.io/[Quay.io]. The Quay.io registry is owned and managed by Red Hat. Many of the components used in {product-title} are stored in Quay.io, including container images and Operators used to deploy {product-title} itself. Quay.io also offers the means of storing other types of content, including Helm Charts.
If you want your own, private container registry, {product-title} itself includes a private container registry that is installed with {product-title} and runs on its cluster. Red Hat also offers a private version of the Quay.io registry called https://access.redhat.com/products/red-hat-quay[Red Hat Quay]. Included with Red Hat Quay are features for geo replication, git build triggers, and Clair image scanning, among other features.
All of the registries mentioned here can require credentials for someone to download images from those registries. Some of those credentials are presented on a cluster-wide basis from {product-title}, while other credentials can represent an individual's credentials.
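For example, to authenticate to the Red Hat Registry and pull a UBI base image, here is a sketch that assumes you have a valid Red Hat login:
----
$ podman login registry.redhat.io
$ podman pull registry.redhat.io/ubi8/ubi-minimal
----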

View File

@@ -10,10 +10,12 @@
[id="cluster-entitlements_{context}"]
= Internet and Telemetry access for {product-title}
In {product-title} {product-version}, Telemetry is the component that provides
metrics about cluster health and the success of updates. To perform subscription
management, including legally entitling your purchase from Red Hat, you must use
the Telemetry service and access
link:https://cloud.openshift.com/clusters/install[the OpenShift start page].
Because there is no disconnected subscription management, you cannot both opt
out of sending data back to Red Hat and entitle your purchase. Support for
disconnected subscription management might be added in future releases of
@@ -24,10 +26,11 @@ but if you do so, you cannot entitle your cluster.
[IMPORTANT]
====
Your machines must have direct internet access to install the cluster.
====
You must have internet access to:
* Access link:https://cloud.openshift.com/clusters/install[the OpenShift start page]
to download the installation program
* Access link:http://quay.io[quay.io] to obtain the packages that are required

View File

@@ -1,6 +1,6 @@
// Module included in the following assemblies:
//
// * TBD
[id="completing-installation_{context}"]
= Completing and verifying the {product-title} installation

View File

@@ -1,17 +0,0 @@
// Module included in the following assemblies:
//
// * architecture/architecture.adoc
[id="benefits-containerized-applications_{context}"]
= The benefits of containerized applications
Containerized applications offer many advantages over traditional deployment methods. Where applications were once expected to be installed on operating systems that included all their dependencies, containers let applications carry their dependencies with them. As a result:
* Conflicting applications can run on the same systems. Because each container carries its own dependent software and manages its own interfaces (networking, filesystems and so on), there is no problem with applications competing for those assets.
* Linux inside. Containers are really like small Linux operating systems without a kernel. Their filesystem, networking, cgroups, process tables, and other namespaces are separate from the host Linux system, but the containers can integrate with the host seamlessly when necessary. Being based on Linux allows containers to leverage all the advantages that come with the open source development model of rapid innovation.
* Host operating system configuration can become more generic. Another side effect of dependencies being inside each container is that the operating systems that run them do not each have to be configured specially. When your data center needs more capacity, you can just spin up another host system.
* Scaling is much simpler. {product-title} offers a simple, standard way of scaling any containerized service. For example, if you build applications as a set of microservices, rather than large, monolithic applications, you can scale the individual microservices up and down as needed. You do not have to scale up an entire, huge application if only one service is in particular demand.
* Rolling upgrades. Deploying rolling upgrades between major releases lets you continuously improve your applications without downtime, when compatibility with the current release can be maintained.
* Canary deployment. If you have a new version of a container available, you can start it up alongside the containers that are currently running. If the new container runs without incident, you can add more new containers and spin down the old ones.
While container images and the containers that run from them have become the primary building blocks for modern application development, running them at scale requires a reliable and flexible distribution system. Kubernetes has become the de facto standard for orchestrating containers.

View File

@@ -1,69 +0,0 @@
// Module included in the following assemblies:
//
// * architecture/understanding-openshift-development.adoc
[id="creating-kubernetes-manifest-openshift_{context}"]
= Creating a Kubernetes Manifest for {product-title}
While the container image is the most basic building block for a containerized application, more is needed to manage and deploy that application in a Kubernetes environment such as {product-title}. The typical next step after creating your image is to:
* Understand the different resources you work with in Kubernetes manifests
* Make some decisions on what kind of an application you are running
* Gather supporting components
* Create a manifest and store it in a Git repository, where it can be versioned, audited, tracked, promoted and deployed to the next environment, rolled back to earlier versions if necessary, and shared with others
[id="understanding-kubernetes-pods_{context}"]
== Understanding Kubernetes pods, services, and so on
While the container image is the basic unit with docker run, the basic units that Kubernetes works with are called https://kubernetes.io/docs/concepts/workloads/pods/pod-overview/[pods]. Pods represent the next step in building out an application. A pod can contain more than one container, although it often contains just one. The key is that the pod is what you deploy, scale up or down, and generally treat as a single unit.
Scalability and namespaces are probably the main things you think about when you decide what goes into a pod. For ease of deployment, you might want to deploy a container in a pod, along with its own logging and monitoring container. Later, when you run the pod and need to scale up an additional instance, those other containers are scaled up with it. For namespaces, containers in a pod share the same network interfaces, shared storage volumes, and resource limitations (such as memory and CPU), which makes it easier to manage the contents of the pod as a single unit. Containers in a pod can also communicate with each other by using standard inter-process communications, such as System V semaphores or POSIX shared memory.
While individual pods represent a scalable unit in Kubernetes, a https://kubernetes.io/docs/concepts/services-networking/service/[service] provides a means of grouping together a set of pods to create a more complete application that can be more stable and is able to do things like load balancing. A service also has the advantage of being less fleeting, because the service remains available from the same IP address as long as it is not deleted. When the service is used, it is requested by name, and the {product-title}/Kubernetes cluster resolves that name into the locations (IP addresses and ports) where the pods backing that service can be reached.
By their nature, containerized applications are kept confined from the operating systems where they run and, by extension, the world that will eventually use them. So, part of your Kubernetes manifest involves exposing the application to internal and external networks by defining https://kubernetes.io/docs/concepts/services-networking/network-policies/[network policies] that allow fine-grained control over communications with your containerized applications. To connect incoming requests for HTTP, HTTPS, and other services from outside your cluster to services inside your cluster, you can use an https://kubernetes.io/docs/concepts/services-networking/ingress/[Ingress] resource.
If your container requires on-disk storage (as opposed to database storage, which might be provided through a service), you can add https://kubernetes.io/docs/concepts/storage/volumes/[volumes] to your manifests to make that storage available to your pods. Those manifests can create persistent volumes (PVs), or be set up to dynamically create volumes, that are added to your pod definitions.
Once you have defined a group of pods that go together to make up your application, you can define those pods in https://kubernetes.io/docs/concepts/workloads/controllers/deployment/[deployments] and https://docs.openshift.com/container-platform/4.1/applications/deployments/what-deployments-are.html[deploymentconfigs].
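As a minimal sketch of how these pieces fit together, the following creates a two-replica deployment and a service that fronts its pods. All names, the image, and the port are placeholders:
----
$ cat <<'EOF' | oc apply -f -
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myapp
spec:
  replicas: 2
  selector:
    matchLabels:
      app: myapp
  template:
    metadata:
      labels:
        app: myapp
    spec:
      containers:
      - name: myapp
        image: quay.io/myrepo/myapp:latest
        ports:
        - containerPort: 8080
---
apiVersion: v1
kind: Service
metadata:
  name: myapp
spec:
  selector:
    app: myapp
  ports:
  - port: 8080
    targetPort: 8080
EOF
----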
Your next set of choices has to do with how your application is run. To make that determination, you need to consider the nature of your application.
[id="deciding-application_{context}"]
== Deciding what kind of application
So far, you have seen the kinds of elements (pods, services, ingress, and so on) that need to go into your manifest. The next step is to consider the character of your application. That consideration helps drive the additional choices you need to make in creating the Kubernetes manifest for your application.
Kubernetes defines different types of workloads that are appropriate for different kinds of applications. To determine the appropriate workload for your application, you might ask yourself whether the application is:
* Meant to run to completion and be done? An example is an application that starts up to produce a report and exits when the report is done. The application might not run again for a month. Suitable {product-title} objects for these types of applications include https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/[Jobs] or https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/[CronJob] objects. A minimal CronJob sketch follows this list.
* Expected to run continuously? For long-running applications, you can write a https://docs.openshift.com/container-platform/4.1/applications/deployments/what-deployments-are.html%23deployments-kube-deployments_what-deployments-are[Deployment] or a https://docs.openshift.com/container-platform/4.1/applications/deployments/what-deployments-are.html%23deployments-and-deploymentconfigs_what-deployments-are[DeploymentConfig].
* Required to be highly available? If your application requires high availability, then you want to size your deployment to have more than one instance. A Deployment or DeploymentConfig can incorporate a https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/[ReplicaSet] for that type of application. With ReplicaSets, pods are run across multiple nodes, to make sure the application is always available, even if a worker goes down.
* Need to run on every node? Some types of Kubernetes applications are meant to run in the cluster itself on every master or worker node. DNS and monitoring applications are examples of applications that need to run continuously on every node. You can run this type of application as a https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/[DaemonSet]. You can also run a DaemonSet on a subset of nodes, based on labels.
* Require lifecycle management? When you want to hand off your application so that others can use it, consider creating an https://coreos.com/operators/[Operator]. Operators let you build in intelligence, so they can handle things like backups and upgrades automatically. Coupled with the Operator Lifecycle Manager (OLM), cluster managers can expose Operators to selected namespaces so that users in the cluster can run them.
* Have identity or numbering requirements? An application might have identity requirements or numbering requirements. For example, you might be required to run exactly three instances of the application and to name the instances 0, 1, and 2. In that case, you can use a https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/[StatefulSet]. StatefulSets are most useful for applications that require independent storage, such as databases and zookeeper clusters.
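As an example of the run-to-completion case, here is a sketch of a CronJob that produces a report on the first of each month. The name, schedule, and image are placeholders:
----
$ cat <<'EOF' | oc apply -f -
apiVersion: batch/v1beta1
kind: CronJob
metadata:
  name: monthly-report
spec:
  schedule: "0 0 1 * *"
  jobTemplate:
    spec:
      template:
        spec:
          containers:
          - name: report
            image: quay.io/myrepo/report:latest
          restartPolicy: OnFailure
EOF
----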
The application you write may need supporting components, like a database or a logging component. To fulfill that need, you might be able to get the needed component from the catalogs that are available in {product-title}.
[id="supporting-components_{context}"]
== Gather supporting components
If you are creating an application that needs a database, you almost certainly will want to grab an available database and use it to develop and later deploy with your application. In the {product-title} web UI, there are Catalogs you can choose from to get the applications you need to use as you write your application. Here are your choices:
* OperatorHub: OperatorHub is available in every {product-title} {product-version} cluster. The OperatorHub makes Operators from Red Hat, certified Red Hat partners, and community members available to the cluster operator. The cluster operator can make those Operators available in all or selected namespaces in the cluster, so developers can launch them and configure them with their applications. More on Operators and the OperatorHub later.
* Service Catalog: Operators are the preferred method of getting packaged applications in {product-title}. However, there are some reasons why you might want to use the Service Catalog to get supporting applications for your own application: if you are an existing {product-title} 3 customer who has already invested in Service Catalog applications, or if you already have a Cloud Foundry environment from which you are interested in consuming brokers from other ecosystems.
* Templates: For a one-off type of application, where the lifecycle of a component is not important after it is installed, a template provides an easy way to start developing a Kubernetes application with minimal overhead. A template can be a list of resource definitions, which could be deployments, services, routes, or other objects. If you want to change names or resources, you can set those items as parameters in the template. +
There is a service broker for templates (the Template Service Broker Operator), which lets you instantiate your own templates. You can also install templates directly from the command line.
If you find a supporting Operator, Service Catalog application, or template to use for your application development, a common practice is to configure it for the specific needs of your development team, then make it available in the namespaces in which your developers work. Many people add shared templates to the openshift namespace, as it is accessible from all other namespaces.
[id="manifest-creation-storage_{context}"]
== Create and store manifest
Kubernetes manifests let you create a more complete picture of the components that go into your Kubernetes applications. You write these manifests as YAML files and deploy them by applying them to the cluster, for example, with the oc apply command.
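For example, assuming your manifest is saved as myapp.yaml:
----
$ oc apply -f myapp.yaml
----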
At this point, you should be thinking more about ways to automate your container development process. Ideally, you would have some sort of CI pipeline that builds the images and pushes them to a registry. In particular, a GitOps pipeline integrates your container development with the git repositories being used to ultimately store the software needed to build your applications. The workflow to this point might look like:
* Day 1: You write some YAML. You then run oc apply to apply that YAML to the cluster and test that it works.
* Day 2: You put your YAML container configuration file into your own Git repository. From there, people who want to install that app, or help you improve it, can pull down that YAML, apply it to their cluster, and have the app running.
* Day 3: Consider writing an Operator for your application.

View File

@@ -1,26 +0,0 @@
// Module included in the following assemblies:
//
// * architecture/architecture.adoc
[id="defining-masters_{context}"]
= Defining masters
A master node is a Kubernetes concept: master nodes run the services that are needed to control the Kubernetes cluster. Basically, they are the brains of a Kubernetes cluster. Those same master nodes in {product-title} are referred to as control plane nodes, because they contain more than just the Kubernetes services for managing the {product-title} cluster.
Services that fall under the Kubernetes category on the master include the API server, etcd, the controller manager server, and the HAProxy services. The following table describes those services.
[cols="1,3",options="header"]
|===
|Component |Description
|API Server |The Kubernetes API server validates and configures the data for pods, services, and replication controllers. It also provides a focal point for the cluster's shared state.
|etcd |etcd stores the persistent master state while other components watch etcd for changes to bring themselves into the desired state. etcd can be optionally configured for high availability, typically deployed with 2n+1 peer services.
|Controller Manager Server |The controller manager server watches etcd for changes to objects such as replication, namespace, and serviceaccount controller objects, and then uses the API to enforce the desired state. Several such processes create a cluster with one active leader at a time.
|===
If you were to look inside your control plane nodes, you would see that some of the services on those nodes run as systemd services while others run as Kubernetes pods.
Systemd services are appropriate for services that you always need to come up on that particular system, usually at an early point in the boot process. For master nodes, those include sshd, which allows remote login. They also include services such as:
* The CRI-O container engine (crio): This is the container engine that runs and manages the containers. In {product-title} {product-version}, this is used instead of the Docker Container Engine.
* Kubelet (kubelet): Accepts requests for managing containers on the node from master services.
The reason to have CRI-O and the kubelet running directly on the host as systemd services is that they must be running before you can run other containers. That brings us to the next level of services that run on masters: containerized services managed by Operators.
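To see this in practice (node names vary by cluster), you can list the master nodes and then check the systemd services on one of them:
----
$ oc get nodes -l node-role.kubernetes.io/master
$ oc debug node/<master-node>
# chroot /host
# systemctl status crio kubelet
----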

View File

@@ -1,8 +0,0 @@
// Module included in the following assemblies:
//
// * architecture/architecture.adoc
[id="defining-workers_{context}"]
= Defining workers
In a Kubernetes cluster, the worker nodes are where the actual workloads requested by Kubernetes users run and are otherwise managed. {product-title} sometimes refers to these as compute nodes. The worker nodes advertise their capacity, and the scheduler, which is part of the control plane (master) services, decides where to start containers and pods. Important services that run on each worker node include CRI-O (the container engine), the kubelet (which accepts and carries out requests for running and stopping container workloads), and a service proxy (which handles communication for pods across workers).
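For example, to list the worker nodes and inspect what the scheduler has placed on one of them (the node name is a placeholder):
----
$ oc get nodes -l node-role.kubernetes.io/worker
$ oc describe node <worker-node>
----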

View File

@@ -1,14 +0,0 @@
// Module included in the following assemblies:
//
// * architecture/understanding-openshift-development.adoc
[id="develop-for-operators_{context}"]
= Develop for Operators
Packaging and deploying your application as an Operator is a particularly good idea if you are offering that application to be run by others. As noted earlier, Operators add a lifecycle component to your application that acknowledges that the job of running an application is not done when it is installed.
When you create an application as an Operator, you can build in your own knowledge of how to run and maintain the application going forward. You can build in features for upgrading the application, backing it up, scaling it, or keeping track of the state of the application. If done properly, maintenance tasks, like updating the Operator, can happen automatically and invisibly to those using the Operator.
An example of a useful Operator is one that is set up to automatically back up data at particular times. Having an Operator manage an application's backup at set times can save a system administrator from remembering to do it.
The bottom line is that any time an application needs care that has typically been provided by a person in real time, such as backing up data or rotating certificates, an Operator can provide that care automatically.

View File

@@ -5,9 +5,9 @@
[id="efk-logging-configuring-image-about_{context}"]
= Understanding the cluster logging component images
There are several components in cluster logging, each one implemented with one
or more images. Each image is specified by an environment variable
defined in the *cluster-logging-operator* deployment in the *openshift-logging* project and should not be changed.
You can view the images by running the following command:
@@ -26,7 +26,7 @@ OAUTH_PROXY_IMAGE=quay.io/openshift/origin-oauth-proxy:latest <5>
<4> *CURATOR_IMAGE* deploys Curator.
<5> *OAUTH_PROXY_IMAGE* defines OAUTH for OpenShift Container Platform.
[NOTE]
====
The values might be different depending on your environment.
====

View File

@@ -1,16 +1,9 @@
// Module included in the following assemblies:
//
// * authentication/understanding-authentication.adoc
[id="understanding-cluster-version-operator_{context}"]
= Understanding the Cluster Version Operator (CVO)
// *
The {product-title} update service is the hosted service that provides over-the-air updates to both {product-title} and {op-system-first}. The Cluster Version Operator (CVO) in your cluster checks with the {product-title} update service to see the valid updates and update paths, based on current component versions and information in the graph.
When you request an update, the {product-title} CVO uses the release image for that update to upgrade your cluster. The release artifacts are hosted in Quay.io as container images. To allow the {product-title} update service to provide only compatible updates, a release verification pipeline exists to drive automation.
Each release artifact is verified for compatibility with supported cloud platforms and system architectures, as well as other component packages. After the pipeline confirms the suitability of a release, the {product-title} update service can apply the update to your cluster or notify you that it is available.
During continuous update mode, two controllers run. One continuously updates the payload manifests, applies them to the cluster, and outputs the status of the controlled rollout of the Operators: whether they are available, upgrading, or failed. The second controller polls the {product-title} update service to determine if updates are available.
[id="exploring-cvo_{context}"]
== Exploring the CVO
To see the current version that your cluster is on, type:
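----
$ oc get clusterversion
----
For more detail, including the update history and the current payload image, `oc describe clusterversion` shows the full status of the resource.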

View File

@@ -1,7 +1,7 @@
// Module included in the following assemblies:
//
// * TBD
[id="following-installation_{context}"]
= Following an {product-title} installation

View File

@@ -1,42 +0,0 @@
// Module included in the following assemblies:
//
// * architecture/architecture.adoc
//* architecture/installation-options.adoc
[id="installation-options_{context}"]
= Installation and update
// this is going to get deprecated when we merge the new arch guide
In {product-title} {product-version}, you can install only clusters that use
installer-provisioned infrastructure in Amazon Web Services (AWS).
These clusters use {op-system-first}
nodes as the operating system. Future versions of {product-title} will support
clusters that use both installer-provisioned infrastructure
and user-provisioned infrastructure on more cloud providers and on bare metal.
With all cluster types, you must use {op-system} as the operating system for
control plane machines.
////
If you want to
use any other cloud or install your cluster on-premise, use the bring your own
infrastructure option to install your cluster on existing Red Hat Enterprise
Linux (RHEL) hosts.
////
Using installer-provisioned infrastructure offers full-stack automation to:
* Manage compute
* Manage operating system ({op-system})
* Manage control plane
* Manage nodes
////
With the bring your own infrastructure option, you have more responsibilities.
You must provide the hosts and update RHEL on them. {product-title} provides:
* Managed control plane
* Ansible to manage kubelet and container runtime
////
Installation and upgrade both use an Operator
that constantly reconciles component versions as if it were any other Kubernetes
controller.

View File

@@ -1,15 +0,0 @@
// Module included in the following assemblies:
//
// * architecture/architecture.adoc
[id="kubernetes-introduction_{context}"]
= About Kubernetes
Kubernetes is an open source container orchestration engine for automating deployment, scaling, and management of containerized applications. The general concept of Kubernetes is fairly simple:
* Start with one or more worker nodes to run the container workloads
* Manage the deployment of those workloads from one or more master nodes
* Wrap containers in a deployment unit called a pod, to provide extra metadata with the container and possibly group several containers in a single deployment entity
* Create special kinds of assets. For example, services are represented by a set of pods and a policy that defines how they are accessed (so containers can connect to the services that they need without knowing their IP addresses). Replication controllers indicate how many pod replicas should run at a time (allowing you to adapt to the amount of demand for an application).
In only a few years, Kubernetes has seen massive cloud and on-premise adoption. The open source development model has allowed many people to extend Kubernetes by implementing different networking, storage, authentication, and other technologies.

View File

@@ -0,0 +1,75 @@
// Module included in the following assemblies:
//
// * TBD
[id="machine-configs-and-pools_{context}"]
= Machine Configs and Machine Config Pools
Machine Config Pools manage a cluster of nodes and their corresponding
Machine Configs. Machine Configs contain configuration information for a
cluster.
To list all Machine Config Pools that are known:
----
$ oc get machineconfigpools
NAME CONFIG UPDATED UPDATING DEGRADED
master master-1638c1aea398413bb918e76632f20799 False False False
worker worker-2feef4f8288936489a5a832ca8efe953 False False False
----
To list all Machine Configs:
----
$ oc get machineconfig
NAME GENERATEDBYCONTROLLER IGNITIONVERSION CREATED OSIMAGEURL
00-master 4.0.0-0.150.0.0-dirty 2.2.0 16m
00-master-ssh 4.0.0-0.150.0.0-dirty 16m
00-worker 4.0.0-0.150.0.0-dirty 2.2.0 16m
00-worker-ssh 4.0.0-0.150.0.0-dirty 16m
01-master-kubelet 4.0.0-0.150.0.0-dirty 2.2.0 16m
01-worker-kubelet 4.0.0-0.150.0.0-dirty 2.2.0 16m
master-1638c1aea398413bb918e76632f20799 4.0.0-0.150.0.0-dirty 2.2.0 16m
worker-2feef4f8288936489a5a832ca8efe953 4.0.0-0.150.0.0-dirty 2.2.0 16m
----
To list all KubeletConfigs:
----
$ oc get kubeletconfigs
----
To get more detailed information about a KubeletConfig, including the reason for
the current condition:
----
$ oc describe kubeletconfig <name>
----
For example:
----
# oc describe kubeletconfig set-max-pods
Name: set-max-pods <1>
Namespace:
Labels: <none>
Annotations: <none>
API Version: machineconfiguration.openshift.io/v1
Kind: KubeletConfig
Metadata:
Creation Timestamp: 2019-02-05T16:27:20Z
Generation: 1
Resource Version: 19694
Self Link: /apis/machineconfiguration.openshift.io/v1/kubeletconfigs/set-max-pods
UID: e8ee6410-2962-11e9-9bcc-664f163f5f0f
Spec:
Kubelet Config: <2>
Max Pods: 100
Machine Config Pool Selector: <3>
Match Labels:
Custom - Kubelet: small-pods
Events: <none>
----
<1> The name of the KubeletConfig.
<2> The user-defined configuration.
<3> The Machine Config Pool selector to apply the KubeletConfig to.
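A minimal sketch of creating such a KubeletConfig, matching the example above; it assumes a Machine Config Pool that carries the `custom-kubelet: small-pods` label:
----
$ cat <<'EOF' | oc apply -f -
apiVersion: machineconfiguration.openshift.io/v1
kind: KubeletConfig
metadata:
  name: set-max-pods
spec:
  machineConfigPoolSelector:
    matchLabels:
      custom-kubelet: small-pods
  kubeletConfig:
    maxPods: 100
EOF
----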

View File

@@ -1,123 +0,0 @@
// Module included in the following assemblies:
//
// * architecture/architecture.adoc
[id="node-management_{context}"]
= Node management in {product-title}
{product-title} version 4.0 integrates container operating system management
and cluster management. Because the cluster manages
its updates, including updates to {op-system-first} on cluster nodes, {product-title} provides an opinionated
lifecycle management experience that simplifies the orchestration of upgrades.
{product-title} employs three DaemonSets and controllers to simplify node management:
* The `machine-config-controller` coordinates machine upgrades.
* The `machine-config-daemon` DaemonSet applies the specified machine configuration,
which is a subset of the Ignition configuration, and controls kubelet configuration.
* The `machine-config-server` DaemonSet provides the Ignition config to new hosts.
These tools orchestrate operating system updates and configuration changes to
the hosts by using standard Kubernetes-style constructs. A `machine-config-daemon`
DaemonSet runs on each machine in the cluster and watches for changes in
the machine configuration that it must apply. The machine configuration is a subset
of the Ignition configuration. The `machine-config-daemon` reads the machine configuration to see
whether it needs to do an OSTree update or whether it must apply a series of systemd
kubelet file changes, configuration changes, or other changes to the
operating system or {product-title} configuration.
The masters also run the `machine-config-controller` process that monitors all of the cluster nodes
and orchestrates their configuration updates. So if you try to apply
an update or configuration change to a node on the cluster, the `machine-config-controller`
directs the node to update. The node sees that it needs to change, drains off its
pods, applies the update, and reboots. This process is key to the success of
managing {product-title} and {op-system} updates together.
The `machine-config-server` provides configurations to nodes as they join the
cluster. It orchestrates configuration to nodes and changes to the operating system
and is used in both cluster installation and node maintenance. The
`machine-config-server` component upgrades the operating system and controls the Ignition
configuration for nodes.
////
The `bootkube` process calls the `machine-config-server` component when the
{product-title} installer bootstraps the initial master node. After installation,
the `machine-config-server` runs in the cluster. It reads the `machine-config`
Custom Resource Definitions (CRDs) and serves the required Ignition configurations
to new nodes when they join the cluster.
////
When you perform node management operations, you will be creating or
modifying a KubeletConfig Custom Resource (CR).
[id="machine-configs-and-pools_{context}"]
== Machine Configs and Machine Config Pools
Machine Config Pools manage a cluster of nodes and their corresponding
Machine Configs. Machine Configs contain configuration information for a
cluster.
To list all Machine Config Pools that are known:
----
$ oc get machineconfigpools
NAME CONFIG UPDATED UPDATING DEGRADED
master master-1638c1aea398413bb918e76632f20799 False False False
worker worker-2feef4f8288936489a5a832ca8efe953 False False False
----
To list all Machine Configs:
----
$ oc get machineconfig
NAME GENERATEDBYCONTROLLER IGNITIONVERSION CREATED OSIMAGEURL
00-master 4.0.0-0.150.0.0-dirty 2.2.0 16m
00-master-ssh 4.0.0-0.150.0.0-dirty 16m
00-worker 4.0.0-0.150.0.0-dirty 2.2.0 16m
00-worker-ssh 4.0.0-0.150.0.0-dirty 16m
01-master-kubelet 4.0.0-0.150.0.0-dirty 2.2.0 16m
01-worker-kubelet 4.0.0-0.150.0.0-dirty 2.2.0 16m
master-1638c1aea398413bb918e76632f20799 4.0.0-0.150.0.0-dirty 2.2.0 16m
worker-2feef4f8288936489a5a832ca8efe953 4.0.0-0.150.0.0-dirty 2.2.0 16m
----
To list all KubeletConfigs:
----
$ oc get kubeletconfigs
----
To get more detailed information about a KubeletConfig, including the reason for
the current condition:
----
$ oc describe kubeletconfig <name>
----
For example:
----
# oc describe kubeletconfig set-max-pods
Name: set-max-pods <1>
Namespace:
Labels: <none>
Annotations: <none>
API Version: machineconfiguration.openshift.io/v1
Kind: KubeletConfig
Metadata:
Creation Timestamp: 2019-02-05T16:27:20Z
Generation: 1
Resource Version: 19694
Self Link: /apis/machineconfiguration.openshift.io/v1/kubeletconfigs/set-max-pods
UID: e8ee6410-2962-11e9-9bcc-664f163f5f0f
Spec:
Kubelet Config: <2>
Max Pods: 100
Machine Config Pool Selector: <3>
Match Labels:
Custom - Kubelet: small-pods
Events: <none>
----
<1> The name of the KubeletConfig.
<2> The user-defined configuration.
<3> The Machine Config Pool selector to apply the KubeletConfig to.

View File

@@ -1,23 +0,0 @@
// Module included in the following assemblies:
//
// * architecture/architecture.adoc
[id="node-roles_{context}"]
= Node roles in {product-title}
{product-title} assigns hosts different roles. These roles define the function
of the node within the cluster. The cluster contains definitions for standard
role types, such as bootstrap, master, and worker.
A node with the bootstrap role
provides the initial configuration to the cluster and is used only during initial
configuration.
Nodes with the master role run the cluster
infrastructure and required components. Instead of being grouped into a `MachineSet`,
they are a series of standalone machine API resources. Extra controls apply to
master nodes to prevent you from deleting all master nodes and breaking your
cluster.
Nodes with the worker role drive compute workloads. Each type of worker node is
governed by a specific machine pool that autoscales them.

View File

@@ -5,15 +5,32 @@
[id="operators-overview_{context}"]
= Operators in {product-title}
In {product-title}, Operators are the preferred method of packaging, deploying,
and managing services on the control plane. They also provide advantages to
applications that users run. Operators integrate with
Kubernetes APIs and CLI tools such as `kubectl` and `oc` commands. They provide
the means of watching over an application, performing health checks, managing
over-the-air updates, and ensuring that the applications remain in your
specified state.
Because CRI-O and the Kubelet run on every node, almost every other cluster
function can be managed on the control plane by using Operators. Operators are
among the most important components of {product-title} {product-version}.
Components that are added to the control plane by using Operators include
critical networking and credential services.
The Operator that manages the other Operators in an {product-title} cluster is
the Cluster Version Operator.
{product-title} {product-version} uses different classes of Operators to perform
cluster operations and run services on the cluster for your applications to use.
ifdef::openshift-enterprise,openshift-origin[]
[id="platform-operators_{context}"]
== Platform Operators in {product-title}
In {product-title} {product-version}, all cluster functions are divided into a series
of platform Operators. Platform Operators manage a particular area of
cluster functionality, such as cluster-wide application logging, management of
the Kubernetes control plane, or the machine provisioning system.
@@ -25,30 +42,6 @@ Operators also offer a more granular configuration experience. You configure eac
component by modifying the API that the Operator exposes instead of modifying a
global configuration file.
In {product-title} {product-version}, all control plane components are run and managed as
applications on the infrastructure to ensure a uniform and consistent management
experience. The control plane services run as static pods so they can
manage normal workloads or processes the same way that they manage disaster
recovery. Aside from the core control plane components, other services run as
normal pods on the cluster, managed by regular Kubernetes constructs. Unlike in the past
where the `kubelet` could be running as containerized or non-containerized, the `kubelet`
always runs as a `systemd` process.
[id="second-level-operators_{context}"]
== Second-level Operators in {product-title}
When we talk about payload manifests, the second-level Operators are the
Operators, managed by the Cluster Version Operator, that actually manage
{product-title} as if it were a native Kubernetes application. Second-level
Operators are not a codified concept; they are defined by the namespace where
your code exists, the service accounts or roles that the second-level Operator
runs as, the
link:https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/#customresourcedefinitions[Custom Resource Definition] (CRD)
and pull secret that drive the operation of the Operator, and the Operator
deployment.
Second-level Operators write out to a CRD resource called ClusterOperator,
which allows the Cluster Version Operator to understand the progress of the
managed component's deployment.
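For example, you can watch the progress that each second-level Operator reports through its ClusterOperator resource:
----
$ oc get clusteroperators
----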
endif::[]
[id="OLM-operators_{context}"]

View File

@@ -1,28 +0,0 @@
// Module included in the following assemblies:
//
// * architecture/architecture.adoc
[id="platform-benefits_{context}"]
= The {product-title} in brief
Red Hat was one of the early contributors to Kubernetes and quickly integrated it as the centerpiece of its {product-title} product line. Today, Red Hat continues as one of the largest contributors to Kubernetes across a wide range of technology areas. Red Hat aims to make {product-title} the premier choice of enterprise-ready Kubernetes platforms by bringing these enhancements:
* Hybrid cloud deployments. Red Hat is cloud agnostic when it comes to {product-title}. The goal is to allow {product-title} to run in on-premise installations as well as on a variety of public cloud platforms.
* Red Hat technology. Major components in {product-title} come from Red Hat Enterprise Linux and related Red Hat technologies. {product-title} leverages the intense testing and certification initiatives that go into all of Red Hat's enterprise-quality software.
* Open source development model. Development is done in the open, with source code available from public software repositories. This allows for rapid innovation and development.
Because Kubernetes does not dictate the operating systems it runs on or the way that the code is deployed, where and how Kubernetes-based platforms are managed is critically important. {product-title} {product-version} brings together a powerful set of features for installing, managing, upgrading, and using your Kubernetes-based cluster. Its components include:
* Simplified installation: {product-title} {product-version} aims to integrate with different cloud providers so that someone with a particular cloud account can spin up an {product-title} cluster by just answering a few questions.
* Simplified upgrades: By locking down the systems and services running on each {product-title} node, and managing them from a central control plane, upgrades are designed to become automatic events. The goal is, in fact, to make upgrades almost non-events.
* {op-system-first}: A new container-oriented operating system that leverages the combined talents of the CoreOS and Red Hat Atomic Host development teams. {op-system} is specifically designed for running containerized applications from {product-title} and works with new tools to provide fast installation, Operator-based management, and simplified upgrades.
* Operators: Operators help {product-title} replace manual upgrades of operating systems and control plane applications. {product-title} Operators, such as the Cluster Version Operator and the Machine Config Operator, allow simplified, cluster-wide management of those critical components. The Operator Lifecycle Manager (OLM) and the OperatorHub provide facilities for storing and distributing Operators to people who develop and deploy applications.
* CRI-O Container Engine: This streamlined container engine is developed in tandem with Kubernetes releases, to provide facilities for running, stopping, and restarting containers. It fully replaces the Docker Container Engine in {product-title} {product-version}.
* Ignition: A first-boot system configuration utility for initially bringing up and configuring {product-title} nodes.
* Red Hat Quay Container Registry: Most of the container images and Operators consumed by {product-title} are served from the Quay.io container registry. Quay.io is a public registry version of Red Hat Quay, with millions of images and tags stored in it.
* Many other features: Other enhancements to Kubernetes in {product-title} include improvements in software defined networking (SDN), authentication, log aggregation, monitoring, routing, and web console and command line (oc) interfaces.
The following figure illustrates the basic flow of creating an {product-title} cluster, managing the cluster, developing and deploying applications, then scaling up those applications as needed.
.High level {product-title} overview
image::overview.png[High-level {product-title} flow]

View File

@@ -1,7 +1,8 @@
// Module included in the following assemblies:
//
// * architecture/architecture.adoc
[id="running-modified_{context}"]
// * TBD
[id="running-modified-installation_{context}"]
= Running a modified {product-title} installation
Running a default {product-title} {product-version} cluster is the best way to ensure that the {product-title} cluster you get will be easy to install, maintain, and upgrade going forward. However, because you may want to add to or change your {product-title} cluster, openshift-install offers several ways to modify the default installation or add to it later. These include:

View File

@@ -1,6 +1,7 @@
// Module included in the following assemblies:
//
// * architecture/architecture.adoc
// * TBD
[id="running-simple-installation_{context}"]
= Running a simple {product-title} installation

View File

@@ -1,9 +0,0 @@
// Module included in the following assemblies:
//
// * architecture/architecture.adoc
[id="telemetry-service-overview_{context}"]
= The Telemetry service
In a managed Red Hat environment, Telemetry is the component that provides
metrics about cluster health and the success of updates.

View File

@@ -1,7 +1,12 @@
// Module included in the following assemblies:
//
// * architecture/architecture.adoc
[id="understanding-openshift-control-plane_{context}"]
[id="understanding-control-plane_{context}"]
= Understanding the {product-title} control plane
The control plane, which is comprised of master machines, manages the
{product-title} cluster. The control plane machines manage workloads on the
compute, or worker, machines. The cluster itself manages all upgrades to the
machines by the actions of the Cluster Version Operator, the
Machine Config Operator, and a set of individual Operators.

View File

@@ -1,6 +1,7 @@
// Module included in the following assemblies:
//
// * architecture/architecture.adoc
// * TBD
[id="understanding-installation_{context}"]
= Understanding {product-title} installation

View File

@@ -4,14 +4,37 @@
[id="understanding-machine-config-operator_{context}"]
= Understanding the Machine Config Operator
{product-title} {product-version} integrates both
operating system and cluster management. Because the cluster manages its own
updates, including updates to {op-system-first} on cluster nodes,
{product-title} provides an opinionated lifecycle management
experience that simplifies the orchestration of node upgrades.
{product-title} employs three DaemonSets and controllers to
simplify node management. These DaemonSets orchestrate operating system updates
and configuration changes to the hosts by using standard Kubernetes-style
constructs. They include:
* The `machine-config-controller`, which coordinates machine upgrades from the control
plane. It monitors all of the cluster nodes and orchestrates their configuration
updates.
* The `machine-config-daemon` DaemonSet, which runs on
each node in the cluster and updates a machine to the configuration defined by
MachineConfig as instructed by the MachineConfigController. When the node sees
a change, it drains off its pods, applies the update, and reboots. These changes
come in the form of Ignition configuration files that apply the specified
machine configuration and control kubelet configuration. The update itself is
delivered in a container. This process is key to the success of managing
{product-title} and {op-system} updates together.
* The `machine-config-server` DaemonSet, which provides the Ignition config files
to master nodes as they join the cluster.
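You can see these components on a running cluster; this sketch assumes the default openshift-machine-config-operator namespace:
----
$ oc get daemonsets,deployments -n openshift-machine-config-operator
----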
The machine configuration is a subset of the Ignition configuration. The
`machine-config-daemon` reads the machine configuration to see if it needs to do
an OSTree update or if it must apply a series of systemd kubelet file changes,
configuration changes, or other changes to the operating system or {product-title}
configuration.
When you perform node management operations, you create or modify a
KubeletConfig Custom Resource (CR).
//See https://github.com/openshift/machine-config-operator/blob/master/docs/KubeletConfigDesign.md[KubeletConfigDesign] for details.


@@ -1,9 +0,0 @@
// Module included in the following assemblies:
//
// * architecture/architecture.adoc
[id="understanding-node-roles_{context}"]
= Understanding node roles
{product-title} assigns hosts different roles. These roles define the function of the node within the cluster. The cluster contains standard definitions for role types, such as master and worker. The bootstrap role does not exist outside of the initial installation.
Worker nodes are controlled by MachineSets. Instead of being grouped into a MachineSet, master nodes are defined by a series of standalone machine API resources. Extra controls apply to master nodes to prevent you from deleting all master nodes and breaking your cluster. Nodes with the worker role drive compute workloads. Each type of worker node is governed by a specific machine pool that autoscales those nodes.
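
One way to see these roles in practice is the `ROLES` column of `oc get nodes`
output; the node names, ages, and version strings below are an illustrative
sketch, not output captured from a real cluster:

[source,bash]
----
$ oc get nodes
NAME                           STATUS   ROLES    AGE   VERSION
ip-10-0-133-147.ec2.internal   Ready    worker   23m   v1.13.4+d4ce02c1d
ip-10-0-141-105.ec2.internal   Ready    master   29m   v1.13.4+d4ce02c1d
ip-10-0-153-35.ec2.internal    Ready    worker   23m   v1.13.4+d4ce02c1d
----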


@@ -1,13 +0,0 @@
// Module included in the following assemblies:
//
// * architecture/architecture.adoc
[id="understanding-operators_{context}"]
= Understanding Operators on the control plane
In {product-title}, Operators are the preferred method of packaging, deploying, and managing services on the control plane, as well as providing those same advantages to applications running in user space. Operators integrate with Kubernetes APIs and CLI tools such as the `kubectl` and `oc` commands. They provide the means of watching over an application, performing health checks, managing over-the-air updates, and ensuring that the application remains in your specified state.
With CRI-O and the Kubelet running on every node, almost everything else can be managed on the control plane by using Operators. Operators are among the most important components of {product-title} {product-version}. Components added to the control plane by using Operators include critical networking and credential services.
The Operator that oversees the other Operators in an {product-title} cluster is the Cluster Version Operator.
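
A quick way to see the Operators that the Cluster Version Operator manages is
to list the ClusterOperator resources. The rows below are an illustrative
sketch rather than output from a real cluster:

[source,bash]
----
$ oc get clusteroperators
NAME                 VERSION   AVAILABLE   PROGRESSING   DEGRADED   SINCE
authentication       4.1.0     True        False         False      32m
cluster-autoscaler   4.1.0     True        False         False      33m
machine-config       4.1.0     True        False         False      34m
----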
To learn more about Operators, see xref:../applications/operators/olm-what-operators-are.adoc[What Operators are].


@@ -1,6 +1,7 @@
// Module included in the following assemblies:
//
// * architecture/architecture.adoc
// *
[id="understanding-workers-masters_{context}"]
= Understanding {product-title} workers and masters


@@ -4,7 +4,7 @@
// * upgrading/upgrading-cluster.adoc
[id="update-service-overview_{context}"]
= About the {product-title} update service and Cluster Version Operator
The {product-title} update service is the hosted service that provides over-the-air
updates to both {product-title} and {op-system-first}. It provides a graph,
@@ -19,10 +19,6 @@ on current component versions and information in the graph. When you request an
update, the {product-title} CVO uses the release image for that update to
upgrade your cluster. The release artifacts are hosted in Quay as container
images.
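
To inspect what a given release image contains, you can point
`oc adm release info` at it. The image tag in this sketch is an illustrative
assumption:

[source,bash]
----
$ oc adm release info quay.io/openshift-release-dev/ocp-release:4.1.0
----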
////
By accepting automatic updates, you can automatically
keep your cluster up to date with the most recent compatible components.
////
To allow the {product-title} update service to provide only compatible updates,
a release verification pipeline drives automation. Each release
@@ -31,16 +27,6 @@ architectures as well as other component packages. After the pipeline confirms
the suitability of a release, the {product-title} update service can apply the
update to your cluster or notify you that it is available.
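
The channel that the update service uses for your cluster is recorded in the
ClusterVersion resource. A minimal sketch, assuming the `stable-4.1` channel
and a placeholder cluster ID:

[source,yaml]
----
apiVersion: config.openshift.io/v1
kind: ClusterVersion
metadata:
  name: version
spec:
  channel: stable-4.1  # assumed channel name
  clusterID: 00000000-0000-0000-0000-000000000000  # placeholder UUID
----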
////
The interaction between the registry and the {product-title} update service is different during
bootstrap and continuous update modes. When you bootstrap the initial
infrastructure, the Cluster Version Operator finds
the fully qualified image names for the short names of the images that it needs to
apply to the server during installation. It looks at the imagestream that it needs
to apply and renders it to disk. It calls bootkube and waits for a temporary minimal control
plane to come up and load the Cluster Version Operator.
////
During continuous update mode, two controllers run. One continuously updates
the payload manifests, applies them to the cluster, and outputs the status of
the controlled rollout of the Operators, whether they are available, upgrading,