
Merge pull request #101287 from openshift-cherrypick-robot/cherry-pick-101051-to-enterprise-4.21

[enterprise-4.21] CTEditor-z-to-the-r: Add missing module type tags to files starting w…
This commit is contained in:
Darragh Fitzmaurice
2025-10-29 11:55:32 +00:00
committed by GitHub
114 changed files with 262 additions and 256 deletions

View File

@@ -1,5 +1,6 @@
[id="contributing-to-docs-tools-and-setup"]
= Install and set up the tools and software
:icons:
:toc: macro
:toc-title:

View File

@@ -2,6 +2,7 @@
//
// * post_installation_configuration/node-tasks.adoc
:_mod-docs-content-type: REFERENCE
[id="recommended-node-host-practices_{context}"]
= Recommended node host practices

View File

@@ -2,6 +2,7 @@
//
// * scalability_and_performance/recommended-performance-scale-practices/recommended-control-plane-practices.adoc
:_mod-docs-content-type: CONCEPT
[id="recommended-scale-practices_{context}"]
= Recommended practices for scaling the cluster

View File

@@ -2,6 +2,7 @@
//
// * applications/red-hat-marketplace.adoc
:_mod-docs-content-type: CONCEPT
[id="red-hat-marketplace-features_{context}"]
= Red Hat Marketplace features

View File

@@ -2,6 +2,7 @@
//
// * registry/index.adoc
:_mod-docs-content-type: CONCEPT
[id="registry-authentication-enabled-registry-overview_{context}"]
= Authentication enabled Red Hat registry

View File

@@ -3,6 +3,7 @@
//* registry/registry-options
//* registry/index.adoc
:_mod-docs-content-type: CONCEPT
[id="registry-integrated-openshift-registry_{context}"]
= Integrated {product-registry}

View File

@@ -2,6 +2,7 @@
//
// * registry/configuring-registry-storage-aws-user-infrastructure.adoc
:_mod-docs-content-type: REFERENCE
[id="registry-operator-configuration-resource-overview-aws-s3_{context}"]
= Image Registry Operator configuration parameters for AWS S3

View File

@@ -2,6 +2,7 @@
//
// * openshift_images/configuring-registry-operator.adoc
:_mod-docs-content-type: REFERENCE
[id="registry-operator-configuration-resource-overview-gcp-gcs_{context}"]
= Image Registry Operator configuration parameters for {gcp-short} GCS

View File

@@ -2,6 +2,7 @@
//
// * registry/configuring_registry_storage/configuring-registry-storage-openstack-user-infrastructure.adoc
:_mod-docs-content-type: REFERENCE
[id="registry-operator-configuration-resource-overview-openstack-swift_{context}"]
= Image Registry Operator configuration parameters for {rh-openstack} Swift

View File

@@ -2,7 +2,7 @@
//
// * openshift_images/configuring-registry-operator.adoc
:_mod-docs-content-type: REFERENCE
[id="registry-operator-distribution-across-availability-zones_{context}"]
= Image Registry Operator distribution across availability zones

View File

@@ -2,6 +2,7 @@
//
// * registry/index.adoc
:_mod-docs-content-type: CONCEPT
[id="registry-quay-overview_{context}"]
= {quay} registries

View File

@@ -13,6 +13,7 @@
// * registry/configuring_registry_storage/configuring-registry-storage-baremetal.adoc
// * registry/configuring_registry_storage/configuring-registry-storage-vsphere.adoc
:_mod-docs-content-type: CONCEPT
[id="registry-removed_{context}"]
= Image registry removed during installation

View File

@@ -2,6 +2,7 @@
//
// * windows_containers/scheduling-windows-workloads.adoc
:_mod-docs-content-type: REFERENCE
[id="sample-windows-workload-deployment_{context}"]
= Sample Windows container workload deployment

View File

@@ -2,7 +2,7 @@
//
// * openshift_images/configuring_samples_operator.adoc
:_mod-docs-content-type: REFERENCE
[id="samples-operator-configuration_{context}"]
= Cluster Samples Operator configuration parameters

View File

@@ -2,6 +2,7 @@
//
// updating/updating_a_cluster/updating-hardware-on-nodes-running-in-vsphere.adoc
:_mod-docs-content-type: CONCEPT
[id="scheduling-virtual-hardware-update-on-vsphere_{context}"]
= Scheduling an update for virtual hardware on vSphere

View File

@@ -2,6 +2,7 @@
//
// * osd_architecture/osd_policy/osd-service-definition.adoc
:_mod-docs-content-type: CONCEPT
[id="sdpolicy-logging_{context}"]
= Logging
{product-title} provides optional integrated log forwarding to Amazon CloudWatch (on AWS) or {gcp-full} Logging (on {gcp-short}).

View File

@@ -2,6 +2,7 @@
//
// * osd_architecture/osd_policy/osd-service-definition.adoc
:_mod-docs-content-type: CONCEPT
[id="sdpolicy-monitoring_{context}"]
= Monitoring

View File

@@ -2,6 +2,7 @@
//
// * osd_architecture/osd_policy/osd-service-definition.adoc
:_mod-docs-content-type: CONCEPT
[id="sdpolicy-networking_{context}"]
= Networking

View File

@@ -3,6 +3,7 @@
//
// * osd_architecture/osd_policy/osd-service-definition.adoc
:_mod-docs-content-type: REFERENCE
[id="sdpolicy-platform_{context}"]
= Platform

View File

@@ -2,6 +2,7 @@
//
// * osd_architecture/osd_policy/osd-service-definition.adoc
:_mod-docs-content-type: CONCEPT
[id="sdpolicy-security_{context}"]
= Security

View File

@@ -3,6 +3,7 @@
//
// * osd_architecture/osd_policy/osd-service-definition.adoc
:_mod-docs-content-type: CONCEPT
[id="sdpolicy-storage_{context}"]
= Storage

View File

@@ -2,6 +2,7 @@
//
// * security/container_security/security-build.adoc
:_mod-docs-content-type: CONCEPT
[id="security-build-designing_{context}"]
= Designing your build process

View File

@@ -2,21 +2,18 @@
//
// * security/container_security/security-build.adoc
:_mod-docs-content-type: PROCEDURE
[id="security-build-inputs_{context}"]
= Securing inputs during builds
In some scenarios, build operations require credentials to access dependent resources, but it is undesirable for those credentials to be available in the final application image produced by the build. You can define input secrets for this purpose.
For example, when building a Node.js application, you can set up your private mirror for Node.js modules. To download modules from that private mirror, you must supply a custom `.npmrc` file for the build that contains
a URL, user name, and password. For security reasons, you do not want to expose your credentials in the application image.
Using this example scenario, you can add an input secret to a new `BuildConfig` object.
.Procedure
. Create the secret, if it does not exist:
+
@@ -25,8 +22,7 @@ Using this example scenario, you can add an input secret to a new `BuildConfig`
$ oc create secret generic secret-npmrc --from-file=.npmrc=~/.npmrc
----
+
This creates a new secret named `secret-npmrc`, which contains the base64 encoded content of the `~/.npmrc` file.
. Add the secret to the `source` section in the existing `BuildConfig` object:
+
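The following is a minimal sketch of the resulting `source` stanza, with the secret created in the previous step. The Git URI and the `destinationDir` value are illustrative assumptions, not part of this module:
[source,yaml]
----
source:
  git:
    uri: https://github.com/sclorg/nodejs-ex.git # hypothetical repository
  secrets:
  - secret:
      name: secret-npmrc # the input secret created in the previous step
    destinationDir: . # where the .npmrc file is placed during the build
----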

View File

@@ -2,6 +2,7 @@
//
// * security/container_security/security-build.adoc
:_mod-docs-content-type: CONCEPT
[id="security-build-knative_{context}"]
= Building Knative serverless applications

View File

@@ -2,6 +2,7 @@
//
// * security/container_security/security-build.adoc
:_mod-docs-content-type: CONCEPT
[id="security-build-management_{context}"]
= Managing builds

View File

@@ -2,6 +2,7 @@
//
// * security/container_security/security-build.adoc
:_mod-docs-content-type: CONCEPT
[id="security-build-once_{context}"]
= Building once, deploying everywhere

View File

@@ -2,6 +2,7 @@
//
// * security/container_security/security-container-content.adoc
:_mod-docs-content-type: REFERENCE
[id="security-container-content-external-scanning_{context}"]
= Integrating external scanning

View File

@@ -2,6 +2,7 @@
//
// * security/container_security/security-container-content.adoc
:_mod-docs-content-type: CONCEPT
[id="security-container-content-inside_{context}"]
= Securing inside the container

View File

@@ -2,6 +2,7 @@
//
// * security/container_security/security-container-content.adoc
:_mod-docs-content-type: CONCEPT
[id="security-container-content-scanning_{context}"]
= Security scanning in {op-system-base}

View File

@@ -2,6 +2,7 @@
//
// * security/container_security/security-container-content.adoc
:_mod-docs-content-type: CONCEPT
[id="security-container-content-universal_{context}"]
= Creating redistributable images with UBI

View File

@@ -2,12 +2,10 @@
//
// * security/container_security/security-deploy.adoc
:_mod-docs-content-type: CONCEPT
[id="security-deploy-continuous_{context}"]
= Automating continuous deployment
You can integrate your own continuous deployment (CD) tooling with {product-title}.
By leveraging CI/CD and {product-title}, you can automate the process of rebuilding the application to incorporate the latest fixes, testing, and ensuring that it is deployed everywhere within the environment.

View File

@@ -2,6 +2,7 @@
//
// * security/container_security/security-deploy.adoc
:_mod-docs-content-type: REFERENCE
[id="security-deploy-image-sources_{context}"]
= Controlling what image sources can be deployed

View File

@@ -2,6 +2,7 @@
//
// * security/container_security/security-deploy.adoc
:_mod-docs-content-type: CONCEPT
[id="security-deploy-signature_{context}"]
= Using signature transports

View File

@@ -2,6 +2,7 @@
//
// * security/container_security/security-deploy.adoc
:_mod-docs-content-type: CONCEPT
[id="security-deploy-trigger_{context}"]
= Controlling container deployments with triggers

View File

@@ -2,8 +2,8 @@
//
// * security/container_security/security-hardening.adoc
:_mod-docs-content-type: CONCEPT
[id="security-hardening-how_{context}"]
= Choosing how to harden {op-system}
Direct modification of {op-system} systems in {product-title} is discouraged. Instead, you should think of modifying systems in pools of nodes, such as worker nodes and control plane nodes. When a new node is needed, in non-bare metal installs, you can request a new node of the type you want and it will be created from an {op-system} image plus the modifications you created earlier.

View File

@@ -2,8 +2,8 @@
//
// * security/container_security/security-hardening.adoc
:_mod-docs-content-type: CONCEPT
[id="security-hardening-what_{context}"]
= Choosing what to harden in {op-system}
ifdef::openshift-origin[]
For information on how to approach security for any {op-system-base} system, see the link:https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/9#Security[Security] category in the Red{nbsp}Hat Enterprise Linux 9 documentation.

View File

@@ -2,8 +2,10 @@
//
// * security/container_security/security-hosts-vms.adoc
:_mod-docs-content-type: CONCEPT
[id="security-hosts-vms-openshift_{context}"]
= Securing {product-title}
When you deploy {product-title}, you have the choice of an
installer-provisioned infrastructure (there are several available platforms)
or your own user-provisioned infrastructure.

View File

@@ -2,6 +2,7 @@
//
// * security/container_security/security-hosts-vms.adoc
:_mod-docs-content-type: CONCEPT
[id="security-hosts-vms-rhcos_{context}"]
= Securing containers on {op-system-first}

View File

@@ -2,6 +2,7 @@
//
// * security/container_security/security-hosts-vms.adoc
:_mod-docs-content-type: CONCEPT
[id="security-hosts-vms-vs-containers_{context}"]
= Comparing virtualization and containers

View File

@@ -2,33 +2,24 @@
//
// * security/container_security/security-platform.adoc
:_mod-docs-content-type: CONCEPT
[id="security-platform-admission_{context}"]
= Protecting control plane with admission plugins
While RBAC controls access rules between users and groups and available projects, _admission plugins_ define access to the {product-title} master API. Admission plugins form a chain of rules that consist of:
* Default admission plugins: These implement a default set of policies and resource limits that are applied to components of the {product-title} control plane.
* Mutating admission plugins: These plugins dynamically extend the admission chain. They call out to a webhook server and can both authenticate a request and modify the selected resource.
* Validating admission plugins: These validate requests for a selected resource and can both validate the request and ensure that the resource does not change again.
API requests go through admission plugins in a chain, with any failure along the way causing the request to be rejected. Each admission plugin is associated with particular resources and only responds to requests for those resources.
[id="security-deployment-sccs_{context}"]
== Security context constraints (SCCs)
You can use _security context constraints_ (SCCs) to define a set of conditions that a pod must run with to be accepted into the system.
Some aspects that can be managed by SCCs include:
@@ -38,21 +29,17 @@ Some aspects that can be managed by SCCs include:
- SELinux context of the container
- Container user ID
If you have the required permissions, you can adjust the default SCC policies to be more permissive, if required.
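The following is a minimal sketch of a custom SCC that constrains some of those aspects. The name and field values are illustrative assumptions, not a default policy:
[source,yaml]
----
kind: SecurityContextConstraints
apiVersion: security.openshift.io/v1
metadata:
  name: restricted-example # hypothetical SCC name
allowPrivilegedContainer: false # forbid privileged pods
runAsUser:
  type: MustRunAsRange # container user ID must fall within the namespace range
seLinuxContext:
  type: MustRunAs # SELinux context is assigned rather than chosen by the pod
fsGroup:
  type: MustRunAs
supplementalGroups:
  type: RunAsAny
----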
[id="security-service-account_{context}"]
== Granting roles to service accounts
You can assign roles to service accounts, in the same way that users are assigned role-based access.
There are three default service accounts created for each project. A service account:
* is limited in scope to a particular project
* derives its name from its project
* is automatically assigned an API token and credentials to access the
OpenShift Container Registry
Service accounts associated with platform components automatically have their keys rotated.

View File

@@ -2,6 +2,7 @@
//
// * security/container_security/security-platform.adoc
:_mod-docs-content-type: CONCEPT
[id="security-platform-authentication_{context}"]
= Authentication and authorization

View File

@@ -2,13 +2,11 @@
//
// * security/container_security/security-platform.adoc
:_mod-docs-content-type: CONCEPT
[id="security-platform-certificates_{context}"]
= Managing certificates for the platform
{product-title} has multiple components within its framework that use REST-based HTTPS communication leveraging encryption via TLS certificates. {product-title}'s installer configures these certificates during installation. There are some primary components that generate this traffic:
* masters (API server and controllers)
* etcd
@@ -19,6 +17,4 @@ installation. There are some primary components that generate this traffic:
[id="security-platform-config-custom-certs_{context}"]
== Configuring custom certificates
You can configure custom serving certificates for the public hostnames of the API server and web console during initial installation or when redeploying certificates. You can also use a custom CA.
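The following is a minimal sketch of such a configuration on the `APIServer` cluster resource. The hostname and the `custom-api-cert` secret name are illustrative assumptions:
[source,yaml]
----
apiVersion: config.openshift.io/v1
kind: APIServer
metadata:
  name: cluster
spec:
  servingCerts:
    namedCertificates:
    - names:
      - api.example.com # hypothetical public hostname
      servingCertificate:
        name: custom-api-cert # secret that holds the custom certificate
----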

View File

@@ -2,34 +2,21 @@
//
// * security/container_security/security-platform.adoc
:_mod-docs-content-type: CONCEPT
[id="security-platform-multi-tenancy_{context}"]
= Isolating containers with multitenancy
Multitenancy allows applications on an {product-title} cluster that are owned by multiple users, and run across multiple hosts and namespaces,
to remain isolated from each other and from outside attacks. You obtain multitenancy by applying role-based access control (RBAC)
to Kubernetes namespaces.
In Kubernetes, _namespaces_ are areas where applications can run in ways that are separate from other applications. {product-title} uses and extends namespaces by adding extra annotations, including MCS labeling in SELinux, and identifying these extended namespaces as _projects_. Within the scope of a project, users can maintain their own cluster resources, including service accounts, policies, constraints, and various other objects.
RBAC objects are assigned to projects to authorize selected users to have access to those projects. That authorization takes the form of rules, roles, and bindings:
* Rules define what a user can create or access in a project.
* Roles are collections of rules that you can bind to selected users or groups.
* Bindings define the association between users or groups and roles.
Local RBAC roles and bindings attach a user or group to a particular project. Cluster RBAC can attach cluster-wide roles and bindings
to all projects in a cluster. There are default cluster roles that can be assigned to provide `admin`, `basic-user`, `cluster-admin`, and `cluster-status` access.
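The following is a minimal sketch of a local role and binding pair. All names are illustrative assumptions:
[source,yaml]
----
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: pod-viewer # hypothetical role name
  namespace: my-project # hypothetical project
rules: # rules define what can be created or accessed
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: pod-viewer-binding
  namespace: my-project
roleRef: # the role granted by this binding
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: pod-viewer
subjects: # the users or groups that receive the role
- kind: User
  name: alice # hypothetical user
  apiGroup: rbac.authorization.k8s.io
----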

View File

@@ -2,40 +2,20 @@
//
// * security/container_security/security-registries.adoc
:_mod-docs-content-type: CONCEPT
[id="security-registries-ecosystem_{context}"]
= Getting containers from Red Hat Registry and Ecosystem Catalog
Red Hat lists certified container images for Red Hat products and partner offerings from the link:https://catalog.redhat.com/software/containers/explore[Container Images] section of the Red Hat Ecosystem Catalog. From that catalog, you can see details of each image, including CVE, software packages listings, and health scores.
Red Hat images are actually stored in what is referred to as the _Red Hat Registry_, which is represented by a public container registry (`registry.access.redhat.com`) and an authenticated registry (`registry.redhat.io`). Both include basically the same set of container images, with
`registry.redhat.io` including some additional images that require authentication with Red Hat subscription credentials.
Container content is monitored for vulnerabilities by Red Hat and updated regularly. When Red Hat releases security updates, such as fixes to _glibc_, link:https://access.redhat.com/security/vulnerabilities/drown[DROWN], or link:https://access.redhat.com/blogs/766093/posts/2757141[Dirty Cow], any affected container images are also rebuilt and pushed to the Red Hat Registry.
Red Hat uses a `health index` to reflect the security risk for each container provided through the Red Hat Ecosystem Catalog. Because containers consume software provided by Red Hat and the errata process, old, stale containers are insecure whereas new, fresh containers are more secure.
To illustrate the age of containers, the Red Hat Ecosystem Catalog uses a grading system. A freshness grade is a measure of the oldest and most severe security errata available for an image. "A" is more up to date than "F". See link:https://access.redhat.com/articles/2803031[Container Health Index grades as used inside the Red Hat Ecosystem Catalog] for more details on this grading system.
See the link:https://access.redhat.com/security/[Red Hat Product Security Center] for details on security updates and vulnerabilities related to Red Hat software. Check out link:https://access.redhat.com/security/security-updates/#/security-advisories[Red Hat Security Advisories]
to search for specific advisories and CVEs.

View File

@@ -2,15 +2,11 @@
//
// * security/container_security/security-registries.adoc
:_mod-docs-content-type: CONCEPT
[id="security-registries-immutable_{context}"]
= Immutable and certified containers
Consuming security updates is particularly important when managing _immutable containers_. Immutable containers are containers that will never be changed while running. When you deploy immutable containers, you do not step into the running container to replace one or more binaries. From an operational standpoint, you rebuild and redeploy an updated container image to replace a container instead of changing it.
Red Hat certified images are:
@@ -18,11 +14,5 @@ Red Hat certified images are:
* Compatible across the {op-system-base} platforms, from bare metal to cloud
* Supported by Red Hat
The list of known vulnerabilities is constantly evolving, so you must track the contents of your deployed container images, as well as newly downloaded images, over time. You can use link:https://access.redhat.com/security/security-updates/#/security-advisories[Red Hat Security Advisories (RHSAs)] to alert you to any newly discovered issues in Red Hat certified container images, and direct you to the updated image.
Alternatively, you can go to the Red Hat Ecosystem Catalog to look up that and other security-related issues for each Red Hat image.

View File

@@ -2,13 +2,10 @@
//
// * security/container_security/security-registries.adoc
:_mod-docs-content-type: CONCEPT
[id="security-registries-openshift_{context}"]
= OpenShift Container Registry
{product-title} includes the _OpenShift Container Registry_, a private registry running as an integrated component of the platform that you can use to manage your container images. The OpenShift Container Registry provides role-based access controls that allow you to manage who can pull and push which container images.
{product-title} also supports integration with other private registries that you might already be using, such as {quay}.

View File

@@ -2,8 +2,10 @@
//
// * security/container_security/security-registries.adoc
:_mod-docs-content-type: CONCEPT
[id="security-registries-quay_{context}"]
= Storing containers using {quay}
link:https://access.redhat.com/products/red-hat-quay[{quay}] is an
enterprise-quality container registry product from Red Hat.
Development for {quay} is done through the upstream

View File

@@ -2,10 +2,8 @@
//
// * security/container_security/security-registries.adoc
:_mod-docs-content-type: CONCEPT
[id="security-registries-where_{context}"]
= Knowing where containers come from?
There are tools you can use to scan and track the contents of your downloaded and deployed container images. However, there are many public sources of container images. When using public container registries, you can add a layer of protection by using trusted sources.

View File

@@ -2,10 +2,8 @@
//
// * security/container_security/security-storage.adoc
:_mod-docs-content-type: CONCEPT
[id="security-network-storage-block_{context}"]
= Block storage
For block storage providers like AWS Elastic Block Store (EBS), GCE Persistent Disks, and iSCSI, {product-title} uses SELinux capabilities to secure the root of the mounted volume for non-privileged pods, making the mounted volume owned by and only visible to the container with which it is associated.

View File

@@ -2,6 +2,7 @@
//
// * security/container_security/security-storage.adoc
:_mod-docs-content-type: REFERENCE
[id="security-network-storage-persistent_{context}"]
= Persistent volume plugins

View File

@@ -2,10 +2,9 @@
//
// * security/container_security/security-storage.adoc
:_mod-docs-content-type: CONCEPT
[id="security-network-storage-shared_{context}"]
= Shared storage
For shared storage providers like NFS, the PV registers its
group ID (GID) as an annotation on the PV resource. Then, when the PV is claimed by the pod, the annotated GID is added to the supplemental groups of the pod, giving that pod access to the contents of the shared storage.
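The following is a minimal sketch of how such a GID annotation might appear on an NFS-backed PV. The server, path, and GID are illustrative assumptions:
[source,yaml]
----
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-example # hypothetical PV name
  annotations:
    pv.beta.kubernetes.io/gid: "5555" # GID added to the supplemental groups of the claiming pod
spec:
  capacity:
    storage: 5Gi
  accessModes:
  - ReadWriteMany
  nfs:
    server: nfs.example.com # hypothetical NFS server
    path: /exports/data
----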

View File

@@ -2,6 +2,7 @@
//
// * security/container_security/security-understanding.adoc
:_mod-docs-content-type: CONCEPT
[id="security-understanding-containers_{context}"]
= What are containers?

View File

@@ -2,28 +2,19 @@
//
// * security/container_security/security-understanding.adoc
:_mod-docs-content-type: CONCEPT
[id="security-understanding-openshift_{context}"]
= What is {product-title}?
Automating how containerized applications are deployed, run, and managed is the job of a platform such as {product-title}. At its core, {product-title} relies on the Kubernetes project to provide the engine for orchestrating containers
across many nodes in scalable data centers.
Kubernetes is a project, which can run using different operating systems and add-on components that offer no guarantees of supportability from the project. As a result, the security of different Kubernetes platforms can vary.
{product-title} is designed to lock down Kubernetes security and integrate the platform with a variety of extended components. To do this,
{product-title} draws on the extensive Red Hat ecosystem of open source technologies that include the operating systems, authentication, storage,
networking, development tools, base container images, and many other components.
{product-title} can leverage Red Hat's experience in uncovering and rapidly deploying fixes for vulnerabilities in the platform itself
as well as the containerized applications running on the platform. Red Hat's experience also extends to efficiently integrating new
components with {product-title} as they become available and adapting technologies to individual customer needs.

View File

@@ -2,7 +2,7 @@
//
// * serverless/knative-serving/config-applications/restrictive-cluster-policies.adoc
:_mod-docs-content-type: CONCEPT
[id="serverless-services-network-policies_{context}"]
= Clusters with restrictive network policies

View File

@@ -2,6 +2,7 @@
//
// * authentication/using-service-accounts-as-oauth-client.adoc
:_mod-docs-content-type: CONCEPT
[id="service-accounts-as-oauth-clients_{context}"]
= Service accounts as OAuth clients

View File

@@ -2,6 +2,7 @@
//
// * authentication/using-service-accounts.adoc
:_mod-docs-content-type: REFERENCE
[id="service-accounts-default_{context}"]
= Default service accounts

View File

@@ -2,13 +2,16 @@
//
// * authentication/using-service-accounts.adoc
:_mod-docs-content-type: PROCEDURE
[id="service-accounts-granting-roles_{context}"]
= Granting roles to service accounts
You can grant roles to service accounts in the same way that you grant roles
to a regular user account.
.Procedure
. You can modify the service accounts for the current project. For example, to add
the `view` role to the `robot` service account in the `top-secret` project:
+
[source,terminal]
@@ -38,10 +41,9 @@ subjects:
----
====
. You can also grant access to a specific service account in a project. For
example, from the project to which the service account belongs, use
the `-z` flag and specify the `<service_account_name>`
+
[source,terminal]
----
@@ -77,9 +79,9 @@ subjects:
----
====
. To modify a different namespace, you can use the `-n` option to indicate the
project namespace it applies to, as shown in the following examples.
+
** For example, to allow all service accounts in all projects to view resources in
the `my-project` project:
+
@@ -109,7 +111,7 @@ subjects:
name: system:serviceaccounts
----
====
+
** To allow all service accounts in the `managers` project to edit resources in the
`my-project` project:
+

View File

@@ -4,6 +4,7 @@
//* storage/understanding-ephemeral-storage.adoc
//* microshift_storage/understanding-ephemeral-storage-microshift.adoc
:_mod-docs-content-type: CONCEPT
[id=storage-ephemeral-storage-manage_{context}]
= Ephemeral storage management

View File

@@ -3,21 +3,23 @@
// storage/understanding-persistent-storage.adoc[leveloffset=+1]
//* microshift_storage/understanding-ephemeral-storage-microshift.adoc
:_mod-docs-content-type: PROCEDURE
[id=storage-ephemeral-storage-monitoring_{context}]
= Monitoring ephemeral storage
You can use `/bin/df` as a tool to monitor ephemeral storage usage on the volume where ephemeral container data is located, which is `/var/lib/kubelet` and `/var/lib/containers`. The available space for only `/var/lib/kubelet` is shown when you use the `df` command if `/var/lib/containers` is placed on a separate disk by the cluster administrator.
.Procedure
* To show the human-readable values of used and available space in `/var/lib`, enter the following command:
+
[source,terminal]
----
$ df -h /var/lib
----
+
The output shows the ephemeral storage usage in `/var/lib`:
+
.Example output
[source,terminal]
----

View File

@@ -3,7 +3,7 @@
// storage/understanding-persistent-storage.adoc[leveloffset=+1]
//* microshift_storage/understanding-ephemeral-storage-microshift.adoc
:_mod-docs-content-type: REFERENCE
[id=storage-ephemeral-storage-types_{context}]
= Types of ephemeral storage
@@ -14,19 +14,9 @@ partition: root and runtime.
== Root
This partition holds the kubelet root directory, `/var/lib/kubelet/` by default, and `/var/log/` directory. This partition can be shared between user pods, the OS, and Kubernetes system daemons. This partition can be consumed by pods through `EmptyDir` volumes, container logs, image layers, and container-writable layers. Kubelet manages shared access and isolation of this partition. This partition is ephemeral, and applications cannot expect any performance SLAs, such as disk IOPS, from this partition.
== Runtime
This is an optional partition that runtimes can use for overlay file systems. {product-title} attempts to identify and provide shared access along with isolation to this partition. Container image layers and writable layers are stored here. If the runtime partition exists, the `root` partition does not hold any image layer or other writable storage.

View File

@@ -2,6 +2,7 @@
//
// * storage/persistent_storage-aws.adoc
:_mod-docs-content-type: REFERENCE
[id="maximum-number-of-ebs-volumes-on-a-node_{context}"]
= Maximum number of EBS volumes on a node

View File

@@ -5,16 +5,13 @@
// This module should only be present in openshift-enterprise and
// openshift-origin distributions.
:_mod-docs-content-type: REFERENCE
[id="block-volume-support_{context}"]
= Block volume support
{product-title} can statically provision raw block volumes. These volumes do not have a file system, and can provide performance benefits for applications that either write to the disk directly or implement their own storage service.
Raw block volumes are provisioned by specifying `volumeMode: Block` in the PV and PVC specification.
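The following is a minimal sketch of a raw block PV. The Fibre Channel WWN and the capacity are placeholders:
[source,yaml]
----
apiVersion: v1
kind: PersistentVolume
metadata:
  name: block-pv-example # hypothetical PV name
spec:
  capacity:
    storage: 10Gi
  accessModes:
  - ReadWriteOnce
  volumeMode: Block # raw block device, no file system
  persistentVolumeReclaimPolicy: Retain
  fc:
    targetWWNs: ["50060e801049cfd1"] # placeholder WWN
    lun: 0
----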
[IMPORTANT]
====

View File

@@ -2,14 +2,18 @@
//
// * storage/persistent_storage-iscsi.adoc
:_mod-docs-content-type: PROCEDURE
[id="iscsi-custom-iqn_{context}"]
= iSCSI custom initiator IQN
Configure the custom initiator iSCSI Qualified Name (IQN) if the iSCSI targets are restricted to certain IQNs, but the nodes that the iSCSI PVs
are attached to are not guaranteed to have these IQNs.
.Procedure
* To specify a custom initiator IQN, update the `initiatorName` field in the `PersistentVolume` definition object.
.Example `PersistentVolume` object with a value specified in the `initiatorName` field
[source,yaml]
----
apiVersion: v1

View File

@@ -2,12 +2,12 @@
//
// * storage/persistent_storage-iscsi.adoc
:_mod-docs-content-type: CONCEPT
[id="enforcing-disk-quotas-iscsi_{context}"]
= Enforce disk quotas
Use LUN partitions to enforce disk quotas and size constraints. Each LUN is one persistent volume. Kubernetes enforces unique names for persistent
volumes.
Enforcing quotas in this way allows the user to request persistent storage by a specific amount (for example, `10Gi`) and be matched with a
corresponding volume of equal or greater capacity.
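The following is a minimal sketch of an iSCSI-backed PV that makes the LUN-per-volume mapping concrete. The portal, IQN, and LUN values are placeholders:
[source,yaml]
----
apiVersion: v1
kind: PersistentVolume
metadata:
  name: iscsi-pv-example # hypothetical PV name
spec:
  capacity:
    storage: 10Gi # the size of the backing LUN
  accessModes:
  - ReadWriteOnce
  iscsi:
    targetPortal: 10.16.154.81:3260 # placeholder target portal
    iqn: iqn.2016-04.test.com:storage.target00 # placeholder IQN
    lun: 0 # one LUN backs one persistent volume
    fsType: ext4
----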

View File

@@ -2,16 +2,18 @@
//
// * storage/persistent_storage-iscsi.adoc
:_mod-docs-content-type: PROCEDURE
[id="iscsi-multipath_{context}"]
= iSCSI multipathing
For iSCSI-based storage, you can configure multiple paths by using the same IQN for more than one target portal IP address. Multipathing ensures
access to the persistent volume when one or more of the components in a path fail.
.Procedure
* To specify multiple paths, specify a value in the `portals` field of the `PersistentVolume` definition object.
.Example `PersistentVolume` object with a value specified in the `portals` field
[source,yaml]
----
apiVersion: v1

View File

@@ -2,12 +2,15 @@
//
// * storage/persistent_storage-iscsi.adoc
:_mod-docs-content-type: REFERENCE
[id="persistent-storage-iscsi-provisioning_{context}"]
= Provisioning
You can verify that the storage exists in the underlying infrastructure before mounting it as a volume in {product-title}. All that is required for the iSCSI is the iSCSI target portal, a valid iSCSI Qualified Name (IQN), a valid LUN number, the filesystem type, and the `PersistentVolume` API.
.Procedure
* Verify that the storage exists in the underlying infrastructure before mounting it as a volume in {product-title} by creating the following `PersistentVolume` object definition:
.`PersistentVolume` object definition
[source,yaml]

View File

@@ -2,6 +2,7 @@
//
// * storage/persistent_storage-iscsi.adoc
:_mod-docs-content-type: REFERENCE
[id="volume-security-iscsi_{context}"]
= iSCSI volume security
Users request storage with a `PersistentVolumeClaim` object. This claim only

View File

@@ -3,7 +3,7 @@
// * storage/understanding-persistent-storage.adoc
//* microshift_storage/understanding-persistent-storage-microshift.adoc
:_mod-docs-content-type: CONCEPT
[id=lifecycle-volume-claim_{context}]
= Lifecycle of a volume and claim

View File

@@ -2,6 +2,8 @@
//
// * storage/persistent_storage/persistent-storage-nfs.adoc
:_mod-docs-content-type: REFERENCE
[id="additional-config-troubleshooting_{context}"]
= Additional configuration and troubleshooting
Depending on what version of NFS is being used and how it is configured,

View File

@@ -2,8 +2,9 @@
//
// * storage/persistent_storage/persistent-storage-nfs.adoc
:_mod-docs-content-type: CONCEPT
[id="nfs-enforcing-disk-quota_{context}"]
= Enforce disk quotas
You can use disk partitions to enforce disk quotas and size constraints.
Each partition can be its own export. Each export is one PV.

View File

@@ -2,6 +2,7 @@
//
// * storage/persistent_storage/persistent-storage-nfs.adoc
:_mod-docs-content-type: REFERENCE
[id="nfs-reclaiming-resources_{context}"]
= Reclaiming resources
NFS implements the {product-title} `Recyclable` plugin interface. Automatic

View File

@@ -2,6 +2,7 @@
//
// * storage/persistent_storage/persistent-storage-nfs.adoc
:_mod-docs-content-type: REFERENCE
[id="nfs-user-id_{context}"]
= User IDs

View File

@@ -2,23 +2,18 @@
//
// * storage/persistent_storage/persistent-storage-nfs.adoc
:_mod-docs-content-type: REFERENCE
[id="nfs-volume-security_{context}"]
= NFS volume security
This section covers NFS volume security, including matching permissions and SELinux considerations. The user is expected to understand the basics of POSIX permissions, process UIDs, supplemental groups, and SELinux.
Developers request NFS storage by referencing either a PVC by name or the NFS volume plugin directly in the `volumes` section of their `Pod`
definition.
The `/etc/exports` file on the NFS server contains the accessible NFS directories. The target NFS directory has POSIX owner and group IDs. The
{product-title} NFS plugin mounts the container's NFS directory with the same POSIX ownership and permissions found on the exported NFS directory.
However, the container is not run with its effective UID equal to the owner of the NFS mount, which is the desired behavior.
As an example, if the target NFS directory appears on the NFS server as:
@@ -43,13 +38,10 @@ $ id nfsnobody
uid=65534(nfsnobody) gid=65534(nfsnobody) groups=65534(nfsnobody)
----
Then the container must match SELinux labels, and either run with a UID of `65534`, the `nfsnobody` owner, or with `5555` in its supplemental groups to access the directory.
[NOTE]
====
The owner ID of `65534` is used as an example. Even though NFS's `root_squash` maps `root`, uid `0`, to `nfsnobody`, uid `65534`, NFS
exports can have arbitrary owner IDs. Owner `65534` is not required for NFS exports.
====
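The following is a minimal sketch of a pod that requests the supplemental group from the example above. The image and claim names are illustrative assumptions:
[source,yaml]
----
apiVersion: v1
kind: Pod
metadata:
  name: nfs-client-example # hypothetical pod name
spec:
  securityContext:
    supplementalGroups: [5555] # grants group access to the exported directory
  containers:
  - name: app
    image: registry.example.com/app:latest # placeholder image
    volumeMounts:
    - name: nfs-data
      mountPath: /data
  volumes:
  - name: nfs-data
    persistentVolumeClaim:
      claimName: nfs-claim # hypothetical PVC bound to the NFS PV
----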

View File

@@ -3,6 +3,7 @@
// * storage/understanding-persistent-storage.adoc
//* microshift_storage/understanding-persistent-storage-microshift.adoc
:_mod-docs-content-type: REFERENCE
[id="persistent-volumes_{context}"]
= Persistent volumes

View File

@@ -3,11 +3,14 @@
// * storage/understanding-persistent-storage.adoc
//* microshift_storage/understanding-persistent-storage-microshift.adoc
:_mod-docs-content-type: PROCEDURE
[id="reclaim-policy_{context}"]
= Changing the reclaim policy of a persistent volume
You can change the reclaim policy of a persistent volume.
.Procedure
. List the persistent volumes in your cluster:
+
@@ -32,7 +35,6 @@ NAME CAPACITY ACCESSMODES RECLAIMPOLIC
$ oc patch pv <your-pv-name> -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}'
----
+
. Verify that your chosen persistent volume has the right policy:
+
[source,terminal]

View File

@@ -8,16 +8,13 @@
// * storage/persistent_storage-aws.adoc
// * storage/persistent_storage-gce.adoc
:_mod-docs-content-type: CONCEPT
[id="volume-format-{provider}_{context}"]
= Volume format
Before {product-title} mounts the volume and passes it to a container, it checks that the volume contains a file system as specified by the `fsType` parameter in the persistent volume definition. If the device is not formatted with the file system, all data from the device is erased and the device is automatically formatted with the given file system.
This verification enables you to use unformatted {provider} volumes as persistent volumes, because {product-title} formats them before the first use.
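For instance, with AWS EBS as the provider, the check keys off a stanza like the following sketch. The volume ID is a placeholder:
[source,yaml]
----
apiVersion: v1
kind: PersistentVolume
metadata:
  name: ebs-pv-example # hypothetical PV name
spec:
  capacity:
    storage: 20Gi
  accessModes:
  - ReadWriteOnce
  awsElasticBlockStore:
    volumeID: vol-0123456789abcdef0 # placeholder EBS volume ID
    fsType: ext4 # file system checked before the volume is mounted
----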
// Undefined {provider} attribute, so that any mistakes are easily spotted
:!provider:

View File

@@ -2,21 +2,26 @@
//
// * support/troubleshooting/troubleshooting-s2i.adoc
:_mod-docs-content-type: PROCEDURE
[id="strategies-for-s2i-troubleshooting_{context}"]
= Strategies for Source-to-Image troubleshooting
Use Source-to-Image (S2I) to build reproducible, Docker-formatted container images. You can create ready-to-run images by injecting application source code into a container image and assembling a new image. The new image incorporates the base image (the builder) and built source.
.Procedure
. To determine where in the S2I process a failure occurs, you can observe the state of the pods relating to each of the following S2I stages:
+
.. *During the build configuration stage*, a build pod is used to create an application container image from a base image and application source code.
+
.. *During the deployment configuration stage*, a deployment pod is used to deploy application pods from the application container image that was built in the build configuration stage. The deployment pod also deploys other resources such as services and routes. The deployment configuration begins after the build configuration succeeds.
+
.. *After the deployment pod has started the application pods*, application failures can occur within the running application pods. For instance, an application might not behave as expected even though the application pods are in a `Running` state. In this scenario, you can access running application pods to investigate application failures within a pod.
. When troubleshooting S2I issues, follow this strategy:
+
.. Monitor build, deployment, and application pod status.
+
.. Determine the stage of the S2I process where the problem occurred.
+
.. Review logs corresponding to the failed stage.

View File

@@ -2,6 +2,7 @@
//
// * support/gathering-cluster-data.adoc
:_mod-docs-content-type: REFERENCE
[id="support-network-trace-methods_{context}"]
= Network trace methods

View File

@@ -20,6 +20,7 @@
// * distr_tracing/distr_tracing_rn/distr-tracing-rn-3-0.adoc
// * microshift_support/microshift-getting-support.adoc
:_mod-docs-content-type: REFERENCE
[id="support_{context}"]
= Getting support

View File

@@ -2,6 +2,7 @@
//
// * openshift_images/using-templates.adoc
:_mod-docs-content-type: CONCEPT
[id="templates-quickstart_{context}"]
= Quick start templates

View File

@@ -2,6 +2,7 @@
//
// * openshift_images/using-templates.adoc
:_mod-docs-content-type: CONCEPT
[id="templates-using-the-cli_{context}"]
= Creating objects from templates by using the CLI

View File

@@ -2,22 +2,33 @@
//
// * openshift_images/using-templates.adoc
:_mod-docs-content-type: PROCEDURE
[id="templates-waiting-for-readiness_{context}"]
= Waiting for template readiness
Template authors can indicate that certain objects within a template should be waited for before a template instantiation by the service catalog, {tsb-name}, or `TemplateInstance` API is considered complete.
Before starting the procedure, read the following considerations:
* Set memory, CPU, and storage default sizes to make sure your application is given enough resources to run smoothly.
* Avoid referencing the `latest` tag from images if that tag is used across major versions. This can cause running applications to break when new images are pushed to that tag.
* A good template builds and deploys cleanly without requiring modifications after the template is deployed.
.Procedure
* To use the template feature, mark one or more objects of kind `Build`, `BuildConfig`, `Deployment`, `DeploymentConfig`, `Job`, or `StatefulSet` in a template with the following annotation:
+
[source,text]
----
"template.alpha.openshift.io/wait-for-ready": "true"
----
+
Template instantiation is not complete until all objects marked with the annotation report ready. Similarly, if any of the annotated objects report failed, or if the template fails to become ready within a fixed timeout of one hour, the template instantiation fails.
+
For the purposes of instantiation, readiness and failure of each object kind are defined as follows:
+
[cols="1a,2a,2a", options="header"]
|===
@@ -50,9 +61,9 @@ For the purposes of instantiation, readiness and failure of each object kind are
the object.
| Not applicable.
|===
+
The following is an example template extract, which uses the `wait-for-ready` annotation. Further examples can be found in the {product-title} quick start templates.
+
[source,yaml]
----
kind: Template
@@ -86,10 +97,3 @@ objects:
...
----

View File

@@ -2,6 +2,7 @@
//
// * openshift_images/using-templates.adoc
:_mod-docs-content-type: REFERENCE
[id="templates-writing-description_{context}"]
= Writing the template description

View File

@@ -2,6 +2,7 @@
//
// * openshift_images/using-templates.adoc
:_mod-docs-content-type: REFERENCE
[id="templates-writing-labels_{context}"]
= Writing template labels

View File

@@ -2,6 +2,7 @@
//
// * openshift_images/using-templates.adoc
:_mod-docs-content-type: REFERENCE
[id="templates-writing-object-list_{context}"]
= Writing the template object list

View File

@@ -2,6 +2,7 @@
//
// * openshift_images/using-templates.adoc
:_mod-docs-content-type: CONCEPT
[id="templates-writing-parameters_{context}"]
= Writing template parameters

View File

@@ -2,6 +2,7 @@
//
// * openshift_images/using-templates.adoc
:_mod-docs-content-type: REFERENCE
[id="templates-writing_{context}"]
= Writing templates

View File

@@ -3,6 +3,7 @@
// * service_mesh/v1x/threescale_adapter/threescale-adapter.adoc
// * service_mesh/v2x/threescale_adapter/threescale-adapter.adoc
:_mod-docs-content-type: CONCEPT
[id="threescale-backend-cache_{context}"]
= 3scale backend cache

View File

@@ -3,6 +3,7 @@
// * service_mesh/v1x/threescale_adapter/threescale-adapter.adoc
// * service_mesh/v2x/threescale_adapter/threescale-adapter.adoc
:_mod-docs-content-type: CONCEPT
[id="threescale-istio-adapter-apicast_{context}"]
= 3scale Istio Adapter APIcast emulation

View File

@@ -3,6 +3,7 @@
// * scaling_and_performance/using-topology-manager.adoc
// * post_installation_configuration/node-tasks.adoc
:_mod-docs-content-type: REFERENCE
[id="topology-manager-policies_{context}"]
= Topology Manager policies

View File

@@ -2,6 +2,7 @@
//
// * support/troubleshooting/troubleshooting-operator-issues.adoc
:_mod-docs-content-type: CONCEPT
[id="troubleshooting-disabling-autoreboot-mco_{context}"]
= Disabling the Machine Config Operator from automatically rebooting

View File

@@ -3,8 +3,8 @@
// * updating/understanding_updates/understanding-update-channels-release.adoc
:_mod-docs-content-type: REFERENCE
[id="understanding-update-channels_{context}"]
= Update channels
ifndef::openshift-origin[]

View File

@@ -4,6 +4,7 @@
// * updating/updating-cluster-within-minor.adoc
// * observability/logging/cluster-logging-support.adoc
:_mod-docs-content-type: REFERENCE
[id="unmanaged-operators_{context}"]
= Support policy for unmanaged Operators

View File

@@ -2,6 +2,7 @@
//
// * updating/preparing_for_updates/updating-cluster-prepare.adoc
:_mod-docs-content-type: PROCEDURE
[id="update-preparing-evaluate-alerts_{context}"]
= Reviewing alerts to identify uses of removed APIs
@@ -10,6 +11,10 @@ Two alerts fire when an API is in use that will be removed in the next release:
* `APIRemovedInNextReleaseInUse` - for APIs that will be removed in the next {product-title} release.
* `APIRemovedInNextEUSReleaseInUse` - for APIs that will be removed in the next {product-title} Extended Update Support (EUS) release.
.Procedure
* If either of the alerts is firing in your cluster, review the alerts and take action to clear the alerts by migrating manifests and API clients to use the new API version.
.Verification
* Use the `APIRequestCount` API to get more information about which APIs are in use and which workloads are using removed APIs, because the alerts do not provide this information. Additionally, some APIs might not trigger these alerts but are still captured by `APIRequestCount`. The alerts are tuned to be less sensitive to avoid alerting fatigue in production systems.

View File

@@ -2,6 +2,7 @@
//
// * updating/preparing_for_updates/updating-cluster-prepare.adoc
:_mod-docs-content-type: REFERENCE
[id="update-preparing-list_{context}"]
= Removed APIs

View File

@@ -2,6 +2,7 @@
//
// * updating/preparing_for_updates/updating-cluster-prepare.adoc
:_mod-docs-content-type: REFERENCE
[id="update-preparing-migrate_{context}"]
= Migrating instances of removed APIs

View File

@@ -2,6 +2,7 @@
//
// * support/troubleshooting/troubleshooting-installations.adoc
:_mod-docs-content-type: CONCEPT
[id="upi-installation-considerations_{context}"]
= User-provisioned infrastructure installation considerations

View File

@@ -2,7 +2,6 @@
//
// * virt/support/virt-troubleshooting.adoc
:_mod-docs-content-type: reference
[id="virt-loki-log-queries_{context}"]
= {VirtProductName} LogQL queries

View File

@@ -2,6 +2,7 @@
//
// * web_console/configuring-web-console.adoc
:_mod-docs-content-type: PROCEDURE
[id="web-console-configuration_{context}"]
= Configuring the web console

View File

@@ -2,6 +2,7 @@
//
// * web_console/disabling-web-console.adoc
:_mod-docs-content-type: PROCEDURE
[id="web-console-disable_{context}"]
= Disabling the web console

Some files were not shown because too many files have changed in this diff.