mirror of
https://github.com/openshift/openshift-docs.git
synced 2026-02-05 12:46:18 +01:00
osdocs3148: clearing build errors
This commit is contained in:
committed by
openshift-cherrypick-robot
parent
456641d4d4
commit
25846afbb9
@@ -12,6 +12,7 @@ include::modules/dr-restoring-cluster-state-about.adoc[leveloffset=+1]
|
||||
|
||||
// Restoring to a previous cluster state
|
||||
include::modules/dr-restoring-cluster-state.adoc[leveloffset=+1]
|
||||
include::modules/dr-scenario-cluster-state-issues.adoc[leveloffset=+1]
|
||||
|
||||
.Additional resources
|
||||
|
||||
|
||||
@@ -33,4 +33,5 @@ To access the component over the web, create a URL using `odo url create`.
|
||||
|
||||
|
||||
|
||||
include::modules/developer-cli-odo-sample-applications.adoc[leveloffset=+1]
|
||||
include::modules/developer-cli-odo-sample-applications-git.adoc[leveloffset=+1]
|
||||
include::modules/developer-cli-odo-sample-applications-binary.adoc[leveloffset=+1]
|
||||
|
||||
@@ -9,6 +9,8 @@ Once you have an {product-title} subscription, you can access your services.
|
||||
|
||||
include::modules/dedicated-creating-your-cluster.adoc[leveloffset=+1]
|
||||
|
||||
include::modules/dedicated-accessing-your-cluster.adoc[leveloffset=+1]
|
||||
|
||||
////
|
||||
|
||||
== Receiving status updates
|
||||
|
||||
@@ -1,35 +0,0 @@
|
||||
[id="ipi-install-configuration-files"]
|
||||
= Configuration files
|
||||
:context: ipi-install-configuration-files
|
||||
|
||||
include::modules/ipi-install-configuring-the-install-config-file.adoc[leveloffset=+1]
|
||||
|
||||
include::modules/ipi-install-setting-proxy-settings-within-install-config.adoc[leveloffset=+1]
|
||||
|
||||
include::modules/ipi-install-modifying-install-config-for-no-provisioning-network.adoc[leveloffset=+1]
|
||||
|
||||
include::modules/ipi-install-modifying-install-config-for-dual-stack-network.adoc[leveloffset=+1]
|
||||
|
||||
include::modules/ipi-install-configuring-managed-secure-boot-in-the-install-config-file.adoc[leveloffset=+1]
|
||||
|
||||
include::modules/ipi-install-additional-install-config-parameters.adoc[leveloffset=+1]
|
||||
|
||||
include::modules/ipi-install-bmc-addressing.adoc[leveloffset=+1]
|
||||
|
||||
include::modules/ipi-install-bmc-addressing-for-dell-idrac.adoc[leveloffset=+1]
|
||||
|
||||
include::modules/ipi-install-bmc-addressing-for-hpe-ilo.adoc[leveloffset=+1]
|
||||
|
||||
include::modules/ipi-install-bmc-addressing-for-fujitsu-irmc.adoc[leveloffset=+1]
|
||||
|
||||
include::modules/ipi-install-root-device-hints.adoc[leveloffset=+1]
|
||||
|
||||
include::modules/ipi-install-creating-the-openshift-manifests.adoc[leveloffset=+1]
|
||||
|
||||
include::modules/ipi-install-configuring-ntp-for-disconnected-clusters.adoc[leveloffset=+1]
|
||||
|
||||
include::modules/ipi-install-configure-network-components-to-run-on-the-control-plane.adoc[leveloffset=+1]
|
||||
|
||||
ifeval::[{product-version} > 4.8]
|
||||
include::modules/ipi-install-configuring-bios-for-worker-node.adoc[leveloffset=+1]
|
||||
endif::[]
|
||||
@@ -15,13 +15,41 @@ include::modules/ipi-install-extracting-the-openshift-installer.adoc[leveloffset
|
||||
|
||||
include::modules/ipi-install-creating-an-rhcos-images-cache.adoc[leveloffset=+1]
|
||||
|
||||
ifdef::upstream[]
|
||||
include::ipi-install-configuration-files.adoc[leveloffset=+1]
|
||||
[id="ipi-install-configuration-files"]
|
||||
== Configuration files
|
||||
|
||||
include::modules/ipi-install-configuring-the-install-config-file.adoc[leveloffset=+2]
|
||||
|
||||
include::modules/ipi-install-setting-proxy-settings-within-install-config.adoc[leveloffset=+2]
|
||||
|
||||
include::modules/ipi-install-modifying-install-config-for-no-provisioning-network.adoc[leveloffset=+2]
|
||||
|
||||
include::modules/ipi-install-modifying-install-config-for-dual-stack-network.adoc[leveloffset=+2]
|
||||
|
||||
include::modules/ipi-install-configuring-managed-secure-boot-in-the-install-config-file.adoc[leveloffset=+2]
|
||||
|
||||
include::modules/ipi-install-additional-install-config-parameters.adoc[leveloffset=+2]
|
||||
|
||||
include::modules/ipi-install-bmc-addressing.adoc[leveloffset=+2]
|
||||
|
||||
include::modules/ipi-install-bmc-addressing-for-dell-idrac.adoc[leveloffset=+2]
|
||||
|
||||
include::modules/ipi-install-bmc-addressing-for-hpe-ilo.adoc[leveloffset=+2]
|
||||
|
||||
include::modules/ipi-install-bmc-addressing-for-fujitsu-irmc.adoc[leveloffset=+2]
|
||||
|
||||
include::modules/ipi-install-root-device-hints.adoc[leveloffset=+2]
|
||||
|
||||
include::modules/ipi-install-creating-the-openshift-manifests.adoc[leveloffset=+2]
|
||||
|
||||
include::modules/ipi-install-configuring-ntp-for-disconnected-clusters.adoc[leveloffset=+2]
|
||||
|
||||
include::modules/ipi-install-configure-network-components-to-run-on-the-control-plane.adoc[leveloffset=+2]
|
||||
|
||||
ifeval::[{product-version} > 4.8]
|
||||
include::modules/ipi-install-configuring-bios-for-worker-node.adoc[leveloffset=+2]
|
||||
endif::[]
|
||||
|
||||
ifndef::upstream[]
|
||||
include::installing/installing_bare_metal_ipi/ipi-install-configuration-files.adoc[leveloffset=+1]
|
||||
endif::[]
|
||||
|
||||
include::modules/ipi-install-creating-a-disconnected-registry.adoc[leveloffset=+1]
|
||||
|
||||
|
||||
@@ -23,3 +23,4 @@ The service catalog is deprecated in {product-title} 4. You can migrate workload
|
||||
include::modules/migration-terminology.adoc[leveloffset=+1]
|
||||
include::modules/migration-mtc-workflow.adoc[leveloffset=+1]
|
||||
include::modules/migration-understanding-data-copy-methods.adoc[leveloffset=+1]
|
||||
include::modules/migration-direct-volume-migration-and-direct-image-migration.adoc[leveloffset=+1]
|
||||
|
||||
@@ -26,3 +26,4 @@ See xref:../migration_toolkit_for_containers/advanced-migration-options-mtc.adoc
|
||||
include::modules/migration-terminology.adoc[leveloffset=+1]
|
||||
include::modules/migration-mtc-workflow.adoc[leveloffset=+1]
|
||||
include::modules/migration-understanding-data-copy-methods.adoc[leveloffset=+1]
|
||||
include::modules/migration-direct-volume-migration-and-direct-image-migration.adoc[leveloffset=+1]
|
||||
|
||||
104
modules/api-support-tiers-mapping.adoc
Normal file
104
modules/api-support-tiers-mapping.adoc
Normal file
@@ -0,0 +1,104 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * rest_api/understanding-api-support-tiers.adoc
|
||||
|
||||
[id="api-support-tiers-mapping_{context}"]
|
||||
= Mapping API tiers to API groups
|
||||
|
||||
For each API tier defined by Red Hat, we provide a mapping table for specific API groups where the upstream communities are committed to maintain forward compatibility. Any API group that does not specify an explicit compatibility level is assigned API tier 3 by default.
|
||||
|
||||
[id="mapping-support-tiers-to-kubernetes-api-groups_{context}"]
|
||||
== Support for Kubernetes API groups
|
||||
|
||||
API groups that end with the suffix `*.k8s.io` or have the form `version.<name>` with no suffix are governed by the Kubernetes deprecation policy and follow a general mapping between API version exposed and corresponding support tier unless otherwise specified.
|
||||
|
||||
[cols="2",options="header"]
|
||||
|===
|
||||
|API version example
|
||||
|API tier
|
||||
|
||||
|`v1`
|
||||
|Tier 1
|
||||
|
||||
|`v1beta1`
|
||||
|Tier 2
|
||||
|
||||
|`v1alpha1`
|
||||
|Tier 4
|
||||
|
||||
|===
|
||||
|
||||
[id="mapping-support-tiers-to-openshift-api-groups_{context}"]
|
||||
== Support for OpenShift API groups
|
||||
|
||||
API groups that end with the suffix `*.openshift.io` are governed by the {product-title} deprecation policy and follow a general mapping between API version exposed and corresponding compatibility level unless otherwise specified.
|
||||
|
||||
[cols="2",options="header"]
|
||||
|===
|
||||
|API version example
|
||||
|API tier
|
||||
|
||||
|`apps.openshift.io/v1`
|
||||
|Tier 1
|
||||
|
||||
|`authorization.openshift.io/v1`
|
||||
|Tier 1, some tier 1 deprecated
|
||||
|
||||
|`build.openshift.io/v1`
|
||||
|Tier 1, some tier 1 deprecated
|
||||
|
||||
|`config.openshift.io/v1`
|
||||
|Tier 1
|
||||
|
||||
|`image.openshift.io/v1`
|
||||
|Tier 1
|
||||
|
||||
|`network.openshift.io/v1`
|
||||
|Tier 1
|
||||
|
||||
|`network.operator.openshift.io/v1`
|
||||
|Tier 1
|
||||
|
||||
|`oauth.openshift.io/v1`
|
||||
|Tier 1
|
||||
|
||||
|`imagecontentsourcepolicy.operator.openshift.io/v1alpha1`
|
||||
|Tier 1
|
||||
|
||||
|`project.openshift.io/v1`
|
||||
|Tier 1
|
||||
|
||||
|`quota.openshift.io/v1`
|
||||
|Tier 1
|
||||
|
||||
|`route.openshift.io/v1`
|
||||
|Tier 1
|
||||
|
||||
|`quota.openshift.io/v1`
|
||||
|Tier 1
|
||||
|
||||
|`security.openshift.io/v1`
|
||||
|Tier 1 except for `RangeAllocation` (tier 4) and `*Reviews` (tier 2)
|
||||
|
||||
|`template.openshift.io/v1`
|
||||
|Tier 1
|
||||
|
||||
|`console.openshift.io/v1`
|
||||
|Tier 2
|
||||
|
||||
|===
|
||||
|
||||
[id="mapping-support-tiers-to-monitoring-api-groups_{context}"]
|
||||
== Support for Monitoring API groups
|
||||
|
||||
API groups that end with the suffix `monitoring.coreos.com` have the following mapping:
|
||||
|
||||
[cols="2",options="header"]
|
||||
|===
|
||||
|API version example
|
||||
|API tier
|
||||
|
||||
|`v1`
|
||||
|Tier 1
|
||||
|
||||
|===
|
||||
@@ -30,104 +30,3 @@ Components and developer tools that receive continuous updates through the Opera
|
||||
No compatibility is provided. API and AOE can change at any point. These capabilities should not be used by applications needing long-term support.
|
||||
|
||||
It is common practice for Operators to use custom resource definitions (CRDs) internally to accomplish a task. These objects are not meant for use by actors external to the Operator and are intended to be hidden. If any CRD is not meant for use by actors external to the Operator, the `operators.operatorframework.io/internal-objects` annotation in the Operators `ClusterServiceVersion` (CSV) must be specified, and that signals that the corresponding resource is internal use only.
|
||||
|
||||
[id="mapping-support-tiers-to-api-groups_{context}"]
|
||||
= Mapping API tiers to API groups
|
||||
|
||||
For each API tier defined by Red Hat, we provide a mapping table for specific API groups where the upstream communities are committed to maintain forward compatibility. Any API group that does not specify an explicit compatibility level is assigned API tier 3 by default.
|
||||
|
||||
[id="mapping-support-tiers-to-kubernetes-api-groups_{context}"]
|
||||
== Support for Kubernetes API groups
|
||||
|
||||
API groups that end with the suffix `*.k8s.io` or have the form `version.<name>` with no suffix are governed by the Kubernetes deprecation policy and follow a general mapping between API version exposed and corresponding support tier unless otherwise specified.
|
||||
|
||||
[cols="2",options="header"]
|
||||
|===
|
||||
|API version example
|
||||
|API tier
|
||||
|
||||
|`v1`
|
||||
|Tier 1
|
||||
|
||||
|`v1beta1`
|
||||
|Tier 2
|
||||
|
||||
|`v1alpha1`
|
||||
|Tier 4
|
||||
|
||||
|===
|
||||
|
||||
[id="mapping-support-tiers-to-openshift-api-groups_{context}"]
|
||||
== Support for OpenShift API groups
|
||||
|
||||
API groups that end with the suffix `*.openshift.io` are governed by the {product-title} deprecation policy and follow a general mapping between API version exposed and corresponding compatibility level unless otherwise specified.
|
||||
|
||||
[cols="2",options="header"]
|
||||
|===
|
||||
|API version example
|
||||
|API tier
|
||||
|
||||
|`apps.openshift.io/v1`
|
||||
|Tier 1
|
||||
|
||||
|`authorization.openshift.io/v1`
|
||||
|Tier 1, some tier 1 deprecated
|
||||
|
||||
|`build.openshift.io/v1`
|
||||
|Tier 1, some tier 1 deprecated
|
||||
|
||||
|`config.openshift.io/v1`
|
||||
|Tier 1
|
||||
|
||||
|`image.openshift.io/v1`
|
||||
|Tier 1
|
||||
|
||||
|`network.openshift.io/v1`
|
||||
|Tier 1
|
||||
|
||||
|`network.operator.openshift.io/v1`
|
||||
|Tier 1
|
||||
|
||||
|`oauth.openshift.io/v1`
|
||||
|Tier 1
|
||||
|
||||
|`imagecontentsourcepolicy.operator.openshift.io/v1alpha1`
|
||||
|Tier 1
|
||||
|
||||
|`project.openshift.io/v1`
|
||||
|Tier 1
|
||||
|
||||
|`quota.openshift.io/v1`
|
||||
|Tier 1
|
||||
|
||||
|`route.openshift.io/v1`
|
||||
|Tier 1
|
||||
|
||||
|`quota.openshift.io/v1`
|
||||
|Tier 1
|
||||
|
||||
|`security.openshift.io/v1`
|
||||
|Tier 1 except for `RangeAllocation` (tier 4) and `*Reviews` (tier 2)
|
||||
|
||||
|`template.openshift.io/v1`
|
||||
|Tier 1
|
||||
|
||||
|`console.openshift.io/v1`
|
||||
|Tier 2
|
||||
|
||||
|===
|
||||
|
||||
[id="mapping-support-tiers-to-monitoring-api-groups_{context}"]
|
||||
== Support for Monitoring API groups
|
||||
|
||||
API groups that end with the suffix `monitoring.coreos.com` have the following mapping:
|
||||
|
||||
[cols="2",options="header"]
|
||||
|===
|
||||
|API version example
|
||||
|API tier
|
||||
|
||||
|`v1`
|
||||
|Tier 1
|
||||
|
||||
|===
|
||||
|
||||
15
modules/dedicated-accessing-your-cluster.adoc
Normal file
15
modules/dedicated-accessing-your-cluster.adoc
Normal file
@@ -0,0 +1,15 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * getting_started/accessing-your-services.adoc
|
||||
|
||||
[id="dedicated-accessing-your-cluster_{context}"]
|
||||
= Accessing your cluster
|
||||
|
||||
Use the following steps to access your {product-title} cluster.
|
||||
|
||||
.Procedure
|
||||
|
||||
. From link:https://console.redhat.com/openshift[console.redhat.com/openshift], click
|
||||
on the cluster you want to access.
|
||||
|
||||
. Click *Launch Console*.
|
||||
@@ -5,7 +5,9 @@
|
||||
[id="dedicated-creating-your-cluster_{context}"]
|
||||
= Creating your cluster
|
||||
|
||||
To create your {product-title} cluster:
|
||||
Use the following steps to create your {product-title} cluster.
|
||||
|
||||
.Procedure
|
||||
|
||||
. Log in to link:https://console.redhat.com/openshift[console.redhat.com/openshift].
|
||||
|
||||
@@ -33,12 +35,3 @@ following are the default ranges available to use:
|
||||
|
||||
In the *Overview* tab under the *Details* heading will have a *Status*
|
||||
indicator. This will indicate that your cluster is *Ready* for use.
|
||||
|
||||
= Accessing your cluster
|
||||
|
||||
To access your {product-title} cluster:
|
||||
|
||||
. From link:https://console.redhat.com/openshift[console.redhat.com/openshift], click
|
||||
on the cluster you want to access.
|
||||
|
||||
. Click *Launch Console*.
|
||||
|
||||
42
modules/developer-cli-odo-sample-applications-binary.adoc
Normal file
42
modules/developer-cli-odo-sample-applications-binary.adoc
Normal file
@@ -0,0 +1,42 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * cli_reference/developer_cli_odo/using-sample-applications.adoc
|
||||
|
||||
[id="odo-sample-applications-binary_{context}"]
|
||||
= Binary example applications
|
||||
|
||||
Use the following commands to build and run sample applications from a binary file for a particular runtime.
|
||||
|
||||
[id="odo-sample-applications-binary-java_{context}"]
|
||||
== java
|
||||
|
||||
Java can be used to deploy a binary artifact as follows:
|
||||
|
||||
[source,terminal]
|
||||
----
|
||||
$ git clone https://github.com/spring-projects/spring-petclinic.git
|
||||
$ cd spring-petclinic
|
||||
$ mvn package
|
||||
$ odo create java test3 --binary target/*.jar
|
||||
$ odo push
|
||||
----
|
||||
|
||||
|
||||
//Commenting out as it doesn't work for now. https://github.com/openshift/odo/issues/4623
|
||||
////
|
||||
[id="odo-sample-applications-binary-wildfly_{context}"]
|
||||
== wildfly
|
||||
|
||||
WildFly can be used to deploy a binary application as follows:
|
||||
|
||||
[source,terminal]
|
||||
----
|
||||
$ git clone https://github.com/openshiftdemos/os-sample-java-web.git
|
||||
$ cd os-sample-java-web
|
||||
$ mvn package
|
||||
$ cd ..
|
||||
$ mkdir example && cd example
|
||||
$ mv ../os-sample-java-web/target/ROOT.war example.war
|
||||
$ odo create wildfly --binary example.war
|
||||
----
|
||||
////
|
||||
@@ -2,10 +2,12 @@
|
||||
//
|
||||
// * cli_reference/developer_cli_odo/using-sample-applications.adoc
|
||||
|
||||
[id="odo-sample-applications_{context}"]
|
||||
[id="odo-sample-applications-github_{context}"]
|
||||
= Git repository example applications
|
||||
|
||||
= Examples from Git repositories
|
||||
Use the following commands to build and run sample applications from a Git repository for a particular runtime.
|
||||
|
||||
[id="odo-sample-applications-github-httpd_{context}"]
|
||||
== httpd
|
||||
|
||||
This example helps build and serve static content using httpd on CentOS 7. For more information about using this builder image, including {product-title} considerations, see the link:https://github.com/sclorg/httpd-container/blob/master/2.4/root/usr/share/container-scripts/httpd/README.md[Apache HTTP Server container image repository].
|
||||
@@ -15,6 +17,7 @@ This example helps build and serve static content using httpd on CentOS 7. For m
|
||||
$ odo create httpd --git https://github.com/openshift/httpd-ex.git
|
||||
----
|
||||
|
||||
[id="odo-sample-applications-github-java_{context}"]
|
||||
== java
|
||||
|
||||
This example helps build and run fat JAR Java applications on CentOS 7. For more information about using this builder image, including {product-title} considerations, see the link:https://github.com/fabric8io-images/s2i/blob/master/README.md[Java S2I Builder image].
|
||||
@@ -24,6 +27,7 @@ This example helps build and run fat JAR Java applications on CentOS 7. For more
|
||||
$ odo create java --git https://github.com/spring-projects/spring-petclinic.git
|
||||
----
|
||||
|
||||
[id="odo-sample-applications-github-nodejs_{context}"]
|
||||
== nodejs
|
||||
|
||||
Build and run Node.js applications on CentOS 7. For more information about using this builder image, including {product-title} considerations, see the link:https://github.com/sclorg/s2i-nodejs-container/blob/master/8/README.md[Node.js 8 container image].
|
||||
@@ -33,6 +37,7 @@ Build and run Node.js applications on CentOS 7. For more information about using
|
||||
$ odo create nodejs --git https://github.com/openshift/nodejs-ex.git
|
||||
----
|
||||
|
||||
[id="odo-sample-applications-github-perl_{context}"]
|
||||
== perl
|
||||
|
||||
This example helps build and run Perl applications on CentOS 7. For more information about using this builder image, including {product-title} considerations, see the link:https://github.com/sclorg/s2i-perl-container/blob/master/5.26/README.md[Perl 5.26 container image].
|
||||
@@ -42,6 +47,7 @@ This example helps build and run Perl applications on CentOS 7. For more informa
|
||||
$ odo create perl --git https://github.com/openshift/dancer-ex.git
|
||||
----
|
||||
|
||||
[id="odo-sample-applications-github-php_{context}"]
|
||||
== php
|
||||
|
||||
This example helps build and run PHP applications on CentOS 7. For more information about using this builder image, including {product-title} considerations, see the link:https://github.com/sclorg/s2i-php-container/blob/master/7.1/README.md[PHP 7.1 Docker image].
|
||||
@@ -51,6 +57,7 @@ This example helps build and run PHP applications on CentOS 7. For more informat
|
||||
$ odo create php --git https://github.com/openshift/cakephp-ex.git
|
||||
----
|
||||
|
||||
[id="odo-sample-applications-github-python_{context}"]
|
||||
== python
|
||||
|
||||
This example helps build and run Python applications on CentOS 7. For more information about using this builder image, including {product-title} considerations, see the link:https://github.com/sclorg/s2i-python-container/blob/master/3.6/README.md[Python 3.6 container image].
|
||||
@@ -60,6 +67,7 @@ This example helps build and run Python applications on CentOS 7. For more infor
|
||||
$ odo create python --git https://github.com/openshift/django-ex.git
|
||||
----
|
||||
|
||||
[id="odo-sample-applications-github-ruby_{context}"]
|
||||
== ruby
|
||||
|
||||
This example helps build and run Ruby applications on CentOS 7. For more information about using this builder image, including {product-title} considerations, see link:https://github.com/sclorg/s2i-ruby-container/blob/master/2.5/README.md[Ruby 2.5 container image].
|
||||
@@ -71,6 +79,7 @@ $ odo create ruby --git https://github.com/openshift/ruby-ex.git
|
||||
|
||||
//Commenting out as it doesn't work for now. https://github.com/openshift/odo/issues/4623
|
||||
////
|
||||
[id="odo-sample-applications-github-wildfly_{context}"]
|
||||
== wildfly
|
||||
|
||||
This example helps build and run WildFly applications on CentOS 7. For more information about using this builder image, including {product-title} considerations, see the link:https://github.com/wildfly/wildfly-s2i/blob/master/README.md[Wildfly - CentOS Docker images for OpenShift].
|
||||
@@ -80,36 +89,3 @@ This example helps build and run WildFly applications on CentOS 7. For more info
|
||||
$ odo create wildfly --git https://github.com/openshift/openshift-jee-sample.git
|
||||
----
|
||||
////
|
||||
= Binary examples
|
||||
|
||||
== java
|
||||
|
||||
Java can be used to deploy a binary artifact as follows:
|
||||
|
||||
[source,terminal]
|
||||
----
|
||||
$ git clone https://github.com/spring-projects/spring-petclinic.git
|
||||
$ cd spring-petclinic
|
||||
$ mvn package
|
||||
$ odo create java test3 --binary target/*.jar
|
||||
$ odo push
|
||||
----
|
||||
|
||||
|
||||
//Commenting out as it doesn't work for now. https://github.com/openshift/odo/issues/4623
|
||||
////
|
||||
== wildfly
|
||||
|
||||
WildFly can be used to deploy a binary application as follows:
|
||||
|
||||
[source,terminal]
|
||||
----
|
||||
$ git clone https://github.com/openshiftdemos/os-sample-java-web.git
|
||||
$ cd os-sample-java-web
|
||||
$ mvn package
|
||||
$ cd ..
|
||||
$ mkdir example && cd example
|
||||
$ mv ../os-sample-java-web/target/ROOT.war example.war
|
||||
$ odo create wildfly --binary example.war
|
||||
----
|
||||
////
|
||||
91
modules/developer-cli-odo-sample-applications-github.adoc
Normal file
91
modules/developer-cli-odo-sample-applications-github.adoc
Normal file
@@ -0,0 +1,91 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * cli_reference/developer_cli_odo/using-sample-applications.adoc
|
||||
|
||||
[id="odo-sample-applications-github_{context}"]
|
||||
= Git repository sample application examples
|
||||
|
||||
Use the following commands to build and run sample applications from a Git repository for a particular runtime.
|
||||
|
||||
[id="odo-sample-applications-github-httpd_{context}"]
|
||||
== httpd
|
||||
|
||||
This example helps build and serve static content using httpd on CentOS 7. For more information about using this builder image, including {product-title} considerations, see the link:https://github.com/sclorg/httpd-container/blob/master/2.4/root/usr/share/container-scripts/httpd/README.md[Apache HTTP Server container image repository].
|
||||
|
||||
[source,terminal]
|
||||
----
|
||||
$ odo create httpd --git https://github.com/openshift/httpd-ex.git
|
||||
----
|
||||
|
||||
[id="odo-sample-applications-github-java_{context}"]
|
||||
== java
|
||||
|
||||
This example helps build and run fat JAR Java applications on CentOS 7. For more information about using this builder image, including {product-title} considerations, see the link:https://github.com/fabric8io-images/s2i/blob/master/README.md[Java S2I Builder image].
|
||||
|
||||
[source,terminal]
|
||||
----
|
||||
$ odo create java --git https://github.com/spring-projects/spring-petclinic.git
|
||||
----
|
||||
|
||||
[id="odo-sample-applications-github-nodejs_{context}"]
|
||||
== nodejs
|
||||
|
||||
Build and run Node.js applications on CentOS 7. For more information about using this builder image, including {product-title} considerations, see the link:https://github.com/sclorg/s2i-nodejs-container/blob/master/8/README.md[Node.js 8 container image].
|
||||
|
||||
[source,terminal]
|
||||
----
|
||||
$ odo create nodejs --git https://github.com/openshift/nodejs-ex.git
|
||||
----
|
||||
|
||||
[id="odo-sample-applications-github-perl_{context}"]
|
||||
== perl
|
||||
|
||||
This example helps build and run Perl applications on CentOS 7. For more information about using this builder image, including {product-title} considerations, see the link:https://github.com/sclorg/s2i-perl-container/blob/master/5.26/README.md[Perl 5.26 container image].
|
||||
|
||||
[source,terminal]
|
||||
----
|
||||
$ odo create perl --git https://github.com/openshift/dancer-ex.git
|
||||
----
|
||||
|
||||
[id="odo-sample-applications-github-php_{context}"]
|
||||
== php
|
||||
|
||||
This example helps build and run PHP applications on CentOS 7. For more information about using this builder image, including {product-title} considerations, see the link:https://github.com/sclorg/s2i-php-container/blob/master/7.1/README.md[PHP 7.1 Docker image].
|
||||
|
||||
[source,terminal]
|
||||
----
|
||||
$ odo create php --git https://github.com/openshift/cakephp-ex.git
|
||||
----
|
||||
|
||||
[id="odo-sample-applications-github-python_{context}"]
|
||||
== python
|
||||
|
||||
This example helps build and run Python applications on CentOS 7. For more information about using this builder image, including {product-title} considerations, see the link:https://github.com/sclorg/s2i-python-container/blob/master/3.6/README.md[Python 3.6 container image].
|
||||
|
||||
[source,terminal]
|
||||
----
|
||||
$ odo create python --git https://github.com/openshift/django-ex.git
|
||||
----
|
||||
|
||||
[id="odo-sample-applications-github-ruby_{context}"]
|
||||
== ruby
|
||||
|
||||
This example helps build and run Ruby applications on CentOS 7. For more information about using this builder image, including {product-title} considerations, see link:https://github.com/sclorg/s2i-ruby-container/blob/master/2.5/README.md[Ruby 2.5 container image].
|
||||
|
||||
[source,terminal]
|
||||
----
|
||||
$ odo create ruby --git https://github.com/openshift/ruby-ex.git
|
||||
----
|
||||
|
||||
//Commenting out as it doesn't work for now. https://github.com/openshift/odo/issues/4623
|
||||
////
|
||||
[id="odo-sample-applications-github-wildfly_{context}"]
|
||||
== wildfly
|
||||
|
||||
This example helps build and run WildFly applications on CentOS 7. For more information about using this builder image, including {product-title} considerations, see the link:https://github.com/wildfly/wildfly-s2i/blob/master/README.md[Wildfly - CentOS Docker images for OpenShift].
|
||||
|
||||
[source,terminal]
|
||||
----
|
||||
$ odo create wildfly --git https://github.com/openshift/openshift-jee-sample.git
|
||||
----
|
||||
////
|
||||
@@ -562,29 +562,3 @@ etcd-ip-10-0-173-171.ec2.internal 2/2 Running 0
|
||||
To ensure that all workloads return to normal operation following a recovery procedure, restart each pod that stores Kubernetes API information. This includes {product-title} components such as routers, Operators, and third-party components.
|
||||
|
||||
Note that it might take several minutes after completing this procedure for all services to be restored. For example, authentication by using `oc login` might not immediately work until the OAuth server pods are restarted.
|
||||
|
||||
[id="dr-scenario-cluster-state-issues_{context}"]
|
||||
= Issues and workarounds for restoring a persistent storage state
|
||||
|
||||
If your {product-title} cluster uses persistent storage of any form, a state of the cluster is typically stored outside etcd. It might be an Elasticsearch cluster running in a pod or a database running in a `StatefulSet` object. When you restore from an etcd backup, the status of the workloads in {product-title} is also restored. However, if the etcd snapshot is old, the status might be invalid or outdated.
|
||||
|
||||
[IMPORTANT]
|
||||
====
|
||||
The contents of persistent volumes (PVs) are never part of the etcd snapshot. When you restore an {product-title} cluster from an etcd snapshot, non-critical workloads might gain access to critical data, or vice-versa.
|
||||
====
|
||||
|
||||
The following are some example scenarios that produce an out-of-date status:
|
||||
|
||||
* MySQL database is running in a pod backed up by a PV object. Restoring {product-title} from an etcd snapshot does not bring back the volume on the storage provider, and does not produce a running MySQL pod, despite the pod repeatedly attempting to start. You must manually restore this pod by restoring the volume on the storage provider, and then editing the PV to point to the new volume.
|
||||
|
||||
* Pod P1 is using volume A, which is attached to node X. If the etcd snapshot is taken while another pod uses the same volume on node Y, then when the etcd restore is performed, pod P1 might not be able to start correctly due to the volume still being attached to node Y. {product-title} is not aware of the attachment, and does not automatically detach it. When this occurs, the volume must be manually detached from node Y so that the volume can attach on node X, and then pod P1 can start.
|
||||
|
||||
* Cloud provider or storage provider credentials were updated after the etcd snapshot was taken. This causes any CSI drivers or Operators that depend on the those credentials to not work. You might have to manually update the credentials required by those drivers or Operators.
|
||||
|
||||
* A device is removed or renamed from {product-title} nodes after the etcd snapshot is taken. The Local Storage Operator creates symlinks for each PV that it manages from `/dev/disk/by-id` or `/dev` directories. This situation might cause the local PVs to refer to devices that no longer exist.
|
||||
+
|
||||
To fix this problem, an administrator must:
|
||||
|
||||
. Manually remove the PVs with invalid devices.
|
||||
. Remove symlinks from respective nodes.
|
||||
. Delete `LocalVolume` or `LocalVolumeSet` objects (see _Storage_ -> _Configuring persistent storage_ -> _Persistent storage using local volumes_ -> _Deleting the Local Storage Operator Resources_).
|
||||
|
||||
30
modules/dr-scenario-cluster-state-issues.adoc
Normal file
30
modules/dr-scenario-cluster-state-issues.adoc
Normal file
@@ -0,0 +1,30 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * disaster_recovery/scenario-2-restoring-cluster-state.adoc
|
||||
// * post_installation_configuration/cluster-tasks.adoc
|
||||
|
||||
[id="dr-scenario-cluster-state-issues_{context}"]
|
||||
= Issues and workarounds for restoring a persistent storage state
|
||||
|
||||
If your {product-title} cluster uses persistent storage of any form, a state of the cluster is typically stored outside etcd. It might be an Elasticsearch cluster running in a pod or a database running in a `StatefulSet` object. When you restore from an etcd backup, the status of the workloads in {product-title} is also restored. However, if the etcd snapshot is old, the status might be invalid or outdated.
|
||||
|
||||
[IMPORTANT]
|
||||
====
|
||||
The contents of persistent volumes (PVs) are never part of the etcd snapshot. When you restore an {product-title} cluster from an etcd snapshot, non-critical workloads might gain access to critical data, or vice-versa.
|
||||
====
|
||||
|
||||
The following are some example scenarios that produce an out-of-date status:
|
||||
|
||||
* MySQL database is running in a pod backed up by a PV object. Restoring {product-title} from an etcd snapshot does not bring back the volume on the storage provider, and does not produce a running MySQL pod, despite the pod repeatedly attempting to start. You must manually restore this pod by restoring the volume on the storage provider, and then editing the PV to point to the new volume.
|
||||
|
||||
* Pod P1 is using volume A, which is attached to node X. If the etcd snapshot is taken while another pod uses the same volume on node Y, then when the etcd restore is performed, pod P1 might not be able to start correctly due to the volume still being attached to node Y. {product-title} is not aware of the attachment, and does not automatically detach it. When this occurs, the volume must be manually detached from node Y so that the volume can attach on node X, and then pod P1 can start.
|
||||
|
||||
* Cloud provider or storage provider credentials were updated after the etcd snapshot was taken. This causes any CSI drivers or Operators that depend on the those credentials to not work. You might have to manually update the credentials required by those drivers or Operators.
|
||||
|
||||
* A device is removed or renamed from {product-title} nodes after the etcd snapshot is taken. The Local Storage Operator creates symlinks for each PV that it manages from `/dev/disk/by-id` or `/dev` directories. This situation might cause the local PVs to refer to devices that no longer exist.
|
||||
+
|
||||
To fix this problem, an administrator must:
|
||||
|
||||
. Manually remove the PVs with invalid devices.
|
||||
. Remove symlinks from respective nodes.
|
||||
. Delete `LocalVolume` or `LocalVolumeSet` objects (see _Storage_ -> _Configuring persistent storage_ -> _Persistent storage using local volumes_ -> _Deleting the Local Storage Operator Resources_).
|
||||
@@ -0,0 +1,15 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * migrating_from_ocp_3_to_4/migrating-applications-3-4.adoc
|
||||
// * migration_toolkit_for_containers/migrating-applications-with-mtc.adoc
|
||||
|
||||
[id="migration-direct-volume-migration-and-direct-image-migration_{context}"]
|
||||
= Direct volume migration and direct image migration
|
||||
|
||||
You can use direct image migration (DIM) and direct volume migration (DVM) to migrate images and data directly from the source cluster to the target cluster.
|
||||
|
||||
If you run DVM with nodes that are in different availability zones, the migration might fail because the migrated pods cannot access the persistent volume claim.
|
||||
|
||||
DIM and DVM have significant performance benefits because the intermediate steps of backing up files from the source cluster to the replication repository and restoring files from the replication repository to the target cluster are skipped. The data is transferred with link:https://rsync.samba.org/[Rsync].
|
||||
|
||||
DIM and DVM have additional prerequisites.
|
||||
@@ -46,14 +46,3 @@ a|* Cloud provider must support snapshots.
|
||||
* Storage class must be compatible with snapshots.
|
||||
* Does not support direct volume migration.
|
||||
|===
|
||||
|
||||
[id="direct-volume-migration-and-direct-image-migration_{context}"]
|
||||
= Direct volume migration and direct image migration
|
||||
|
||||
You can use direct image migration (DIM) and direct volume migration (DVM) to migrate images and data directly from the source cluster to the target cluster.
|
||||
|
||||
If you run DVM with nodes that are in different availability zones, the migration might fail because the migrated pods cannot access the persistent volume claim.
|
||||
|
||||
DIM and DVM have significant performance benefits because the intermediate steps of backing up files from the source cluster to the replication repository and restoring files from the replication repository to the target cluster are skipped. The data is transferred with link:https://rsync.samba.org/[Rsync].
|
||||
|
||||
DIM and DVM have additional prerequisites.
|
||||
|
||||
@@ -73,30 +73,3 @@ to consume half of the memory requested by a higher QoS class.
|
||||
will allow a `Burstable` and `BestEffort` QoS classes to consume up to the full node
|
||||
allocatable amount if available, but increases the risk that a `Guaranteed` workload
|
||||
will not have access to requested memory. This condition effectively disables this feature.
|
||||
|
||||
[id="qos-about-swap_{context}"]
|
||||
= Understanding swap memory and QOS
|
||||
|
||||
You can disable swap by default on your nodes to preserve quality of
|
||||
service (QOS) guarantees. Otherwise, physical resources on a node can oversubscribe,
|
||||
affecting the resource guarantees the Kubernetes scheduler makes during pod
|
||||
placement.
|
||||
|
||||
For example, if two guaranteed pods have reached their memory limit, each
|
||||
container could start using swap memory. Eventually, if there is not enough swap
|
||||
space, processes in the pods can be terminated due to the system being
|
||||
oversubscribed.
|
||||
|
||||
Failing to disable swap results in nodes not recognizing that they are
|
||||
experiencing *MemoryPressure*, resulting in pods not receiving the memory they
|
||||
made in their scheduling request. As a result, additional pods are placed on the
|
||||
node to further increase memory pressure, ultimately increasing your risk of
|
||||
experiencing a system out of memory (OOM) event.
|
||||
|
||||
[IMPORTANT]
|
||||
====
|
||||
If swap is enabled, any out-of-resource handling eviction thresholds for available memory will not work as
|
||||
expected. Take advantage of out-of-resource handling to allow pods to be evicted
|
||||
from a node when it is under memory pressure, and rescheduled on an alternative
|
||||
node that has no such pressure.
|
||||
====
|
||||
|
||||
@@ -56,7 +56,7 @@ configure history limits.
|
||||
====
|
||||
|
||||
[id="jobs-create_{context}"]
|
||||
= Understanding how to create jobs
|
||||
== Understanding how to create jobs
|
||||
|
||||
Both resource types require a job configuration that consists of the following key parts:
|
||||
|
||||
@@ -114,7 +114,7 @@ Doing this prevents them from generating unnecessary artifacts.
|
||||
====
|
||||
|
||||
[id="jobs-limits_{context}"]
|
||||
= Known limitations
|
||||
== Known limitations
|
||||
|
||||
The job specification restart policy only applies to the _pods_, and not the _job controller_. However, the job controller is hard-coded to keep retrying jobs to completion.
|
||||
|
||||
|
||||
31
modules/nodes-qos-about-swap.adoc
Normal file
31
modules/nodes-qos-about-swap.adoc
Normal file
@@ -0,0 +1,31 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * nodes/nodes-cluster-overcommit.adoc
|
||||
// * post_installation_configuration/node-tasks.adoc
|
||||
|
||||
[id="nodes-qos-about-swap_{context}"]
|
||||
= Understanding swap memory and QOS
|
||||
|
||||
You can disable swap by default on your nodes to preserve quality of
|
||||
service (QOS) guarantees. Otherwise, physical resources on a node can oversubscribe,
|
||||
affecting the resource guarantees the Kubernetes scheduler makes during pod
|
||||
placement.
|
||||
|
||||
For example, if two guaranteed pods have reached their memory limit, each
|
||||
container could start using swap memory. Eventually, if there is not enough swap
|
||||
space, processes in the pods can be terminated due to the system being
|
||||
oversubscribed.
|
||||
|
||||
Failing to disable swap results in nodes not recognizing that they are
|
||||
experiencing *MemoryPressure*, resulting in pods not receiving the memory they
|
||||
made in their scheduling request. As a result, additional pods are placed on the
|
||||
node to further increase memory pressure, ultimately increasing your risk of
|
||||
experiencing a system out of memory (OOM) event.
|
||||
|
||||
[IMPORTANT]
|
||||
====
|
||||
If swap is enabled, any out-of-resource handling eviction thresholds for available memory will not work as
|
||||
expected. Take advantage of out-of-resource handling to allow pods to be evicted
|
||||
from a node when it is under memory pressure, and rescheduled on an alternative
|
||||
node that has no such pressure.
|
||||
====
|
||||
@@ -2,20 +2,8 @@
|
||||
//
|
||||
// * networking/hardware_networks/using-sriov-multicast.adoc
|
||||
|
||||
[id="nw-configuring-high-performance-multicast-with-sriov_{context}"]
|
||||
= Configuring high performance multicast
|
||||
|
||||
The OpenShift SDN default Container Network Interface (CNI) network provider supports multicast between pods on the default network. This is best used for low-bandwidth coordination or service discovery, and not high-bandwidth applications.
|
||||
For applications such as streaming media, like Internet Protocol television (IPTV) and multipoint videoconferencing, you can utilize Single Root I/O Virtualization (SR-IOV) hardware to provide near-native performance.
|
||||
|
||||
When using additional SR-IOV interfaces for multicast:
|
||||
|
||||
* Multicast packets must be sent or received by a pod through the additional SR-IOV interface.
|
||||
* The physical network which connects the SR-IOV interfaces decides the
|
||||
multicast routing and topology, which is not controlled by {product-title}.
|
||||
|
||||
[id="nw-using-an-sriov-interface-for-multicast_{context}"]
|
||||
= Using an SR-IOV interface for multicast
|
||||
= Configuring an SR-IOV interface for multicast
|
||||
|
||||
The following procedure creates an example SR-IOV interface for multicast.
|
||||
|
||||
@@ -68,7 +56,7 @@ spec:
|
||||
{"dst": "232.0.0.0/5"}
|
||||
],
|
||||
"gateway": "10.56.217.1"
|
||||
}
|
||||
}
|
||||
resourceName: example
|
||||
----
|
||||
<1> If you choose to configure DHCP as IPAM, ensure that you provision the following default routes through your DHCP server: `224.0.0.0/5` and `232.0.0.0/5`. This is to override the static multicast route set by the default network provider.
|
||||
@@ -86,13 +74,13 @@ metadata:
|
||||
k8s.v1.cni.cncf.io/networks: nic1
|
||||
spec:
|
||||
containers:
|
||||
- name: example
|
||||
- name: example
|
||||
image: rhel7:latest
|
||||
securityContext:
|
||||
capabilities:
|
||||
add: ["NET_ADMIN"] <1>
|
||||
command: [ "sleep", "infinity"]
|
||||
----
|
||||
<1> The `NET_ADMIN` capability is required only if your application needs to
|
||||
<1> The `NET_ADMIN` capability is required only if your application needs to
|
||||
assign the multicast IP address to the SR-IOV interface. Otherwise, it can be
|
||||
omitted.
|
||||
|
||||
@@ -39,12 +39,12 @@ spec:
|
||||
----
|
||||
ifdef::openshift-sdn[]
|
||||
<1> A name for your egress firewall policy.
|
||||
<2> A collection of one or more egress network policy rules as described in the following section.
|
||||
endif::openshift-sdn[]
|
||||
ifdef::ovn[]
|
||||
<1> The name for the object must be `default`.
|
||||
endif::ovn[]
|
||||
|
||||
<2> A collection of one or more egress network policy rules as described in the following section.
|
||||
endif::ovn[]
|
||||
|
||||
[id="egressnetworkpolicy-rules_{context}"]
|
||||
== {kind} rules
|
||||
@@ -65,11 +65,8 @@ egress:
|
||||
dnsName: <dns_name> <4>
|
||||
----
|
||||
<1> The type of rule. The value must be either `Allow` or `Deny`.
|
||||
|
||||
<2> A stanza describing an egress traffic match rule. A value for either the `cidrSelector` field or the `dnsName` field for the rule. You cannot use both fields in the same rule.
|
||||
|
||||
<3> An IP address range in CIDR format.
|
||||
|
||||
<4> A domain name.
|
||||
endif::openshift-sdn[]
|
||||
ifdef::ovn[]
|
||||
@@ -84,13 +81,9 @@ egress:
|
||||
...
|
||||
----
|
||||
<1> The type of rule. The value must be either `Allow` or `Deny`.
|
||||
|
||||
<2> A stanza describing an egress traffic match rule that specifies the `cidrSelector` field or the `dnsName` field. You cannot use both fields in the same rule.
|
||||
|
||||
<3> An IP address range in CIDR format.
|
||||
|
||||
<4> A DNS domain name.
|
||||
|
||||
<5> Optional: A stanza describing a collection of network ports and protocols for the rule.
|
||||
|
||||
.Ports stanza
|
||||
@@ -101,7 +94,6 @@ ports:
|
||||
protocol: <protocol> <2>
|
||||
----
|
||||
<1> A network port, such as `80` or `443`. If you specify a value for this field, you must also specify a value for `protocol`.
|
||||
|
||||
<2> A network protocol. The value must be either `TCP`, `UDP`, or `SCTP`.
|
||||
endif::ovn[]
|
||||
|
||||
|
||||
15
modules/nw-high-performance-multicast.adoc
Normal file
15
modules/nw-high-performance-multicast.adoc
Normal file
@@ -0,0 +1,15 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * networking/hardware_networks/using-sriov-multicast.adoc
|
||||
|
||||
[id="nw-high-performance-multicast_{context}"]
|
||||
= High performance multicast
|
||||
|
||||
The OpenShift SDN default Container Network Interface (CNI) network provider supports multicast between pods on the default network. This is best used for low-bandwidth coordination or service discovery, and not high-bandwidth applications.
|
||||
For applications such as streaming media, like Internet Protocol television (IPTV) and multipoint videoconferencing, you can utilize Single Root I/O Virtualization (SR-IOV) hardware to provide near-native performance.
|
||||
|
||||
When using additional SR-IOV interfaces for multicast:
|
||||
|
||||
* Multicast packets must be sent or received by a pod through the additional SR-IOV interface.
|
||||
* The physical network which connects the SR-IOV interfaces decides the
|
||||
multicast routing and topology, which is not controlled by {product-title}.
|
||||
@@ -14,7 +14,8 @@ The following diagram shows a multiple availability zone (Multi-AZ) PrivateLink
|
||||
|
||||
image::156_OpenShift_ROSA_Arch_0621_privatelink.svg[Multi-AZ AWS PrivateLink cluster deployed on private subnets]
|
||||
|
||||
= AWS reference architectures
|
||||
[id="osd-aws-reference-architecture.adoc_{context}"]
|
||||
== AWS reference architectures
|
||||
|
||||
AWS provides multiple reference architectures that can be useful to customers when planning how to set up a configuration that uses AWS PrivateLink. Here are three examples:
|
||||
|
||||
|
||||
@@ -1,9 +1,14 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * operators/operator_sdk/oosdk-leader-election.adoc
|
||||
// * operators/operator_sdk/osdk-leader-election.adoc
|
||||
|
||||
[id="osdk-leader-election-types_{context}"]
|
||||
= Operator leader election examples
|
||||
|
||||
The following examples illustrate how to use the two leader election options for an Operator, Leader-for-life and Leader-with-lease.
|
||||
|
||||
[id="osdk-leader-for-life-election_{context}"]
|
||||
= Using Leader-for-life election
|
||||
== Leader-for-life election
|
||||
|
||||
With the Leader-for-life election implementation, a call to `leader.Become()` blocks the Operator as it retries until it can become the leader by creating the config map named `memcached-operator-lock`:
|
||||
|
||||
@@ -28,7 +33,7 @@ func main() {
|
||||
If the Operator is not running inside a cluster, `leader.Become()` simply returns without error to skip the leader election since it cannot detect the name of the Operator.
|
||||
|
||||
[id="osdk-leader-with-lease-election_{context}"]
|
||||
= Using Leader-with-lease election
|
||||
== Leader-with-lease election
|
||||
|
||||
The Leader-with-lease implementation can be enabled using the link:https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/manager#Options[Manager Options] for leader election:
|
||||
|
||||
|
||||
10
modules/ossm-auto-route-annotations.adoc
Normal file
10
modules/ossm-auto-route-annotations.adoc
Normal file
@@ -0,0 +1,10 @@
|
||||
// Module is included in the following assemblies:
|
||||
// * service_mesh/v2x/ossm-traffic-manage.adoc
|
||||
//
|
||||
|
||||
[id="ossm-auto-route-annotations_{context}"]
|
||||
= {ProductName} route annotations
|
||||
|
||||
Sometimes specific annotations are needed in an OpenShift Route. For example, some advanced features in OpenShift Routes are managed via xref:../../networking/routes/route-configuration.adoc[special annotations]. For this and other use cases, {ProductName} will copy all annotations present in the Istio Gateway resource (with the exception of those starting with `kubectl.kubernetes.io`) into the managed OpenShift Route resource.
|
||||
|
||||
If you need specific annotations in the OpenShift Routes created by {ProductShortName}, create them in the Istio Gateway resource and they will be copied into the OpenShift Route resources managed by the {ProductShortName}.
|
||||
18
modules/ossm-auto-route-enable.adoc
Normal file
18
modules/ossm-auto-route-enable.adoc
Normal file
@@ -0,0 +1,18 @@
|
||||
// Module is included in the following assemblies:
|
||||
// * service_mesh/v2x/ossm-traffic-manage.adoc
|
||||
//
|
||||
|
||||
[id="ossm-auto-route-enable_{context}"]
|
||||
= Disabling automatic {ProductName} route creation
|
||||
|
||||
By default, the `ServiceMeshControlPlane` resource automatically synchronizes the Gateway resources with OpenShift routes. Disabling the automatic route creation allows you more flexibility to control routes if you have a special case or prefer to control routes manually.
|
||||
|
||||
Disable integration between Istio Gateways and OpenShift Routes by setting the `ServiceMeshControlPlane` field `gateways.openshiftRoute.enabled` to `false`. For example, see the following resource snippet.
|
||||
|
||||
[source,yaml]
|
||||
----
|
||||
spec:
|
||||
gateways:
|
||||
openshiftRoute:
|
||||
enabled: false
|
||||
----
|
||||
@@ -1,7 +1,6 @@
|
||||
////
|
||||
This TASK module included in the following assemblies:
|
||||
// Module is included in the following assemblies:
|
||||
// * service_mesh/v2x/ossm-traffic-manage.adoc
|
||||
////
|
||||
//
|
||||
|
||||
[id="ossm-auto-route-create-subdomains_{context}"]
|
||||
= Creating subdomain routes
|
||||
@@ -43,25 +42,3 @@ gateway1-scqhv www.bookinfo.com istio-ingressgateway <all>
|
||||
----
|
||||
|
||||
If the gateway is deleted, {ProductName} deletes the routes. However, routes created manually are never modified by {ProductName}.
|
||||
|
||||
[id="ossm-auto-route-annotations_{context}"]
|
||||
= Annotations
|
||||
|
||||
Sometimes specific annotations are needed in an OpenShift Route. For example, some advanced features in OpenShift Routes are managed via xref:../../networking/routes/route-configuration.adoc[special annotations]. For this and other use cases, {ProductName} will copy all annotations present in the Istio Gateway resource (with the exception of those starting with `kubectl.kubernetes.io`) into the managed OpenShift Route resource.
|
||||
|
||||
If you need specific annotations in the OpenShift Routes created by {ProductShortName}, create them in the Istio Gateway resource and they will be copied into the OpenShift Route resources managed by the {ProductShortName}.
|
||||
|
||||
[id="ossm-auto-route-enable_{context}"]
|
||||
= Disabling automatic route creation
|
||||
|
||||
By default, the `ServiceMeshControlPlane` resource automatically synchronizes the Gateway resources with OpenShift routes. Disabling the automatic route creation allows you more flexibility to control routes if you have a special case or prefer to control routes manually.
|
||||
|
||||
Disable integration between Istio Gateways and OpenShift Routes by setting the `ServiceMeshControlPlane` field `gateways.openshiftRoute.enabled` to `false`. For example, see the following resource snippet.
|
||||
|
||||
[source,yaml]
|
||||
----
|
||||
spec:
|
||||
gateways:
|
||||
openshiftRoute:
|
||||
enabled: false
|
||||
----
|
||||
|
||||
69
modules/serverless-domain-mapping-custom-tls-cert.adoc
Normal file
69
modules/serverless-domain-mapping-custom-tls-cert.adoc
Normal file
@@ -0,0 +1,69 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * serverless/security/serverless-custom-tls-cert-domain-mapping.adoc
|
||||
|
||||
[id="serverless-domain-mapping-custom-tls-cert_{context}"]
|
||||
= Adding a custom TLS certificate to a DomainMapping CR
|
||||
|
||||
You can add an existing TLS certificate with a `DomainMapping` custom resource (CR) to secure the mapped service.
|
||||
|
||||
.Prerequisites
|
||||
|
||||
* You configured a custom domain for a Knative service and have a working `DomainMapping` CR.
|
||||
|
||||
* You have a TLS certificate from your Certificate Authority provider or a self-signed certificate.
|
||||
|
||||
* You have obtained the `cert` and `key` files from your Certificate Authority provider, or a self-signed certificate.
|
||||
|
||||
.Procedure
|
||||
|
||||
. Create a Kubernetes TLS secret:
|
||||
+
|
||||
[source,terminal]
|
||||
----
|
||||
$ oc create secret tls <tls_secret_name> --cert=<path_to_certificate_file> --key=<path_to_key_file>
|
||||
----
|
||||
|
||||
. Update the `DomainMapping` CR to use the TLS secret you have created:
|
||||
+
|
||||
[source,yaml]
|
||||
----
|
||||
apiVersion: serving.knative.dev/v1alpha1
|
||||
kind: DomainMapping
|
||||
metadata:
|
||||
name: <domain_name>
|
||||
namespace: <namespace>
|
||||
spec:
|
||||
ref:
|
||||
name: <service_name>
|
||||
kind: Service
|
||||
apiVersion: serving.knative.dev/v1
|
||||
# TLS block specifies the secret to be used
|
||||
tls:
|
||||
secretName: <tls_secret_name>
|
||||
----
|
||||
|
||||
.Verification
|
||||
|
||||
. Verify that the `DomainMapping` CR status is `True`, and that the `URL` column of the output shows the mapped domain with the scheme `https`:
|
||||
+
|
||||
[source,terminal]
|
||||
----
|
||||
$ oc get domainmapping <domain_name>
|
||||
----
|
||||
+
|
||||
.Example output
|
||||
[source,terminal]
|
||||
----
|
||||
NAME URL READY REASON
|
||||
example.com https://example.com True
|
||||
----
|
||||
|
||||
. Optional: If the service is exposed publicly, verify that it is available by running the following command:
|
||||
+
|
||||
[source,terminal]
|
||||
----
|
||||
$ curl https://<domain_name>
|
||||
----
|
||||
+
|
||||
If the certificate is self-signed, skip verification by adding the `-k` flag to the `curl` command.
|
||||
@@ -7,4 +7,6 @@ toc::[]
|
||||
|
||||
You can use multicast on your Single Root I/O Virtualization (SR-IOV) hardware network.
|
||||
|
||||
include::modules/nw-high-performance-multicast.adoc[leveloffset=+1]
|
||||
|
||||
include::modules/nw-configuring-high-performance-multicast-with-sriov.adoc[leveloffset=+1]
|
||||
|
||||
@@ -58,6 +58,8 @@ include::modules/nodes-cluster-overcommit-resources-containers.adoc[leveloffset=
|
||||
|
||||
include::modules/nodes-cluster-overcommit-qos-about.adoc[leveloffset=+2]
|
||||
|
||||
include::modules/nodes-qos-about-swap.adoc[leveloffset=+2]
|
||||
|
||||
include::modules/nodes-cluster-overcommit-configure-nodes.adoc[leveloffset=+2]
|
||||
|
||||
include::modules/nodes-cluster-overcommit-node-enforcing.adoc[leveloffset=+2]
|
||||
|
||||
@@ -15,6 +15,4 @@ Leader-with-lease:: The leader pod periodically renews the leader lease and give
|
||||
|
||||
By default, the Operator SDK enables the Leader-for-life implementation. Consult the related Go documentation for both approaches to consider the trade-offs that make sense for your use case.
|
||||
|
||||
The following examples illustrate how to use the two options.
|
||||
|
||||
include::modules/osdk-leader-election-types.adoc[leveloffset=+1]
|
||||
|
||||
@@ -610,6 +610,7 @@ include::modules/disabling-etcd-encryption.adoc[leveloffset=+2]
|
||||
include::modules/backup-etcd.adoc[leveloffset=+2]
|
||||
include::modules/etcd-defrag.adoc[leveloffset=+2]
|
||||
include::modules/dr-restoring-cluster-state.adoc[leveloffset=+2]
|
||||
include::modules/dr-scenario-cluster-state-issues.adoc[leveloffset=+2]
|
||||
|
||||
[id="post-install-pod-disruption-budgets"]
|
||||
== Pod disruption budgets
|
||||
|
||||
@@ -112,6 +112,8 @@ include::modules/nodes-cluster-overcommit-resources-containers.adoc[leveloffset=
|
||||
|
||||
include::modules/nodes-cluster-overcommit-qos-about.adoc[leveloffset=+2]
|
||||
|
||||
include::modules/nodes-qos-about-swap.adoc[leveloffset=+2]
|
||||
|
||||
include::modules/nodes-cluster-overcommit-configure-nodes.adoc[leveloffset=+2]
|
||||
|
||||
include::modules/nodes-cluster-overcommit-node-enforcing.adoc[leveloffset=+2]
|
||||
|
||||
1
rest_api/images
Symbolic link
1
rest_api/images
Symbolic link
@@ -0,0 +1 @@
|
||||
../images/
|
||||
1
rest_api/modules
Symbolic link
1
rest_api/modules
Symbolic link
@@ -0,0 +1 @@
|
||||
../modules/
|
||||
@@ -18,4 +18,6 @@ Red Hat requests that application developers validate that any behavior they dep
|
||||
|
||||
include::modules/api-support-tiers.adoc[leveloffset=+1]
|
||||
|
||||
include::modules/api-support-tiers-mapping.adoc[leveloffset=+1]
|
||||
|
||||
include::modules/api-support-deprecation-policy.adoc[leveloffset=+1]
|
||||
|
||||
@@ -1 +1 @@
|
||||
../images
|
||||
../../images/
|
||||
@@ -1 +1 @@
|
||||
../modules
|
||||
../../modules/
|
||||
@@ -1,9 +1,9 @@
|
||||
include::modules/serverless-document-attributes.adoc[]
|
||||
include::modules/ossm-document-attributes.adoc[]
|
||||
[id="serverless-custom-tls-cert-domain-mapping"]
|
||||
= Using a custom TLS certificate for domain mapping
|
||||
:context: serverless-custom-tls-cert-domain-mapping
|
||||
include::modules/common-attributes.adoc[]
|
||||
include::modules/serverless-document-attributes.adoc[]
|
||||
include::modules/ossm-document-attributes.adoc[]
|
||||
|
||||
toc::[]
|
||||
|
||||
@@ -11,61 +11,6 @@ You can use an existing TLS certificate with a `DomainMapping` custom resource (
|
||||
|
||||
.Prerequisites
|
||||
|
||||
* You have completed the steps in xref:../../serverless/security/serverless-custom-domains.adoc#serverless-custom-domains[Configuring a custom domain for a Knative service], and have a working `DomainMapping` CR.
|
||||
* You have completed the steps in xref:../../serverless/security/serverless-custom-domains.adoc#serverless-custom-domains[Configuring a custom domain for a Knative service] and have a working `DomainMapping` CR.
|
||||
|
||||
* You have a TLS certificate from your Certificate Authority provider, or a self-signed certificate.
|
||||
|
||||
* You have obtained the `cert` and `key` files from your Certificate Authority provider, or a self-signed certificate.
|
||||
|
||||
.Procedure
|
||||
|
||||
. Create a Kubernetes TLS secret:
|
||||
+
|
||||
[source,terminal]
|
||||
----
|
||||
$ oc create secret tls <tls_secret_name> --cert=<path_to_certificate_file> --key=<path_to_key_file>
|
||||
----
|
||||
|
||||
. Update the `DomainMapping` CR to use the TLS secret you have created:
|
||||
+
|
||||
[source,yaml]
|
||||
----
|
||||
apiVersion: serving.knative.dev/v1alpha1
|
||||
kind: DomainMapping
|
||||
metadata:
|
||||
name: <domain_name>
|
||||
namespace: <namespace>
|
||||
spec:
|
||||
ref:
|
||||
name: <service_name>
|
||||
kind: Service
|
||||
apiVersion: serving.knative.dev/v1
|
||||
# TLS block specifies the secret to be used
|
||||
tls:
|
||||
secretName: <tls_secret_name>
|
||||
----
|
||||
|
||||
.Verification
|
||||
|
||||
. Verify that the `DomainMapping` CR status is `True`, and that the `URL` column of the output shows the mapped domain with the scheme `https`:
|
||||
+
|
||||
[source,terminal]
|
||||
----
|
||||
$ oc get domainmapping <domain_name>
|
||||
----
|
||||
+
|
||||
.Example output
|
||||
[source,terminal]
|
||||
----
|
||||
NAME URL READY REASON
|
||||
example.com https://example.com True
|
||||
----
|
||||
|
||||
. Optional: If the service is exposed publicly, verify that it is available by running the following command:
|
||||
+
|
||||
[source,terminal]
|
||||
----
|
||||
$ curl https://<domain_name>
|
||||
----
|
||||
+
|
||||
If the certificate is self-signed, skip verification by adding the `-k` flag to the `curl` command.
|
||||
include::modules/serverless-domain-mapping-custom-tls-cert.adoc[leveloffset=+1]
|
||||
|
||||
@@ -1,30 +0,0 @@
|
||||
[id="ossm-config-v2x"]
|
||||
= Configuring Service Mesh
|
||||
include::modules/ossm-document-attributes.adoc[]
|
||||
:context: ossm-config-v2x
|
||||
|
||||
toc::[]
|
||||
|
||||
After you create a `ServiceMeshControlPlane` resource, configure the resource to suit your environment and requirements.
|
||||
|
||||
This guide references the Bookinfo sample application to provide examples of security in a sample application. Install the xref:../../service_mesh/v2x/prepare-to-deploy-applications-ossm.adoc#ossm-tutorial-bookinfo-overview_deploying-applications-ossm[Bookinfo application] to learn how these routing examples work.
|
||||
|
||||
include::modules/ossm-config-security.adoc[leveloffset=+1]
|
||||
|
||||
include::modules/ossm-security-mtls.adoc[leveloffset=+2]
|
||||
|
||||
include::modules/ossm-config-sec-mtls-mesh.adoc[leveloffset=+3]
|
||||
|
||||
include::modules/ossm-config-sidecar-mtls.adoc[leveloffset=+3]
|
||||
|
||||
include::modules/ossm-config-sidecar-out-mtls.adoc[leveloffset=+3]
|
||||
|
||||
include::modules/ossm-config-mtls-min-max.adoc[leveloffset=+3]
|
||||
|
||||
include::modules/ossm-security-auth-policy.adoc[leveloffset=+2]
|
||||
|
||||
include::modules/ossm-security-cipher.adoc[leveloffset=+2]
|
||||
|
||||
include::modules/ossm-security-cert-manage.adoc[leveloffset=+2]
|
||||
|
||||
|
||||
@@ -34,5 +34,8 @@ OpenShift routes for Istio Gateways are automatically managed in {ProductShortNa
|
||||
{ProductName} creates the route with the subdomain, but {product-title} must be configured to enable it. Subdomains, for example `*.domain.com`, are supported but not by default. Configure an {product-title} wildcard policy before configuring a wildcard host Gateway. For more information, see xref:../../networking/ingress-operator.adoc#using-wildcard-routes_configuring-ingress[Using wildcard routes].
|
||||
|
||||
include::modules/ossm-auto-route.adoc[leveloffset=+2]
|
||||
include::modules/ossm-auto-route-annotations.adoc[leveloffset=+2]
|
||||
include::modules/ossm-auto-route-enable.adoc[leveloffset=+2]
|
||||
|
||||
|
||||
include::modules/ossm-routing-sc.adoc[leveloffset=+2]
|
||||
|
||||
Reference in New Issue
Block a user