
Merge pull request #15130 from mburke5678/remove-product-title

Removing "in {product-title}" from titles in nodes and logging
Authored by Michael Burke on 2019-06-03 16:46:54 -04:00; committed by GitHub.
49 changed files with 49 additions and 49 deletions


@@ -1,6 +1,6 @@
:context: efk-logging-elasticsearch
[id="efk-logging-elasticsearch"]
= Configuring Elasticsearch in {product-title}
= Configuring Elasticsearch
include::modules/common-attributes.adoc[]
toc::[]


@@ -1,6 +1,6 @@
:context: efk-logging-fluentd
[id="efk-logging-fluentd"]
= Configuring Fluentd in {product-title}
= Configuring Fluentd
include::modules/common-attributes.adoc[]
toc::[]


@@ -1,6 +1,6 @@
:context: efk-logging-kibana
[id="efk-logging-kibana"]
= Configuring Kibana in {product-title}
= Configuring Kibana
include::modules/common-attributes.adoc[]
toc::[]


@@ -1,6 +1,6 @@
:context: efk-logging-systemd
[id="efk-logging-systemd"]
= Configuring systemd-journald and rsyslog in {product-title}
= Configuring systemd-journald and rsyslog
include::modules/common-attributes.adoc[]
toc::[]


@@ -1,6 +1,6 @@
:context: efk-logging-curator
[id="efk-logging-eventrouter"]
= Working with Event Router in {product-title}
= Working with Event Router
include::modules/common-attributes.adoc[]
toc::[]


@@ -1,6 +1,6 @@
:context: efk-logging-troubleshooting
[id="efk-logging-troubleshooting"]
= Troubleshooting Kibana in {product-title}
= Troubleshooting Kibana
include::modules/common-attributes.adoc[]
toc::[]


@@ -3,7 +3,7 @@
// * logging/efk-logging.adoc
[id="efk-logging-about-curator_{context}"]
= About Curator in {product-title}
= About Curator
The Elasticsearch Curator tool performs scheduled maintenance operations on a global and/or per-project basis. Curator performs actions daily based on its configuration. Only one Curator Pod is
recommended per Elasticsearch cluster.


@@ -3,7 +3,7 @@
// * logging/efk-logging.adoc
[id="efk-logging-about-elasticsearch_{context}"]
= About Elasticsearch in {product-title}
= About Elasticsearch
{product-title} uses link:https://www.elastic.co/products/elasticsearch[Elasticsearch (ES)] to organize the log data from Fluentd into datastores, or _indices_.


@@ -3,7 +3,7 @@
// * logging/efk-logging.adoc
[id="efk-logging-about-eventrouter_{context}"]
= About Event Router in {product-title}
= About Event Router
The Event Router is a pod that forwards {product-title} events to cluster logging.
You must manually deploy the Event Router.


@@ -3,7 +3,7 @@
// * logging/efk-logging.adoc
[id="efk-logging-about-fluentd_{context}"]
= About Fluentd in {product-title}
= About Fluentd
{product-title} uses Fluentd to collect data about your cluster.


@@ -3,7 +3,7 @@
// * logging/efk-logging.adoc
[id="efk-logging-about-kibana_{context}"]
= About Kibana in {product-title}
= About Kibana
{product-title} uses Kibana to display the log data collected by Fluentd and indexed by Elasticsearch.


@@ -3,7 +3,7 @@
// * logging/efk-logging.adoc
[id="efk-logging-about_{context}"]
= About cluster logging in {product-title}
= About cluster logging
As an {product-title} cluster administrator, you can deploy cluster logging to
aggregate logs for a range of {product-title} services.


@@ -3,7 +3,7 @@
// * logging/efk-logging-uninstall.adoc
[id="efk-logging-uninstall-efk-ops_{context}"]
= Uninstall the infra cluster in {product-title}
= Uninstall the infra cluster
You can uninstall the infra cluster from {product-title} cluster logging.
After uninstalling, Fluentd no longer splits logs.


@@ -3,7 +3,7 @@
// * nodes/nodes-cluster-overcommit.adoc
[id="nodes-cluster-overcommit-about_{context}"]
= Understanding overcommitment in {product-title}
= Understanding overcommitment
Requests and limits enable administrators to allow and manage the overcommitment of resources on a node. The scheduler uses requests for scheduling your container and providing a minimum service guarantee. Limits constrain the amount of compute resource that may be consumed on your node.


@@ -3,7 +3,7 @@
// * nodes/nodes-cluster-overcommit.adoc
[id="resource-requests_{context}"]
= Understanding resource requests and overcommitment in {product-title}
= Understanding resource requests and overcommitment
For each compute resource, a container may specify a resource request and limit.
Scheduling decisions are made based on the request to ensure that a node has


@@ -3,7 +3,7 @@
// * nodes/nodes-cluster-resource-configure.adoc
[id="nodes-cluster-resource-configure-oom_{context}"]
= Understanding OOM kill policy in {product-title}
= Understanding OOM kill policy
{product-title} may kill a process in a container if the total memory usage of
all the processes in the container exceeds the memory limit, or in serious cases


@@ -3,7 +3,7 @@
// * nodes/nodes-containers-copying-files.adoc
[id="nodes-containers-copying-files-rsync_{context}"]
= Using advanced Rsync features in {product-title}
= Using advanced Rsync features
The `oc rsync` command exposes fewer command line options than standard `rsync`.
If you want to use a standard `rsync` command line option that is


@@ -3,7 +3,7 @@
// * nodes/nodes-containers-events.adoc
[id="nodes-containers-events-about_{context}"]
= Understanding events in {product-title}
= Understanding events
Events allow {product-title} to record
information about real-world events in a resource-agnostic manner. They also


@@ -3,7 +3,7 @@
// * nodes/nodes-containers-using.adoc
[id="nodes-containers-using-about_{context}"]
= Understanding Containers in {product-title}
= Understanding Containers
The basic units of {product-title} applications are called _containers_.
link:https://access.redhat.com/articles/1353593[Linux container technologies]


@@ -3,7 +3,7 @@
// * nodes/nodes-nodes-jobs.adoc
[id="nodes-nodes-jobs-about_{context}"]
= Understanding jobs and CronJobs in {product-title}
= Understanding jobs and CronJobs
A job tracks the overall progress of a task and updates its status with information
about active, succeeded, and failed pods. Deleting a job will clean up any pods it created.


@@ -3,7 +3,7 @@
// * nodes/nodes-nodes-rebooting.adoc
[id="nodes-nodes-rebooting-infrastructure_{context}"]
= Understanding infrastructure node rebooting in {product-title}
= Understanding infrastructure node rebooting
Infrastructure nodes are nodes that are labeled to run pieces of the
{product-title} environment. Currently, the easiest way to manage node reboots


@@ -3,7 +3,7 @@
// * nodes/nodes-pods-using.adoc
[id="nodes-pods-about_{context}"]
= About pods in {product-title}
= About pods
{product-title} leverages the Kubernetes concept of a _pod_, which is one or more containers deployed
together on one host, and the smallest compute unit that can be defined,


@@ -3,7 +3,7 @@
// * nodes/nodes-pods-priority.adoc
[id="nodes-pods-priority-about_{context}"]
= Understanding pod priority in {product-title}
= Understanding pod priority
When you use the Pod Priority and Preemption feature, the scheduler orders pending pods by their priority, and a pending pod is placed ahead of other pending pods with lower priority in the scheduling queue. As a result, the higher priority pod might be scheduled sooner than pods with lower priority if its scheduling requirements are met. If a pod cannot be scheduled, the scheduler continues to schedule other lower priority pods.


@@ -3,7 +3,7 @@
// * nodes/nodes-pods-priority.adoc
[id="nodes-pods-priority-preempt-about_{context}"]
= Understanding pod preemption in {product-title}
= Understanding pod preemption
When a developer creates a pod, the pod goes into a queue. If the developer configured the pod for pod priority or preemption, the scheduler picks a pod from the queue and tries to schedule the pod on a node. If the scheduler cannot find space on an appropriate node that satisfies all the specified requirements of the pod, preemption logic is triggered for the pending pod.


@@ -3,7 +3,7 @@
// * nodes/nodes-pods-using.adoc
[id="nodes-pods-using-about_{context}"]
= Understanding pods in {product-title}
= Understanding pods
Pods are the rough equivalent of a machine instance (physical or virtual) to a Container. Each pod is allocated its own internal IP address, therefore owning its entire port space, and Containers within pods can share their local storage and networking.


@@ -3,7 +3,7 @@
// * nodes/nodes-pods-using.adoc
[id="nodes-pods-using-example_{context}"]
= Example pod configurations in {product-title}
= Example pod configurations
{product-title} leverages the Kubernetes concept of a _pod_, which is one or more Containers deployed
together on one host, and the smallest compute unit that can be defined,


@@ -3,7 +3,7 @@
// * nodes/nodes-scheduler-default.adoc
[id="nodes-scheduler-default-about_{context}"]
= Understanding default scheduling in {product-title}
= Understanding default scheduling
The existing generic scheduler is the default platform-provided scheduler
_engine_ that selects a node to host the pod in a three-step operation:


@@ -3,7 +3,7 @@
// * nodes/nodes-scheduler-node-names.adoc
[id="nodes-scheduler-node-name-configuring_{context}"]
= Configuring the Pod Node Constraints admission controller to use names in {product-title}
= Configuring the Pod Node Constraints admission controller to use names
You can configure the Pod Node Constraints admission controller to ensure that pods are only placed onto nodes with a specific name.


@@ -3,7 +3,7 @@
// * nodes/nodes-scheduler-node-projects.adoc
[id="nodes-scheduler-node-projects-about_{context}"]
= Understanding how to constrain pods by project name in {product-title}
= Understanding how to constrain pods by project name
The Pod Node Selector admission controller determines where a pod can be placed using labels on projects and node selectors specified in pods. A new pod will be placed on a node associated with a project only if the node selectors in the pod match the labels in the project.


@@ -3,7 +3,7 @@
// * nodes/nodes-scheduler-node-projects.adoc
[id="nodes-scheduler-node-projects-configuring_{context}"]
= Configuring the Pod Node Selector admission controller to use projects in {product-title}
= Configuring the Pod Node Selector admission controller to use projects
You can configure the Pod Node Selector admission controller to ensure that pods are only placed onto nodes in specific projects.
The Pod Node Selector admission controller uses a configuration file to set options for the behavior of the backend.


@@ -3,7 +3,7 @@
// * nodes/nodes-scheduler-node-selector.adoc
[id="nodes-scheduler-node-selector-about_{context}"]
= Understanding node selectors in {product-title}
= Understanding node selectors
Using _node selectors_, you can ensure that pods are only placed onto nodes with specific labels. As a cluster administrator, you can
use the Pod Node Constraints admission controller to set a policy that prevents users without the *pods/binding* permission


@@ -3,7 +3,7 @@
// * nodes/nodes-scheduler-node-selector.adoc
[id="nodes-scheduler-node-selectors-configuring_{context}"]
= Configuring the Pod Node Constraints admission controller to use node selectors in {product-title}
= Configuring the Pod Node Constraints admission controller to use node selectors
You can configure the Pod Node Constraints admission controller to ensure that pods are only placed onto nodes with specific labels.


@@ -3,7 +3,7 @@
// * nodes/nodes-scheduler-pod-affinity.adoc
[id="nodes-scheduler-pod-affinity-about_{context}"]
= Understanding pod affinity in {product-title}
= Understanding pod affinity
_Pod affinity_ and _pod anti-affinity_ allow you to constrain which nodes your pod is eligible to be scheduled on based on the key/value labels on other pods.


@@ -3,7 +3,7 @@
// * nodes/nodes-scheduler-taints-tolerations.adoc
[id="nodes-scheduler-taints-tolerations-about_{context}"]
= Understanding taints and tolerations in {product-title}
= Understanding taints and tolerations
A _taint_ allows a node to refuse a pod to be scheduled unless that pod has a matching _toleration_.


@@ -3,7 +3,7 @@
// * nodes/nodes-scheduler-taints-tolerations.adoc
[id="nodes-scheduler-taints-tolerations-adding_{context}"]
= Adding taints and tolerations in {product-title}
= Adding taints and tolerations
You add taints to nodes and tolerations to pods to allow nodes to control which pods should (or should not) be scheduled on them.


@@ -3,7 +3,7 @@
// * nodes/nodes-scheduler-taints-tolerations.adoc
[id="nodes-scheduler-taints-tolerations-examples_{context}"]
= Example taint and toleration scenarios in {product-title}
= Example taint and toleration scenarios
Taints and tolerations are a flexible way to steer pods away from nodes or evict pods that should not be running on a node. A few typical scenarios are:


@@ -3,7 +3,7 @@
// * nodes/nodes-scheduler-taints-tolerations.adoc
[id="nodes-scheduler-taints-tolerations-seconds_{context}"]
= Setting a default value for toleration seconds in {product-title}
= Setting a default value for toleration seconds
When using taints and tolerations, if taints are added to an existing node, non-matching pods on that node will be evicted. You can modify the time allowed before pods are evicted by using the toleration seconds plug-in, which sets the eviction period to five minutes by default.


@@ -1,6 +1,6 @@
:context: nodes-cluster-overcommit
[id="nodes-cluster-overcommit"]
= Configuring your cluster to place pods on overcommitted nodes in {product-title}
= Configuring your cluster to place pods on overcommitted nodes
include::modules/common-attributes.adoc[]
toc::[]


@@ -1,6 +1,6 @@
:context: nodes-cluster-resource-configure
[id="nodes-cluster-resource-configure"]
= Configuring cluster memory to meet container memory and risk requirements in {product-title}
= Configuring cluster memory to meet container memory and risk requirements
include::modules/common-attributes.adoc[]


@@ -1,6 +1,6 @@
:context: nodes-containers-using
[id="nodes-containers-using"]
= Understanding Containers in {product-title}
= Understanding Containers
include::modules/common-attributes.adoc[]


@@ -1,7 +1,7 @@
:context: nodes-nodes-jobs
[id="nodes-nodes-jobs"]
= Running tasks in pods using jobs in {product-title}
= Running tasks in pods using jobs
include::modules/common-attributes.adoc[]
toc::[]


@@ -1,6 +1,6 @@
:context: nodes-pods-daemonsets
[id="nodes-pods-daemonsets"]
= Running background tasks on nodes automatically with daemonsets in {product-title}
= Running background tasks on nodes automatically with daemonsets
include::modules/common-attributes.adoc[]
toc::[]


@@ -1,7 +1,7 @@
:context: nodes-nodes-rebooting
[id="nodes-nodes-rebooting"]
= Understanding node rebooting in {product-title}
= Understanding node rebooting
include::modules/common-attributes.adoc[]
toc::[]


@@ -1,6 +1,6 @@
:context: nodes-pods-device
[id="nodes-pods-device"]
= Using device plug-ins to access external resources with pods in {product-title}
= Using device plug-ins to access external resources with pods
include::modules/common-attributes.adoc[]
toc::[]


@@ -1,6 +1,6 @@
:context: nodes-pods-priority
[id="nodes-pods-priority"]
= Including pod priority in pod scheduling decisions in {product-title}
= Including pod priority in pod scheduling decisions
include::modules/common-attributes.adoc[]
toc::[]


@@ -1,6 +1,6 @@
:context: nodes-pods-secrets
[id="nodes-pods-secrets"]
= Providing sensitive data to pods in {product-title}
= Providing sensitive data to pods
include::modules/common-attributes.adoc[]
toc::[]


@@ -1,6 +1,6 @@
:context: nodes-pods-using-ssy
[id="nodes-pods-using-pp"]
= Using pods in {product-title}
= Using pods
include::modules/common-attributes.adoc[]
toc::[]


@@ -1,6 +1,6 @@
:context: nodes-pods-viewing
[id="nodes-pods-viewing"]
= Viewing pods in {product-title}
= Viewing pods
include::modules/common-attributes.adoc[]
toc::[]


@@ -1,6 +1,6 @@
:context: nodes-scheduler-node-project
[id="nodes-scheduler-node-project"]
= Placing a pod in a specific project in {product-title}
= Placing a pod in a specific project
include::modules/common-attributes.adoc[]
:relfileprefix: ../