mirror of
https://github.com/openshift/openshift-docs.git
synced 2026-02-05 12:46:18 +01:00
TELCODOCS-1506 Telco CORE Reference Design Specification
ref config updates Adding generated YAML + modules from the RDS Reorganizing TOC Adding latest RDS updates RDS doc updates RDS YAML updates Shanes review latest updates + new components overview diagram David J's review comments CCS attributes update Ian's review comments adding Hari's deviations update updating 4.14 link URLs Generalizes scope and deviation topics for RAN and Core updates for RDS prod version update deviations wording Updating deviation and scope topics consolidate dev and scope topics + intro Adding link to ztp-site-generate procedure typo final changes for RDS typos Ian's comments update for RDS terminology remove core CRs note add GH core CRs link
This commit is contained in:
@@ -7,21 +7,24 @@
|
||||
[Mm]idhaul
|
||||
[Pp]assthrough
|
||||
[Pp]ostinstall
|
||||
[Pp]recaching
|
||||
[Pp]reinstall
|
||||
[Rr]ealtime
|
||||
[Tt]elco
|
||||
Assisted Installer
|
||||
Control Plane Machine Set Operator
|
||||
custom resources?
|
||||
GHz
|
||||
gpsd
|
||||
gpspipe
|
||||
hyperthreads?
|
||||
KPIs?
|
||||
linuxptp
|
||||
Mbps
|
||||
MBps
|
||||
Mellanox
|
||||
MetalLB
|
||||
NICs?
|
||||
Operator
|
||||
Operators
|
||||
Operators?
|
||||
pmc
|
||||
ubxtool
|
||||
|
||||
@@ -128,6 +128,16 @@ endif::[]
|
||||
:TempoShortName: distributed tracing platform (Tempo)
|
||||
:TempoOperator: Tempo Operator
|
||||
:TempoVersion: 2.3.0
|
||||
//telco
|
||||
ifdef::telco-ran[]
|
||||
:rds: telco RAN DU
|
||||
:rds-caps: Telco RAN DU
|
||||
:rds-first: Telco RAN distributed unit (DU)
|
||||
endif::[]
|
||||
ifdef::telco-core[]
|
||||
:rds: telco core
|
||||
:rds-caps: Telco core
|
||||
endif::[]
|
||||
//logging
|
||||
:logging-title: logging subsystem for Red Hat OpenShift
|
||||
:logging-title-uc: Logging subsystem for Red Hat OpenShift
|
||||
|
||||
@@ -45,6 +45,19 @@ openshift-rosa-portal:
|
||||
enterprise-4.13:
|
||||
name: ''
|
||||
dir: rosa-portal/
|
||||
openshift-telco:
|
||||
name: OpenShift Container Platform
|
||||
author: OpenShift Documentation Project <openshift-docs@redhat.com>
|
||||
site: commercial
|
||||
site_name: Documentation
|
||||
site_url: https://docs.openshift.com/
|
||||
branches:
|
||||
enterprise-4.14:
|
||||
name: '4.14'
|
||||
dir: container-platform-telco/4.14
|
||||
# enterprise-4.15:
|
||||
# name: '4.15'
|
||||
# dir: container-platform-telco/4.15
|
||||
microshift:
|
||||
name: Red Hat build of MicroShift
|
||||
author: OpenShift Documentation Project <openshift-docs@redhat.com>
|
||||
|
||||
@@ -2874,10 +2874,32 @@ Name: Reference design specifications
|
||||
Dir: telco_ref_design_specs
|
||||
Distros: openshift-telco
|
||||
Topics:
|
||||
- Name: Telco RAN reference design specification
|
||||
File: ztp-ran-reference-design
|
||||
- Name: Telco reference design specifications
|
||||
File: telco-ref-design-specs-overview
|
||||
- Name: Telco RAN DU reference design specification
|
||||
Dir: ran
|
||||
Topics:
|
||||
- Name: Telco RAN DU reference design overview
|
||||
File: telco-ran-ref-design-spec
|
||||
- Name: Telco RAN DU use model overview
|
||||
File: telco-ran-du-overview
|
||||
- Name: RAN DU reference design components
|
||||
File: telco-ran-ref-du-components
|
||||
- Name: RAN DU reference design configuration CRs
|
||||
File: telco-ran-ref-du-crs
|
||||
- Name: Telco RAN DU software specifications
|
||||
File: telco-ran-ref-software-artifacts
|
||||
- Name: Telco core reference design specification
|
||||
File: cnf-core-reference-design
|
||||
Dir: core
|
||||
Topics:
|
||||
- Name: Telco core reference design overview
|
||||
File: telco-core-rds-overview
|
||||
- Name: Telco core use model overview
|
||||
File: telco-core-rds-use-cases
|
||||
- Name: Core reference design components
|
||||
File: telco-core-ref-design-components
|
||||
- Name: Core reference design configuration CRs
|
||||
File: telco-core-ref-crs
|
||||
---
|
||||
Name: Specialized hardware and driver enablement
|
||||
Dir: hardware_enablement
|
||||
|
||||
BIN
images/473_OpenShift_Telco_Core_Reference_arch_1123.png
Normal file
BIN
images/473_OpenShift_Telco_Core_Reference_arch_1123.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 93 KiB |
@@ -78,7 +78,7 @@ metadata:
|
||||
name: numaresourcesscheduler
|
||||
spec:
|
||||
imageSpec: "registry.redhat.io/openshift4/noderesourcetopology-scheduler-container-rhel8:v{product-version}"
|
||||
cacheResyncPeriod: "5s" <1>
|
||||
cacheResyncPeriod: "5s" <1>
|
||||
----
|
||||
<1> Enter an interval value in seconds for synchronization of the scheduler cache. A value of `5s` is typical for most implementations.
|
||||
+
|
||||
|
||||
@@ -39,7 +39,7 @@ registry.redhat.io/openshift4/cnf-tests-rhel8:v{product-version} \
|
||||
/usr/bin/test-run.sh -ginkgo.v -ginkgo.focus="cyclictest"
|
||||
----
|
||||
+
|
||||
The command runs the `cyclictest` tool for 10 minutes (600 seconds). The test runs successfully when the maximum observed latency is lower than `MAXIMUM_LATENCY` (in this example, 20 μs). Latency spikes of 20 μs and above are generally not acceptable for telco RAN workloads.
|
||||
The command runs the `cyclictest` tool for 10 minutes (600 seconds). The test runs successfully when the maximum observed latency is lower than `MAXIMUM_LATENCY` (in this example, 20 μs). Latency spikes of 20 μs and above are generally not acceptable for {rds} workloads.
|
||||
+
|
||||
If the results exceed the latency threshold, the test fails.
|
||||
+
|
||||
|
||||
56
modules/telco-core-414-whats-new-ref-design.adoc
Normal file
56
modules/telco-core-414-whats-new-ref-design.adoc
Normal file
@@ -0,0 +1,56 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * telco_ref_design_specs/ran/telco-ran-ref-design-spec.adoc
|
||||
|
||||
:_mod-docs-content-type: CONCEPT
|
||||
[id="telco-core-whats-new-ref-design_{context}"]
|
||||
= {product-title} {product-version} features for {rds}
|
||||
|
||||
The following features, which are included in {product-title} {product-version} and leveraged by the {rds} reference design specification (RDS), have been added or updated.
|
||||
|
||||
.New features for {rds} in {product-title} {product-version}
|
||||
[cols="1,3", options="header"]
|
||||
|====
|
||||
|Feature
|
||||
|Description
|
||||
|
||||
//CNF-7349 Rootless DPDK pods
|
||||
|Support for running rootless Data Plane Development Kit (DPDK) workloads with kernel access by using the TAP CNI plugin
|
||||
a|DPDK applications that inject traffic into the kernel can run in non-privileged pods with the help of the TAP CNI plugin.
|
||||
|
||||
* link:https://docs.openshift.com/container-platform/4.14/networking/hardware_networks/using-dpdk-and-rdma.html#nw-running-dpdk-rootless-tap_using-dpdk-and-rdma[Using the TAP CNI to run a rootless DPDK workload with kernel access]
|
||||
|
||||
//CNF-5977 Better pinning of the networking stack
|
||||
|Dynamic use of non-reserved CPUs for OVS
|
||||
a|With this release, the Open vSwitch (OVS) networking stack can dynamically use non-reserved CPUs.
|
||||
The dynamic use of non-reserved CPUs occurs by default in performance-tuned clusters with a CPU manager policy set to `static`.
|
||||
The dynamic use of available, non-reserved CPUs maximizes compute resources for OVS and minimizes network latency for workloads during periods of high demand.
|
||||
OVS cannot use isolated CPUs assigned to containers in `Guaranteed` QoS pods. This separation avoids disruption to critical application workloads.
|
||||
|
||||
//CNF-7760
|
||||
|Enabling more control over the C-states for each pod
|
||||
a|The `PerformanceProfile` supports `perPodPowerManagement` which provides more control over the C-states for pods. Now, instead of disabling C-states completely, you can specify a maximum latency in microseconds for C-states. You configure this option in the `cpu-c-states.crio.io` annotation, which helps to optimize power savings for high-priority applications by enabling some of the shallower C-states instead of disabling them completely.
|
||||
|
||||
* link:https://docs.openshift.com/container-platform/4.14/scalability_and_performance/cnf-low-latency-tuning.html#node-tuning-operator-pod-power-saving-config_cnf-master[Optional: Power saving configurations]
|
||||
|
||||
//CNF-7741 Permit to disable NUMA Aware scheduling hints based on SR-IOV VFs
|
||||
|Exclude SR-IOV network topology for NUMA-aware scheduling
|
||||
a|You can exclude advertising Non-Uniform Memory Access (NUMA) nodes for the SR-IOV network to the Topology Manager. By not advertising NUMA nodes for the SR-IOV network, you can permit more flexible SR-IOV network deployments during NUMA-aware pod scheduling.
|
||||
|
||||
For example, in some scenarios, you want flexibility for how a pod is deployed. By not providing a NUMA node hint to the Topology Manager for the pod's SR-IOV network resource, the Topology Manager can deploy the SR-IOV network resource and the pod CPU and memory resources to different NUMA nodes. In previous {product-title} releases, the Topology Manager attempted to place all resources on the same NUMA node.
|
||||
|
||||
* link:https://docs.openshift.com/container-platform/4.14/networking/hardware_networks/configuring-sriov-device.html#nw-sriov-exclude-topology-manager_configuring-sriov-device[Exclude the SR-IOV network topology for NUMA-aware scheduling]
|
||||
|
||||
//CNF-8035 MetalLB VRF Egress interface selection with VRFs (Tech Preview)
|
||||
|Egress service resource to manage egress traffic for pods behind a load balancer (Technology Preview)
|
||||
a|With this update, you can use an `EgressService` custom resource (CR) to manage egress traffic for pods behind a load balancer service.
|
||||
|
||||
You can use the `EgressService` CR to manage egress traffic in the following ways:
|
||||
|
||||
* Assign the load balancer service's IP address as the source IP address of egress traffic for pods behind the load balancer service.
|
||||
|
||||
* Configure the egress traffic for pods behind a load balancer to a different network than the default node network.
|
||||
|
||||
* link:https://docs.openshift.com/container-platform/4.14/networking/ovn_kubernetes_network_provider/configuring-egress-traffic-for-vrf-loadbalancer-services.html#configuring-egress-traffic-loadbalancer-services[Configuring an egress service]
|
||||
|
||||
|====
|
||||
28
modules/telco-core-cluster-network-operator.adoc
Normal file
28
modules/telco-core-cluster-network-operator.adoc
Normal file
@@ -0,0 +1,28 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * telco_ref_design_specs/core/telco-core-ref-design-components.adoc
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="telco-core-cluster-network-operator_{context}"]
|
||||
= Cluster Network Operator (CNO)
|
||||
|
||||
New in this release::
|
||||
|
||||
Not applicable.
|
||||
|
||||
Description::
|
||||
|
||||
The CNO deploys and manages the cluster network components including the default OVN-Kubernetes network plugin during {product-title} cluster installation. It allows configuring primary interface MTU settings, OVN gateway modes to use node routing tables for pod egress, and additional secondary networks such as MACVLAN.
|
||||
+
|
||||
In support of network traffic segregation, multiple network interfaces are configured through the CNO. Traffic steering to these interfaces is configured through static routes applied by using the NMState Operator. To ensure that pod traffic is properly routed, OVN-K is configured with the `routingViaHost` option enabled. This setting uses the kernel routing table and the applied static routes rather than OVN for pod egress traffic.
|
||||
+
|
||||
The Whereabouts CNI plugin is used to provide dynamic IPv4 and IPv6 addressing for additional pod network interfaces without the use of a DHCP server.
|
||||
|
||||
Limits and requirements::
|
||||
|
||||
* OVN-Kubernetes is required for IPv6 support.
|
||||
* Large MTU cluster support requires connected network equipment to be set to the same or larger value.
|
||||
|
||||
Engineering considerations::
|
||||
* Pod egress traffic is handled by kernel routing table with the `routingViaHost` option. Appropriate static routes must be configured in the host.
|
||||
|
||||
49
modules/telco-core-cpu-partitioning-performance-tune.adoc
Normal file
49
modules/telco-core-cpu-partitioning-performance-tune.adoc
Normal file
@@ -0,0 +1,49 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * telco_ref_design_specs/core/telco-core-ref-design-components.adoc
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="telco-core-cpu-partitioning-performance-tune_{context}"]
|
||||
= CPU partitioning and performance tuning
|
||||
|
||||
New in this release::
|
||||
|
||||
Open vSwitch (OVS) is removed from CPU partitioning. OVS manages its cpuset dynamically to automatically adapt to network traffic needs. Users no longer need to reserve additional CPUs for handling high network throughput on the primary container network interface (CNI). There is no impact on the configuration needed to benefit from this change.
|
||||
|
||||
Description::
|
||||
|
||||
CPU partitioning allows for the separation of sensitive workloads from generic purposes, auxiliary processes, interrupts, and driver work queues to achieve improved performance and latency. The CPUs allocated to those auxiliary processes are referred to as `reserved` in the following sections. In hyperthreaded systems, a CPU is one hyperthread.
|
||||
+
|
||||
For more information, see https://docs.openshift.com/container-platform/latest/scalability_and_performance/cnf-low-latency-tuning.html#cnf-cpu-infra-container_cnf-master[Restricting CPUs for infra and application containers].
|
||||
+
|
||||
Configure system level performance.
|
||||
For recommended settings, see link:https://docs.openshift.com/container-platform/latest/scalability_and_performance/ztp_far_edge/ztp-reference-cluster-configuration-for-vdu.html#ztp-du-configuring-host-firmware-requirements_sno-configure-for-vdu[Configuring host firmware for low latency and high performance].
|
||||
|
||||
Limits and requirements::
|
||||
* The operating system needs a certain amount of CPU to perform all the support tasks including kernel networking.
|
||||
** A system with just user plane networking applications (DPDK) needs at least one Core (2 hyperthreads when enabled) reserved for the operating system and the infrastructure components.
|
||||
* A system with Hyper-Threading enabled must always put all core sibling threads to the same pool of CPUs.
|
||||
* The set of reserved and isolated cores must include all CPU cores.
|
||||
* Core 0 of each NUMA node must be included in the reserved CPU set.
|
||||
* Isolated cores might be impacted by interrupts. The following annotations must be attached to the pod if guaranteed QoS pods require full use of the CPU:
|
||||
+
|
||||
----
|
||||
cpu-load-balancing.crio.io: "disable"
|
||||
cpu-quota.crio.io: "disable"
|
||||
irq-load-balancing.crio.io: "disable"
|
||||
----
|
||||
* When per-pod power management is enabled with `PerformanceProfile.workloadHints.perPodPowerManagement` the following annotations must also be attached to the pod if guaranteed QoS pods require full use of the CPU:
|
||||
+
|
||||
----
|
||||
cpu-c-states.crio.io: "disable"
|
||||
cpu-freq-governor.crio.io: "performance"
|
||||
----
|
||||
|
||||
Engineering considerations::
|
||||
* The minimum reserved capacity (`systemReserved`) required can be found by following the guidance in link:https://access.redhat.com/solutions/5843241["Which amount of CPU and memory are recommended to reserve for the system in OCP 4 nodes?"]
|
||||
* The actual required reserved CPU capacity depends on the cluster configuration and workload attributes.
|
||||
* This reserved CPU value must be rounded up to a full core (2 hyper-thread) alignment.
|
||||
* Changes to the CPU partitioning will drain and reboot the nodes in the MCP.
|
||||
* The reserved CPUs reduce the pod density, as the reserved CPUs are removed from the allocatable capacity of the OpenShift node.
|
||||
* The real-time workload hint should be enabled if the workload is real-time capable.
|
||||
* Hardware without Interrupt Request (IRQ) affinity support will impact isolated CPUs. To ensure that pods with guaranteed CPU QoS have full use of allocated CPU, all hardware in the server must support IRQ affinity.
|
||||
17
modules/telco-core-crs-machine-configuration.adoc
Normal file
17
modules/telco-core-crs-machine-configuration.adoc
Normal file
@@ -0,0 +1,17 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * telco_ref_design_specs/core/telco-core-ref-crs.adoc
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="reference-crs_{context}"]
|
||||
= Reference CRs
|
||||
|
||||
.Resource Tuning
|
||||
[cols="3*"]
|
||||
|====
|
||||
| Component | Reference CR | Optional
|
||||
|
||||
| System Reserved capacity a| * xref:telco-core-ref-du-crs.adoc#pid-limits-cr-yaml[pid-limits-cr.yaml]
|
||||
* xref:telco-core-ref-du-crs.adoc#control-plane-system-reserved[control-plane-system-reserved.yaml] | Yes
|
||||
|
||||
|====
|
||||
30
modules/telco-core-crs-networking.adoc
Normal file
30
modules/telco-core-crs-networking.adoc
Normal file
@@ -0,0 +1,30 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// *
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="networking-crs_{context}"]
|
||||
= Networking reference CRs
|
||||
|
||||
.Networking CRs
|
||||
[cols="4*", options="header", format=csv]
|
||||
|====
|
||||
Component,Reference CR,Optional,New in this release
|
||||
Baseline,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-network-yaml[Network.yaml],No,No
|
||||
Baseline,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-networkattachmentdefinition-yaml[networkAttachmentDefinition.yaml],Yes,No
|
||||
SR-IOV Network Operator,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-sriovsubscriptionns-yaml[SriovSubscriptionNS.yaml],No,No
|
||||
SR-IOV Network Operator,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-sriovsubscriptionopergroup-yaml[SriovSubscriptionOperGroup.yaml],No,No
|
||||
SR-IOV Network Operator,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-sriovsubscription-yaml[SriovSubscription.yaml],No,No
|
||||
SR-IOV Network Operator,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-sriovoperatorconfig-yaml[SriovOperatorConfig.yaml],No,No
|
||||
SR-IOV Network Operator,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-sriovnetworknodepolicy-yaml[sriovNetworkNodePolicy.yaml],No,No
|
||||
SR-IOV Network Operator,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-sriovnetwork-yaml[sriovNetwork.yaml],No,No
|
||||
Load balancer,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-metallbns-yaml[metallbNS.yaml],No,No
|
||||
Load balancer,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-metallbopergroup-yaml[metallbOperGroup.yaml],No,No
|
||||
Load balancer,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-metallbsubscription-yaml[metallbSubscription.yaml],No,No
|
||||
Load balancer,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-metallb-yaml[metallb.yaml],No,No
|
||||
Load balancer,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-bgp-peer-yaml[bgp-peer.yaml],No,No
|
||||
Load balancer,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-bfd-profile-yaml[bfd-profile.yaml],No,No
|
||||
Load balancer,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-addr-pool-yaml[addr-pool.yaml],No,No
|
||||
Load balancer,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-bgp-advr-yaml[bgp-advr.yaml],No,No
|
||||
Multus - Tap CNI for rootless DPDK pod,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-mc_rootless_pods_selinux-yaml[mc_rootless_pods_selinux.yaml],Yes,No
|
||||
|====
|
||||
26
modules/telco-core-crs-other.adoc
Normal file
26
modules/telco-core-crs-other.adoc
Normal file
@@ -0,0 +1,26 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// *
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="other-crs_{context}"]
|
||||
= Other reference CRs
|
||||
|
||||
.Other CRs
|
||||
[cols="4*", options="header", format=csv]
|
||||
|====
|
||||
Component,Reference CR,Optional,New in this release
|
||||
Disconnected configuration,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-catalog-source-yaml[catalog-source.yaml],No,No
|
||||
Disconnected configuration,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-icsp-yaml[icsp.yaml],No,No
|
||||
Disconnected configuration,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-operator-hub-yaml[operator-hub.yaml],No,No
|
||||
Cluster logging,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-clusterlogns-yaml[ClusterLogNS.yaml],Yes,No
|
||||
Cluster logging,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-clusterlogopergroup-yaml[ClusterLogOperGroup.yaml],Yes,No
|
||||
Cluster logging,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-clusterlogsubscription-yaml[ClusterLogSubscription.yaml],Yes,No
|
||||
Cluster logging,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-clusterlogforwarder-yaml[ClusterLogForwarder.yaml],Yes,No
|
||||
Cluster logging,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-clusterlogging-yaml[ClusterLogging.yaml],Yes,No
|
||||
Additional kernel modules,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-control-plane-load-kernel-modules-yaml[control-plane-load-kernel-modules.yaml],Yes,No
|
||||
Additional kernel modules,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-worker-load-kernel-modules-yaml[worker-load-kernel-modules.yaml],Yes,No
|
||||
Additional kernel modules,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-sctp_module_mc-yaml[sctp_module_mc.yaml],Yes,No
|
||||
Power management,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-performanceprofile-yaml[PerformanceProfile.yaml],No,No
|
||||
Monitoring and observability,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-monitoring-config-cm-yaml[monitoring-config-cm.yaml],Yes,No
|
||||
|====
|
||||
15
modules/telco-core-crs-resource-tuning.adoc
Normal file
15
modules/telco-core-crs-resource-tuning.adoc
Normal file
@@ -0,0 +1,15 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// *
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="resource-tuning-crs_{context}"]
|
||||
= Resource Tuning reference CRs
|
||||
|
||||
.Resource Tuning CRs
|
||||
[cols="4*", options="header", format=csv]
|
||||
|====
|
||||
Component,Reference CR,Optional,New in this release
|
||||
System reserved capacity,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-pid-limits-cr-yaml[pid-limits-cr.yaml],Yes,No
|
||||
System reserved capacity,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-control-plane-system-reserved-yaml[control-plane-system-reserved.yaml],Yes,No
|
||||
|====
|
||||
18
modules/telco-core-crs-scheduling.adoc
Normal file
18
modules/telco-core-crs-scheduling.adoc
Normal file
@@ -0,0 +1,18 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// *
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="scheduling-crs_{context}"]
|
||||
= Scheduling reference CRs
|
||||
|
||||
.Scheduling CRs
|
||||
[cols="4*", options="header", format=csv]
|
||||
|====
|
||||
Component,Reference CR,Optional,New in this release
|
||||
NUMA-aware scheduler,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-nropsubscriptionns-yaml[NROPSubscriptionNS.yaml],No,No
|
||||
NUMA-aware scheduler,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-nropsubscriptionopergroup-yaml[NROPSubscriptionOperGroup.yaml],No,No
|
||||
NUMA-aware scheduler,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-nropsubscription-yaml[NROPSubscription.yaml],No,No
|
||||
NUMA-aware scheduler,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-sched-yaml[sched.yaml],No,No
|
||||
NUMA-aware scheduler,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-nrop-yaml[nrop.yaml],No,No
|
||||
|====
|
||||
18
modules/telco-core-crs-storage.adoc
Normal file
18
modules/telco-core-crs-storage.adoc
Normal file
@@ -0,0 +1,18 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// *
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="storage-crs_{context}"]
|
||||
= Storage reference CRs
|
||||
|
||||
.Storage CRs
|
||||
[cols="4*", options="header", format=csv]
|
||||
|====
|
||||
Component,Reference CR,Optional,New in this release
|
||||
External ODF configuration,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-odfns-yaml[odfNS.yaml],No,No
|
||||
External ODF configuration,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-odfopergroup-yaml[odfOperGroup.yaml],No,No
|
||||
External ODF configuration,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-odfsubscription-yaml[odfSubscription.yaml],No,No
|
||||
External ODF configuration,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-01-rook-ceph-external-cluster-details.secret-yaml[01-rook-ceph-external-cluster-details.secret.yaml],No,No
|
||||
External ODF configuration,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-02-ocs-external-storagecluster-yaml[02-ocs-external-storagecluster.yaml],No,No
|
||||
|====
|
||||
61
modules/telco-core-hardware-platform-specifications.adoc
Normal file
61
modules/telco-core-hardware-platform-specifications.adoc
Normal file
@@ -0,0 +1,61 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * telco_ref_design_specs/ran/telco-ran-ref-validation-artifacts.adoc
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="telco-core-hardware-platform-specifications_{context}"]
|
||||
= Hardware platform specifications
|
||||
|
||||
The telco core reference configuration is validated with the following hardware:
|
||||
|
||||
.Validated {sno} DU cluster hardware
|
||||
[cols="1,3", width="90%", options="header"]
|
||||
|====
|
||||
|Server
|
||||
|Specifications
|
||||
|
||||
|Dell PowerEdge R640
|
||||
a|* 192G RAM
|
||||
* 64 cores
|
||||
|
||||
|HP ProLiant e910
|
||||
a|* 96G RAM
|
||||
* 48 cores
|
||||
|====
|
||||
|
||||
.Validated CNF compute cluster hardware
|
||||
[cols="1,3", width="90%", options="header"]
|
||||
|====
|
||||
|Server
|
||||
|Specifications
|
||||
|
||||
|Dell R640/750 (2-sockets)
|
||||
a|* 2 x Intel Xeon Gold 6248 CPUs
|
||||
* 196 to 256GB RAM
|
||||
* 2.5 to 3 TB HD
|
||||
|====
|
||||
|
||||
.Validated NICs
|
||||
[cols="1,3", width="90%", options="header"]
|
||||
|====
|
||||
|Network interface
|
||||
|Description
|
||||
|
||||
|Intel X722
|
||||
a|* 10/1 GbE backplane
|
||||
|
||||
|Mellanox ConnectX-4 Lx
|
||||
a|* 2 ports
|
||||
* 25 GbE
|
||||
* PCIe 3.0
|
||||
|====
|
||||
|
||||
.Validated Storage
|
||||
[cols="1,3", width="90%", options="header"]
|
||||
|====
|
||||
|Device
|
||||
|Description
|
||||
|
||||
|NVME drive
|
||||
a|
|
||||
|====
|
||||
26
modules/telco-core-installation.adoc
Normal file
26
modules/telco-core-installation.adoc
Normal file
@@ -0,0 +1,26 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * telco_ref_design_specs/core/telco-core-ref-design-components.adoc
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="telco-core-installation_{context}"]
|
||||
= Installation
|
||||
|
||||
New in this release::
|
||||
|
||||
|
||||
Description::
|
||||
|
||||
{rds-caps} clusters can be installed by using the Agent Based Installer (ABI). This method allows users to install {product-title} on bare metal servers without requiring additional servers or VMs for managing the installation. The ABI installer can be run on any system, for example a laptop, to generate an ISO installation image. This ISO is used as the installation media for the cluster supervisor nodes. Progress can be monitored by using the ABI tool from any system with network connectivity to the supervisor node’s API interfaces.
|
||||
|
||||
* Installation from declarative CRs
|
||||
* Does not require additional servers to support installation
|
||||
* Supports installation in disconnected environments
|
||||
|
||||
Limits and requirements::
|
||||
|
||||
* Disconnected installation requires a reachable registry with all required content mirrored.
|
||||
|
||||
Engineering considerations::
|
||||
|
||||
* Networking configuration should be applied as NMState configuration during installation in preference to day-2 configuration by using the NMState Operator.
|
||||
44
modules/telco-core-kernel.adoc
Normal file
44
modules/telco-core-kernel.adoc
Normal file
@@ -0,0 +1,44 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * telco_ref_design_specs/core/telco-core-ref-design-components.adoc
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="telco-core-kernel_{context}"]
|
||||
= Kernel
|
||||
|
||||
New in this release::
|
||||
|
||||
Not applicable
|
||||
|
||||
Description::
|
||||
|
||||
The user can install the following kernel modules by using `MachineConfig` to provide extended kernel functionality to CNFs:
|
||||
|
||||
* sctp
|
||||
* ip_gre
|
||||
* ip6_tables
|
||||
* ip6t_REJECT
|
||||
* ip6table_filter
|
||||
* ip6table_mangle
|
||||
* iptable_filter
|
||||
* iptable_mangle
|
||||
* iptable_nat
|
||||
* xt_multiport
|
||||
* xt_owner
|
||||
* xt_REDIRECT
|
||||
* xt_statistic
|
||||
* xt_TCPMSS
|
||||
* xt_u32
|
||||
|
||||
Limits and requirements::
|
||||
|
||||
* Use of functionality available through these kernel modules must be analyzed by the user to determine the impact on CPU load, system performance, and ability to sustain KPI.
|
||||
+
|
||||
[NOTE]
|
||||
====
|
||||
Out of tree drivers are not supported.
|
||||
====
|
||||
|
||||
Engineering considerations::
|
||||
|
||||
Not applicable
|
||||
29
modules/telco-core-load-balancer.adoc
Normal file
29
modules/telco-core-load-balancer.adoc
Normal file
@@ -0,0 +1,29 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * telco_ref_design_specs/core/telco-core-ref-design-components.adoc
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="telco-core-load-balancer_{context}"]
|
||||
= Load Balancer
|
||||
|
||||
New in this release::
|
||||
|
||||
Not applicable.
|
||||
|
||||
Description::
|
||||
|
||||
MetalLB is a load-balancer implementation for bare metal Kubernetes clusters using standard routing protocols. It enables a Kubernetes service to get an external IP address which is also added to the host network for the cluster.
|
||||
+
|
||||
Some use cases might require features not available in MetalLB, for example, stateful load balancing. Where necessary, you can use an external third-party load balancer. Selection and configuration of an external load balancer is outside the scope of this specification. When an external third-party load balancer is used, the integration effort must include enough analysis to ensure all performance and resource utilization requirements are met.
|
||||
|
||||
Limits and requirements::
|
||||
|
||||
* Stateful load balancing is not supported by MetalLB. An alternate load balancer implementation must be used if this is a requirement for workload CNFs.
|
||||
* The networking infrastructure must ensure that the external IP address is routable from clients to the host network for the cluster.
|
||||
|
||||
Engineering considerations::
|
||||
* MetalLB is used in BGP mode only for core use case models.
|
||||
* For core use models, MetalLB is supported with only the OVN-Kubernetes network provider used in local gateway mode. See `routingViaHost` in the "Cluster Network Operator" section.
|
||||
* BGP configuration in MetalLB varies depending on the requirements of the network and peers.
|
||||
* Address pools can be configured as needed, allowing variation in addresses, aggregation length, auto assignment, and other relevant parameters.
|
||||
* The values of parameters in the Bi-Directional Forwarding Detection (BFD) profile should remain close to the defaults. Shorter values might lead to false negatives and impact performance.
|
||||
23
modules/telco-core-logging.adoc
Normal file
23
modules/telco-core-logging.adoc
Normal file
@@ -0,0 +1,23 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * telco_ref_design_specs/ran/telco-core-ref-components.adoc
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="telco-core-logging_{context}"]
|
||||
= Logging
|
||||
|
||||
New in this release::
|
||||
|
||||
Not applicable
|
||||
|
||||
Description::
|
||||
|
||||
The ClusterLogging Operator enables collection and shipping of logs off the node for remote archival and analysis. The reference configuration ships audit and infrastructure logs to a remote archive by using Kafka.
|
||||
|
||||
Limits and requirements::
|
||||
|
||||
Not applicable
|
||||
|
||||
Engineering considerations::
|
||||
* The impact of cluster CPU use is based on the number or size of logs generated and the amount of log filtering configured.
|
||||
* The reference configuration does not include shipping of application logs. Inclusion of application logs in the configuration requires evaluation of the application logging rate and sufficient additional CPU resources allocated to the reserved set.
|
||||
36
modules/telco-core-monitoring.adoc
Normal file
36
modules/telco-core-monitoring.adoc
Normal file
@@ -0,0 +1,36 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * telco_ref_design_specs/ran/telco-core-ref-components.adoc
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="telco-core-monitoring_{context}"]
|
||||
= Monitoring
|
||||
|
||||
New in this release::
|
||||
|
||||
Not applicable
|
||||
|
||||
Description::
|
||||
|
||||
The Cluster Monitoring Operator is included by default on all OpenShift clusters and provides monitoring (metrics, dashboards, and alerting) for the platform components and optionally user projects as well.
|
||||
+
|
||||
Configuration of the monitoring operator allows for customization, including:
|
||||
+
|
||||
--
|
||||
- Default retention period
|
||||
- Custom alert rules
|
||||
--
|
||||
The default handling of pod CPU and memory metrics is based on upstream Kubernetes `cAdvisor` and makes a tradeoff that prefers handling of stale data over metric accuracy. This leads to spiky data that will create false triggers of alerts over user-specified thresholds. OpenShift supports an opt-in dedicated service monitor feature creating an additional set of pod CPU and memory metrics that do not suffer from the spiky behavior. For additional information, see link:https://access.redhat.com/solutions/7012719[this solution guide].
|
||||
+
|
||||
In addition to default configuration, the following metrics are expected to be configured for {rds} clusters:
|
||||
|
||||
* Pod CPU and memory metrics and alerts for user workloads
|
||||
|
||||
Limits and requirements::
|
||||
|
||||
* Monitoring configuration must enable the dedicated service monitor feature for accurate representation of pod metrics.
|
||||
|
||||
Engineering considerations::
|
||||
|
||||
* The Prometheus retention period is specified by the user. The value used is a tradeoff between operational requirements for maintaining historical data on the cluster against CPU and storage resources. Longer retention periods increase the need for storage and require additional CPU to manage the indexing of data.
|
||||
|
||||
21
modules/telco-core-power-management.adoc
Normal file
21
modules/telco-core-power-management.adoc
Normal file
@@ -0,0 +1,21 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * telco_ref_design_specs/ran/telco-core-ref-components.adoc
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="telco-core-power-management_{context}"]
|
||||
= Power Management
|
||||
|
||||
New in this release::
|
||||
* You can specify a maximum C-state latency for a low latency pod when using per-pod power management. Previously, C-states could only be disabled completely on a per-pod basis.
|
||||
|
||||
Description::
|
||||
|
||||
The https://docs.openshift.com/container-platform/4.14/rest_api/node_apis/performanceprofile-performance-openshift-io-v2.html#spec-workloadhints[Performance Profile] can be used to configure a cluster in a high power, low power or mixed (https://docs.openshift.com/container-platform/4.14/scalability_and_performance/cnf-low-latency-tuning.html#node-tuning-operator-pod-power-saving-config_cnf-master[per-pod power management]) mode. The choice of power mode depends on the characteristics of the workloads running on the cluster particularly how sensitive they are to latency.
|
||||
|
||||
Limits and requirements::
|
||||
* Power configuration relies on appropriate BIOS configuration, for example, enabling C-states and P-states. Configuration varies between hardware vendors.
|
||||
|
||||
|
||||
Engineering considerations::
|
||||
* Latency: To ensure that latency-sensitive workloads meet their requirements, you will need either a high-power configuration or a per-pod power management configuration. Per-pod power management is only available for `Guaranteed` QoS Pods with dedicated pinned CPUs.
|
||||
22
modules/telco-core-rds-disconnected.adoc
Normal file
22
modules/telco-core-rds-disconnected.adoc
Normal file
@@ -0,0 +1,22 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * telco_ref_design_specs/core/telco-core-ref-components.adoc
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="telco-core-disconnected-environment_{context}"]
|
||||
= Disconnected environment
|
||||
|
||||
Description::
|
||||
{rds-caps} clusters are expected to be installed in networks without direct access to the internet. All container images needed to install, configure, and operate the cluster must be available in a disconnected registry. This includes {product-title} images, day-2 Operator Lifecycle Manager (OLM) Operator images, and application workload images. The use of a disconnected environment provides multiple benefits, for example:
|
||||
|
||||
* Limiting access to the cluster for security
|
||||
* Curated content: The registry is populated based on curated and approved updates for the clusters
|
||||
|
||||
Limits and requirements::
|
||||
|
||||
* A unique name is required for all custom CatalogSources. Do not reuse the default catalog names.
|
||||
* A valid time source must be configured as part of cluster installation.
|
||||
|
||||
Engineering considerations::
|
||||
|
||||
Not applicable
|
||||
10
modules/telco-core-rds-networking.adoc
Normal file
10
modules/telco-core-rds-networking.adoc
Normal file
@@ -0,0 +1,10 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * telco_ref_design_specs/core/telco-core-ref-components.adoc
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="telco-core-networking_{context}"]
|
||||
= Networking
|
||||
|
||||
{product-title} networking is an ecosystem of features, plugins, and advanced networking capabilities that extend Kubernetes networking with the advanced networking-related features that your cluster needs to manage its network traffic for one or multiple hybrid clusters.
|
||||
|
||||
30
modules/telco-core-ref-application-workloads.adoc
Normal file
30
modules/telco-core-ref-application-workloads.adoc
Normal file
@@ -0,0 +1,30 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * telco_ref_design_specs/ran/telco-ran-ref-design-spec.adoc
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="telco-core-ref-application-workloads_{context}"]
|
||||
= Application workloads
|
||||
|
||||
Application workloads running on core clusters might include a mix of high-performance networking CNFs and traditional best-effort or burstable pod workloads.
|
||||
|
||||
Guaranteed QoS scheduling is available to pods that require exclusive or dedicated use of CPUs due to performance or security requirements. Typically pods hosting high-performance and low-latency-sensitive Cloud Native Functions (CNFs) utilizing user plane networking with DPDK necessitate the exclusive utilization of entire CPUs. This is accomplished through node tuning and guaranteed Quality of Service (QoS) scheduling. For pods that require exclusive use of CPUs, be aware of the potential implications of hyperthreaded systems and configure them to request multiples of 2 CPUs when the entire core (2 hyperthreads) must be allocated to the pod.
|
||||
|
||||
Pods running network functions that do not require the high throughput and low latency networking are typically scheduled with best-effort or burstable QoS and do not require dedicated or isolated CPU cores.
|
||||
|
||||
Description of limits::
|
||||
|
||||
* CNF applications should conform to the latest version of the _CNF Best Practices_ guide.
|
||||
* For a mix of best-effort and burstable QoS pods:
|
||||
** Guaranteed QoS pods might be used but require correct configuration of reserved and isolated CPUs in the `PerformanceProfile`.
|
||||
** Guaranteed QoS Pods must include annotations for fully isolating CPUs.
|
||||
** Best effort and burstable pods are not guaranteed exclusive use of a CPU. Workloads might be preempted by other workloads, operating system daemons, or kernel tasks.
|
||||
* Exec probes should be avoided unless there is no viable alternative.
|
||||
** Do not use exec probes if a CNF is using CPU pinning.
|
||||
** Other probe implementations, for example `httpGet/tcpSocket`, should be used.
|
||||
|
||||
Signaling workload::
|
||||
|
||||
* Signaling workloads typically use SCTP, REST, gRPC, or similar TCP or UDP protocols.
|
||||
* The transactions per second (TPS) is in the order of hundreds of thousands using secondary CNI (multus) configured as MACVLAN or SR-IOV.
|
||||
* Signaling workloads run in pods with either guaranteed or burstable QoS.
|
||||
28
modules/telco-core-ref-config-kpi-testing.adoc
Normal file
28
modules/telco-core-ref-config-kpi-testing.adoc
Normal file
@@ -0,0 +1,28 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * telco_ref_design_specs/ran/telco-ran-ref-validation-artifacts.adoc
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="telco-rcorean-ref-config-kpi-testing_{context}"]
|
||||
= KPI testing results
|
||||
|
||||
Key Performance Indicator (KPI) testing is used to validate the core reference design configuration.
|
||||
Some key metrics and results from these tests are described below.
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
|
||||
====
|
||||
|
||||
.Summary of telco core {product-version} performance KPI results
|
||||
[cols="1,3,2", options="header"]
|
||||
|====
|
||||
|KPI test
|
||||
|Measurements
|
||||
|Results
|
||||
|
||||
|
|
||||
a|
|
||||
a|
|
||||
|
||||
|====
|
||||
11
modules/telco-core-ref-config-scale-testing.adoc
Normal file
11
modules/telco-core-ref-config-scale-testing.adoc
Normal file
@@ -0,0 +1,11 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * telco_ref_design_specs/ran/telco-ran-ref-validation-artifacts.adoc
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="telco-core-ref-config-scale-testing_{context}"]
|
||||
= Telco core scale test results
|
||||
|
||||
The Red Hat performance and scale lab successfully validated the telco core reference design configuration at scale.
|
||||
|
||||
Scale tests are run with a representative three-node hub cluster with the following specifications and lab network configurations applied.
|
||||
51
modules/telco-core-ref-design-baseline-model.adoc
Normal file
51
modules/telco-core-ref-design-baseline-model.adoc
Normal file
@@ -0,0 +1,51 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * telco_ref_design_specs/ran/telco-ran-ref-design-spec.adoc
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="telco-core-ref-design-baseline-model_{context}"]
|
||||
= Common baseline model
|
||||
|
||||
The following configurations and use model description are applicable to all {rds} use cases.
|
||||
|
||||
Cluster::
|
||||
|
||||
The cluster conforms to these requirements:
|
||||
|
||||
* High-availability (3+ supervisor nodes) control plane
|
||||
* Non-schedulable supervisor nodes
|
||||
|
||||
Storage::
|
||||
|
||||
Core use cases require persistent storage as provided by external {rh-storage}. For more information, see the "Storage" subsection in "Reference core design components".
|
||||
|
||||
Networking::
|
||||
|
||||
{rds-caps} clusters networking conforms to these requirements:
|
||||
|
||||
* Dual stack IPv4/IPv6
|
||||
|
||||
* Fully disconnected: Clusters do not have access to public networking at any point in their lifecycle.
|
||||
|
||||
* Multiple networks: Segmented networking provides isolation between OAM, signaling, and storage traffic.
|
||||
|
||||
* Cluster network type: OVN-Kubernetes is required for IPv6 support.
|
||||
|
||||
+
|
||||
Core clusters have multiple layers of networking supported by underlying RHCOS, SR-IOV Operator, Load Balancer, and other components detailed in the following "Networking" section. At a high level these layers include:
|
||||
|
||||
* Cluster networking: The cluster network configuration is defined and applied through the installation configuration. Updates to the configuration can be done at day-2 through the NMState Operator. Initial configuration can be used to establish:
|
||||
|
||||
** Host interface configuration
|
||||
|
||||
** A/A Bonding (Link Aggregation Control Protocol (LACP))
|
||||
|
||||
* Secondary or additional networks: OpenShift CNI is configured through the Network `additionalNetworks` or NetworkAttachmentDefinition CRs.
|
||||
|
||||
** MACVLAN
|
||||
|
||||
* Application Workload: User plane networking is running in cloud-native network functions (CNFs).
|
||||
|
||||
Service Mesh::
|
||||
|
||||
Use of Service Mesh by telco CNFs is very common. It is expected that all core clusters will include a Service Mesh implementation. Service Mesh implementation and configuration is outside the scope of this specification.
|
||||
28
modules/telco-core-ref-eng-usecase-model.adoc
Normal file
28
modules/telco-core-ref-eng-usecase-model.adoc
Normal file
@@ -0,0 +1,28 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * telco_ref_design_specs/ran/telco-ran-ref-design-spec.adoc
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="telco-core-ref-eng-usecase-model_{context}"]
|
||||
= Engineering Considerations common use model
|
||||
|
||||
The following engineering considerations are relevant for the common use model.
|
||||
|
||||
Worker nodes::
|
||||
|
||||
* Worker nodes run on Intel 3rd Generation Xeon (IceLake) processors or newer. Alternatively, if using Skylake or earlier processors, the mitigations for silicon security vulnerabilities such as Spectre must be disabled; failure to do so may result in a significant 40 percent decrease in transaction performance.
|
||||
|
||||
* IRQ Balancing is enabled on worker nodes. The `PerformanceProfile` sets `globallyDisableIrqLoadBalancing: false`. Guaranteed QoS Pods are annotated to ensure isolation as described in "CPU partitioning and performance tuning" subsection in "Reference core design components" section.
|
||||
|
||||
All nodes::
|
||||
|
||||
* Hyper-Threading is enabled on all nodes
|
||||
* CPU architecture is `x86_64` only
|
||||
* Nodes are running the stock (non-RT) kernel
|
||||
* Nodes are not configured for workload partitioning
|
||||
|
||||
The balance of node configuration between power management and maximum performance varies between `MachineConfigPools` in the cluster. This configuration is consistent for all nodes within a `MachineConfigPool`.
|
||||
|
||||
CPU partitioning::
|
||||
|
||||
CPU partitioning is configured using the PerformanceProfile and applied on a per `MachineConfigPool` basis. See the "CPU partitioning and performance tuning" subsection in "Reference core design components".
|
||||
26
modules/telco-core-scalability.adoc
Normal file
26
modules/telco-core-scalability.adoc
Normal file
@@ -0,0 +1,26 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * telco_ref_design_specs/ran/telco-core-ref-components.adoc
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="telco-core-scalability_{context}"]
|
||||
= Scalability
|
||||
|
||||
New in this release::
|
||||
|
||||
Not applicable
|
||||
|
||||
Description::
|
||||
|
||||
Clusters will scale to the sizing listed in the limits and requirements section.
|
||||
+
|
||||
Scaling of workloads is described in the use model section.
|
||||
|
||||
Limits and requirements::
|
||||
|
||||
* Cluster scales to at least 120 nodes
|
||||
|
||||
Engineering considerations::
|
||||
|
||||
Not applicable
|
||||
|
||||
28
modules/telco-core-scheduling.adoc
Normal file
28
modules/telco-core-scheduling.adoc
Normal file
@@ -0,0 +1,28 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * telco_ref_design_specs/ran/telco-core-ref-components.adoc
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="telco-core-scheduling_{context}"]
|
||||
= Scheduling
|
||||
|
||||
New in this release::
|
||||
|
||||
* NUMA-aware scheduling with the NUMA Resources Operator is now generally available in {product-title} {product-version}.
|
||||
* With this release, you can exclude advertising the Non-Uniform Memory Access (NUMA) node for the SR-IOV network to the Topology Manager. By not advertising the NUMA node for the SR-IOV network, you can permit more flexible SR-IOV network deployments during NUMA-aware pod scheduling. To exclude advertising the NUMA node for the SR-IOV network resource to the Topology Manager, set the value `excludeTopology` to `true` in the `SriovNetworkNodePolicy` CR. For more information, see link:https://docs.openshift.com/container-platform/4.14/networking/hardware_networks/configuring-sriov-device.html#nw-sriov-exclude-topology-manager_configuring-sriov-device[Exclude the SR-IOV network topology for NUMA-aware scheduling].
|
||||
|
||||
Description::
|
||||
|
||||
* The scheduler is a cluster-wide component responsible for selecting the right node for a given workload. It is a core part of the platform and does not require any specific configuration in the common deployment scenarios. However, there are few specific use cases described in the following section.
|
||||
|
||||
Limits and requirements::
|
||||
|
||||
* The default scheduler does not understand the NUMA locality of workloads. It only knows about the sum of all free resources on a worker node. This might cause workloads to be rejected when scheduled to a node with https://docs.openshift.com/container-platform/latest/scalability_and_performance/using-cpu-manager.html#topology_manager_policies_using-cpu-manager-and-topology_manager[Topology manager policy] set to `single-numa-node` or `restricted`.
|
||||
** For example, consider a pod requesting 6 CPUs and being scheduled to an empty node that has 4 CPUs per NUMA node. The total allocatable capacity of the node is 8 CPUs and the scheduler will place the pod there. The node local admission will fail, however, as there are only 4 CPUs available in each of the NUMA nodes.
|
||||
** All clusters with multi-NUMA nodes are required to use the https://docs.openshift.com/container-platform/latest/scalability_and_performance/cnf-numa-aware-scheduling.html#installing-the-numa-resources-operator_numa-aware[NUMA Resources Operator]. The `machineConfigPoolSelector` of the NUMA Resources Operator must select all nodes where NUMA aligned scheduling is needed.
|
||||
* All machine config pools must have a consistent hardware configuration, for example, all nodes are expected to have the same NUMA zone count.
|
||||
|
||||
Engineering considerations::
|
||||
|
||||
* Pods might require annotations for correct scheduling and isolation. For more information on annotations, see the "CPU Partitioning and performance tuning" section.
|
||||
|
||||
30
modules/telco-core-security.adoc
Normal file
30
modules/telco-core-security.adoc
Normal file
@@ -0,0 +1,30 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * telco_ref_design_specs/ran/telco-core-ref-components.adoc
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="telco-core-security_{context}"]
|
||||
= Security
|
||||
|
||||
New in this release::
|
||||
|
||||
* DPDK applications that need to inject traffic to the kernel can run in non-privileged pods with the help of the TAP CNI plugin. Furthermore, in this 4.14 release, the ability to create a MAC-VLAN, IP-VLAN, and VLAN subinterface based on a master interface in a container namespace is generally available.
|
||||
|
||||
Description::
|
||||
|
||||
Telco operators are security conscious and require clusters to be hardened against multiple attack vectors. Within {product-title}, there is no single component or feature responsible for securing a cluster. This section provides details of security-oriented features and configuration for the use models covered in this specification.
|
||||
|
||||
* **SecurityContextConstraints**: All workload pods should be run with restricted-v2 or restricted SCC.
|
||||
* **Seccomp**: All pods should be run with the `RuntimeDefault` (or stronger) seccomp profile.
|
||||
* **Rootless DPDK pods**: Many user-plane networking (DPDK) CNFs require pods to run with root privileges. With this feature, a conformant DPDK pod can be run without requiring root privileges.
|
||||
* **Storage**: The storage network should be isolated and non-routable to other cluster networks. See the "Storage" section for additional details.
|
||||
|
||||
Limits and requirements::
|
||||
|
||||
* Rootless DPDK pods require the following additional configuration steps:
|
||||
** Configure the TAP plugin with the `container_t` SELinux context.
|
||||
** Enable the `container_use_devices` SELinux boolean on the hosts.
|
||||
|
||||
Engineering considerations::
|
||||
|
||||
* For rootless DPDK pod support, the SELinux boolean `container_use_devices` must be enabled on the host for the TAP device to be created. This introduces a security risk that is acceptable for short to mid-term use. Other solutions will be explored.
|
||||
12
modules/telco-core-service-mesh.adoc
Normal file
12
modules/telco-core-service-mesh.adoc
Normal file
@@ -0,0 +1,12 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * telco_ref_design_specs/ran/telco-core-ref-components.adoc
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="telco-core-service-mesh_{context}"]
|
||||
= Service Mesh
|
||||
|
||||
Description::
|
||||
|
||||
{rds-caps} CNFs typically require a service mesh implementation. The specific features and performance required are dependent on the application. The selection of service mesh implementation and configuration is outside the scope of this documentation. The impact of service mesh on cluster resource utilization and performance, including additional latency introduced into pod networking, must be accounted for in the overall solution engineering.
|
||||
|
||||
24
modules/telco-core-sriov.adoc
Normal file
24
modules/telco-core-sriov.adoc
Normal file
@@ -0,0 +1,24 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * telco_ref_design_specs/ran/telco-core-ref-components.adoc
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="telco-core-sriov_{context}"]
|
||||
= SR-IOV
|
||||
|
||||
New in this release::
|
||||
|
||||
Not applicable
|
||||
|
||||
Description::
|
||||
|
||||
SR-IOV enables physical network interfaces (PFs) to be divided into multiple virtual functions (VFs). VFs can then be assigned to multiple pods to achieve higher throughput performance while keeping the pods isolated. The SR-IOV Network Operator provisions and manages SR-IOV CNI, network device plugin, and other components of the SR-IOV stack.
|
||||
|
||||
Limits and requirements::
|
||||
|
||||
* The network interface controllers supported are listed in https://docs.openshift.com/container-platform/4.14/networking/hardware_networks/about-sriov.html#nw-sriov-supported-platforms_about-sriov[OCP supported SR-IOV devices]
|
||||
* SR-IOV and IOMMU enablement in BIOS: The SR-IOV Network Operator automatically enables IOMMU on the kernel command line.
|
||||
* SR-IOV VFs do not receive link state updates from PF. If link down detection is needed, it must be done at the protocol level.
|
||||
|
||||
Engineering considerations::
|
||||
* SR-IOV interfaces in `vfio` mode are typically used to enable additional secondary networks for applications that require high throughput or low latency.
|
||||
39
modules/telco-core-storage.adoc
Normal file
39
modules/telco-core-storage.adoc
Normal file
@@ -0,0 +1,39 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * telco_ref_design_specs/core/telco-core-ref-components.adoc
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="telco-core-storage_{context}"]
|
||||
= Storage
|
||||
|
||||
Overview::
|
||||
Cloud native storage services can be provided by multiple solutions including {rh-storage} from Red Hat or third parties.
|
||||
+
|
||||
{rh-storage} is a Ceph based software-defined storage solution for containers. It provides block storage, file system storage, and on-premises object storage, which can be dynamically provisioned for both persistent and non-persistent data requirements. {rds-caps} applications require persistent storage.
|
||||
+
|
||||
[NOTE]
|
||||
====
|
||||
All storage data may not be encrypted in flight. To reduce risk, isolate the storage network from other cluster networks. The storage network must not be reachable, or routable, from other cluster networks. Only nodes directly attached to the storage network should be allowed to gain access to it.
|
||||
====
|
||||
|
||||
== {rh-storage}
|
||||
|
||||
New in this release::
|
||||
|
||||
Not applicable
|
||||
|
||||
Description::
|
||||
|
||||
{rh-storage-first} is a software-defined storage service for containers.
|
||||
For {rds-caps} clusters, storage support is provided by {rh-storage} storage services running externally to the application workload cluster. {rh-storage} supports separation of storage traffic using secondary CNI networks.
|
||||
|
||||
Limits and requirements::
|
||||
* In an IPv4/IPv6 dual-stack networking environment, {rh-storage} uses IPv4 addressing. For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/4.13/html-single/4.13_release_notes/index#support_openshift_dual_stack_with_odf_using_ipv4[Support OpenShift dual stack with ODF using IPv4].
|
||||
|
||||
|
||||
Engineering considerations::
|
||||
* {rh-storage} network traffic should be isolated from other traffic on a dedicated network, for example, by using VLAN isolation.
|
||||
|
||||
== Other Storage
|
||||
|
||||
Other storage solutions can be used to provide persistent storage for core clusters. The configuration and integration of these solutions is outside the scope of the {rds} RDS. Integration of the storage solution into the core cluster must include correct sizing and performance analysis to ensure the storage meets overall performance and resource utilization requirements.
|
||||
127
modules/telco-core-yaml-ref-networking.adoc
Normal file
127
modules/telco-core-yaml-ref-networking.adoc
Normal file
@@ -0,0 +1,127 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// *
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="networking-yaml_{context}"]
|
||||
= Networking reference YAML
|
||||
|
||||
[id="telco-core-network-yaml"]
|
||||
.Network.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/telco-core_Network.yaml[]
|
||||
----
|
||||
|
||||
[id="telco-core-networkattachmentdefinition-yaml"]
|
||||
.networkAttachmentDefinition.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/telco-core_networkAttachmentDefinition.yaml[]
|
||||
----
|
||||
|
||||
[id="telco-core-sriovsubscriptionns-yaml"]
|
||||
.SriovSubscriptionNS.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/telco-core_SriovSubscriptionNS.yaml[]
|
||||
----
|
||||
|
||||
[id="telco-core-sriovsubscriptionopergroup-yaml"]
|
||||
.SriovSubscriptionOperGroup.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/telco-core_SriovSubscriptionOperGroup.yaml[]
|
||||
----
|
||||
|
||||
[id="telco-core-sriovsubscription-yaml"]
|
||||
.SriovSubscription.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/telco-core_SriovSubscription.yaml[]
|
||||
----
|
||||
|
||||
[id="telco-core-sriovoperatorconfig-yaml"]
|
||||
.SriovOperatorConfig.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/telco-core_SriovOperatorConfig.yaml[]
|
||||
----
|
||||
|
||||
[id="telco-core-sriovnetworknodepolicy-yaml"]
|
||||
.sriovNetworkNodePolicy.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/telco-core_sriovNetworkNodePolicy.yaml[]
|
||||
----
|
||||
|
||||
[id="telco-core-sriovnetwork-yaml"]
|
||||
.sriovNetwork.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/telco-core_sriovNetwork.yaml[]
|
||||
----
|
||||
|
||||
[id="telco-core-metallbns-yaml"]
|
||||
.metallbNS.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/telco-core_metallbNS.yaml[]
|
||||
----
|
||||
|
||||
[id="telco-core-metallbopergroup-yaml"]
|
||||
.metallbOperGroup.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/telco-core_metallbOperGroup.yaml[]
|
||||
----
|
||||
|
||||
[id="telco-core-metallbsubscription-yaml"]
|
||||
.metallbSubscription.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/telco-core_metallbSubscription.yaml[]
|
||||
----
|
||||
|
||||
[id="telco-core-metallb-yaml"]
|
||||
.metallb.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/telco-core_metallb.yaml[]
|
||||
----
|
||||
|
||||
[id="telco-core-bgp-peer-yaml"]
|
||||
.bgp-peer.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/telco-core_bgp-peer.yaml[]
|
||||
----
|
||||
|
||||
[id="telco-core-bfd-profile-yaml"]
|
||||
.bfd-profile.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/telco-core_bfd-profile.yaml[]
|
||||
----
|
||||
|
||||
[id="telco-core-addr-pool-yaml"]
|
||||
.addr-pool.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/telco-core_addr-pool.yaml[]
|
||||
----
|
||||
|
||||
[id="telco-core-bgp-advr-yaml"]
|
||||
.bgp-advr.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/telco-core_bgp-advr.yaml[]
|
||||
----
|
||||
|
||||
[id="telco-core-mc_rootless_pods_selinux-yaml"]
|
||||
.mc_rootless_pods_selinux.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/telco-core_mc_rootless_pods_selinux.yaml[]
|
||||
----
|
||||
|
||||
99
modules/telco-core-yaml-ref-other.adoc
Normal file
99
modules/telco-core-yaml-ref-other.adoc
Normal file
@@ -0,0 +1,99 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// *
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="other-yaml_{context}"]
|
||||
= Other reference YAML
|
||||
|
||||
[id="telco-core-catalog-source-yaml"]
|
||||
.catalog-source.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/telco-core_catalog-source.yaml[]
|
||||
----
|
||||
|
||||
[id="telco-core-icsp-yaml"]
|
||||
.icsp.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/telco-core_icsp.yaml[]
|
||||
----
|
||||
|
||||
[id="telco-core-operator-hub-yaml"]
|
||||
.operator-hub.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/telco-core_operator-hub.yaml[]
|
||||
----
|
||||
|
||||
[id="telco-core-clusterlogns-yaml"]
|
||||
.ClusterLogNS.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/telco-core_ClusterLogNS.yaml[]
|
||||
----
|
||||
|
||||
[id="telco-core-clusterlogopergroup-yaml"]
|
||||
.ClusterLogOperGroup.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/telco-core_ClusterLogOperGroup.yaml[]
|
||||
----
|
||||
|
||||
[id="telco-core-clusterlogsubscription-yaml"]
|
||||
.ClusterLogSubscription.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/telco-core_ClusterLogSubscription.yaml[]
|
||||
----
|
||||
|
||||
[id="telco-core-clusterlogforwarder-yaml"]
|
||||
.ClusterLogForwarder.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/telco-core_ClusterLogForwarder.yaml[]
|
||||
----
|
||||
|
||||
[id="telco-core-clusterlogging-yaml"]
|
||||
.ClusterLogging.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/telco-core_ClusterLogging.yaml[]
|
||||
----
|
||||
|
||||
[id="telco-core-control-plane-load-kernel-modules-yaml"]
|
||||
.control-plane-load-kernel-modules.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/telco-core_control-plane-load-kernel-modules.yaml[]
|
||||
----
|
||||
|
||||
[id="telco-core-worker-load-kernel-modules-yaml"]
|
||||
.worker-load-kernel-modules.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/telco-core_worker-load-kernel-modules.yaml[]
|
||||
----
|
||||
|
||||
[id="telco-core-sctp_module_mc-yaml"]
|
||||
.sctp_module_mc.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/telco-core_sctp_module_mc.yaml[]
|
||||
----
|
||||
|
||||
[id="telco-core-performanceprofile-yaml"]
|
||||
.PerformanceProfile.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/telco-core_PerformanceProfile.yaml[]
|
||||
----
|
||||
|
||||
[id="telco-core-monitoring-config-cm-yaml"]
|
||||
.monitoring-config-cm.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/telco-core_monitoring-config-cm.yaml[]
|
||||
----
|
||||
|
||||
22
modules/telco-core-yaml-ref-resource-tuning.adoc
Normal file
22
modules/telco-core-yaml-ref-resource-tuning.adoc
Normal file
@@ -0,0 +1,22 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// *
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="resource-tuning-yaml_{context}"]
|
||||
= Resource Tuning reference YAML
|
||||
|
||||
[id="telco-core-pid-limits-cr-yaml"]
|
||||
.pid-limits-cr.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/telco-core_pid-limits-cr.yaml[]
|
||||
----
|
||||
|
||||
[id="telco-core-control-plane-system-reserved-yaml"]
|
||||
.control-plane-system-reserved.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/telco-core_control-plane-system-reserved.yaml[]
|
||||
----
|
||||
|
||||
43
modules/telco-core-yaml-ref-scheduling.adoc
Normal file
43
modules/telco-core-yaml-ref-scheduling.adoc
Normal file
@@ -0,0 +1,43 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// *
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="scheduling-yaml_{context}"]
|
||||
= Scheduling reference YAML
|
||||
|
||||
[id="telco-core-nropsubscriptionns-yaml"]
|
||||
.NROPSubscriptionNS.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/telco-core_NROPSubscriptionNS.yaml[]
|
||||
----
|
||||
|
||||
[id="telco-core-nropsubscriptionopergroup-yaml"]
|
||||
.NROPSubscriptionOperGroup.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/telco-core_NROPSubscriptionOperGroup.yaml[]
|
||||
----
|
||||
|
||||
[id="telco-core-nropsubscription-yaml"]
|
||||
.NROPSubscription.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/telco-core_NROPSubscription.yaml[]
|
||||
----
|
||||
|
||||
[id="telco-core-sched-yaml"]
|
||||
.sched.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/telco-core_sched.yaml[]
|
||||
----
|
||||
|
||||
[id="telco-core-nrop-yaml"]
|
||||
.nrop.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/telco-core_nrop.yaml[]
|
||||
----
|
||||
|
||||
43
modules/telco-core-yaml-ref-storage.adoc
Normal file
43
modules/telco-core-yaml-ref-storage.adoc
Normal file
@@ -0,0 +1,43 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// *
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="storage-yaml_{context}"]
|
||||
= Storage reference YAML
|
||||
|
||||
[id="telco-core-odfns-yaml"]
|
||||
.odfNS.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/telco-core_odfNS.yaml[]
|
||||
----
|
||||
|
||||
[id="telco-core-odfopergroup-yaml"]
|
||||
.odfOperGroup.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/telco-core_odfOperGroup.yaml[]
|
||||
----
|
||||
|
||||
[id="telco-core-odfsubscription-yaml"]
|
||||
.odfSubscription.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/telco-core_odfSubscription.yaml[]
|
||||
----
|
||||
|
||||
[id="telco-core-01-rook-ceph-external-cluster-details.secret-yaml"]
|
||||
.01-rook-ceph-external-cluster-details.secret.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/telco-core_01-rook-ceph-external-cluster-details.secret.yaml[]
|
||||
----
|
||||
|
||||
[id="telco-core-02-ocs-external-storagecluster-yaml"]
|
||||
.02-ocs-external-storagecluster.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/telco-core_02-ocs-external-storagecluster.yaml[]
|
||||
----
|
||||
|
||||
31
modules/telco-deviations-from-the-ref-design.adoc
Normal file
31
modules/telco-deviations-from-the-ref-design.adoc
Normal file
@@ -0,0 +1,31 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * telco_ref_design_specs/ran/telco-ran-ref-design-spec.adoc
|
||||
|
||||
:_mod-docs-content-type: CONCEPT
|
||||
[id="telco-deviations-from-the-ref-design_{context}"]
|
||||
= Deviations from the reference design
|
||||
|
||||
Deviating from the validated telco core and telco RAN DU reference design specifications (RDS) can have a significant impact beyond the specific component or feature that you change.
|
||||
Deviations require analysis and engineering in the context of the complete solution.
|
||||
|
||||
[IMPORTANT]
|
||||
====
|
||||
All deviations from the RDS should be analyzed and documented with clear action tracking information.
|
||||
Due diligence is expected from partners to understand how to bring deviations into line with the reference design.
|
||||
This might require partners to provide additional resources to engage with Red Hat to work towards enabling their use case to achieve a best-in-class outcome with the platform.
|
||||
This is critical for the supportability of the solution and ensuring alignment across Red Hat and with partners.
|
||||
====
|
||||
|
||||
Deviation from the RDS can have some or all of the following consequences:
|
||||
|
||||
* It can take longer to resolve issues.
|
||||
|
||||
* There is a risk of missing project service-level agreements (SLAs), project deadlines, end provider performance requirements, and so on.
|
||||
|
||||
* Unapproved deviations may require escalation at executive levels.
|
||||
+
|
||||
[NOTE]
|
||||
====
|
||||
Red Hat prioritizes the servicing of requests for deviations based on partner engagement priorities.
|
||||
====
|
||||
23
modules/telco-nmstate-operator.adoc
Normal file
23
modules/telco-nmstate-operator.adoc
Normal file
@@ -0,0 +1,23 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * telco_ref_design_specs/ran/telco-core-ref-components.adoc
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="telco-core-nmstate-operator_{context}"]
|
||||
= NMState Operator
|
||||
|
||||
New in this release::
|
||||
|
||||
Not applicable
|
||||
|
||||
Description::
|
||||
|
||||
The NMState Operator provides a Kubernetes API for performing network configurations across the cluster's nodes. It enables network interface configurations, static IPs and DNS, VLANs, trunks, bonding, static routes, MTU, and promiscuous mode on the secondary interfaces. The cluster nodes periodically report on the state of each node's network interfaces to the API server.
|
||||
|
||||
Limits and requirements::
|
||||
|
||||
Not applicable
|
||||
|
||||
Engineering considerations::
|
||||
* The initial networking configuration is applied using `NMStateConfig` content in the installation CRs. The NMState Operator is used only when needed for network updates.
|
||||
* When SR-IOV virtual functions are used for host networking, the NMState Operator using `NodeNetworkConfigurationPolicy` is used to configure those VF interfaces, for example, VLANs and the MTU.
|
||||
70
modules/telco-ran-414-ref-design-features.adoc
Normal file
70
modules/telco-ran-414-ref-design-features.adoc
Normal file
@@ -0,0 +1,70 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * telco_ref_design_specs/ran/telco-ran-ref-design-spec.adoc
|
||||
|
||||
:_mod-docs-content-type: CONCEPT
|
||||
[id="telco-ran-414-ref-design-features_{context}"]
|
||||
= {product-title} {product-version} features for {rds}
|
||||
|
||||
The following features that are included in {product-title} {product-version} and are leveraged by the {rds} reference design specification (RDS) have been added or updated.
|
||||
|
||||
.{product-title} {product-version} features for the {rds} RDS
|
||||
[cols="1,3", options="header"]
|
||||
|====
|
||||
|Feature
|
||||
|Description
|
||||
|
||||
//CNF-7365
|
||||
|{ztp} independence from managed cluster version
|
||||
a|You can now use {ztp} to manage clusters that are running different versions of {product-title} compared to the version that is running on the hub cluster. You can also have a mix of {product-title} versions in the deployed fleet of clusters.
|
||||
|
||||
* link:https://docs.openshift.com/container-platform/4.14/scalability_and_performance/ztp_far_edge/ztp-preparing-the-hub-cluster.html#ztp-preparing-the-ztp-git-repository-ver-ind_ztp-preparing-the-hub-cluster[Preparing the {ztp} site configuration repository for version independence]
|
||||
|
||||
//CNF-6925
|
||||
|Using custom CRs alongside the reference CRs in {ztp}
|
||||
a|You can now use custom CRs alongside the reference configuration CRs provided in the `ztp-site-generate` container.
|
||||
|
||||
* link:https://docs.openshift.com/container-platform/4.14/scalability_and_performance/ztp_far_edge/ztp-advanced-policy-config.html#ztp-adding-new-content-to-gitops-ztp_ztp-advanced-policy-config[Adding custom content to the {ztp} pipeline]
|
||||
|
||||
//CNF-7078
|
||||
//|Intel Westport Channel e810 NIC as PTP Grandmaster clock
|
||||
//a|You can use the Intel Westport Channel E810-XXVDA4T as a GNSS-sourced grandmaster clock.
|
||||
//The NIC is automatically configured by the PTP Operator with the E810 hardware plugin.
|
||||
//This feature is scheduled for an upcoming 4.14 z-stream release.
|
||||
|
||||
//* link:https://docs.openshift.com/container-platform/4.14/networking/using-ptp.html#configuring-linuxptp-services-as-grandmaster-clock_using-ptp[Configuring linuxptp services as a grandmaster clock]
|
||||
|
||||
//CNF-6527
|
||||
//|PTP Operator hardware specific functionality plugin
|
||||
//a|A new E810 NIC hardware plugin is now available in the PTP Operator.
|
||||
//You can use the E810 plugin to configure the NIC directly.
|
||||
//This feature is scheduled for an upcoming 4.14 z-stream release.
|
||||
|
||||
// * link:https://docs.openshift.com/container-platform/4.14/networking/ptp/configuring-ptp.html#nw-ptp-wpc-hardware-pins-reference_configuring-ptp[Intel Westport Channel E810 hardware configuration reference]
|
||||
|
||||
//CNF-8035
|
||||
|Using custom node labels in the `SiteConfig` CR with {ztp}
|
||||
a|You can now use the `nodeLabels` field in the `SiteConfig` CR to create custom roles for nodes in managed clusters.
|
||||
|
||||
* link:https://docs.openshift.com/container-platform/4.14/scalability_and_performance/ztp_far_edge/ztp-deploying-far-edge-sites.html#ztp-sno-siteconfig-config-reference_ztp-deploying-far-edge-sites[{sno} SiteConfig CR installation reference]
|
||||
|
||||
//OCPBUGS-13050, CTONET-3072
|
||||
|PTP events and metrics
|
||||
a|The `PtpConfig` reference configuration CRs have been updated.
|
||||
|
||||
* link:https://docs.openshift.com/container-platform/4.14/networking/using-ptp.html#discover-ptp-devices_using-ptp[Discovering PTP capable network devices in your cluster]
|
||||
|
||||
//CNF-7517
|
||||
|Precaching user-specified images
|
||||
a|You can now precache application workload images before upgrading your applications on {sno} clusters with {cgu-operator-full}.
|
||||
|
||||
* link:https://docs.openshift.com/container-platform/4.14/scalability_and_performance/ztp_far_edge/ztp-precaching-tool.html#ztp-pre-staging-tool[Precaching images for {sno} deployments]
|
||||
|
||||
//CNF-6318
|
||||
|Using OpenShift capabilities to further reduce the {sno} DU footprint
|
||||
a|Use cluster capabilities to enable or disable optional components before you install the cluster.
|
||||
In {product-title} {product-version}, the following optional capabilities are available:
|
||||
`baremetal`, `marketplace`, `openshift-samples`, `Console`, `Insights`, `Storage`, `CSISnapshot`, `NodeTuning`, `MachineAPI`. The reference configuration includes only those features required for RAN DU.
|
||||
|
||||
* link:https://docs.openshift.com/container-platform/4.14/installing/cluster-capabilities.html#cluster-capabilities[Cluster capabilities]
|
||||
|====
|
||||
34
modules/telco-ran-agent-based-installer-abi.adoc
Normal file
34
modules/telco-ran-agent-based-installer-abi.adoc
Normal file
@@ -0,0 +1,34 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * telco_ref_design_specs/ran/telco-ran-ref-du-components.adoc
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="telco-ran-agent-based-installer-abi_{context}"]
|
||||
= Agent-based installer
|
||||
|
||||
New in this release::
|
||||
* No reference design updates in this release
|
||||
|
||||
Description::
|
||||
Agent-based installer (ABI) provides installation capabilities without centralized infrastructure.
|
||||
The installation program creates an ISO image that you mount to the server.
|
||||
When the server boots, it installs {product-title} and the supplied extra manifests.
|
||||
+
|
||||
[NOTE]
|
||||
====
|
||||
You can also use ABI to install {product-title} clusters without a hub cluster.
|
||||
An image registry is still required when you use ABI in this manner.
|
||||
====
|
||||
|
||||
Agent-based installer (ABI) is an optional component.
|
||||
|
||||
Limits and requirements::
|
||||
* You can supply a limited set of additional manifests at installation time.
|
||||
|
||||
* You must include `MachineConfiguration` CRs that are required by the RAN DU use case.
|
||||
|
||||
Engineering considerations::
|
||||
|
||||
* ABI provides a baseline {product-title} installation.
|
||||
|
||||
* You install Day 2 Operators and the remainder of the RAN DU use case configurations after installation.
|
||||
13
modules/telco-ran-architecture-overview.adoc
Normal file
13
modules/telco-ran-architecture-overview.adoc
Normal file
@@ -0,0 +1,13 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * telco_ref_design_specs/ran/telco-ran-ref-design-spec.adoc
|
||||
|
||||
:_mod-docs-content-type: CONCEPT
|
||||
[id="telco-ran-architecture-overview_{context}"]
|
||||
= Deployment architecture overview
|
||||
|
||||
You deploy the {rds} {product-version} reference configuration to managed clusters from a centrally managed {rh-rhacm} hub cluster.
|
||||
The reference design specification (RDS) includes configuration of the managed clusters and the hub cluster components.
|
||||
|
||||
.{rds-caps} deployment architecture overview
|
||||
image::474_OpenShift_OpenShift_RAN_RDS_arch_updates_1023.png[A diagram showing two distinctive network far edge deployment processes, one show how the hub cluster uses {gitops-title} to install managed clusters, and the other showing how the hub cluster uses {cgu-operator-full} to apply policies to managed clusters]
|
||||
41
modules/telco-ran-bios-tuning.adoc
Normal file
41
modules/telco-ran-bios-tuning.adoc
Normal file
@@ -0,0 +1,41 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * telco_ref_design_specs/ran/telco-ran-ref-du-components.adoc
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="telco-ran-bios-tuning_{context}"]
|
||||
= Host firmware tuning
|
||||
|
||||
New in this release::
|
||||
* No reference design updates in this release
|
||||
|
||||
Description::
|
||||
Configure system level performance.
|
||||
See link:https://docs.openshift.com/container-platform/latest/scalability_and_performance/ztp_far_edge/ztp-reference-cluster-configuration-for-vdu.html#ztp-du-configuring-host-firmware-requirements_sno-configure-for-vdu[Configuring host firmware for low latency and high performance] for recommended settings.
|
||||
+
|
||||
If Ironic inspection is enabled, the firmware setting values are available from the per-cluster `BareMetalHost` CR on the hub cluster.
|
||||
You enable Ironic inspection with a label in the `spec.clusters.nodes` field in the `SiteConfig` CR that you use to install the cluster.
|
||||
For example:
|
||||
+
|
||||
[source,yaml]
|
||||
----
|
||||
nodes:
|
||||
- hostName: "example-node1.example.com"
|
||||
ironicInspect: "enabled"
|
||||
----
|
||||
+
|
||||
[NOTE]
|
||||
====
|
||||
The {rds} reference `SiteConfig` does not enable the `ironicInspect` field by default.
|
||||
====
|
||||
|
||||
Limits and requirements::
|
||||
* Hyperthreading must be enabled
|
||||
|
||||
Engineering considerations::
|
||||
* Tune all settings for maximum performance
|
||||
+
|
||||
[NOTE]
|
||||
====
|
||||
You can tune firmware selections for power savings at the expense of performance as required.
|
||||
====
|
||||
64
modules/telco-ran-cluster-tuning.adoc
Normal file
64
modules/telco-ran-cluster-tuning.adoc
Normal file
@@ -0,0 +1,64 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * telco_ref_design_specs/ran/telco-ran-ref-du-components.adoc
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="telco-ran-cluster-tuning_{context}"]
|
||||
= Cluster tuning
|
||||
|
||||
New in this release::
|
||||
* You can remove the Image Registry Operator by using the cluster capabilities feature.
|
||||
+
|
||||
[NOTE]
|
||||
====
|
||||
You configure cluster capabilities by using the `spec.clusters.installConfigOverrides` field in the `SiteConfig` CR that you use to install the cluster.
|
||||
====
|
||||
|
||||
Description::
|
||||
The cluster capabilities feature now includes a `MachineAPI` component which, when excluded, disables the following Operators and their resources in the cluster:
|
||||
|
||||
* `openshift/cluster-autoscaler-operator`
|
||||
|
||||
* `openshift/cluster-control-plane-machine-set-operator`
|
||||
|
||||
* `openshift/machine-api-operator`
|
||||
|
||||
Limits and requirements::
|
||||
* Cluster capabilities are not available for installer-provisioned installation methods.
|
||||
|
||||
* You must apply all platform tuning configurations.
|
||||
The following table lists the required platform tuning configurations:
|
||||
+
|
||||
.Cluster capabilities configurations
|
||||
[cols=2*, width="90%", options="header"]
|
||||
|====
|
||||
|Feature
|
||||
|Description
|
||||
|
||||
|Remove optional cluster capabilities
|
||||
a|Reduce the {product-title} footprint by disabling optional cluster Operators on {sno} clusters only.
|
||||
|
||||
* Remove all optional Operators except the Marketplace and Node Tuning Operators.
|
||||
|
||||
|Configure cluster monitoring
|
||||
a|Configure the monitoring stack for reduced footprint by doing the following:
|
||||
|
||||
* Disable the local `alertmanager` and `telemeter` components.
|
||||
|
||||
* If you use {rh-rhacm} observability, the CR must be augmented with appropriate `additionalAlertManagerConfigs` CRs to forward alerts to the hub cluster.
|
||||
|
||||
* Reduce the `Prometheus` retention period to 24h.
|
||||
+
|
||||
[NOTE]
|
||||
====
|
||||
The {rh-rhacm} hub cluster aggregates managed cluster metrics.
|
||||
====
|
||||
|
||||
|Disable networking diagnostics
|
||||
|Disable networking diagnostics for {sno} because they are not required.
|
||||
|
||||
|Configure a single Operator Hub catalog source
|
||||
|Configure the cluster to use a single catalog source that contains only the Operators required for a RAN DU deployment.
|
||||
Each catalog source increases the CPU use on the cluster.
|
||||
Using a single `CatalogSource` fits within the platform CPU budget.
|
||||
|====
|
||||
20
modules/telco-ran-core-ref-design-spec.adoc
Normal file
20
modules/telco-ran-core-ref-design-spec.adoc
Normal file
@@ -0,0 +1,20 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * telco_ref_design_specs/ran/telco-ran-ref-design-spec.adoc
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="telco-ran-core-ref-design-spec_{context}"]
|
||||
= Reference design scope
|
||||
|
||||
The telco core and telco RAN reference design specifications (RDS) capture the recommended, tested, and supported configurations to get reliable and repeatable performance for clusters running the telco core and telco RAN profiles.
|
||||
|
||||
Each RDS includes the released features and supported configurations that are engineered and validated for clusters to run the individual profiles.
|
||||
The configurations provide a baseline {product-title} installation that meets feature and KPI targets.
|
||||
Each RDS also describes expected variations for each individual configuration.
|
||||
Validation of each RDS includes many long duration and at-scale tests.
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
The validated reference configurations are updated for each major Y-stream release of {product-title}.
|
||||
Z-stream patch releases are periodically re-tested against the reference configurations.
|
||||
====
|
||||
19
modules/telco-ran-crs-cluster-tuning.adoc
Normal file
19
modules/telco-ran-crs-cluster-tuning.adoc
Normal file
@@ -0,0 +1,19 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="cluster-tuning-crs_{context}"]
|
||||
= Cluster tuning reference CRs
|
||||
|
||||
.Cluster tuning CRs
|
||||
[cols="4*", options="header", format=csv]
|
||||
|====
|
||||
Component,Reference CR,Optional,New in this release
|
||||
Cluster capabilities,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-example-sno-yaml[example-sno.yaml],No,No
|
||||
Disabling network diagnostics,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-disablesnonetworkdiag-yaml[DisableSnoNetworkDiag.yaml],No,No
|
||||
Monitoring configuration,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-reducemonitoringfootprint-yaml[ReduceMonitoringFootprint.yaml],No,No
|
||||
OperatorHub,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-defaultcatsrc-yaml[DefaultCatsrc.yaml],No,No
|
||||
OperatorHub,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-disconnectedicsp-yaml[DisconnectedICSP.yaml],No,No
|
||||
OperatorHub,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-operatorhub-yaml[OperatorHub.yaml],No,No
|
||||
|====
|
||||
42
modules/telco-ran-crs-day-2-operators.adoc
Normal file
42
modules/telco-ran-crs-day-2-operators.adoc
Normal file
@@ -0,0 +1,42 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="day-2-operators-crs_{context}"]
|
||||
= Day 2 Operators reference CRs
|
||||
|
||||
.Day 2 Operators CRs
|
||||
[cols="4*", options="header", format=csv]
|
||||
|====
|
||||
Component,Reference CR,Optional,New in this release
|
||||
Cluster logging,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-clusterlogforwarder-yaml[ClusterLogForwarder.yaml],No,No
|
||||
Cluster logging,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-clusterlogging-yaml[ClusterLogging.yaml],No,No
|
||||
Cluster logging,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-clusterlogns-yaml[ClusterLogNS.yaml],No,No
|
||||
Cluster logging,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-clusterlogopergroup-yaml[ClusterLogOperGroup.yaml],No,No
|
||||
Cluster logging,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-clusterlogsubscription-yaml[ClusterLogSubscription.yaml],No,No
|
||||
Local Storage Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-storageclass-yaml[StorageClass.yaml],Yes,No
|
||||
Local Storage Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-storagelv-yaml[StorageLV.yaml],Yes,No
|
||||
Local Storage Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-storagens-yaml[StorageNS.yaml],Yes,No
|
||||
Local Storage Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-storageopergroup-yaml[StorageOperGroup.yaml],Yes,No
|
||||
Local Storage Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-storagesubscription-yaml[StorageSubscription.yaml],Yes,No
|
||||
Node Tuning Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-performanceprofile-yaml[PerformanceProfile.yaml],No,No
|
||||
Node Tuning Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-tunedperformancepatch-yaml[TunedPerformancePatch.yaml],No,No
|
||||
PTP fast event notifications,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-ptpoperatorconfigforevent-yaml[PtpOperatorConfigForEvent.yaml],Yes,No
|
||||
PTP Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-ptpconfigboundary-yaml[PtpConfigBoundary.yaml],No,No
|
||||
PTP Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-ptpconfiggmwpc-yaml[PtpConfigGmWpc.yaml],No,Yes
|
||||
PTP Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-ptpconfigslave-yaml[PtpConfigSlave.yaml],No,No
|
||||
PTP Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-ptpsubscription-yaml[PtpSubscription.yaml],No,No
|
||||
PTP Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-ptpsubscriptionns-yaml[PtpSubscriptionNS.yaml],No,No
|
||||
PTP Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-ptpsubscriptionopergroup-yaml[PtpSubscriptionOperGroup.yaml],No,No
|
||||
SR-IOV FEC Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-acceleratorsns-yaml[AcceleratorsNS.yaml],Yes,No
|
||||
SR-IOV FEC Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-acceleratorsopergroup-yaml[AcceleratorsOperGroup.yaml],Yes,No
|
||||
SR-IOV FEC Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-acceleratorssubscription-yaml[AcceleratorsSubscription.yaml],Yes,No
|
||||
SR-IOV FEC Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-sriovfecclusterconfig-yaml[SriovFecClusterConfig.yaml],Yes,No
|
||||
SR-IOV Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-sriovnetwork-yaml[SriovNetwork.yaml],No,No
|
||||
SR-IOV Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-sriovnetworknodepolicy-yaml[SriovNetworkNodePolicy.yaml],No,No
|
||||
SR-IOV Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-sriovoperatorconfig-yaml[SriovOperatorConfig.yaml],No,No
|
||||
SR-IOV Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-sriovsubscription-yaml[SriovSubscription.yaml],No,No
|
||||
SR-IOV Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-sriovsubscriptionns-yaml[SriovSubscriptionNS.yaml],No,No
|
||||
SR-IOV Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-sriovsubscriptionopergroup-yaml[SriovSubscriptionOperGroup.yaml],No,No
|
||||
|====
|
||||
28
modules/telco-ran-crs-machine-configuration.adoc
Normal file
28
modules/telco-ran-crs-machine-configuration.adoc
Normal file
@@ -0,0 +1,28 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="machine-configuration-crs_{context}"]
|
||||
= Machine configuration reference CRs
|
||||
|
||||
.Machine configuration CRs
|
||||
[cols="4*", options="header", format=csv]
|
||||
|====
|
||||
Component,Reference CR,Optional,New in this release
|
||||
Container runtime (crun),xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-enable-crun-master-yaml[enable-crun-master.yaml],No,No
|
||||
Container runtime (crun),xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-enable-crun-worker-yaml[enable-crun-worker.yaml],No,No
|
||||
Disabling CRI-O wipe,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-99-crio-disable-wipe-master-yaml[99-crio-disable-wipe-master.yaml],No,No
|
||||
Disabling CRI-O wipe,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-99-crio-disable-wipe-worker-yaml[99-crio-disable-wipe-worker.yaml],No,No
|
||||
Enabling kdump,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-05-kdump-config-master-yaml[05-kdump-config-master.yaml],No,Yes
|
||||
Enabling kdump,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-05-kdump-config-worker-yaml[05-kdump-config-worker.yaml],No,Yes
|
||||
Enabling kdump,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-06-kdump-master-yaml[06-kdump-master.yaml],No,No
|
||||
Enabling kdump,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-06-kdump-worker-yaml[06-kdump-worker.yaml],No,No
|
||||
Kubelet configuration and container mount hiding,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-01-container-mount-ns-and-kubelet-conf-master-yaml[01-container-mount-ns-and-kubelet-conf-master.yaml],No,No
|
||||
Kubelet configuration and container mount hiding,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-01-container-mount-ns-and-kubelet-conf-worker-yaml[01-container-mount-ns-and-kubelet-conf-worker.yaml],No,No
|
||||
One-shot time sync,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-99-sync-time-once-master-yaml[99-sync-time-once-master.yaml],No,Yes
|
||||
One-shot time sync,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-99-sync-time-once-worker-yaml[99-sync-time-once-worker.yaml],No,Yes
|
||||
SCTP,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-03-sctp-machine-config-master-yaml[03-sctp-machine-config-master.yaml],No,No
|
||||
SCTP,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-03-sctp-machine-config-worker-yaml[03-sctp-machine-config-worker.yaml],No,No
|
||||
SR-IOV related kernel arguments,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-07-sriov-related-kernel-args-master-yaml[07-sriov-related-kernel-args-master.yaml],No,Yes
|
||||
|====
|
||||
30
modules/telco-ran-crs-networking.adoc
Normal file
30
modules/telco-ran-crs-networking.adoc
Normal file
@@ -0,0 +1,30 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// *
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="networking-crs_{context}"]
|
||||
= Networking reference CRs
|
||||
|
||||
.Networking CRs
|
||||
[cols="4*", options="header", format=csv]
|
||||
|====
|
||||
Component,Reference CR,Optional,New in this release
|
||||
Baseline,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-network-yaml[Network.yaml],No,No
|
||||
Baseline,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-networkattachmentdefinition-yaml[networkAttachmentDefinition.yaml],Yes,No
|
||||
SR-IOV Network Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-sriovsubscriptionns-yaml[SriovSubscriptionNS.yaml],No,No
|
||||
SR-IOV Network Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-sriovsubscriptionopergroup-yaml[SriovSubscriptionOperGroup.yaml],No,No
|
||||
SR-IOV Network Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-sriovsubscription-yaml[SriovSubscription.yaml],No,No
|
||||
SR-IOV Network Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-sriovoperatorconfig-yaml[SriovOperatorConfig.yaml],No,No
|
||||
SR-IOV Network Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-sriovnetworknodepolicy-yaml[sriovNetworkNodePolicy.yaml],No,No
|
||||
SR-IOV Network Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-sriovnetwork-yaml[sriovNetwork.yaml],No,No
|
||||
Load balancer,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-metallbns-yaml[metallbNS.yaml],No,No
|
||||
Load balancer,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-metallbopergroup-yaml[metallbOperGroup.yaml],No,No
|
||||
Load balancer,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-metallbsubscription-yaml[metallbSubscription.yaml],No,No
|
||||
Load balancer,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-metallb-yaml[metallb.yaml],No,No
|
||||
Load balancer,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-bgp-peer-yaml[bgp-peer.yaml],No,No
|
||||
Load balancer,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-bfd-profile-yaml[bfd-profile.yaml],No,No
|
||||
Load balancer,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-addr-pool-yaml[addr-pool.yaml],No,No
|
||||
Load balancer,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-bgp-advr-yaml[bgp-advr.yaml],No,No
|
||||
Multus - Tap CNI for rootless DPDK pod,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-mc_rootless_pods_selinux-yaml[mc_rootless_pods_selinux.yaml],Yes,No
|
||||
|====
|
||||
25
modules/telco-ran-crs-other.adoc
Normal file
25
modules/telco-ran-crs-other.adoc
Normal file
@@ -0,0 +1,25 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// *
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="other-crs_{context}"]
|
||||
= Other reference CRs
|
||||
|
||||
.Other CRs
|
||||
[cols="4*", options="header", format=csv]
|
||||
|====
|
||||
Component,Reference CR,Optional,New in this release
|
||||
Disconnected configuration,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-catalog-source-yaml[catalog-source.yaml],No,No
|
||||
Disconnected configuration,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-icsp-yaml[icsp.yaml],No,No
|
||||
Disconnected configuration,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-operator-hub-yaml[operator-hub.yaml],No,No
|
||||
Cluster logging,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-clusterlogns-yaml[ClusterLogNS.yaml],Yes,No
|
||||
Cluster logging,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-clusterlogopergroup-yaml[ClusterLogOperGroup.yaml],Yes,No
|
||||
Cluster logging,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-clusterlogsubscription-yaml[ClusterLogSubscription.yaml],Yes,No
|
||||
Cluster logging,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-clusterlogforwarder-yaml[ClusterLogForwarder.yaml],Yes,No
|
||||
Cluster logging,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-clusterlogging-yaml[ClusterLogging.yaml],Yes,No
|
||||
Additional kernel modules,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-control-plane-load-kernel-modules-yaml[control-plane-load-kernel-modules.yaml],Yes,No
|
||||
Additional kernel modules,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-worker-load-kernel-modules-yaml[worker-load-kernel-modules.yaml],Yes,No
|
||||
Additional kernel modules,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-sctp_module_mc-yaml[sctp_module_mc.yaml],Yes,No
|
||||
Power management,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-performanceprofile-yaml[PerformanceProfile.yaml],No,No
|
||||
|====
|
||||
15
modules/telco-ran-crs-resource-tuning.adoc
Normal file
15
modules/telco-ran-crs-resource-tuning.adoc
Normal file
@@ -0,0 +1,15 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// *
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="resource-tuning-crs_{context}"]
|
||||
= Resource Tuning reference CRs
|
||||
|
||||
.Resource Tuning CRs
|
||||
[cols="4*", options="header", format=csv]
|
||||
|====
|
||||
Component,Reference CR,Optional,New in this release
|
||||
System reserved capacity,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-pid-limits-cr-yaml[pid-limits-cr.yaml],Yes,No
|
||||
System reserved capacity,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-control-plane-system-reserved-yaml[control-plane-system-reserved.yaml],Yes,No
|
||||
|====
|
||||
19
modules/telco-ran-crs-scheduling.adoc
Normal file
19
modules/telco-ran-crs-scheduling.adoc
Normal file
@@ -0,0 +1,19 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// *
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="scheduling-crs_{context}"]
|
||||
= Scheduling reference CRs
|
||||
|
||||
.Scheduling CRs
|
||||
[cols="4*", options="header", format=csv]
|
||||
|====
|
||||
Component,Reference CR,Optional,New in this release
|
||||
NUMA-aware scheduler,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-nropsubscriptionns-yaml[NROPSubscriptionNS.yaml],No,No
|
||||
NUMA-aware scheduler,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-nropsubscriptionopergroup-yaml[NROPSubscriptionOperGroup.yaml],No,No
|
||||
NUMA-aware scheduler,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-nropsubscription-yaml[NROPSubscription.yaml],No,No
|
||||
NUMA-aware scheduler,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-sched-yaml[sched.yaml],No,No
|
||||
NUMA-aware scheduler,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-nrop-yaml[nrop.yaml],No,No
|
||||
|====
|
||||
18
modules/telco-ran-crs-storage.adoc
Normal file
18
modules/telco-ran-crs-storage.adoc
Normal file
@@ -0,0 +1,18 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// *
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="storage-crs_{context}"]
|
||||
= Storage reference CRs
|
||||
|
||||
.Storage CRs
|
||||
[cols="4*", options="header", format=csv]
|
||||
|====
|
||||
Component,Reference CR,Optional,New in this release
|
||||
External ODF configuration,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-odffns-yaml[odffNS.yaml],No,No
|
||||
External ODF configuration,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-odfopergroup-yaml[odfOperGroup.yaml],No,No
|
||||
External ODF configuration,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-odfsubscription-yaml[odfSubscription.yaml],No,No
|
||||
External ODF configuration,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-01-rook-ceph-external-cluster-details.secret-yaml[01-rook-ceph-external-cluster-details.secret.yaml],No,No
|
||||
External ODF configuration,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-02-ocs-external-storagecluster-yaml[02-ocs-external-storagecluster.yaml],No,No
|
||||
|====
|
||||
23
modules/telco-ran-du-application-workloads.adoc
Normal file
23
modules/telco-ran-du-application-workloads.adoc
Normal file
@@ -0,0 +1,23 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * telco_ref_design_specs/ran/telco-ran-du-overview.adoc
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="telco-du-workloads_{context}"]
|
||||
= {rds-caps} application workloads
|
||||
|
||||
DU worker nodes must have 3rd Generation Xeon (Ice Lake) 2.20 GHz or better CPUs with firmware tuned for maximum performance.
|
||||
|
||||
5G RAN DU user applications and workloads should conform to the following best practices and application limits:
|
||||
|
||||
* Develop cloud-native network functions (CNFs) that conform to the latest version of the link:https://test-network-function.github.io/cnf-best-practices/[CNF best practices guide].
|
||||
|
||||
* Use SR-IOV for high performance networking.
|
||||
|
||||
* Use exec probes sparingly and only when no other suitable options are available.
|
||||
|
||||
** Do not use exec probes if a CNF uses CPU pinning.
|
||||
Use other probe implementations, for example, `httpGet` or `tcpSocket`.
|
||||
|
||||
** When you need to use exec probes, limit the exec probe frequency and quantity.
|
||||
The maximum number of exec probes must be kept below 10, and frequency must not be set to less than 10 seconds.
|
||||
22
modules/telco-ran-du-reference-components.adoc
Normal file
22
modules/telco-ran-du-reference-components.adoc
Normal file
@@ -0,0 +1,22 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * telco_ref_design_specs/ran/telco-ran-du-overview.adoc
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="telco-ran-du-reference-components_{context}"]
|
||||
= {rds-caps} RDS components
|
||||
|
||||
The following sections describe the various {product-title} components and configurations that you use to configure and deploy clusters to run {rds} workloads.
|
||||
|
||||
.{rds-caps} reference design components
|
||||
image::319_OpenShift_PTP_bare-metal_OCP_nodes_1023_RAN_DU.png[A diagram describing the {rds} component stack.]
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
Ensure that components that are not included in the {rds} profile do not affect the CPU resources allocated to workload applications.
|
||||
====
|
||||
|
||||
[IMPORTANT]
|
||||
====
|
||||
Out-of-tree drivers are not supported.
|
||||
====
|
||||
51
modules/telco-ran-gitops-operator-and-ztp-plugins.adoc
Normal file
51
modules/telco-ran-gitops-operator-and-ztp-plugins.adoc
Normal file
@@ -0,0 +1,51 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * telco_ref_design_specs/ran/telco-ran-ref-du-components.adoc
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="telco-ran-gitops-operator-and-ztp-plugins_{context}"]
|
||||
= {gitops-shortname} and {ztp} plugins
|
||||
|
||||
New in this release::
|
||||
* GA support for inclusion of user-provided CRs in Git for {ztp} deployments
|
||||
|
||||
* {ztp} independence from the deployed cluster version
|
||||
|
||||
Description::
|
||||
{gitops-shortname} and {ztp} plugins provide a {gitops-shortname}-based infrastructure for managing cluster deployment and configuration.
|
||||
Cluster definitions and configurations are maintained as a declarative state in Git.
|
||||
ZTP plugins provide support for generating installation CRs from the `SiteConfig` CR and automatic wrapping of configuration CRs in policies based on `PolicyGenTemplate` CRs.
|
||||
+
|
||||
You can deploy and manage multiple versions of {product-title} on managed clusters with the baseline reference configuration CRs in a `/source-crs` subdirectory provided that subdirectory also contains the `kustomization.yaml` file.
|
||||
You add user-provided CRs to this subdirectory that you use with the predefined CRs that are specified in the `PolicyGenTemplate` CRs.
|
||||
This allows you to tailor your configurations to suit your specific requirements and provides {ztp} version independence between managed clusters and the hub cluster.
|
||||
+
|
||||
For more information, see the following:
|
||||
|
||||
* link:https://docs.openshift.com/container-platform/latest/scalability_and_performance/ztp_far_edge/ztp-preparing-the-hub-cluster.html#ztp-preparing-the-ztp-git-repository-ver-ind_ztp-preparing-the-hub-cluster[Preparing the site configuration repository for version independence]
|
||||
* link:https://docs.openshift.com/container-platform/latest/scalability_and_performance/ztp_far_edge/ztp-advanced-policy-config.html#ztp-adding-new-content-to-gitops-ztp_ztp-advanced-policy-config[Adding custom content to the {ztp} pipeline]
|
||||
|
||||
Limits::
|
||||
* 300 `SiteConfig` CRs per ArgoCD application.
|
||||
You can use multiple applications to achieve the maximum number of clusters supported by a single hub cluster.
|
||||
|
||||
* Content in the `/source-crs` folder in Git overrides content provided in the {ztp} plugin container.
|
||||
Git takes precedence in the search path.
|
||||
|
||||
* Add the `/source-crs` folder in the same directory as the `kustomization.yaml` file, which includes the `PolicyGenTemplate` as a generator.
|
||||
+
|
||||
[NOTE]
|
||||
====
|
||||
Alternative locations for the `/source-crs` directory are not supported in this context.
|
||||
====
|
||||
|
||||
Engineering considerations::
|
||||
* To avoid confusion or unintentional overwriting of files when updating content, use unique and distinguishable names for user-provided CRs in the `/source-crs` folder and extra manifests in Git.
|
||||
|
||||
* The `SiteConfig` CR allows multiple extra-manifest paths.
|
||||
When files with the same name are found in multiple directory paths, the last file found takes precedence.
|
||||
This allows the full set of version specific Day 0 manifests (extra-manifests) to be placed in Git and referenced from the `SiteConfig`.
|
||||
With this feature, you can deploy multiple {product-title} versions to managed clusters simultaneously.
|
||||
|
||||
* The `extraManifestPath` field of the `SiteConfig` CR is deprecated from {product-title} 4.15 and later.
|
||||
Use the new `extraManifests.searchPaths` field instead.
|
||||
41
modules/telco-ran-hub-cluster-management.adoc
Normal file
41
modules/telco-ran-hub-cluster-management.adoc
Normal file
@@ -0,0 +1,41 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * telco_ref_design_specs/ran/telco-ran-du-overview.adoc
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="telco-ran-hub-cluster-management_{context}"]
|
||||
= Hub cluster management characteristics
|
||||
|
||||
{rh-rhacm-first} is the recommended cluster management solution.
|
||||
Configure it to the following limits on the hub cluster:
|
||||
|
||||
* Configure a maximum of 5 {rh-rhacm} policies with a compliant evaluation interval of at least 10 minutes.
|
||||
|
||||
* Use a maximum of 10 managed cluster templates in policies.
|
||||
Where possible, use hub-side templating.
|
||||
|
||||
* Disable all {rh-rhacm} add-ons except for the `policy-controller` and `observability-controller` add-ons.
|
||||
Set `Observability` to the default configuration.
|
||||
+
|
||||
[IMPORTANT]
|
||||
====
|
||||
Configuring optional components or enabling additional features will result in additional resource usage and can reduce overall system performance.
|
||||
|
||||
For more information, see xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-components.adoc#telco-reference-ran-du-deployment-components_ran-ref-design-components[Reference design deployment components].
|
||||
====
|
||||
|
||||
.OpenShift platform resource utilization under reference application load
|
||||
[cols="1,2,3", width="90%", options="header"]
|
||||
|====
|
||||
|Metric
|
||||
|Limit
|
||||
|Notes
|
||||
|
||||
|CPU usage
|
||||
|Less than 4000 mc – 2 cores (4 hyperthreads)
|
||||
|Platform CPU is pinned to reserved cores, including both hyperthreads in each reserved core. The system is engineered to use 3 CPUs (3000mc) at steady-state to allow for periodic system tasks and spikes.
|
||||
|
||||
|Memory used
|
||||
|Less than 16G
|
||||
|
|
||||
|====
|
||||
22
modules/telco-ran-local-storage-operator.adoc
Normal file
22
modules/telco-ran-local-storage-operator.adoc
Normal file
@@ -0,0 +1,22 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * telco_ref_design_specs/ran/telco-ran-ref-du-components.adoc
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="telco-ran-local-storage-operator_{context}"]
|
||||
= Local Storage Operator
|
||||
|
||||
New in this release::
|
||||
* No reference design updates in this release
|
||||
|
||||
Description::
|
||||
You can create persistent volumes that can be used as `PVC` resources by applications with the Local Storage Operator.
|
||||
The number and type of `PV` resources that you create depends on your requirements.
|
||||
|
||||
Engineering considerations::
|
||||
* Create backing storage for `PV` CRs before creating the `PV`.
|
||||
This can be a partition, a local volume, an LVM volume, or a full disk.
|
||||
* Refer to the device listing in `LocalVolume` CRs by the hardware path used to access each device to ensure correct allocation of disks and partitions.
|
||||
Logical names (for example, `/dev/sda`) are not guaranteed to be consistent across node reboots.
|
||||
+
|
||||
For more information, see the link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html/managing_file_systems/assembly_overview-of-persistent-naming-attributes_managing-file-systems#device-identifiers_assembly_overview-of-persistent-naming-attributes[{op-system-base} 9 documentation on device identifiers].
|
||||
22
modules/telco-ran-logging.adoc
Normal file
22
modules/telco-ran-logging.adoc
Normal file
@@ -0,0 +1,22 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * telco_ref_design_specs/ran/telco-ran-ref-du-components.adoc
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="telco-ran-logging_{context}"]
|
||||
= Logging
|
||||
|
||||
New in this release::
|
||||
* Vector is now the recommended log collector.
|
||||
|
||||
Description::
|
||||
Use logging to collect logs from the far edge node for remote analysis.
|
||||
|
||||
Engineering considerations::
|
||||
* Handling logs beyond the infrastructure and audit logs, for example, from the application workload requires additional CPU and network bandwidth based on additional logging rate.
|
||||
* As of {product-title} 4.14, Vector is the reference log collector.
|
||||
+
|
||||
[NOTE]
|
||||
====
|
||||
Use of fluentd in the RAN use model is deprecated.
|
||||
====
|
||||
56
modules/telco-ran-lvms-operator.adoc
Normal file
56
modules/telco-ran-lvms-operator.adoc
Normal file
@@ -0,0 +1,56 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * telco_ref_design_specs/ran/telco-ran-ref-du-components.adoc
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="telco-ran-lvms-operator_{context}"]
|
||||
= LVMS Operator
|
||||
|
||||
New in this release::
|
||||
* Simplified LVMS `deviceSelector` logic
|
||||
|
||||
* LVM Storage with `ext4` and `PV` resources
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
LVMS Operator is an optional component.
|
||||
====
|
||||
|
||||
Description::
|
||||
The LVMS Operator provides dynamic provisioning of block and file storage.
|
||||
The LVMS Operator creates logical volumes from local devices that can be used as `PVC` resources by applications.
|
||||
Volume expansion and snapshots are also possible.
|
||||
+
|
||||
The following example configuration creates a `vg1` volume group that leverages all available disks on the node except the installation disk:
|
||||
+
|
||||
.StorageLVMCluster.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
apiVersion: lvm.topolvm.io/v1alpha1
|
||||
kind: LVMCluster
|
||||
metadata:
|
||||
name: storage-lvmcluster
|
||||
namespace: openshift-storage
|
||||
annotations:
|
||||
ran.openshift.io/ztp-deploy-wave: "10"
|
||||
spec:
|
||||
storage:
|
||||
deviceClasses:
|
||||
- name: vg1
|
||||
thinPoolConfig:
|
||||
name: thin-pool-1
|
||||
sizePercent: 90
|
||||
overprovisionRatio: 10
|
||||
----
|
||||
|
||||
Limits and requirements::
|
||||
* In {sno} clusters, persistent storage must be provided by either LVMS or Local Storage, not both.
|
||||
|
||||
Engineering considerations::
|
||||
* The LVMS Operator is not the reference storage solution for the DU use case.
|
||||
If you require LVMS Operator for application workloads, the resource use is accounted for against the application cores.
|
||||
|
||||
* Ensure that sufficient disks or partitions are available for storage requirements.
|
||||
48
modules/telco-ran-machine-configuration.adoc
Normal file
48
modules/telco-ran-machine-configuration.adoc
Normal file
@@ -0,0 +1,48 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * telco_ref_design_specs/ran/telco-ran-ref-du-components.adoc
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="telco-ran-machine-configuration_{context}"]
|
||||
= Machine configuration
|
||||
|
||||
New in this release::
|
||||
* Set `rcu_normal` after node recovery
|
||||
|
||||
Limits and requirements::
|
||||
* The CRI-O wipe disable `MachineConfig` assumes that images on disk are static other than during scheduled maintenance in defined maintenance windows.
|
||||
To ensure the images are static, do not set the pod `imagePullPolicy` field to `Always`.
|
||||
+
|
||||
.Machine configuration options
|
||||
[cols=2*, width="90%", options="header"]
|
||||
|====
|
||||
|Feature
|
||||
|Description
|
||||
|
||||
|Container runtime
|
||||
|Sets the container runtime to `crun` for all node roles.
|
||||
|
||||
|kubelet config and container mount hiding
|
||||
|Reduces the frequency of kubelet housekeeping and eviction monitoring to reduce CPU usage.
|
||||
Create a container mount namespace, visible to kubelet and CRI-O, to reduce system mount scanning resource usage.
|
||||
|
||||
|SCTP
|
||||
|Optional configuration (enabled by default)
|
||||
Enables SCTP. SCTP is required by RAN applications but disabled by default in {op-system}.
|
||||
|
||||
|kdump
|
||||
|Optional configuration (enabled by default)
|
||||
Enables kdump to capture debug information when a kernel panic occurs.
|
||||
|
||||
|CRI-O wipe disable
|
||||
|Disables automatic wiping of the CRI-O image cache after unclean shutdown.
|
||||
|
||||
|SR-IOV-related kernel arguments
|
||||
|Includes additional SR-IOV related arguments in the kernel command line.
|
||||
|
||||
|RCU Normal systemd service
|
||||
|Sets `rcu_normal` after the system is fully started.
|
||||
|
||||
|One-shot time sync
|
||||
|Runs a one-time system time synchronization job for control plane or worker nodes.
|
||||
|====
|
||||
38
modules/telco-ran-managed-cluster-resources.adoc
Normal file
38
modules/telco-ran-managed-cluster-resources.adoc
Normal file
@@ -0,0 +1,38 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * telco_ref_design_specs/ran/telco-ran-du-overview.adoc
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="telco-ran-managed-cluster-resources_{context}"]
|
||||
= {rds-caps} worker node cluster resource utilization
|
||||
|
||||
The maximum number of running pods in the system, inclusive of application workloads and {product-title} pods, is 120.
|
||||
|
||||
Resource utilization::
|
||||
{product-title} resource utilization varies depending on many factors including application workload characteristics such as:
|
||||
|
||||
* Pod count
|
||||
|
||||
* Type and frequency of probes
|
||||
|
||||
* Messaging rates on primary CNI or secondary CNI with kernel networking
|
||||
|
||||
* API access rate
|
||||
|
||||
* Logging rates
|
||||
|
||||
* Storage IOPS
|
||||
|
||||
Cluster resource requirements are applicable under the following conditions:
|
||||
|
||||
* The cluster is running the described representative application workload.
|
||||
|
||||
* The cluster is managed with the constraints described in xref:../../telco_ref_design_specs/ran/telco-ran-ref-validation-artifacts.adoc#telco-ran-hub-cluster-management_ran-ref-design-overview[Hub cluster management resources].
|
||||
|
||||
* Components noted as optional in the RAN DU use model configuration are not applied.
|
||||
|
||||
[IMPORTANT]
|
||||
====
|
||||
You will need to do additional analysis to determine the impact on resource utilization and ability to meet KPI targets for configurations outside the scope of the {rds-caps} reference design.
|
||||
You might have to allocate additional resources in the cluster depending on your requirements.
|
||||
====
|
||||
40
modules/telco-ran-measured-kpi-results.adoc
Normal file
40
modules/telco-ran-measured-kpi-results.adoc
Normal file
@@ -0,0 +1,40 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * telco_ref_design_specs/ran/telco-ran-du-overview.adoc
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="telco-ran-measured-kpi-results_{context}"]
|
||||
= KPI test results overview
|
||||
|
||||
All results are taken from 12-hour test runs.
|
||||
|
||||
Realtime kernel KPI test::
|
||||
Workload nodes running the realtime kernel are validated to these performance KPIs:
|
||||
|
||||
* No `oslat` samples greater than 20µs.
|
||||
|
||||
* 99.9999% of `cyclictest` samples are less than 10µs. No samples above 20µs.
|
||||
|
||||
Non-realtime kernel KPI test::
|
||||
The non-realtime kernel can be run with a reduced predictable latency target.
|
||||
The following performance KPIs are validated:
|
||||
|
||||
* No `oslat` samples above 20µs.
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
`cyclictest` tests are not applicable for non-realtime systems.
|
||||
====
|
||||
|
||||
RFC2544 KPI test::
|
||||
* Zero packet loss at 99.9% line rate (25 Gbps) for 512 byte frames.
|
||||
|
||||
* Packet latency is less than 30µs at 80% line rate for 512 byte frames over 12 hour test.
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
The test application is the DPDK `testpmd` utility.
|
||||
====
|
||||
|
||||
PTP network synchronization::
|
||||
* The time offset for boundary and ordinary clock configurations, as measured at the follower port on the Intel E810-XXVDA4 (Salem Channel) NIC is less than 100 ns as indicated by the `openshift_ptp_offset_ns` metric.
|
||||
74
modules/telco-ran-node-tuning-operator.adoc
Normal file
74
modules/telco-ran-node-tuning-operator.adoc
Normal file
@@ -0,0 +1,74 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * telco_ref_design_specs/ran/telco-ran-ref-du-components.adoc
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="telco-ran-node-tuning-operator_{context}"]
|
||||
= Node Tuning Operator
|
||||
|
||||
New in this release::
|
||||
* No reference design updates in this release
|
||||
|
||||
Description::
|
||||
You tune the cluster performance by link:https://docs.openshift.com/container-platform/latest/scalability_and_performance/cnf-create-performance-profiles.html[creating a performance profile].
|
||||
Settings that you configure with a performance profile include:
|
||||
+
|
||||
* Selecting the realtime or non-realtime kernel.
|
||||
|
||||
* Allocating cores to a reserved or isolated `cpuset`.
|
||||
{product-title} processes allocated to the management workload partition are pinned to reserved set.
|
||||
|
||||
* Enabling kubelet features (CPU manager, topology manager, and memory manager).
|
||||
|
||||
* Configuring huge pages.
|
||||
|
||||
* Setting additional kernel arguments.
|
||||
|
||||
* Setting per-core power tuning and max CPU frequency.
|
||||
|
||||
Limits and requirements::
|
||||
|
||||
The Node Tuning Operator uses the `PerformanceProfile` CR to configure the cluster. You need to configure the following settings in the RAN DU profile `PerformanceProfile` CR:
|
||||
|
||||
* Select reserved and isolated cores and ensure that you allocate at least 4 hyperthreads (equivalent to 2 cores) on Intel 3rd Generation Xeon (Ice Lake) 2.20 GHz CPUs or better with firmware tuned for maximum performance.
|
||||
|
||||
* Set the reserved `cpuset` to include both hyperthread siblings for each included core.
|
||||
Unreserved cores are available as allocatable CPU for scheduling workloads.
|
||||
Ensure that hyperthread siblings are not split across reserved and isolated cores.
|
||||
|
||||
* Configure reserved and isolated CPUs to include all threads in all cores based on what you have set as reserved and isolated CPUs.
|
||||
|
||||
* Set core 0 of each NUMA node to be included in the reserved CPU set.
|
||||
|
||||
* Set the huge page size to 1G.
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
You should not add additional workloads to the management partition.
|
||||
Only those pods which are part of the OpenShift management platform should be annotated into the management partition.
|
||||
====
|
||||
|
||||
Engineering considerations::
|
||||
* You should use the RT kernel to meet performance requirements.
|
||||
+
|
||||
[NOTE]
|
||||
====
|
||||
You can use the non-RT kernel if required.
|
||||
====
|
||||
|
||||
* The number of huge pages that you configure depends on the application workload requirements.
|
||||
Variation in this parameter is expected and allowed.
|
||||
|
||||
* Variation is expected in the configuration of reserved and isolated CPU sets based on selected hardware and additional components in use on the system.
|
||||
Variation must still meet the specified limits.
|
||||
|
||||
* Hardware without IRQ affinity support impacts isolated CPUs.
|
||||
To ensure that pods with guaranteed whole CPU QoS have full use of the allocated CPU, all hardware in the server must support IRQ affinity.
|
||||
For more information, see link:https://docs.openshift.com/container-platform/latest/scalability_and_performance/cnf-low-latency-tuning.html#about_irq_affinity_setting_cnf-master[About support of IRQ affinity setting].
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
In {product-title} {product-version}, any `PerformanceProfile` CR configured on the cluster causes the Node Tuning Operator to automatically set all cluster nodes to use cgroup v1.
|
||||
|
||||
For more information about cgroups, see link:https://docs.openshift.com/container-platform/4.14/nodes/clusters/nodes-cluster-cgroups-2.html#nodes-clusters-cgroups-2_nodes-cluster-cgroups-2[Configuring Linux cgroup].
|
||||
====
|
||||
45
modules/telco-ran-ptp-operator.adoc
Normal file
45
modules/telco-ran-ptp-operator.adoc
Normal file
@@ -0,0 +1,45 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * telco_ref_design_specs/ran/telco-ran-ref-du-components.adoc
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="telco-ran-ptp-operator_{context}"]
|
||||
= PTP Operator
|
||||
|
||||
New in this release::
|
||||
* PTP grandmaster clock (T-GM) GPS timing with Intel E810-XXV-4T Westport Channel NIC – minimum firmware version 4.30 (Technology Preview)
|
||||
|
||||
* PTP events and metrics for grandmaster (T-GM) are new in {product-title} {product-version} (Technology Preview)
|
||||
|
||||
Description::
|
||||
Configuration of link:https://docs.openshift.com/container-platform/latest/scalability_and_performance/ztp_far_edge/ztp-reference-cluster-configuration-for-vdu.html#ztp-sno-du-configuring-ptp_sno-configure-for-vdu[PTP timing] support for cluster nodes.
|
||||
The DU node can run in the following modes:
|
||||
+
|
||||
* As an ordinary clock synced to a T-GM or boundary clock (T-BC)
|
||||
|
||||
* As dual boundary clocks, one per NIC (high availability is not supported)
|
||||
|
||||
* As grandmaster clock with support for E810 Westport Channel NICs (Technology Preview)
|
||||
|
||||
* Optionally as a boundary clock for radio units (RUs)
|
||||
|
||||
+
|
||||
Optional: subscribe applications to PTP events that happen on the node that the application is running.
|
||||
You subscribe the application to events by using HTTP.
|
||||
|
||||
Limits and requirements::
|
||||
* High availability is not supported with dual NIC configurations.
|
||||
|
||||
* Westport Channel NICs configured as T-GM do not support DPLL with the current ice driver version.
|
||||
|
||||
* GPS offsets are not reported.
|
||||
Use a default offset of less than or equal to 5.
|
||||
|
||||
* DPLL offsets are not reported.
|
||||
Use a default offset of less than or equal to 5.
|
||||
|
||||
|
||||
Engineering considerations::
|
||||
* Configurations are provided for ordinary clock, boundary clock, or grandmaster clock
|
||||
|
||||
* PTP fast event notifications use `ConfigMap` CRs to store PTP event subscriptions.
|
||||
@@ -0,0 +1,27 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * telco_ref_design_specs/ran/telco-ran-ref-du-components.adoc
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="telco-ran-red-hat-advanced-cluster-management-rhacm_{context}"]
|
||||
= {rh-rhacm-first}
|
||||
|
||||
New in this release::
|
||||
* Additional node labels can be configured during installation.
|
||||
|
||||
Description::
|
||||
{rh-rhacm} provides Multi Cluster Engine (MCE) installation and ongoing lifecycle management functionality for deployed clusters.
|
||||
You declaratively specify configurations and upgrades with `Policy` CRs and apply the policies to clusters with the {rh-rhacm} policy controller as managed by {cgu-operator-full}.
|
||||
+
|
||||
* {ztp-first} uses the MCE feature of {rh-rhacm}
|
||||
* Configuration, upgrades, and cluster status are managed with the {rh-rhacm} policy controller
|
||||
|
||||
Limits and requirements::
|
||||
* A single hub cluster supports up to 3500 deployed {sno} clusters with 5 `Policy` CRs bound to each cluster.
|
||||
|
||||
Engineering considerations::
|
||||
* Cluster specific configuration: managed clusters typically have some number of configuration values that are specific to the individual cluster.
|
||||
These configurations should be managed using {rh-rhacm} policy hub-side templating with values pulled from `ConfigMap` CRs based on the cluster name.
|
||||
|
||||
* To save CPU resources on managed clusters, policies that apply static configurations should be unbound from managed clusters after {ztp} installation of the cluster.
|
||||
For more information, see link:https://docs.openshift.com/container-platform/latest/storage/understanding-persistent-storage.html#releasing_understanding-persistent-storage[Release a persistent volume].
|
||||
15
modules/telco-ran-redfish-operator.adoc
Normal file
15
modules/telco-ran-redfish-operator.adoc
Normal file
@@ -0,0 +1,15 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * telco_ref_design_specs/ran/telco-ran-ref-du-components.adoc
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="telco-ran-redfish-operator_{context}"]
|
||||
= {redfish-operator}
|
||||
|
||||
The {redfish-operator} is an optional Operator that runs exclusively on the managed spoke cluster. It relays Redfish hardware events to cluster applications.
|
||||
|
||||
[NOTE]
|
||||
====
|
||||
The {redfish-operator} is not included in the RAN DU use model reference configuration and is an optional feature.
|
||||
If you want to use the {redfish-operator}, assign additional CPU resources from the application CPU budget.
|
||||
====
|
||||
0
modules/telco-ran-ref-design-spec.adoc
Normal file
0
modules/telco-ran-ref-design-spec.adoc
Normal file
19
modules/telco-ran-sr-iov-operator.adoc
Normal file
19
modules/telco-ran-sr-iov-operator.adoc
Normal file
@@ -0,0 +1,19 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * telco_ref_design_specs/ran/telco-ran-ref-du-components.adoc
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="telco-ran-sr-iov-operator_{context}"]
|
||||
= SR-IOV Operator
|
||||
|
||||
New in this release::
|
||||
* No reference design updates in this release
|
||||
|
||||
Description::
|
||||
The SR-IOV Operator provisions and configures the SR-IOV CNI and device plugins.
|
||||
Both `netdevice` (kernel VFs) and `vfio` (DPDK) devices are supported.
|
||||
|
||||
Engineering considerations::
|
||||
* Customer variation on the configuration and number of `SriovNetwork` and `SriovNetworkNodePolicy` custom resources (CRs) is expected.
|
||||
|
||||
* IOMMU kernel command line settings are applied with a `MachineConfig` CR at install time. This ensures that the `SriovOperator` CR does not cause a reboot of the node when adding them.
|
||||
26
modules/telco-ran-sriov-fec-operator.adoc
Normal file
26
modules/telco-ran-sriov-fec-operator.adoc
Normal file
@@ -0,0 +1,26 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * telco_ref_design_specs/ran/telco-ran-ref-du-components.adoc
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="telco-ran-sriov-fec-operator_{context}"]
|
||||
= SRIOV-FEC Operator
|
||||
|
||||
New in this release::
|
||||
* No reference design updates in this release
|
||||
|
||||
Description::
|
||||
SRIOV-FEC Operator is an optional third-party Certified Operator supporting FEC accelerator hardware.
|
||||
|
||||
Limits and requirements::
|
||||
* Starting with FEC Operator v2.7.0:
|
||||
|
||||
** `SecureBoot` is supported
|
||||
|
||||
** The `vfio` driver for the `PF` requires the use of a `vfio-token` that is injected into Pods.
|
||||
The `VF` token can be passed to DPDK by using the EAL parameter `--vfio-vf-token`.
|
||||
|
||||
Engineering considerations::
|
||||
* The SRIOV-FEC Operator uses CPU cores from the `isolated` CPU set.
|
||||
|
||||
* You can validate FEC readiness as part of the pre-checks for application deployment, for example, by extending the validation policy.
|
||||
71
modules/telco-ran-topology-aware-lifecycle-manager-talm.adoc
Normal file
71
modules/telco-ran-topology-aware-lifecycle-manager-talm.adoc
Normal file
@@ -0,0 +1,71 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * telco_ref_design_specs/ran/telco-ran-ref-du-components.adoc
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="telco-ran-topology-aware-lifecycle-manager-talm_{context}"]
|
||||
= {cgu-operator-first}
|
||||
|
||||
New in this release::
|
||||
* Added support for pre-caching additional user-specified images
|
||||
|
||||
Description::
|
||||
+
|
||||
--
|
||||
Managed updates::
|
||||
{cgu-operator} is an Operator that runs only on the hub cluster for managing how changes (including cluster and Operator upgrades, configuration, and so on) are rolled out to the network.
|
||||
{cgu-operator} does the following:
|
||||
|
||||
* Progressively applies updates to fleets of clusters in user-configurable batches by using `Policy` CRs.
|
||||
* Adds `ztp-done` labels or other user configurable labels on a per-cluster basis
|
||||
|
||||
Precaching for {sno} clusters::
|
||||
{cgu-operator} supports optional precaching of {product-title}, OLM Operator, and additional user images to {sno} clusters before initiating an upgrade.
|
||||
+
|
||||
* A new `PreCachingConfig` custom resource is available for specifying optional precaching configurations.
|
||||
For example:
|
||||
+
|
||||
[source,yaml]
|
||||
----
|
||||
apiVersion: ran.openshift.io/v1alpha1
|
||||
kind: PreCachingConfig
|
||||
metadata:
|
||||
name: example-config
|
||||
namespace: example-ns
|
||||
spec:
|
||||
additionalImages:
|
||||
- quay.io/foobar/application1@sha256:3d5800990dee7cd4727d3fe238a97e2d2976d3808fc925ada29c559a47e2e
|
||||
- quay.io/foobar/application2@sha256:3d5800123dee7cd4727d3fe238a97e2d2976d3808fc925ada29c559a47adf
|
||||
- quay.io/foobar/applicationN@sha256:4fe1334adfafadsf987123adfffdaf1243340adfafdedga0991234afdadfs
|
||||
spaceRequired: 45 GiB <1>
|
||||
overrides:
|
||||
preCacheImage: quay.io/test_images/pre-cache:latest
|
||||
platformImage: quay.io/openshift-release-dev/ocp-release@sha256:3d5800990dee7cd4727d3fe238a97e2d2976d3808fc925ada29c559a47e2e
|
||||
operatorsIndexes:
|
||||
- registry.example.com:5000/custom-redhat-operators:1.0.0
|
||||
operatorsPackagesAndChannels:
|
||||
- local-storage-operator: stable
|
||||
- ptp-operator: stable
|
||||
- sriov-network-operator: stable
|
||||
excludePrecachePatterns: <2>
|
||||
- aws
|
||||
- vsphere
|
||||
----
|
||||
<1> The configurable `spaceRequired` parameter allows you to validate storage space before and after precaching
|
||||
<2> Configurable filtering allows exclusion of unused images
|
||||
--
|
||||
|
||||
Backup and restore for {sno}::
|
||||
{cgu-operator} supports taking a snapshot of the cluster operating system and configuration to a dedicated partition on a local disk.
|
||||
A restore script is provided that returns the cluster to the backed up state.
|
||||
|
||||
Limits and requirements::
|
||||
* {cgu-operator} supports concurrent cluster deployment in batches of 400
|
||||
|
||||
* Precaching and backup features are for {sno} clusters only.
|
||||
|
||||
Engineering considerations::
|
||||
* The `PreCachingConfig` CR is optional and does not need to be created if you just want to precache platform-related (OpenShift and OLM Operator) images.
|
||||
The `PreCachingConfig` CR must be applied before referencing it in the `ClusterGroupUpgrade` CR.
|
||||
|
||||
* Create a recovery partition during installation if you opt to use the {cgu-operator} backup and restore feature.
|
||||
43
modules/telco-ran-workload-partitioning.adoc
Normal file
43
modules/telco-ran-workload-partitioning.adoc
Normal file
@@ -0,0 +1,43 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * telco_ref_design_specs/ran/telco-ran-ref-du-components.adoc
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="telco-ran-workload-partitioning_{context}"]
|
||||
= Workload partitioning
|
||||
|
||||
New in this release::
|
||||
* No reference design updates in this release
|
||||
|
||||
Description::
|
||||
Workload partitioning pins OpenShift platform and Day 2 Operator pods that are part of the DU profile to the reserved `cpuset` and removes the reserved CPU from node accounting.
|
||||
This leaves all unreserved CPU cores available for user workloads.
|
||||
+
|
||||
The method of enabling and configuring workload partitioning changed in {product-title} 4.14.
|
||||
+
|
||||
--
|
||||
4.14 and later::
|
||||
* Configure partitions by setting installation parameters:
|
||||
+
|
||||
[source,yaml]
|
||||
----
|
||||
cpuPartitioningMode: AllNodes
|
||||
----
|
||||
|
||||
* Configure management partition cores with the reserved CPU set in the `PerformanceProfile` CR
|
||||
|
||||
4.13 and earlier::
|
||||
* Configure partitions with extra `MachineConfiguration` CRs applied at install-time
|
||||
--
|
||||
|
||||
Limits and requirements::
|
||||
* `Namespace` and `Pod` CRs must be annotated to allow the pod to be applied to the management partition
|
||||
|
||||
* Pods with CPU limits cannot be allocated to the partition.
|
||||
This is because mutation can change the pod QoS.
|
||||
|
||||
* For more information about the minimum number of CPUs that can be allocated to the management partition, see xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-components.adoc#telco-ran-node-tuning-operator_ran-ref-design-components[Node Tuning Operator].
|
||||
|
||||
Engineering considerations::
|
||||
* Workload partitioning pins all management pods to reserved cores.
|
||||
A sufficient number of cores must be allocated to the reserved set to account for operating system, management pods, and expected spikes in CPU use that occur when the workload starts, the node reboots, or other system events happen.
|
||||
49
modules/telco-ran-yaml-ref-cluster-tuning.adoc
Normal file
49
modules/telco-ran-yaml-ref-cluster-tuning.adoc
Normal file
@@ -0,0 +1,49 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="cluster-tuning-yaml_{context}"]
|
||||
= Cluster tuning reference YAML
|
||||
|
||||
[id="ztp-example-sno-yaml"]
|
||||
.example-sno.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_example-sno.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-disablesnonetworkdiag-yaml"]
|
||||
.DisableSnoNetworkDiag.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_DisableSnoNetworkDiag.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-reducemonitoringfootprint-yaml"]
|
||||
.ReduceMonitoringFootprint.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_ReduceMonitoringFootprint.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-defaultcatsrc-yaml"]
|
||||
.DefaultCatsrc.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_DefaultCatsrc.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-disconnectedicsp-yaml"]
|
||||
.DisconnectedICSP.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_DisconnectedICSP.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-operatorhub-yaml"]
|
||||
.OperatorHub.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_OperatorHub.yaml[]
|
||||
----
|
||||
210
modules/telco-ran-yaml-ref-day-2-operators.adoc
Normal file
210
modules/telco-ran-yaml-ref-day-2-operators.adoc
Normal file
@@ -0,0 +1,210 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="day-2-operators-yaml_{context}"]
|
||||
= Day 2 Operators reference YAML
|
||||
|
||||
[id="ztp-clusterlogforwarder-yaml"]
|
||||
.ClusterLogForwarder.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_ClusterLogForwarder.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-clusterlogging-yaml"]
|
||||
.ClusterLogging.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_ClusterLogging.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-clusterlogns-yaml"]
|
||||
.ClusterLogNS.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_ClusterLogNS.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-clusterlogopergroup-yaml"]
|
||||
.ClusterLogOperGroup.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_ClusterLogOperGroup.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-clusterlogsubscription-yaml"]
|
||||
.ClusterLogSubscription.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_ClusterLogSubscription.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-storageclass-yaml"]
|
||||
.StorageClass.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_StorageClass.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-storagelv-yaml"]
|
||||
.StorageLV.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_StorageLV.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-storagens-yaml"]
|
||||
.StorageNS.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_StorageNS.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-storageopergroup-yaml"]
|
||||
.StorageOperGroup.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_StorageOperGroup.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-storagesubscription-yaml"]
|
||||
.StorageSubscription.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_StorageSubscription.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-performanceprofile-yaml"]
|
||||
.PerformanceProfile.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_PerformanceProfile.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-tunedperformancepatch-yaml"]
|
||||
.TunedPerformancePatch.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_TunedPerformancePatch.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-ptpoperatorconfigforevent-yaml"]
|
||||
.PtpOperatorConfigForEvent.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_PtpOperatorConfigForEvent.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-ptpconfigboundary-yaml"]
|
||||
.PtpConfigBoundary.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_PtpConfigBoundary.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-ptpconfiggmwpc-yaml"]
|
||||
.PtpConfigGmWpc.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_PtpConfigGmWpc.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-ptpconfigslave-yaml"]
|
||||
.PtpConfigSlave.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_PtpConfigSlave.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-ptpsubscription-yaml"]
|
||||
.PtpSubscription.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_PtpSubscription.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-ptpsubscriptionns-yaml"]
|
||||
.PtpSubscriptionNS.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_PtpSubscriptionNS.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-ptpsubscriptionopergroup-yaml"]
|
||||
.PtpSubscriptionOperGroup.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_PtpSubscriptionOperGroup.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-acceleratorsns-yaml"]
|
||||
.AcceleratorsNS.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_AcceleratorsNS.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-acceleratorsopergroup-yaml"]
|
||||
.AcceleratorsOperGroup.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_AcceleratorsOperGroup.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-acceleratorssubscription-yaml"]
|
||||
.AcceleratorsSubscription.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_AcceleratorsSubscription.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-sriovfecclusterconfig-yaml"]
|
||||
.SriovFecClusterConfig.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_SriovFecClusterConfig.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-sriovnetwork-yaml"]
|
||||
.SriovNetwork.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_SriovNetwork.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-sriovnetworknodepolicy-yaml"]
|
||||
.SriovNetworkNodePolicy.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_SriovNetworkNodePolicy.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-sriovoperatorconfig-yaml"]
|
||||
.SriovOperatorConfig.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_SriovOperatorConfig.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-sriovsubscription-yaml"]
|
||||
.SriovSubscription.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_SriovSubscription.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-sriovsubscriptionns-yaml"]
|
||||
.SriovSubscriptionNS.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_SriovSubscriptionNS.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-sriovsubscriptionopergroup-yaml"]
|
||||
.SriovSubscriptionOperGroup.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_SriovSubscriptionOperGroup.yaml[]
|
||||
----
|
||||
112
modules/telco-ran-yaml-ref-machine-configuration.adoc
Normal file
112
modules/telco-ran-yaml-ref-machine-configuration.adoc
Normal file
@@ -0,0 +1,112 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="machine-configuration-yaml_{context}"]
|
||||
= Machine configuration reference YAML
|
||||
|
||||
[id="ztp-enable-crun-master-yaml"]
|
||||
.enable-crun-master.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_enable-crun-master.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-enable-crun-worker-yaml"]
|
||||
.enable-crun-worker.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_enable-crun-worker.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-99-crio-disable-wipe-master-yaml"]
|
||||
.99-crio-disable-wipe-master.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_99-crio-disable-wipe-master.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-99-crio-disable-wipe-worker-yaml"]
|
||||
.99-crio-disable-wipe-worker.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_99-crio-disable-wipe-worker.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-05-kdump-config-master-yaml"]
|
||||
.05-kdump-config-master.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_05-kdump-config-master.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-05-kdump-config-worker-yaml"]
|
||||
.05-kdump-config-worker.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_05-kdump-config-worker.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-06-kdump-master-yaml"]
|
||||
.06-kdump-master.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_06-kdump-master.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-06-kdump-worker-yaml"]
|
||||
.06-kdump-worker.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_06-kdump-worker.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-01-container-mount-ns-and-kubelet-conf-master-yaml"]
|
||||
.01-container-mount-ns-and-kubelet-conf-master.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_01-container-mount-ns-and-kubelet-conf-master.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-01-container-mount-ns-and-kubelet-conf-worker-yaml"]
|
||||
.01-container-mount-ns-and-kubelet-conf-worker.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_01-container-mount-ns-and-kubelet-conf-worker.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-99-sync-time-once-master-yaml"]
|
||||
.99-sync-time-once-master.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_99-sync-time-once-master.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-99-sync-time-once-worker-yaml"]
|
||||
.99-sync-time-once-worker.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_99-sync-time-once-worker.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-03-sctp-machine-config-master-yaml"]
|
||||
.03-sctp-machine-config-master.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_03-sctp-machine-config-master.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-03-sctp-machine-config-worker-yaml"]
|
||||
.03-sctp-machine-config-worker.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_03-sctp-machine-config-worker.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-07-sriov-related-kernel-args-master-yaml"]
|
||||
.07-sriov-related-kernel-args-master.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_07-sriov-related-kernel-args-master.yaml[]
|
||||
----
|
||||
127
modules/telco-ran-yaml-ref-networking.adoc
Normal file
127
modules/telco-ran-yaml-ref-networking.adoc
Normal file
@@ -0,0 +1,127 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// *
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="networking-yaml_{context}"]
|
||||
= Networking reference YAML
|
||||
|
||||
[id="ztp-network-yaml"]
|
||||
.Network.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_Network.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-networkattachmentdefinition-yaml"]
|
||||
.networkAttachmentDefinition.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_networkAttachmentDefinition.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-sriovsubscriptionns-yaml"]
|
||||
.SriovSubscriptionNS.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_SriovSubscriptionNS.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-sriovsubscriptionopergroup-yaml"]
|
||||
.SriovSubscriptionOperGroup.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_SriovSubscriptionOperGroup.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-sriovsubscription-yaml"]
|
||||
.SriovSubscription.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_SriovSubscription.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-sriovoperatorconfig-yaml"]
|
||||
.SriovOperatorConfig.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_SriovOperatorConfig.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-sriovnetworknodepolicy-yaml"]
|
||||
.sriovNetworkNodePolicy.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_sriovNetworkNodePolicy.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-sriovnetwork-yaml"]
|
||||
.sriovNetwork.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_sriovNetwork.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-metallbns-yaml"]
|
||||
.metallbNS.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_metallbNS.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-metallbopergroup-yaml"]
|
||||
.metallbOperGroup.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_metallbOperGroup.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-metallbsubscription-yaml"]
|
||||
.metallbSubscription.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_metallbSubscription.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-metallb-yaml"]
|
||||
.metallb.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_metallb.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-bgp-peer-yaml"]
|
||||
.bgp-peer.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_bgp-peer.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-bfd-profile-yaml"]
|
||||
.bfd-profile.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_bfd-profile.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-addr-pool-yaml"]
|
||||
.addr-pool.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_addr-pool.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-bgp-advr-yaml"]
|
||||
.bgp-advr.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_bgp-advr.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-mc_rootless_pods_selinux-yaml"]
|
||||
.mc_rootless_pods_selinux.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_mc_rootless_pods_selinux.yaml[]
|
||||
----
|
||||
|
||||
92
modules/telco-ran-yaml-ref-other.adoc
Normal file
92
modules/telco-ran-yaml-ref-other.adoc
Normal file
@@ -0,0 +1,92 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// *
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="other-yaml_{context}"]
|
||||
= Other reference YAML
|
||||
|
||||
[id="ztp-catalog-source-yaml"]
|
||||
.catalog-source.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_catalog-source.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-icsp-yaml"]
|
||||
.icsp.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_icsp.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-operator-hub-yaml"]
|
||||
.operator-hub.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_operator-hub.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-clusterlogns-yaml"]
|
||||
.ClusterLogNS.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_ClusterLogNS.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-clusterlogopergroup-yaml"]
|
||||
.ClusterLogOperGroup.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_ClusterLogOperGroup.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-clusterlogsubscription-yaml"]
|
||||
.ClusterLogSubscription.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_ClusterLogSubscription.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-clusterlogforwarder-yaml"]
|
||||
.ClusterLogForwarder.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_ClusterLogForwarder.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-clusterlogging-yaml"]
|
||||
.ClusterLogging.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_ClusterLogging.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-control-plane-load-kernel-modules-yaml"]
|
||||
.control-plane-load-kernel-modules.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_control-plane-load-kernel-modules.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-worker-load-kernel-modules-yaml"]
|
||||
.worker-load-kernel-modules.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_worker-load-kernel-modules.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-sctp_module_mc-yaml"]
|
||||
.sctp_module_mc.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_sctp_module_mc.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-performanceprofile-yaml"]
|
||||
.PerformanceProfile.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_PerformanceProfile.yaml[]
|
||||
----
|
||||
|
||||
22
modules/telco-ran-yaml-ref-resource-tuning.adoc
Normal file
22
modules/telco-ran-yaml-ref-resource-tuning.adoc
Normal file
@@ -0,0 +1,22 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// *
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="resource-tuning-yaml_{context}"]
|
||||
= Resource Tuning reference YAML
|
||||
|
||||
[id="ztp-pid-limits-cr-yaml"]
|
||||
.pid-limits-cr.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_pid-limits-cr.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-control-plane-system-reserved-yaml"]
|
||||
.control-plane-system-reserved.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_control-plane-system-reserved.yaml[]
|
||||
----
|
||||
|
||||
50
modules/telco-ran-yaml-ref-scheduling.adoc
Normal file
50
modules/telco-ran-yaml-ref-scheduling.adoc
Normal file
@@ -0,0 +1,50 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// *
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="scheduling-yaml_{context}"]
|
||||
= Scheduling reference YAML
|
||||
|
||||
[id="ztp-nropsubscriptionns-yaml"]
|
||||
.NROPSubscriptionNS.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_NROPSubscriptionNS.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-nropsubscriptionopergroup-yaml"]
|
||||
.NROPSubscriptionOperGroup.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_NROPSubscriptionOperGroup.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-nropsubscription-yaml"]
|
||||
.NROPSubscription.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_NROPSubscription.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-nropsubscriptionns-yaml"]
|
||||
.NROPSubscriptionNS.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_NROPSubscriptionNS.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-sched-yaml"]
|
||||
.sched.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_sched.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-nrop-yaml"]
|
||||
.nrop.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_nrop.yaml[]
|
||||
----
|
||||
|
||||
43
modules/telco-ran-yaml-ref-storage.adoc
Normal file
43
modules/telco-ran-yaml-ref-storage.adoc
Normal file
@@ -0,0 +1,43 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// *
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="storage-yaml_{context}"]
|
||||
= Storage reference YAML
|
||||
|
||||
[id="ztp-odffns-yaml"]
|
||||
.odffNS.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_odffNS.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-odfopergroup-yaml"]
|
||||
.odfOperGroup.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_odfOperGroup.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-odfsubscription-yaml"]
|
||||
.odfSubscription.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_odfSubscription.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-01-rook-ceph-external-cluster-details.secret-yaml"]
|
||||
.01-rook-ceph-external-cluster-details.secret.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_01-rook-ceph-external-cluster-details.secret.yaml[]
|
||||
----
|
||||
|
||||
[id="ztp-02-ocs-external-storagecluster-yaml"]
|
||||
.02-ocs-external-storagecluster.yaml
|
||||
[source,yaml]
|
||||
----
|
||||
include::snippets/ztp_02-ocs-external-storagecluster.yaml[]
|
||||
----
|
||||
|
||||
17
modules/telco-ref-design-overview.adoc
Normal file
17
modules/telco-ref-design-overview.adoc
Normal file
@@ -0,0 +1,17 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// *
|
||||
:_mod-docs-content-type: CONCEPT
|
||||
[id="telco-ref-design-overview_{context}"]
|
||||
= Reference design specifications for telco 5G deployments
|
||||
|
||||
Red Hat and certified partners offer deep technical expertise and support for networking and operational capabilities required to run telco applications on {product-title} {product-version} clusters.
|
||||
|
||||
Red Hat's telco partners require a well-integrated, well-tested, and stable environment that can be replicated at scale for enterprise 5G solutions.
|
||||
The telco core and RAN DU reference design specifications (RDS) outline the recommended solution architecture based on a specific version of {product-title}.
|
||||
Each RDS describes a tested and validated platform configuration for telco core and RAN DU use models.
|
||||
The RDS ensures an optimal experience when running your applications by defining the set of critical KPIs for telco 5G core and RAN DU.
|
||||
Following the RDS minimizes high severity escalations and improves application stability.
|
||||
|
||||
5G use cases are evolving and your workloads are continually changing.
|
||||
Red Hat is committed to iterating over the telco core and RAN DU RDS to support evolving requirements based on customer and partner feedback.
|
||||
@@ -0,0 +1,32 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * telco_ref_design_specs/ran/telco-ran-du-overview.adoc
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="telco-reference-application-workload-characteristics_{context}"]
|
||||
= {rds-caps} representative reference application workload characteristics
|
||||
|
||||
The representative reference application workload has the following characteristics:
|
||||
|
||||
* Has a maximum of 15 pods and 30 containers for the vRAN application including its management and control functions
|
||||
|
||||
* Uses a maximum of 2 `ConfigMap` and 4 `Secret` CRs per pod
|
||||
|
||||
* Uses a maximum of 10 exec probes with a frequency of not less than 10 seconds
|
||||
|
||||
* Incremental application load on the `kube-apiserver` is less than 10% of the cluster platform usage
|
||||
+
|
||||
[NOTE]
|
||||
====
|
||||
You can extract CPU load from the platform metrics.
|
||||
For example:
|
||||
|
||||
[source,text]
|
||||
----
|
||||
query=avg_over_time(pod:container_cpu_usage:sum{namespace="openshift-kube-apiserver"}[30m])
|
||||
----
|
||||
====
|
||||
|
||||
* Application logs are not collected by the platform log collector
|
||||
|
||||
* Aggregate traffic on the primary CNI is less than 1 MBps
|
||||
@@ -6,7 +6,7 @@
|
||||
[id="ztp-getting-tool_{context}"]
|
||||
= Getting the {factory-prestaging-tool}
|
||||
|
||||
The {factory-prestaging-tool} Go binary is publicly available in link:https://quay.io/openshift-kni/telco-ran-tools:latest[the Telco RAN tools container image].
|
||||
The {factory-prestaging-tool} Go binary is publicly available in link:https://quay.io/openshift-kni/telco-ran-tools:latest[the {rds-first} tools container image].
|
||||
The {factory-prestaging-tool} Go binary in the container image is executed on the server running an {op-system} live image using `podman`.
|
||||
If you are working in a disconnected environment or have a private registry, you need to copy the image there so you can download the image to the server.
|
||||
|
||||
|
||||
31
modules/ztp-telco-core-software-versions.adoc
Normal file
31
modules/ztp-telco-core-software-versions.adoc
Normal file
@@ -0,0 +1,31 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * telco_ref_design_specs/core/telco-core-ref-validation-artifacts.adoc
|
||||
|
||||
:_mod-docs-content-type: CONCEPT
|
||||
[id="ztp-telco-core-software-versions_{context}"]
|
||||
= Telco core {product-version} validated solution software versions
|
||||
|
||||
The Red Hat telco core version {product-version} solution has been validated using the following Red Hat software products.
|
||||
|
||||
.Telco core {product-version} validated solution software
|
||||
[cols=2*, width="80%", options="header"]
|
||||
|====
|
||||
|Product
|
||||
|Software version
|
||||
|
||||
|Hub cluster {product-title} version
|
||||
|4.13
|
||||
|
||||
|{ztp} plugin
|
||||
|4.11, 4.12, or 4.13
|
||||
|
||||
|{rh-rhacm-first}
|
||||
|2.7
|
||||
|
||||
|{gitops-title}
|
||||
|1.9
|
||||
|
||||
|{cgu-operator-first}
|
||||
|4.11, 4.12, or 4.13
|
||||
|====
|
||||
@@ -1,25 +1,21 @@
|
||||
// Module included in the following assemblies:
|
||||
//
|
||||
// * scalability_and_performance/ztp_far_edge/ztp-preparing-the-hub-cluster.adoc
|
||||
// * telco_ref_design_specs/ran/telco-ran-ref-software-artifacts.adoc
|
||||
|
||||
:_mod-docs-content-type: REFERENCE
|
||||
[id="ztp-telco-ran-software-versions_{context}"]
|
||||
= Telco RAN {product-version} validated software components
|
||||
= Telco RAN DU {product-version} validated software components
|
||||
|
||||
The Red Hat Telco RAN {product-version} solution has been validated using the following Red Hat software products for {product-title}.
|
||||
The Red Hat telco RAN DU {product-version} solution has been validated using the following Red Hat software products for {product-title} managed clusters and hub clusters.
|
||||
|
||||
.Telco RAN DU profile components
|
||||
image::319_OpenShift_PTP_bare-metal_OCP_nodes_1023_RAN_DU.png[A diagram describing the telco RAN DU component stack]
|
||||
|
||||
The following tables list the validated software versions for managed clusters and hub clusters.
|
||||
|
||||
.Telco RAN DU {product-version} validated software components
|
||||
.Telco RAN DU managed cluster validated software components
|
||||
[cols=2*, width="80%", options="header"]
|
||||
|====
|
||||
|Component
|
||||
|Software version
|
||||
|
||||
|RAN DU cluster version
|
||||
|Managed cluster version
|
||||
|4.14
|
||||
|
||||
|Cluster Logging Operator
|
||||
@@ -44,7 +40,7 @@ The following tables list the validated software versions for managed clusters a
|
||||
|2.7
|
||||
|====
|
||||
|
||||
.Telco RAN {product-version} hub cluster validated software components
|
||||
.Hub cluster validated software components
|
||||
[cols=2*, width="80%", options="header"]
|
||||
|====
|
||||
|Component
|
||||
|
||||
@@ -0,0 +1,12 @@
|
||||
# required
|
||||
# count: 1
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: rook-ceph-external-cluster-details
|
||||
namespace: openshift-storage
|
||||
type: Opaque
|
||||
data:
|
||||
# encoded content has been made generic
|
||||
external_cluster_details: eyJuYW1lIjoicm9vay1jZXBoLW1vbi1lbmRwb2ludHMiLCJraW5kIjoiQ29uZmlnTWFwIiwiZGF0YSI6eyJkYXRhIjoiY2VwaHVzYTE9MS4yLjMuNDo2Nzg5IiwibWF4TW9uSWQiOiIwIiwibWFwcGluZyI6Int9In19LHsibmFtZSI6InJvb2stY2VwaC1tb24iLCJraW5kIjoiU2VjcmV0IiwiZGF0YSI6eyJhZG1pbi1zZWNyZXQiOiJhZG1pbi1zZWNyZXQiLCJmc2lkIjoiMTExMTExMTEtMTExMS0xMTExLTExMTEtMTExMTExMTExMTExIiwibW9uLXNlY3JldCI6Im1vbi1zZWNyZXQifX0seyJuYW1lIjoicm9vay1jZXBoLW9wZXJhdG9yLWNyZWRzIiwia2luZCI6IlNlY3JldCIsImRhdGEiOnsidXNlcklEIjoiY2xpZW50LmhlYWx0aGNoZWNrZXIiLCJ1c2VyS2V5IjoiYzJWamNtVjAifX0seyJuYW1lIjoibW9uaXRvcmluZy1lbmRwb2ludCIsImtpbmQiOiJDZXBoQ2x1c3RlciIsImRhdGEiOnsiTW9uaXRvcmluZ0VuZHBvaW50IjoiMS4yLjMuNCwxLjIuMy4zLDEuMi4zLjIiLCJNb25pdG9yaW5nUG9ydCI6IjkyODMifX0seyJuYW1lIjoiY2VwaC1yYmQiLCJraW5kIjoiU3RvcmFnZUNsYXNzIiwiZGF0YSI6eyJwb29sIjoib2RmX3Bvb2wifX0seyJuYW1lIjoicm9vay1jc2ktcmJkLW5vZGUiLCJraW5kIjoiU2VjcmV0IiwiZGF0YSI6eyJ1c2VySUQiOiJjc2ktcmJkLW5vZGUiLCJ1c2VyS2V5IjoiIn19LHsibmFtZSI6InJvb2stY3NpLXJiZC1wcm92aXNpb25lciIsImtpbmQiOiJTZWNyZXQiLCJkYXRhIjp7InVzZXJJRCI6ImNzaS1yYmQtcHJvdmlzaW9uZXIiLCJ1c2VyS2V5IjoiYzJWamNtVjAifX0seyJuYW1lIjoicm9vay1jc2ktY2VwaGZzLXByb3Zpc2lvbmVyIiwia2luZCI6IlNlY3JldCIsImRhdGEiOnsiYWRtaW5JRCI6ImNzaS1jZXBoZnMtcHJvdmlzaW9uZXIiLCJhZG1pbktleSI6IiJ9fSx7Im5hbWUiOiJyb29rLWNzaS1jZXBoZnMtbm9kZSIsImtpbmQiOiJTZWNyZXQiLCJkYXRhIjp7ImFkbWluSUQiOiJjc2ktY2VwaGZzLW5vZGUiLCJhZG1pbktleSI6ImMyVmpjbVYwIn19LHsibmFtZSI6ImNlcGhmcyIsImtpbmQiOiJTdG9yYWdlQ2xhc3MiLCJkYXRhIjp7ImZzTmFtZSI6ImNlcGhmcyIsInBvb2wiOiJtYW5pbGFfZGF0YSJ9fQ==
|
||||
12
snippets/telco-core_02-ocs-external-storagecluster.yaml
Normal file
12
snippets/telco-core_02-ocs-external-storagecluster.yaml
Normal file
@@ -0,0 +1,12 @@
|
||||
# required
|
||||
# count: 1
|
||||
---
|
||||
apiVersion: ocs.openshift.io/v1
|
||||
kind: StorageCluster
|
||||
metadata:
|
||||
name: ocs-external-storagecluster
|
||||
namespace: openshift-storage
|
||||
spec:
|
||||
externalStorage:
|
||||
enable: true
|
||||
labelSelector: {}
|
||||
26
snippets/telco-core_ClusterLogForwarder.yaml
Normal file
26
snippets/telco-core_ClusterLogForwarder.yaml
Normal file
@@ -0,0 +1,26 @@
|
||||
# required
|
||||
# count: 1
|
||||
apiVersion: logging.openshift.io/v1
|
||||
kind: ClusterLogForwarder
|
||||
metadata:
|
||||
name: instance
|
||||
namespace: openshift-logging
|
||||
spec:
|
||||
outputs:
|
||||
- type: "kafka"
|
||||
name: kafka-open
|
||||
url: tcp://10.11.12.13:9092/test
|
||||
pipelines:
|
||||
- inputRefs:
|
||||
- infrastructure
|
||||
#- application
|
||||
- audit
|
||||
labels:
|
||||
label1: test1
|
||||
label2: test2
|
||||
label3: test3
|
||||
label4: test4
|
||||
label5: test5
|
||||
name: all-to-default
|
||||
outputRefs:
|
||||
- kafka-open
|
||||
7
snippets/telco-core_ClusterLogNS.yaml
Normal file
7
snippets/telco-core_ClusterLogNS.yaml
Normal file
@@ -0,0 +1,7 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: openshift-logging
|
||||
annotations:
|
||||
workload.openshift.io/allowed: management
|
||||
9
snippets/telco-core_ClusterLogOperGroup.yaml
Normal file
9
snippets/telco-core_ClusterLogOperGroup.yaml
Normal file
@@ -0,0 +1,9 @@
|
||||
---
|
||||
apiVersion: operators.coreos.com/v1
|
||||
kind: OperatorGroup
|
||||
metadata:
|
||||
name: cluster-logging
|
||||
namespace: openshift-logging
|
||||
spec:
|
||||
targetNamespaces:
|
||||
- openshift-logging
|
||||
11
snippets/telco-core_ClusterLogSubscription.yaml
Normal file
11
snippets/telco-core_ClusterLogSubscription.yaml
Normal file
@@ -0,0 +1,11 @@
|
||||
apiVersion: operators.coreos.com/v1alpha1
|
||||
kind: Subscription
|
||||
metadata:
|
||||
name: cluster-logging
|
||||
namespace: openshift-logging
|
||||
spec:
|
||||
channel: "stable"
|
||||
name: cluster-logging
|
||||
source: redhat-operators-disconnected
|
||||
sourceNamespace: openshift-marketplace
|
||||
installPlanApproval: Automatic
|
||||
11
snippets/telco-core_ClusterLogging.yaml
Normal file
11
snippets/telco-core_ClusterLogging.yaml
Normal file
@@ -0,0 +1,11 @@
|
||||
# required
|
||||
# count: 1
|
||||
apiVersion: logging.openshift.io/v1
|
||||
kind: ClusterLogging
|
||||
metadata:
|
||||
name: instance
|
||||
namespace: openshift-logging
|
||||
spec:
|
||||
collection:
|
||||
type: vector
|
||||
managementState: Managed
|
||||
12
snippets/telco-core_NROPSubscription.yaml
Normal file
12
snippets/telco-core_NROPSubscription.yaml
Normal file
@@ -0,0 +1,12 @@
|
||||
# required
|
||||
# count: 1
|
||||
apiVersion: operators.coreos.com/v1alpha1
|
||||
kind: Subscription
|
||||
metadata:
|
||||
name: numaresources-operator
|
||||
namespace: openshift-numaresources
|
||||
spec:
|
||||
channel: "4.14"
|
||||
name: numaresources-operator
|
||||
source: redhat-operators-disconnected
|
||||
sourceNamespace: openshift-marketplace
|
||||
8
snippets/telco-core_NROPSubscriptionNS.yaml
Normal file
8
snippets/telco-core_NROPSubscriptionNS.yaml
Normal file
@@ -0,0 +1,8 @@
|
||||
# required: yes
|
||||
# count: 1
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: openshift-numaresources
|
||||
annotations:
|
||||
workload.openshift.io/allowed: management
|
||||
10
snippets/telco-core_NROPSubscriptionOperGroup.yaml
Normal file
10
snippets/telco-core_NROPSubscriptionOperGroup.yaml
Normal file
@@ -0,0 +1,10 @@
|
||||
# required: yes
|
||||
# count: 1
|
||||
apiVersion: operators.coreos.com/v1
|
||||
kind: OperatorGroup
|
||||
metadata:
|
||||
name: numaresources-operator
|
||||
namespace: openshift-numaresources
|
||||
spec:
|
||||
targetNamespaces:
|
||||
- openshift-numaresources
|
||||
22
snippets/telco-core_Network.yaml
Normal file
22
snippets/telco-core_Network.yaml
Normal file
@@ -0,0 +1,22 @@
|
||||
# required
|
||||
# count: 1
|
||||
apiVersion: operator.openshift.io/v1
|
||||
kind: Network
|
||||
metadata:
|
||||
name: cluster
|
||||
spec:
|
||||
defaultNetwork:
|
||||
ovnKubernetesConfig:
|
||||
gatewayConfig:
|
||||
routingViaHost: true
|
||||
# additional networks are optional and may alternatively be specified using NetworkAttachmentDefinition CRs
|
||||
additionalNetworks: [$additionalNetworks]
|
||||
# eg
|
||||
#- name: add-net-1
|
||||
# namespace: app-ns-1
|
||||
# rawCNIConfig: '{ "cniVersion": "0.3.1", "name": "add-net-1", "plugins": [{"type": "macvlan", "master": "bond1", "ipam": {}}] }'
|
||||
# type: Raw
|
||||
#- name: add-net-2
|
||||
# namespace: app-ns-1
|
||||
# rawCNIConfig: '{ "cniVersion": "0.4.0", "name": "add-net-2", "plugins": [ {"type": "macvlan", "master": "bond1", "mode": "private" },{ "type": "tuning", "name": "tuning-arp" }] }'
|
||||
# type: Raw
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user