mirror of https://github.com/openshift/openshift-docs.git synced 2026-02-05 12:46:18 +01:00

TELCODOCS-1920: Updating RDS for 4.16

This commit is contained in:
Ronan Hennessy
2024-07-08 14:06:17 +01:00
parent 77a5a6679b
commit cfa5386489
99 changed files with 1274 additions and 395 deletions

View File

@@ -2998,37 +2998,38 @@ Topics:
File: recommended-infrastructure-practices
- Name: Recommended etcd practices
File: recommended-etcd-practices
# Uncomment on release branch when RDS for that release is published
# - Name: Reference design specifications
# Dir: telco_ref_design_specs
# Distros: openshift-origin,openshift-enterprise
# Topics:
# - Name: Telco reference design specifications
# File: telco-ref-design-specs-overview
# - Name: Telco RAN DU reference design specification
# Dir: ran
# Topics:
# - Name: Telco RAN DU reference design overview
# File: telco-ran-ref-design-spec
# - Name: Telco RAN DU use model overview
# File: telco-ran-du-overview
# - Name: RAN DU reference design components
# File: telco-ran-ref-du-components
# - Name: RAN DU reference design configuration CRs
# File: telco-ran-ref-du-crs
# - Name: Telco RAN DU software specifications
# File: telco-ran-ref-software-artifacts
# - Name: Telco core reference design specification
# Dir: core
# Topics:
# - Name: Telco core reference design overview
# File: telco-core-rds-overview
# - Name: Telco core use model overview
# File: telco-core-rds-use-cases
# - Name: Core reference design components
# File: telco-core-ref-design-components
# - Name: Core reference design configuration CRs
# File: telco-core-ref-crs
- Name: Reference design specifications
Dir: telco_ref_design_specs
Distros: openshift-origin,openshift-enterprise
Topics:
- Name: Telco reference design specifications
File: telco-ref-design-specs-overview
- Name: Telco RAN DU reference design specification
Dir: ran
Topics:
- Name: Telco RAN DU reference design overview
File: telco-ran-ref-design-spec
- Name: Telco RAN DU use model overview
File: telco-ran-du-overview
- Name: RAN DU reference design components
File: telco-ran-ref-du-components
- Name: RAN DU reference design configuration CRs
File: telco-ran-ref-du-crs
- Name: Telco RAN DU software specifications
File: telco-ran-ref-software-artifacts
- Name: Telco core reference design specification
Dir: core
Topics:
- Name: Telco core reference design overview
File: telco-core-rds-overview
- Name: Telco core use model overview
File: telco-core-rds-use-cases
- Name: Core reference design components
File: telco-core-ref-design-components
- Name: Core reference design configuration CRs
File: telco-core-ref-crs
- Name: Telco core software specifications
File: telco-core-ref-software-artifacts
- Name: Planning your environment according to object maximums
File: planning-your-environment-according-to-object-maximums
Distros: openshift-origin,openshift-enterprise

View File

@@ -7,7 +7,7 @@
= CPU partitioning and performance tuning
New in this release::
* No reference design updates in this release
* In this release, {product-title} deployments use Control Groups version 2 (cgroup v2) by default. As a consequence, performance profiles in a cluster use cgroups v2 for the underlying resource management layer.
Description::
CPU partitioning allows for the separation of sensitive workloads from general-purpose tasks, auxiliary processes, interrupts, and driver work queues to achieve improved performance and latency. The CPUs allocated to those auxiliary processes are referred to as `reserved` in the following sections. In hyperthreaded systems, a CPU is one hyperthread.
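As an illustration of this partitioning, a minimal `PerformanceProfile` sketch follows; the CPU ranges and profile name are illustrative, not reference values:
[source,yaml]
----
apiVersion: performance.openshift.io/v2
kind: PerformanceProfile
metadata:
  name: example-cpu-partitioning # illustrative name
spec:
  cpu:
    reserved: "0-3"   # housekeeping, interrupts, and driver work queues
    isolated: "4-47"  # latency-sensitive application workloads
  nodeSelector:
    node-role.kubernetes.io/worker: ""
----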
@@ -42,3 +42,4 @@ Engineering considerations::
* Hardware without Interrupt Request (IRQ) affinity support will impact isolated CPUs. To ensure that pods with guaranteed CPU QoS have full use of allocated CPU, all hardware in the server must support IRQ affinity.
* OVS dynamically manages its `cpuset` configuration to adapt to network traffic needs.
You do not need to reserve additional CPUs for handling high network throughput on the primary CNI.
* If workloads running on the cluster require cgroups v1, you can configure nodes to use cgroups v1. You can make this configuration as part of initial cluster deployment. For more information, see _Enabling Linux cgroup v1 during installation_ in the _Additional resources_ section.
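A minimal sketch of the cluster `Node` configuration CR that switches nodes to cgroups v1; when set as part of initial deployment, it is applied as an install-time manifest:
[source,yaml]
----
apiVersion: config.openshift.io/v1
kind: Node
metadata:
  name: cluster
spec:
  cgroupMode: "v1" # switch the underlying resource management layer to cgroups v1
----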

View File

@@ -1,6 +1,6 @@
// Module included in the following assemblies:
//
// * telco_ref_design_specs/core/telco-core-ref-crs.adoc
// *
:_mod-docs-content-type: REFERENCE
[id="networking-crs_{context}"]
@@ -16,14 +16,19 @@ Load balancer,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#tel
Load balancer,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-bfd-profile-yaml[bfd-profile.yaml],No,No
Load balancer,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-bgp-advr-yaml[bgp-advr.yaml],No,No
Load balancer,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-bgp-peer-yaml[bgp-peer.yaml],No,No
Load balancer,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-community-yaml[community.yaml],No,Yes
Load balancer,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-metallb-yaml[metallb.yaml],No,No
Load balancer,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-metallbns-yaml[metallbNS.yaml],Yes,No
Load balancer,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-metallbopergroup-yaml[metallbOperGroup.yaml],Yes,No
Load balancer,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-metallbsubscription-yaml[metallbSubscription.yaml],No,No
Multus - Tap CNI for rootless DPDK pod,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-mc_rootless_pods_selinux-yaml[mc_rootless_pods_selinux.yaml],No,No
NMState Operator,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-nmstate-yaml[NMState.yaml],No,Yes
NMState Operator,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-nmstatens-yaml[NMStateNS.yaml],No,Yes
NMState Operator,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-nmstateopergroup-yaml[NMStateOperGroup.yaml],No,Yes
NMState Operator,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-nmstatesubscription-yaml[NMStateSubscription.yaml],No,Yes
SR-IOV Network Operator,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-sriovnetwork-yaml[sriovNetwork.yaml],Yes,No
SR-IOV Network Operator,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-sriovnetworknodepolicy-yaml[sriovNetworkNodePolicy.yaml],No,Yes
SR-IOV Network Operator,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-sriovoperatorconfig-yaml[SriovOperatorConfig.yaml],No,Yes
SR-IOV Network Operator,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-sriovnetworknodepolicy-yaml[sriovNetworkNodePolicy.yaml],No,No
SR-IOV Network Operator,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-sriovoperatorconfig-yaml[SriovOperatorConfig.yaml],No,No
SR-IOV Network Operator,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-sriovsubscription-yaml[SriovSubscription.yaml],No,No
SR-IOV Network Operator,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-sriovsubscriptionns-yaml[SriovSubscriptionNS.yaml],No,No
SR-IOV Network Operator,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-sriovsubscriptionopergroup-yaml[SriovSubscriptionOperGroup.yaml],No,No

View File

@@ -1,6 +1,6 @@
// Module included in the following assemblies:
//
// * telco_ref_design_specs/core/telco-core-ref-crs.adoc
// *
:_mod-docs-content-type: REFERENCE
[id="other-crs_{context}"]
@@ -17,7 +17,7 @@ Cluster logging,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#t
Cluster logging,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-clusterlogging-yaml[ClusterLogging.yaml],No,No
Cluster logging,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-clusterlogns-yaml[ClusterLogNS.yaml],No,No
Cluster logging,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-clusterlogopergroup-yaml[ClusterLogOperGroup.yaml],No,No
Cluster logging,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-clusterlogsubscription-yaml[ClusterLogSubscription.yaml],No,Yes
Cluster logging,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-clusterlogsubscription-yaml[ClusterLogSubscription.yaml],No,No
Disconnected configuration,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-catalog-source-yaml[catalog-source.yaml],No,No
Disconnected configuration,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-icsp-yaml[icsp.yaml],No,No
Disconnected configuration,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-operator-hub-yaml[operator-hub.yaml],No,No

View File

@@ -1,6 +1,6 @@
// Module included in the following assemblies:
//
// * telco_ref_design_specs/core/telco-core-ref-crs.adoc
// *
:_mod-docs-content-type: REFERENCE
[id="resource-tuning-crs_{context}"]

View File

@@ -1,6 +1,6 @@
// Module included in the following assemblies:
//
// * telco_ref_design_specs/core/telco-core-ref-crs.adoc
// *
:_mod-docs-content-type: REFERENCE
[id="scheduling-crs_{context}"]
@@ -11,5 +11,9 @@
|====
Component,Reference CR,Optional,New in this release
NUMA-aware scheduler,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-nrop-yaml[nrop.yaml],No,No
NUMA-aware scheduler,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-nropsubscription-yaml[NROPSubscription.yaml],No,No
NUMA-aware scheduler,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-nropsubscriptionns-yaml[NROPSubscriptionNS.yaml],No,No
NUMA-aware scheduler,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-nropsubscriptionopergroup-yaml[NROPSubscriptionOperGroup.yaml],No,No
NUMA-aware scheduler,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-sched-yaml[sched.yaml],No,No
NUMA-aware scheduler,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-scheduler-yaml[Scheduler.yaml],No,Yes
|====

View File

@@ -1,6 +1,6 @@
// Module included in the following assemblies:
//
// * telco_ref_design_specs/core/telco-core-ref-crs.adoc
// *
:_mod-docs-content-type: REFERENCE
[id="storage-crs_{context}"]
@@ -10,8 +10,9 @@
[cols="4*", options="header", format=csv]
|====
Component,Reference CR,Optional,New in this release
External {rh-storage-first} configuration,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-01-rook-ceph-external-cluster-details.secret-yaml[01-rook-ceph-external-cluster-details.secret.yaml],No,Yes
External {rh-storage} configuration,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-02-ocs-external-storagecluster-yaml[02-ocs-external-storagecluster.yaml],No,No
External {rh-storage} configuration,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-odfns-yaml[odfNS.yaml],No,No
External {rh-storage} configuration,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-odfopergroup-yaml[odfOperGroup.yaml],No,No
External ODF configuration,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-01-rook-ceph-external-cluster-details.secret-yaml[01-rook-ceph-external-cluster-details.secret.yaml],No,No
External ODF configuration,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-02-ocs-external-storagecluster-yaml[02-ocs-external-storagecluster.yaml],No,No
External ODF configuration,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-odfns-yaml[odfNS.yaml],No,No
External ODF configuration,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-odfopergroup-yaml[odfOperGroup.yaml],No,No
External ODF configuration,xref:../../telco_ref_design_specs/core/telco-core-ref-crs.adoc#telco-core-odfsubscription-yaml[odfSubscription.yaml],No,No
|====

View File

@@ -8,6 +8,8 @@
New in this release::
* No reference design updates in this release
Description::

View File

@@ -14,6 +14,7 @@ The cluster conforms to these requirements:
* High-availability (3+ supervisor nodes) control plane
* Non-schedulable supervisor nodes
* Multiple `MachineConfigPool` resources
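For example, a sketch of an additional `MachineConfigPool` for a dedicated worker role; the `worker-2` role name is illustrative:
[source,yaml]
----
apiVersion: machineconfiguration.openshift.io/v1
kind: MachineConfigPool
metadata:
  name: worker-2 # illustrative additional pool
spec:
  machineConfigSelector:
    matchExpressions:
      - key: machineconfiguration.openshift.io/role
        operator: In
        values: [worker, worker-2]
  nodeSelector:
    matchLabels:
      node-role.kubernetes.io/worker-2: ""
----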
Storage::
@@ -38,7 +39,7 @@ Core clusters have multiple layers of networking supported by underlying RHCOS,
** Host interface configuration
** A/A Bonding (Link Aggregation Control Protocol (LACP))
** Active/Active Bonding (Link Aggregation Control Protocol (LACP))
* Secondary or additional networks: OpenShift CNI is configured through the Network `additionalNetworks` or `NetworkAttachmentDefinition` CRs.

View File

@@ -0,0 +1,28 @@
// Module included in the following assemblies:
//
// * telco_ref_design_specs/ran/telco-core-ref-components.adoc
:_mod-docs-content-type: REFERENCE
[id="telco-core-software-stack_{context}"]
= Software stack
The following software versions were used for validating the telco core reference design specification:
.Software versions for validation
[cols="1,1"]
|===
|Component |Software version
|Cluster Logging Operator |5.9.1
|{rh-storage} |4.16
|SR-IOV Operator |4.16
|MetalLB | 4.16
|NMState Operator |4.16
|NUMA-aware scheduler|4.16
|===

View File

@@ -8,16 +8,9 @@
New in this release::
//CNF-5528
* `MultiNetworkPolicy` resources can now be applied to SR-IOV networks to enforce network reachability policies.
* With this release, you can use the SR-IOV Network Operator to configure QinQ (802.1ad and 802.1q) tagging. QinQ tagging provides efficient traffic management by enabling the use of both inner and outer VLAN tags. Outer VLAN tagging is hardware accelerated, leading to faster network performance. The update extends beyond the SR-IOV Network Operator itself. You can now configure QinQ on externally managed VFs by setting the outer VLAN tag using `nmstate`. QinQ support varies across different NICs. For a comprehensive list of known limitations for specific NIC models, see the official documentation.
//CNF-9865
* QinQ is now supported in the SR-IOV Network Operator.
This is a Tech Preview feature.
//CNF-8804
* SR-IOV VFs can now receive all multicast traffic via the allmulti flag when tuning the CNI.
This eliminates the need to add the `NET_ADMIN` capability to the pod's security context constraints (SCCs) and enhances security by minimizing potential vulnerabilities for pods. A configuration sketch follows this list.
* With this release, you can configure the SR-IOV Network Operator to drain nodes in parallel during network policy updates, dramatically accelerating the setup process. This translates to significant time savings, especially for large cluster deployments that previously took hours or even days to complete.
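A minimal sketch of an `SriovNetwork` that chains the tuning CNI to set the allmulti flag; the resource and namespace names are illustrative:
[source,yaml]
----
apiVersion: sriovnetwork.openshift.io/v1
kind: SriovNetwork
metadata:
  name: example-allmulti-net # illustrative name
  namespace: openshift-sriov-network-operator
spec:
  resourceName: exampleResource # illustrative resource name
  networkNamespace: example-ns  # illustrative target namespace
  trust: "on"                   # the VF must be trusted for allmulti
  metaPluginsConfig: |
    {
      "type": "tuning",
      "allmulti": true
    }
----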
Description::
@@ -33,3 +26,5 @@ This is because the implementation uses the `iptables` tool, which cannot manage
Engineering considerations::
* SR-IOV interfaces in `vfio` mode are typically used to enable additional secondary networks for applications that require high throughput or low latency.
* If you exclude the `SriovOperatorConfig` CR from your deployment, the CR will not be created automatically.

View File

@@ -1,21 +0,0 @@
// Module included in the following assemblies:
//
// * telco_ref_design_specs/core/telco-core-rds-overview.adoc
:_mod-docs-content-type: CONCEPT
[id="telco-core-whats-new-ref-design_{context}""]
= {product-title} {product-version} features for {rds}
The following features that are included in {product-title} {product-version} and are leveraged by the {rds} reference design specification (RDS) have been added or updated.
.New features for {rds} in {product-title} {product-version}
[cols="1,3", options="header"]
|====
|Feature
|Description
//CNF-5528
|Multi-network policy support for IPv6 Networks
|You can now create multi-network policies for IPv6 networks.
For more information, see link:https://docs.openshift.com/container-platform/4.16/networking/multiple_networks/configuring-multi-network-policy.html#nw-multi-network-policy-ipv6-support_configuring-multi-network-policy[Supporting multi-network policies in IPv6 networks].
|====

View File

@@ -1,6 +1,6 @@
// Module included in the following assemblies:
//
// * telco_ref_design_specs/core/telco-core-ref-crs.adoc
// *
:_mod-docs-content-type: REFERENCE
[id="networking-yaml_{context}"]
@@ -48,6 +48,13 @@ include::snippets/telco-core_bgp-advr.yaml[]
include::snippets/telco-core_bgp-peer.yaml[]
----
[id="telco-core-community-yaml"]
.community.yaml
[source,yaml]
----
include::snippets/telco-core_community.yaml[]
----
[id="telco-core-metallb-yaml"]
.metallb.yaml
[source,yaml]
@@ -83,6 +90,34 @@ include::snippets/telco-core_metallbSubscription.yaml[]
include::snippets/telco-core_mc_rootless_pods_selinux.yaml[]
----
[id="telco-core-nmstate-yaml"]
.NMState.yaml
[source,yaml]
----
include::snippets/telco-core_NMState.yaml[]
----
[id="telco-core-nmstatens-yaml"]
.NMStateNS.yaml
[source,yaml]
----
include::snippets/telco-core_NMStateNS.yaml[]
----
[id="telco-core-nmstateopergroup-yaml"]
.NMStateOperGroup.yaml
[source,yaml]
----
include::snippets/telco-core_NMStateOperGroup.yaml[]
----
[id="telco-core-nmstatesubscription-yaml"]
.NMStateSubscription.yaml
[source,yaml]
----
include::snippets/telco-core_NMStateSubscription.yaml[]
----
[id="telco-core-sriovnetwork-yaml"]
.sriovNetwork.yaml
[source,yaml]
@@ -124,3 +159,4 @@ include::snippets/telco-core_SriovSubscriptionNS.yaml[]
----
include::snippets/telco-core_SriovSubscriptionOperGroup.yaml[]
----

View File

@@ -1,6 +1,6 @@
// Module included in the following assemblies:
//
// * telco_ref_design_specs/core/telco-core-ref-crs.adoc
// *
:_mod-docs-content-type: REFERENCE
[id="other-yaml_{context}"]
@@ -96,3 +96,4 @@ include::snippets/telco-core_monitoring-config-cm.yaml[]
----
include::snippets/telco-core_PerformanceProfile.yaml[]
----

View File

@@ -1,6 +1,6 @@
// Module included in the following assemblies:
//
// * telco_ref_design_specs/core/telco-core-ref-crs.adoc
// *
:_mod-docs-content-type: REFERENCE
[id="resource-tuning-yaml_{context}"]
@@ -19,3 +19,4 @@ include::snippets/telco-core_control-plane-system-reserved.yaml[]
----
include::snippets/telco-core_pid-limits-cr.yaml[]
----

View File

@@ -1,6 +1,6 @@
// Module included in the following assemblies:
//
// * telco_ref_design_specs/core/telco-core-ref-crs.adoc
// *
:_mod-docs-content-type: REFERENCE
[id="scheduling-yaml_{context}"]
@@ -13,9 +13,38 @@
include::snippets/telco-core_nrop.yaml[]
----
[id="telco-core-nropsubscription-yaml"]
.NROPSubscription.yaml
[source,yaml]
----
include::snippets/telco-core_NROPSubscription.yaml[]
----
[id="telco-core-nropsubscriptionns-yaml"]
.NROPSubscriptionNS.yaml
[source,yaml]
----
include::snippets/telco-core_NROPSubscriptionNS.yaml[]
----
[id="telco-core-nropsubscriptionopergroup-yaml"]
.NROPSubscriptionOperGroup.yaml
[source,yaml]
----
include::snippets/telco-core_NROPSubscriptionOperGroup.yaml[]
----
[id="telco-core-sched-yaml"]
.sched.yaml
[source,yaml]
----
include::snippets/telco-core_sched.yaml[]
----
[id="telco-core-scheduler-yaml"]
.Scheduler.yaml
[source,yaml]
----
include::snippets/telco-core_Scheduler.yaml[]
----

View File

@@ -1,6 +1,6 @@
// Module included in the following assemblies:
//
// * telco_ref_design_specs/core/telco-core-ref-crs.adoc
// *
:_mod-docs-content-type: REFERENCE
[id="storage-yaml_{context}"]
@@ -33,3 +33,11 @@ include::snippets/telco-core_odfNS.yaml[]
----
include::snippets/telco-core_odfOperGroup.yaml[]
----
[id="telco-core-odfsubscription-yaml"]
.odfSubscription.yaml
[source,yaml]
----
include::snippets/telco-core_odfSubscription.yaml[]
----

View File

@@ -10,18 +10,7 @@ New in this release::
* No reference design updates in this release
Description::
The cluster capabilities feature includes a `MachineAPI` component which, when excluded, disables the following Operators and their resources in the cluster:
* `openshift/cluster-autoscaler-operator`
* `openshift/cluster-control-plane-machine-set-operator`
* `openshift/machine-api-operator`
[NOTE]
====
Use cluster capabilities to remove the Image Registry Operator.
====
See the _Cluster capabilities_ section for a full list of optional components that you enable or disable before installation.
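A hedged `install-config.yaml` excerpt showing how excluding `MachineAPI` might look; the capability list shown is illustrative, not the reference set:
[source,yaml]
----
# install-config.yaml excerpt (illustrative capability list)
capabilities:
  baselineCapabilitySet: None
  additionalEnabledCapabilities:
    - Ingress
    - NodeTuning
    - OperatorLifecycleManager
----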
Limits and requirements::
* Cluster capabilities are not available for installer-provisioned installation methods.
@@ -62,3 +51,7 @@ The {rh-rhacm} hub cluster aggregates managed cluster metrics.
Each catalog source increases the CPU use on the cluster.
Using a single `CatalogSource` fits within the platform CPU budget.
|====
Engineering considerations::
* In this release, {product-title} deployments use Control Groups version 2 (cgroup v2) by default. As a consequence, performance profiles in a cluster use cgroups v2 for the underlying resource management layer. If workloads running on the cluster require cgroups v1, you can configure nodes to use cgroups v1. You can make this configuration as part of the initial cluster deployment.

View File

@@ -1,6 +1,6 @@
// Module included in the following assemblies:
//
// * telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc
// *
:_mod-docs-content-type: REFERENCE
[id="cluster-tuning-crs_{context}"]
@@ -12,10 +12,10 @@
Component,Reference CR,Optional,New in this release
Cluster capabilities,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-example-sno-yaml[example-sno.yaml],No,No
Disabling network diagnostics,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-disablesnonetworkdiag-yaml[DisableSnoNetworkDiag.yaml],No,No
Disconnected Registry,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-09-openshift-marketplace-ns-yaml[09-openshift-marketplace-ns.yaml],No,Yes
Monitoring configuration,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-reducemonitoringfootprint-yaml[ReduceMonitoringFootprint.yaml],No,No
OperatorHub,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-defaultcatsrc-yaml[DefaultCatsrc.yaml],No,No
OperatorHub,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-disableolmpprof-yaml[DisableOLMPprof.yaml],No,No
OperatorHub,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-disconnectedicsp-yaml[DisconnectedICSP.yaml],No,No
OperatorHub,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-operatorhub-yaml[OperatorHub.yaml],Yes,No
OperatorHub,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-09-openshift-marketplace-ns-yaml[09-openshift-marketplace-ns.yaml],No,No
|====

View File

@@ -1,6 +1,6 @@
// Module included in the following assemblies:
//
// * telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc
// *
:_mod-docs-content-type: REFERENCE
[id="day-2-operators-crs_{context}"]
@@ -20,12 +20,28 @@ Local Storage Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-cr
Local Storage Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-storagens-yaml[StorageNS.yaml],Yes,No
Local Storage Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-storageopergroup-yaml[StorageOperGroup.yaml],Yes,No
Local Storage Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-storagesubscription-yaml[StorageSubscription.yaml],Yes,No
Lifecycle Agent ,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-imagebasedupgrade-yaml[ImageBasedUpgrade.yaml],Yes,Yes
Lifecycle Agent ,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-lcasubscription-yaml[LcaSubscription.yaml],Yes,Yes
Lifecycle Agent ,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-lcasubscriptionns-yaml[LcaSubscriptionNS.yaml],Yes,Yes
Lifecycle Agent ,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-lcasubscriptionopergroup-yaml[LcaSubscriptionOperGroup.yaml],Yes,Yes
{lvms},xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-lvmoperatorstatus-yaml[LVMOperatorStatus.yaml],No,Yes
{lvms},xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-storagelvmcluster-yaml[StorageLVMCluster.yaml],No,Yes
{lvms},xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-storagelvmsubscription-yaml[StorageLVMSubscription.yaml],No,Yes
{lvms},xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-storagelvmsubscriptionns-yaml[StorageLVMSubscriptionNS.yaml],No,Yes
{lvms},xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-storagelvmsubscriptionopergroup-yaml[StorageLVMSubscriptionOperGroup.yaml],No,Yes
Node Tuning Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-performanceprofile-yaml[PerformanceProfile.yaml],No,No
Node Tuning Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-tunedperformancepatch-yaml[TunedPerformancePatch.yaml],No,No
PTP fast event notifications,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-ptpconfigboundaryforevent-yaml[PtpConfigBoundaryForEvent.yaml],Yes,Yes
PTP fast event notifications,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-ptpconfigdualcardgmwpc-yaml[PtpConfigDualCardGmWpc.yaml],Yes,Yes
PTP fast event notifications,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-ptpconfigforhaforevent-yaml[PtpConfigForHAForEvent.yaml],Yes,Yes
PTP fast event notifications,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-ptpconfiggmwpc-yaml[PtpConfigGmWpc.yaml],Yes,Yes
PTP fast event notifications,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-ptpconfigmasterforevent-yaml[PtpConfigMasterForEvent.yaml],Yes,Yes
PTP fast event notifications,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-ptpconfigslaveforevent-yaml[PtpConfigSlaveForEvent.yaml],Yes,Yes
PTP fast event notifications,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-ptpoperatorconfigforevent-yaml[PtpOperatorConfigForEvent.yaml],Yes,No
PTP Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-ptpconfigboundary-yaml[PtpConfigBoundary.yaml],No,No
PTP Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-ptpconfigdualcardgmwpc-yaml[PtpConfigDualCardGmWpc.yaml],No,No
PTP Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-ptpconfiggmwpc-yaml[PtpConfigGmWpc.yaml],No,No
PTP Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-ptpconfigdualcardgmwpc-op-yaml[PtpConfigDualCardGmWpc.yaml],No,No
PTP Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-ptpconfigforha-yaml[PtpConfigForHA.yaml],No,Yes
PTP Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-ptpconfiggmwpc-op-yaml[PtpConfigGmWpc.yaml],No,No
PTP Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-ptpconfigslave-yaml[PtpConfigSlave.yaml],No,No
PTP Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-ptpsubscription-yaml[PtpSubscription.yaml],No,No
PTP Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-ptpsubscriptionns-yaml[PtpSubscriptionNS.yaml],No,No
@@ -37,6 +53,7 @@ SR-IOV FEC Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.a
SR-IOV Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-sriovnetwork-yaml[SriovNetwork.yaml],No,No
SR-IOV Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-sriovnetworknodepolicy-yaml[SriovNetworkNodePolicy.yaml],No,No
SR-IOV Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-sriovoperatorconfig-yaml[SriovOperatorConfig.yaml],No,No
SR-IOV Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-sriovoperatorconfigforsno-yaml[SriovOperatorConfigForSNO.yaml],No,Yes
SR-IOV Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-sriovsubscription-yaml[SriovSubscription.yaml],No,No
SR-IOV Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-sriovsubscriptionns-yaml[SriovSubscriptionNS.yaml],No,No
SR-IOV Operator,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-sriovsubscriptionopergroup-yaml[SriovSubscriptionOperGroup.yaml],No,No

View File

@@ -1,6 +1,6 @@
// Module included in the following assemblies:
//
// * telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc
// *
:_mod-docs-content-type: REFERENCE
[id="machine-configuration-crs_{context}"]
@@ -14,22 +14,16 @@ Container runtime (crun),xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-
Container runtime (crun),xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-enable-crun-worker-yaml[enable-crun-worker.yaml],No,No
Disabling CRI-O wipe,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-99-crio-disable-wipe-master-yaml[99-crio-disable-wipe-master.yaml],No,No
Disabling CRI-O wipe,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-99-crio-disable-wipe-worker-yaml[99-crio-disable-wipe-worker.yaml],No,No
Enable cgroup v1,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-enable-cgroups-v1-yaml[enable-cgroups-v1.yaml],No,No
Enabling kdump,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-05-kdump-config-master-yaml[05-kdump-config-master.yaml],No,No
Enabling kdump,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-05-kdump-config-worker-yaml[05-kdump-config-worker.yaml],No,No
Enabling kdump,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-06-kdump-master-yaml[06-kdump-master.yaml],No,No
Enabling kdump,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-06-kdump-worker-yaml[06-kdump-worker.yaml],No,No
kubelet configuration and container mount hiding,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-01-container-mount-ns-and-kubelet-conf-master-yaml[01-container-mount-ns-and-kubelet-conf-master.yaml],No,No
kubelet configuration and container mount hiding,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-01-container-mount-ns-and-kubelet-conf-worker-yaml[01-container-mount-ns-and-kubelet-conf-worker.yaml],No,No
Kubelet configuration and container mount hiding,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-01-container-mount-ns-and-kubelet-conf-master-yaml[01-container-mount-ns-and-kubelet-conf-master.yaml],No,No
Kubelet configuration and container mount hiding,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-01-container-mount-ns-and-kubelet-conf-worker-yaml[01-container-mount-ns-and-kubelet-conf-worker.yaml],No,No
One-shot time sync,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-99-sync-time-once-master-yaml[99-sync-time-once-master.yaml],No,No
One-shot time sync,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-99-sync-time-once-worker-yaml[99-sync-time-once-worker.yaml],No,No
SCTP,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-03-sctp-machine-config-master-yaml[03-sctp-machine-config-master.yaml],No,No
SCTP,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-03-sctp-machine-config-worker-yaml[03-sctp-machine-config-worker.yaml],No,No
Set RCU Normal,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-08-set-rcu-normal-master-yaml[08-set-rcu-normal-master.yaml],No,No
Set RCU Normal,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-08-set-rcu-normal-worker-yaml[08-set-rcu-normal-worker.yaml],No,No
SR-IOV related kernel arguments,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-07-sriov-related-kernel-args-master-yaml[07-sriov-related-kernel-args-master.yaml],No,No
SR-IOV related kernel arguments,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-07-sriov-related-kernel-args-master-yaml[07-sriov-related-kernel-args-master.yaml],No,Yes
SR-IOV related kernel arguments,xref:../../telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc#ztp-07-sriov-related-kernel-args-worker-yaml[07-sriov-related-kernel-args-worker.yaml],No,No
|====
:FeatureName: cgroup v1
include::snippets/deprecated-feature.adoc[]

View File

@@ -22,6 +22,8 @@ Use other probe implementations, for example, `httpGet` or `tcpSocket`.
** When you need to use exec probes, limit the exec probe frequency and quantity.
The maximum number of exec probes must be kept below 10, and the interval between probes must not be set to less than 10 seconds.
* Avoid using exec probes unless there is absolutely no viable alternative.
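Where a probe is unavoidable, an `httpGet` implementation such as the following sketch avoids exec overhead; the path, port, and timing values are illustrative:
[source,yaml]
----
# pod spec excerpt (illustrative values)
livenessProbe:
  httpGet:
    path: /healthz  # illustrative endpoint
    port: 8080      # illustrative port
  periodSeconds: 10 # keep the probe interval at 10 seconds or more
  failureThreshold: 3
----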
[NOTE]
====
Startup probes require minimal resources during steady-state operation.

View File

@@ -0,0 +1,18 @@
// Module included in the following assemblies:
//
// * telco_ref_design_specs/ran/telco-ran-ref-du-components.adoc
:_mod-docs-content-type: REFERENCE
[id="telco-ran-lca-operator_{context}"]
= {lcao}
New in this release::
* Use the {lcao} to enable image-based upgrades for {sno} clusters.
Description::
The {lcao} provides local lifecycle management services for {sno} clusters.
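A minimal `ImageBasedUpgrade` sketch, assuming the Lifecycle Agent API; the seed image reference is a hypothetical example:
[source,yaml]
----
apiVersion: lca.openshift.io/v1
kind: ImageBasedUpgrade
metadata:
  name: upgrade
spec:
  stage: Idle # move through Prep, Upgrade, and Rollback stages as needed
  seedImageRef:
    version: "4.16.0"
    image: quay.io/example/seed-image:4.16.0 # hypothetical seed image
----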
Limits and requirements::
* The {lcao} is not applicable in multi-node clusters or {sno} clusters with an additional worker.
* Requires a persistent volume.

View File

@@ -9,14 +9,11 @@
New in this release::
* No reference design updates in this release
New in this release::
* Simplified LVMS `deviceSelector` logic
* LVM Storage with `ext4` and `PV` resources
[NOTE]
====
The LVMS Operator is an optional component.
When you use the LVMS Operator as the storage solution, it replaces the Local Storage Operator, and the required CPU resources are assigned to the management partition as platform overhead. The reference configuration must include one of these storage solutions, but not both.
====
Description::
@@ -47,12 +44,7 @@ spec:
----
Limits and requirements::
* Ceph is excluded from cluster topologies with fewer than 3 nodes.
For example, Ceph is excluded from a {sno} cluster or a {sno} cluster with a single worker node.
* In {sno} clusters, persistent storage must be provided by either LVMS or local storage, not both.
Engineering considerations::
* The LVMS Operator is not the reference storage solution for the DU use case.
If you require the LVMS Operator for application workloads, the resource use is accounted for against the application cores.
* Ensure that sufficient disks or partitions are available for storage requirements.

View File

@@ -7,10 +7,16 @@
= Node Tuning Operator
New in this release::
* No reference design updates in this release
* With this release, the Node Tuning Operator supports setting CPU frequencies in the `PerformanceProfile` for reserved and isolated core CPUs.
This is an optional feature that enables the `intel_pstate` `CPUFreq` driver on Intel hardware so that you can define specific frequencies. You must follow Intel's recommendations on frequencies for FlexRAN-like applications, which require the default CPU frequency to be set to a value lower than the default running frequency. A configuration sketch follows this list.
* Previously, for the RAN DU-profile, setting the `realTime` workload hint to `true` in the `PerformanceProfile` always disabled the `intel_pstate`.
With this release, the Node Tuning Operator detects the underlying Intel hardware using `TuneD` and appropriately sets the `intel_pstate` kernel parameter based on the processor's generation.
* In this release, {product-title} deployments with a performance profile now default to using cgroups v2 as the underlying resource management layer. If you run workloads that are not ready for this change, you can still revert to using the older cgroups v1 mechanism.
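A sketch of the optional frequency settings in the `PerformanceProfile`, assuming the `hardwareTuning` stanza; the kHz values are illustrative, not Intel's recommended values:
[source,yaml]
----
apiVersion: performance.openshift.io/v2
kind: PerformanceProfile
metadata:
  name: example-frequency-tuning # illustrative name
spec:
  # other required fields such as spec.cpu are omitted from this sketch
  hardwareTuning:
    isolatedCpuFreq: 2500000 # kHz, illustrative value
    reservedCpuFreq: 2800000 # kHz, illustrative value
----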
Description::
You tune the cluster performance by link:https://docs.openshift.com/container-platform/4.16/scalability_and_performance/cnf-create-performance-profiles.html[creating a performance profile].
You tune the cluster performance by creating a performance profile.
Settings that you configure with a performance profile include:
+
* Selecting the realtime or non-realtime kernel.
@@ -26,6 +32,8 @@ Settings that you configure with a performance profile include:
* Setting per-core power tuning and max CPU frequency.
* Reserved and isolated core frequency tuning.
Limits and requirements::
The Node Tuning Operator uses the `PerformanceProfile` CR to configure the cluster. You need to configure the following settings in the RAN DU profile `PerformanceProfile` CR:
@@ -66,12 +74,5 @@ Variation must still meet the specified limits.
To ensure that pods with guaranteed whole CPU QoS have full use of the allocated CPU, all hardware in the server must support IRQ affinity.
For more information, see link:https://docs.openshift.com/container-platform/4.16/scalability_and_performance/cnf-low-latency-tuning.html#about_irq_affinity_setting_cnf-master[About support of IRQ affinity setting].
[NOTE]
====
In {product-title} {product-version}, any `PerformanceProfile` CR configured on the cluster causes the Node Tuning Operator to automatically set all cluster nodes to use cgroup v1.
For more information about cgroups, see link:https://docs.openshift.com/container-platform/4.16/nodes/clusters/nodes-cluster-cgroups-2.html#nodes-clusters-cgroups-2_nodes-cluster-cgroups-2[Configuring Linux cgroup].
====
:FeatureName: cgroup v1
include::snippets/deprecated-feature.adoc[]

View File

@@ -7,7 +7,9 @@
= PTP Operator
New in this release::
* No reference design updates in this release
* Configuring linuxptp services as grandmaster clock (T-GM) for dual Intel E810 Westport Channel NICs is now a generally available feature.
* You can configure the `linuxptp` services `ptp4l` and `phc2sys` as a highly available (HA) system clock for dual PTP boundary clocks (T-BC).
Description::
See link:https://docs.openshift.com/container-platform/4.16/scalability_and_performance/ztp_far_edge/ztp-reference-cluster-configuration-for-vdu.html#ztp-sno-du-configuring-ptp_sno-configure-for-vdu[PTP timing] for details of support and configuration of PTP in cluster nodes.
@@ -15,14 +17,11 @@ The DU node can run in the following modes:
+
* As an ordinary clock (OC) synced to a grandmaster clock or boundary clock (T-BC)
* As a grandmaster clock synced from GPS with support for single or dual card E810 Westport Channel NICs
* As a grandmaster clock synced from GPS with support for single or dual card E810 Westport Channel NICs.
* As dual boundary clocks (one per NIC) with support for E810 Westport Channel NICs
+
[NOTE]
====
Highly available boundary clocks are not supported.
====
* Allow for High Availability of the system clock when there are multiple time sources on different NICs.
* Optional: as a boundary clock for radio units (RUs)
@@ -34,18 +33,12 @@ You can subscribe applications to PTP events that happen on the node where the D
--
Limits and requirements::
* High availability is not supported with dual-NIC configurations.
* Limited to two boundary clocks for dual NIC and HA
* Digital Phase-Locked Loop (DPLL) clock synchronization is not supported for E810 Westport Channel NICs.
* GPS offsets are not reported.
Use a default offset of less than or equal to 5.
* DPLL offsets are not reported.
Use a default offset of less than or equal to 5.
* Limited to a two WPC card configuration for T-GM
Engineering considerations::
* Configurations are provided for ordinary clock, boundary clock, or grandmaster clock
* Configurations are provided for ordinary clock, boundary clock, grandmaster clock, or PTP-HA
* PTP fast event notifications use `ConfigMap` CRs to store PTP event subscriptions

View File

@@ -7,7 +7,8 @@
= {rh-rhacm-first}
New in this release::
* No reference design updates in this release
//CNF-7422
* You can now use `PolicyGenerator` resources and {rh-rhacm-first} to deploy policies for managed clusters with {ztp}. This is a Technology Preview feature.
Description::
{rh-rhacm} provides Multi Cluster Engine (MCE) installation and ongoing lifecycle management functionality for deployed clusters.
@@ -31,4 +32,3 @@ You can significantly reduce the number of policies by using a single group poli
These configurations should be managed using {rh-rhacm} policy hub-side templating with values pulled from `ConfigMap` CRs based on the cluster name, as shown in the sketch after this list.
* To save CPU resources on managed clusters, policies that apply static configurations should be unbound from managed clusters after {ztp} installation of the cluster.
For more information, see link:https://docs.openshift.com/container-platform/4.16/storage/understanding-persistent-storage.html#releasing_understanding-persistent-storage[Release a persistent volume].
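A sketch of hub-side templating in a policy source, assuming a per-cluster `ConfigMap`; the file, policy, and key names are hypothetical:
[source,yaml]
----
# PolicyGenTemplate excerpt (hypothetical names)
sourceFiles:
  - fileName: SriovNetwork.yaml
    policyName: "config-policy"
    spec:
      # pull the per-cluster VLAN ID from a ConfigMap keyed on the cluster name
      vlan: '{{hub fromConfigMap "" (printf "%s-config" .ManagedClusterName) "vlan" | toInt hub}}'
----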

View File

@@ -1,15 +0,0 @@
// Module included in the following assemblies:
//
// * telco_ref_design_specs/ran/telco-ran-ref-du-components.adoc
:_mod-docs-content-type: REFERENCE
[id="telco-ran-redfish-operator_{context}"]
= {redfish-operator}
The {redfish-operator} is an optional Operator that runs exclusively on the managed spoke cluster. It relays Redfish hardware events to cluster applications.
[NOTE]
====
The {redfish-operator} is not included in the RAN DU use model reference configuration and is an optional feature.
If you want to use the {redfish-operator}, assign additional CPU resources from the application CPU budget.
====

View File

@@ -7,13 +7,31 @@
= SR-IOV Operator
New in this release::
* No reference design updates in this release
* With this release, you can use the SR-IOV Network Operator to configure QinQ (802.1ad and 802.1q) tagging. QinQ tagging provides efficient traffic management by enabling the use of both inner and outer VLAN tags. Outer VLAN tagging is hardware accelerated, leading to faster network performance. The update extends beyond the SR-IOV Network Operator itself: you can now configure QinQ on externally managed VFs by setting the outer VLAN tag by using `nmstate`. QinQ support varies across different NICs. For a comprehensive list of known limitations for specific NIC models, see _Configuring QinQ support for SR-IOV enabled workloads_ in the _Additional resources_ section. A configuration sketch follows this list.
* With this release, you can configure the SR-IOV Network Operator to drain nodes in parallel during network policy updates, dramatically accelerating the setup process. This translates to significant time savings, especially for large cluster deployments that previously took hours or even days to complete.
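A hedged `SriovNetwork` sketch for QinQ, assuming the `vlanProto` field for the outer tag; names and VLAN IDs are illustrative:
[source,yaml]
----
apiVersion: sriovnetwork.openshift.io/v1
kind: SriovNetwork
metadata:
  name: example-qinq-net # illustrative name
  namespace: openshift-sriov-network-operator
spec:
  resourceName: exampleResource # illustrative resource name
  networkNamespace: example-ns  # illustrative target namespace
  vlan: 100            # outer VLAN (S-tag), hardware accelerated
  vlanProto: "802.1ad" # outer tag protocol for QinQ
----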
Description::
The SR-IOV Operator provisions and configures the SR-IOV CNI and device plugins.
Both `netdevice` (kernel VFs) and `vfio` (DPDK) devices are supported.
Limits and requirements::
* Use {product-title} supported devices
* SR-IOV and IOMMU must be enabled in the BIOS. The SR-IOV Network Operator automatically enables IOMMU on the kernel command line.
* SR-IOV VFs do not receive link state updates from the PF. If link down detection is needed, you must configure this at the protocol level.
* You can apply multi-network policies on `netdevice` driver types only. Multi-network policies require the `iptables` tool, which cannot manage `vfio` driver types.
Engineering considerations::
* SR-IOV interfaces with the `vfio` driver type are typically used to enable additional secondary networks for applications that require high throughput or low latency.
* Customer variation on the configuration and number of `SriovNetwork` and `SriovNetworkNodePolicy` custom resources (CRs) is expected.
* IOMMU kernel command line settings are applied with a `MachineConfig` CR at install time. This ensures that the `SriovOperator` CR does not cause a node reboot when the settings are added.
* SR-IOV support for draining nodes in parallel is not applicable in a {sno} cluster.
* If you exclude the `SriovOperatorConfig` CR from your deployment, the CR will not be created automatically.
* In scenarios where you pin or restrict workloads to specific nodes, the SR-IOV parallel node drain feature will not result in the rescheduling of pods. In these scenarios, the SR-IOV Operator disables the parallel node drain functionality.
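For reference, the parallel drain behavior described above is scoped with a `SriovNetworkPoolConfig` CR; a minimal sketch, with illustrative pool name and `maxUnavailable` value, follows:
[source,yaml]
----
apiVersion: sriovnetwork.openshift.io/v1
kind: SriovNetworkPoolConfig
metadata:
  name: example-pool # illustrative name
  namespace: openshift-sriov-network-operator
spec:
  maxUnavailable: 2 # drain up to two nodes in this pool in parallel
  nodeSelector:
    matchLabels:
      node-role.kubernetes.io/worker: ""
----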

View File

@@ -18,7 +18,7 @@ Limits and requirements::
** `SecureBoot` is supported
** The `vfio` driver for the `PF` requires the use of a `vfio-token` that is injected into pods.
The `VF` token can be passed to DPDK by using the EAL parameter `--vfio-vf-token`.
Applications in the pod can pass the `VF` token to DPDK by using the EAL parameter `--vfio-vf-token`.
Engineering considerations::
* The SRIOV-FEC Operator uses CPU cores from the `isolated` CPU set.

View File

@@ -55,10 +55,6 @@ spec:
<2> Configurable filtering allows exclusion of unused images
--
Backup and restore for {sno}::
{cgu-operator} supports taking a snapshot of the cluster operating system and configuration to a dedicated partition on a local disk.
A restore script is provided that returns the cluster to the backed up state.
Limits and requirements::
* {cgu-operator} supports concurrent cluster deployment in batches of 400
@@ -67,5 +63,3 @@ Limits and requirements::
Engineering considerations::
* The `PreCachingConfig` CR is optional and does not need to be created if you only want to precache platform-related (OpenShift and OLM Operator) images.
The `PreCachingConfig` CR must be applied before referencing it in the `ClusterGroupUpgrade` CR.
* Create a recovery partition during installation if you opt to use the {cgu-operator} backup and restore feature.

View File

@@ -1,6 +1,6 @@
// Module included in the following assemblies:
//
// * telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc
// *
:_mod-docs-content-type: REFERENCE
[id="cluster-tuning-yaml_{context}"]
@@ -20,13 +20,6 @@ include::snippets/ztp_example-sno.yaml[]
include::snippets/ztp_DisableSnoNetworkDiag.yaml[]
----
[id="ztp-09-openshift-marketplace-ns-yaml"]
.09-openshift-marketplace-ns.yaml
[source,yaml]
----
include::snippets/ztp_09-openshift-marketplace-ns.yaml[]
----
[id="ztp-reducemonitoringfootprint-yaml"]
.ReduceMonitoringFootprint.yaml
[source,yaml]
@@ -61,3 +54,11 @@ include::snippets/ztp_DisconnectedICSP.yaml[]
----
include::snippets/ztp_OperatorHub.yaml[]
----
[id="ztp-09-openshift-marketplace-ns-yaml"]
.09-openshift-marketplace-ns.yaml
[source,yaml]
----
include::snippets/ztp_09-openshift-marketplace-ns.yaml[]
----

View File

@@ -1,6 +1,6 @@
// Module included in the following assemblies:
//
// * telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc
// *
:_mod-docs-content-type: REFERENCE
[id="day-2-operators-yaml_{context}"]
@@ -76,6 +76,69 @@ include::snippets/ztp_StorageOperGroup.yaml[]
include::snippets/ztp_StorageSubscription.yaml[]
----
[id="ztp-imagebasedupgrade-yaml"]
.ImageBasedUpgrade.yaml
[source,yaml]
----
include::snippets/ztp_ImageBasedUpgrade.yaml[]
----
[id="ztp-lcasubscription-yaml"]
.LcaSubscription.yaml
[source,yaml]
----
include::snippets/ztp_LcaSubscription.yaml[]
----
[id="ztp-lcasubscriptionns-yaml"]
.LcaSubscriptionNS.yaml
[source,yaml]
----
include::snippets/ztp_LcaSubscriptionNS.yaml[]
----
[id="ztp-lcasubscriptionopergroup-yaml"]
.LcaSubscriptionOperGroup.yaml
[source,yaml]
----
include::snippets/ztp_LcaSubscriptionOperGroup.yaml[]
----
[id="ztp-lvmoperatorstatus-yaml"]
.LVMOperatorStatus.yaml
[source,yaml]
----
include::snippets/ztp_LVMOperatorStatus.yaml[]
----
[id="ztp-storagelvmcluster-yaml"]
.StorageLVMCluster.yaml
[source,yaml]
----
include::snippets/ztp_StorageLVMCluster.yaml[]
----
[id="ztp-storagelvmsubscription-yaml"]
.StorageLVMSubscription.yaml
[source,yaml]
----
include::snippets/ztp_StorageLVMSubscription.yaml[]
----
[id="ztp-storagelvmsubscriptionns-yaml"]
.StorageLVMSubscriptionNS.yaml
[source,yaml]
----
include::snippets/ztp_StorageLVMSubscriptionNS.yaml[]
----
[id="ztp-storagelvmsubscriptionopergroup-yaml"]
.StorageLVMSubscriptionOperGroup.yaml
[source,yaml]
----
include::snippets/ztp_StorageLVMSubscriptionOperGroup.yaml[]
----
[id="ztp-performanceprofile-yaml"]
.PerformanceProfile.yaml
[source,yaml]
@@ -90,6 +153,48 @@ include::snippets/ztp_PerformanceProfile.yaml[]
include::snippets/ztp_TunedPerformancePatch.yaml[]
----
[id="ztp-ptpconfigboundaryforevent-yaml"]
.PtpConfigBoundaryForEvent.yaml
[source,yaml]
----
include::snippets/ztp_PtpConfigBoundaryForEvent.yaml[]
----
[id="ztp-ptpconfigdualcardgmwpc-yaml"]
.PtpConfigDualCardGmWpc.yaml
[source,yaml]
----
include::snippets/ztp_PtpConfigDualCardGmWpc.yaml[]
----
[id="ztp-ptpconfigforhaforevent-yaml"]
.PtpConfigForHAForEvent.yaml
[source,yaml]
----
include::snippets/ztp_PtpConfigForHAForEvent.yaml[]
----
[id="ztp-ptpconfiggmwpc-yaml"]
.PtpConfigGmWpc.yaml
[source,yaml]
----
include::snippets/ztp_PtpConfigGmWpc.yaml[]
----
[id="ztp-ptpconfigmasterforevent-yaml"]
.PtpConfigMasterForEvent.yaml
[source,yaml]
----
include::snippets/ztp_PtpConfigMasterForEvent.yaml[]
----
[id="ztp-ptpconfigslaveforevent-yaml"]
.PtpConfigSlaveForEvent.yaml
[source,yaml]
----
include::snippets/ztp_PtpConfigSlaveForEvent.yaml[]
----
[id="ztp-ptpoperatorconfigforevent-yaml"]
.PtpOperatorConfigForEvent.yaml
[source,yaml]
@@ -104,14 +209,21 @@ include::snippets/ztp_PtpOperatorConfigForEvent.yaml[]
include::snippets/ztp_PtpConfigBoundary.yaml[]
----
[id="ztp-ptpconfigdualcardgmwpc-yaml"]
[id="ztp-ptpconfigdualcardgmwpc-op-yaml"]
.PtpConfigDualCardGmWpc.yaml
[source,yaml]
----
include::snippets/ztp_PtpConfigDualCardGmWpc.yaml[]
----
[id="ztp-ptpconfiggmwpc-yaml"]
[id="ztp-ptpconfigforha-yaml"]
.PtpConfigForHA.yaml
[source,yaml]
----
include::snippets/ztp_PtpConfigForHA.yaml[]
----
[id="ztp-ptpconfiggmwpc-op-yaml"]
.PtpConfigGmWpc.yaml
[source,yaml]
----
@@ -195,6 +307,13 @@ include::snippets/ztp_SriovNetworkNodePolicy.yaml[]
include::snippets/ztp_SriovOperatorConfig.yaml[]
----
[id="ztp-sriovoperatorconfigforsno-yaml"]
.SriovOperatorConfigForSNO.yaml
[source,yaml]
----
include::snippets/ztp_SriovOperatorConfigForSNO.yaml[]
----
[id="ztp-sriovsubscription-yaml"]
.SriovSubscription.yaml
[source,yaml]
@@ -215,3 +334,4 @@ include::snippets/ztp_SriovSubscriptionNS.yaml[]
----
include::snippets/ztp_SriovSubscriptionOperGroup.yaml[]
----

View File

@@ -34,27 +34,6 @@ include::snippets/ztp_99-crio-disable-wipe-master.yaml[]
include::snippets/ztp_99-crio-disable-wipe-worker.yaml[]
----
[id="ztp-enable-cgroups-v1-yaml"]
.enable-cgroups-v1.yaml
[source,yaml]
----
include::snippets/ztp_enable-cgroups-v1.yaml[]
----
[id="ztp-05-kdump-config-master-yaml"]
.05-kdump-config-master.yaml
[source,yaml]
----
include::snippets/ztp_05-kdump-config-master.yaml[]
----
[id="ztp-05-kdump-config-worker-yaml"]
.05-kdump-config-worker.yaml
[source,yaml]
----
include::snippets/ztp_05-kdump-config-worker.yaml[]
----
[id="ztp-06-kdump-master-yaml"]
.06-kdump-master.yaml
[source,yaml]

View File

@@ -53,7 +53,7 @@ The Red Hat telco RAN DU {product-version} solution has been validated using the
|4.16
|{rh-rhacm-first}
|2.11
|2.10, 2.11
|{gitops-title}
|1.12

View File

@@ -9,6 +9,4 @@ toc::[]
The {rds} reference design specification (RDS) configures a {product-title} cluster running on commodity hardware to host {rds} workloads.
include::modules/telco-core-whats-new-ref-design.adoc[leveloffset=+1]
:!telco-core:

View File

@@ -19,6 +19,8 @@ include::modules/telco-core-cpu-partitioning-performance-tune.adoc[leveloffset=+
* xref:../../../edge_computing/ztp-reference-cluster-configuration-for-vdu.adoc#ztp-du-configuring-host-firmware-requirements_sno-configure-for-vdu[Configuring host firmware for low latency and high performance]
* xref:../../../installing/install_config/enabling-cgroup-v1.adoc#nodes-clusters-cgroups-2-install_nodes-cluster-cgroups-1[Enabling Linux cgroup v1 during installation]
include::modules/telco-core-service-mesh.adoc[leveloffset=+1]
[role="_additional-resources"]

View File

@@ -0,0 +1,11 @@
:_mod-docs-content-type: ASSEMBLY
[id="telco-core-ref-software-artifacts"]
= Telco core reference configuration software specifications
:context: core-ref-design-validation
include::_attributes/common-attributes.adoc[]
toc::[]
The following information describes the validated software versions for the telco core reference design specification (RDS).
include::modules/telco-core-software-stack.adoc[leveloffset=+1]

View File

@@ -10,7 +10,8 @@ toc::[]
The {rds-first} {product-version} reference design configures an {product-title} {product-version} cluster running on commodity hardware to host {rds} workloads.
It captures the recommended, tested, and supported configurations to get reliable and repeatable performance for a cluster running the {rds} profile.
include::modules/telco-ran-ref-design-features.adoc[leveloffset=+1]
// Removing this because we already highlight what is new in the components section.
//include::modules/telco-ran-ref-design-features.adoc[leveloffset=+1]
include::modules/telco-ran-architecture-overview.adoc[leveloffset=+1]

View File

@@ -17,6 +17,13 @@ include::modules/telco-ran-ptp-operator.adoc[leveloffset=+1]
include::modules/telco-ran-sr-iov-operator.adoc[leveloffset=+1]
[role="_additional-resources"]
.Additional resources
* xref:../../../edge_computing/ztp-preparing-the-hub-cluster.adoc#ztp-preparing-the-ztp-git-repository-ver-ind_ztp-preparing-the-hub-cluster[Preparing the {ztp} site configuration repository for version independence]
* xref:../../../networking/hardware_networks/configuring-sriov-qinq-support.adoc[Configuring QinQ support for SR-IOV enabled workloads]
include::modules/telco-ran-logging.adoc[leveloffset=+1]
include::modules/telco-ran-sriov-fec-operator.adoc[leveloffset=+1]
@@ -31,6 +38,13 @@ include::modules/telco-ran-cluster-tuning.adoc[leveloffset=+1]
include::modules/telco-ran-machine-configuration.adoc[leveloffset=+1]
include::modules/telco-ran-lca-operator.adoc[leveloffset=+1]
[role="_additional-resources"]
.Additional resources
* xref:../../../edge_computing/image_based_upgrade/cnf-understanding-image-based-upgrade.adoc[Understanding the image-based upgrade for {sno} clusters]
[id="telco-reference-ran-du-deployment-components_{context}"]
== Reference design deployment components
@@ -47,13 +61,8 @@ include::modules/telco-ran-gitops-operator-and-ztp-plugins.adoc[leveloffset=+2]
* xref:../../../edge_computing/ztp-preparing-the-hub-cluster.adoc#ztp-preparing-the-ztp-git-repository-ver-ind_ztp-preparing-the-hub-cluster[Preparing the {ztp} site configuration repository for version independence]
* xref:../../../edge_computing/ztp-advanced-policy-config.adoc#ztp-adding-new-content-to-gitops-ztp_ztp-advanced-policy-config[Adding custom content to the {ztp} pipeline]
* xref:../../../edge_computing/policygentemplate_for_ztp/ztp-advanced-policy-config.adoc#ztp-adding-new-content-to-gitops-ztp_ztp-advanced-policy-config[Adding custom content to the {ztp} pipeline]
include::modules/telco-ran-agent-based-installer-abi.adoc[leveloffset=+2]
[id="telco-ran-additional-components_{context}"]
== Additional components
include::modules/telco-ran-redfish-operator.adoc[leveloffset=+2]
:!telco-ran:

View File

@@ -10,3 +10,5 @@ spec:
externalStorage:
enable: true
labelSelector: {}
status:
phase: Ready

View File

@@ -9,3 +9,5 @@ spec:
source: redhat-operators-disconnected
sourceNamespace: openshift-marketplace
installPlanApproval: Automatic
status:
state: AtLatestKnown

View File

@@ -0,0 +1,5 @@
apiVersion: nmstate.io/v1
kind: NMState
metadata:
name: nmstate
spec: {}

View File

@@ -0,0 +1,6 @@
apiVersion: v1
kind: Namespace
metadata:
name: openshift-nmstate
annotations:
workload.openshift.io/allowed: management

View File

@@ -0,0 +1,8 @@
apiVersion: operators.coreos.com/v1
kind: OperatorGroup
metadata:
name: openshift-nmstate
namespace: openshift-nmstate
spec:
targetNamespaces:
- openshift-nmstate

View File

@@ -0,0 +1,13 @@
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
name: kubernetes-nmstate-operator
namespace: openshift-nmstate
spec:
channel: "stable"
name: kubernetes-nmstate-operator
source: redhat-operators-disconnected
sourceNamespace: openshift-marketplace
installPlanApproval: Automatic
status:
state: AtLatestKnown

View File

@@ -0,0 +1,10 @@
apiVersion: config.openshift.io/v1
kind: Scheduler
metadata:
name: cluster
spec:
# non-schedulable control plane is the default. This ensures
# compliance.
mastersSchedulable: false
policy:
name: ""

View File

@@ -11,3 +11,5 @@ spec:
node-role.kubernetes.io/worker: ""
enableInjector: true
enableOperatorWebhook: true
disableDrain: false
logLevel: 2

View File

@@ -11,3 +11,5 @@ spec:
source: redhat-operators-disconnected
sourceNamespace: openshift-marketplace
installPlanApproval: Automatic
status:
state: AtLatestKnown

View File

@@ -14,10 +14,12 @@ spec:
# eg:
# - peer-one
#
communities: [$communities]
# Note correlation with address pool.
# Note correlation with address pool, or Community
# eg:
# - bgpcommunity
# - 65535:65282
aggregationLength: 32
aggregationLengthV6: 128

View File

@@ -1,6 +1,6 @@
# required
# count: 1-N
apiVersion: metallb.io/v1beta1
apiVersion: metallb.io/v1beta2
kind: BGPPeer
metadata:
name: $name
@@ -11,3 +11,4 @@ spec:
myASN: $myasn # eg 64500
routerID: $id # eg 10.10.10.10
bfdProfile: bfdprofile
passwordSecret: {}
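
The new passwordSecret field replaces an inline BGP password with a reference to a Secret in the metallb-system namespace. A minimal sketch with a hypothetical name and placeholder password; MetalLB expects a kubernetes.io/basic-auth secret with the value under the password key:

apiVersion: v1
kind: Secret
metadata:
  name: bgp-peer-password # hypothetical name to set in spec.passwordSecret
  namespace: metallb-system
type: kubernetes.io/basic-auth
stringData:
  password: changeme # placeholder BGP session password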

View File

@@ -13,6 +13,6 @@ spec:
# updateStrategy:
# registryPoll:
# interval: 1h
#status:
# connectionState:
# lastObservedState: READY
status:
connectionState:
lastObservedState: READY

View File

@@ -0,0 +1,8 @@
---
apiVersion: metallb.io/v1beta1
kind: Community
metadata:
name: bgpcommunity
namespace: metallb-system
spec:
communities: [$comm]
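
A Community CR such as bgpcommunity is consumed by name from a BGPAdvertisement, which is what the "Note correlation with address pool, or Community" comment above refers to. A minimal sketch, assuming a hypothetical IPAddressPool named pool1:

apiVersion: metallb.io/v1beta1
kind: BGPAdvertisement
metadata:
  name: bgpadvertisement # hypothetical name
  namespace: metallb-system
spec:
  ipAddressPools:
  - pool1 # hypothetical IPAddressPool name
  communities:
  - bgpcommunity # references the Community CR above by name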

View File

@@ -19,7 +19,7 @@ spec:
overwrite: true
path: /etc/modprobe.d/kernel-blacklist.conf
- contents:
source: data:text/plain;charset=utf-8;base64,aXBfZ3JlCmlwNl90YWJsZXMKaXA2dF9SRUpFQ1QKaXA2dGFibGVfZmlsdGVyCmlwNnRhYmxlX21hbmdsZQppcHRhYmxlX2ZpbHRlcgppcHRhYmxlX21hbmdsZQppcHRhYmxlX25hdAp4dF9tdWx0aXBvcnQKeHRfb3duZXIKeHRfUkVESVJFQ1QKeHRfc3RhdGlzdGljCnh0X1RDUE1TUwp4dF91MzI=
source: data:text/plain;charset=utf-8;base64,aXBfZ3JlCmlwNl90YWJsZXMKaXA2dF9SRUpFQ1QKaXA2dGFibGVfZmlsdGVyCmlwNnRhYmxlX21hbmdsZQppcHRhYmxlX2ZpbHRlcgppcHRhYmxlX21hbmdsZQppcHRhYmxlX25hdAp4dF9tdWx0aXBvcnQKeHRfb3duZXIKeHRfUkVESVJFQ1QKeHRfc3RhdGlzdGljCnh0X1RDUE1TUwo=
mode: 420
overwrite: true
path: /etc/modules-load.d/kernel-load.conf
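
For reference, the two base64 payloads decode to the kernel module list written to /etc/modules-load.d/kernel-load.conf; the only change is that the updated payload drops the trailing xt_u32 entry (the identical MachineConfig hunk further below carries the same change):

ip_gre
ip6_tables
ip6t_REJECT
ip6table_filter
ip6table_mangle
iptable_filter
iptable_mangle
iptable_nat
xt_multiport
xt_owner
xt_REDIRECT
xt_statistic
xt_TCPMSS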

View File

@@ -12,3 +12,5 @@ spec:
source: redhat-operators-disconnected
sourceNamespace: openshift-marketplace
installPlanApproval: Automatic
status:
state: AtLatestKnown

View File

@@ -8,9 +8,6 @@ metadata:
namespace: openshift-monitoring
data:
config.yaml: |
k8sPrometheusAdapter:
dedicatedServiceMonitors:
enabled: true
prometheusK8s:
retention: 15d
volumeClaimTemplate:

View File

@@ -12,3 +12,5 @@ spec:
source: redhat-operators-disconnected
sourceNamespace: openshift-marketplace
installPlanApproval: Automatic
status:
state: AtLatestKnown

View File

@@ -19,7 +19,7 @@ spec:
overwrite: true
path: /etc/modprobe.d/kernel-blacklist.conf
- contents:
source: data:text/plain;charset=utf-8;base64,aXBfZ3JlCmlwNl90YWJsZXMKaXA2dF9SRUpFQ1QKaXA2dGFibGVfZmlsdGVyCmlwNnRhYmxlX21hbmdsZQppcHRhYmxlX2ZpbHRlcgppcHRhYmxlX21hbmdsZQppcHRhYmxlX25hdAp4dF9tdWx0aXBvcnQKeHRfb3duZXIKeHRfUkVESVJFQ1QKeHRfc3RhdGlzdGljCnh0X1RDUE1TUwp4dF91MzI=
source: data:text/plain;charset=utf-8;base64,aXBfZ3JlCmlwNl90YWJsZXMKaXA2dF9SRUpFQ1QKaXA2dGFibGVfZmlsdGVyCmlwNnRhYmxlX21hbmdsZQppcHRhYmxlX2ZpbHRlcgppcHRhYmxlX21hbmdsZQppcHRhYmxlX25hdAp4dF9tdWx0aXBvcnQKeHRfb3duZXIKeHRfUkVESVJFQ1QKeHRfc3RhdGlzdGljCnh0X1RDUE1TUwo=
mode: 420
overwrite: true
path: /etc/modules-load.d/kernel-load.conf

View File

@@ -1,3 +1,5 @@
# Automatically generated by extra-manifests-builder
# Do not make changes directly.
apiVersion: machineconfiguration.openshift.io/v1
kind: MachineConfig
metadata:

View File

@@ -1,3 +1,5 @@
# Automatically generated by extra-manifests-builder
# Do not make changes directly.
apiVersion: machineconfiguration.openshift.io/v1
kind: MachineConfig
metadata:

View File

@@ -1,3 +1,5 @@
# Automatically generated by extra-manifests-builder
# Do not make changes directly.
apiVersion: machineconfiguration.openshift.io/v1
kind: MachineConfig
metadata:

View File

@@ -1,3 +1,5 @@
# Automatically generated by extra-manifests-builder
# Do not make changes directly.
apiVersion: machineconfiguration.openshift.io/v1
kind: MachineConfig
metadata:

View File

@@ -1,3 +1,5 @@
# Automatically generated by extra-manifests-builder
# Do not make changes directly.
apiVersion: machineconfiguration.openshift.io/v1
kind: MachineConfig
metadata:

View File

@@ -1,3 +1,5 @@
# Automatically generated by extra-manifests-builder
# Do not make changes directly.
apiVersion: machineconfiguration.openshift.io/v1
kind: MachineConfig
metadata:

View File

@@ -1,3 +1,5 @@
# Automatically generated by extra-manifests-builder
# Do not make changes directly.
apiVersion: machineconfiguration.openshift.io/v1
kind: MachineConfig
metadata:

View File

@@ -1,3 +1,5 @@
# Automatically generated by extra-manifests-builder
# Do not make changes directly.
apiVersion: machineconfiguration.openshift.io/v1
kind: MachineConfig
metadata:

View File

@@ -1,3 +1,5 @@
# Automatically generated by extra-manifests-builder
# Do not make changes directly.
apiVersion: machineconfiguration.openshift.io/v1
kind: MachineConfig
metadata:

View File

@@ -1,3 +1,5 @@
# Automatically generated by extra-manifests-builder
# Do not make changes directly.
apiVersion: machineconfiguration.openshift.io/v1
kind: MachineConfig
metadata:

View File

@@ -1,3 +1,5 @@
# Taken from https://github.com/operator-framework/operator-marketplace/blob/53c124a3f0edfd151652e1f23c87dd39ed7646bb/manifests/01_namespace.yaml
# Update it as the source evolves.
apiVersion: v1
kind: Namespace
metadata:

View File

@@ -1,3 +1,5 @@
# Automatically generated by extra-manifests-builder
# Do not make changes directly.
apiVersion: machineconfiguration.openshift.io/v1
kind: MachineConfig
metadata:

View File

@@ -1,3 +1,5 @@
# Automatically generated by extra-manifests-builder
# Do not make changes directly.
apiVersion: machineconfiguration.openshift.io/v1
kind: MachineConfig
metadata:

View File

@@ -1,3 +1,5 @@
# Automatically generated by extra-manifests-builder
# Do not make changes directly.
apiVersion: machineconfiguration.openshift.io/v1
kind: MachineConfig
metadata:

View File

@@ -1,3 +1,5 @@
# Automatically generated by extra-manifests-builder
# Do not make changes directly.
apiVersion: machineconfiguration.openshift.io/v1
kind: MachineConfig
metadata:
@@ -14,6 +16,7 @@ spec:
[Unit]
Description=Sync time once
After=network-online.target
Wants=network-online.target
[Service]
Type=oneshot
TimeoutStartSec=300

View File

@@ -5,5 +5,28 @@ metadata:
namespace: openshift-logging
annotations: {}
spec:
outputs: $outputs
pipelines: $pipelines
# outputs: $outputs
# pipelines: $pipelines
#apiVersion: "logging.openshift.io/v1"
#kind: ClusterLogForwarder
#metadata:
# name: instance
# namespace: openshift-logging
#spec:
# outputs:
# - type: "kafka"
# name: kafka-open
# url: tcp://10.46.55.190:9092/test
# pipelines:
# - inputRefs:
# - audit
# - infrastructure
# labels:
# label1: test1
# label2: test2
# label3: test3
# label4: test4
# name: all-to-default
# outputRefs:
# - kafka-open

View File

@@ -7,5 +7,4 @@ metadata:
spec:
managementState: "Managed"
collection:
logs:
type: "vector"
type: "vector"

View File

@@ -4,5 +4,5 @@ metadata:
name: disconnected-internal-icsp
annotations: {}
spec:
repositoryDigestMirrors:
- $mirrors
# repositoryDigestMirrors:
# - $mirrors

View File

@@ -0,0 +1,10 @@
apiVersion: lca.openshift.io/v1alpha1
kind: ImageBasedUpgrade
metadata:
name: upgrade
spec:
stage: Idle
# When setting `stage: Prep`, remember to add the seed image reference object below.
# seedImageRef:
# image: $image
# version: $version
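
When the upgrade is initiated, the same CR is edited in place rather than recreated. A minimal sketch of the Prep stage with the commented fields filled in; the image and version values are placeholders:

apiVersion: lca.openshift.io/v1alpha1
kind: ImageBasedUpgrade
metadata:
  name: upgrade
spec:
  stage: Prep
  seedImageRef:
    image: quay.io/example/seed-image:4.16.0 # placeholder seed container image
    version: 4.16.0 # placeholder seed image version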

View File

@@ -0,0 +1,25 @@
# This CR verifies the installation/upgrade of the LVM Storage Operator
apiVersion: operators.coreos.com/v1
kind: Operator
metadata:
name: lvms-operator.openshift-storage
annotations: {}
status:
components:
refs:
- kind: Subscription
namespace: openshift-storage
conditions:
- type: CatalogSourcesUnhealthy
status: "False"
- kind: InstallPlan
namespace: openshift-storage
conditions:
- type: Installed
status: "True"
- kind: ClusterServiceVersion
namespace: openshift-storage
conditions:
- type: Succeeded
status: "True"
reason: InstallSucceeded

View File

@@ -0,0 +1,14 @@
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
name: lifecycle-agent
namespace: openshift-lifecycle-agent
annotations: {}
spec:
channel: "stable"
name: lifecycle-agent
source: redhat-operators-disconnected
sourceNamespace: openshift-marketplace
installPlanApproval: Manual
status:
state: AtLatestKnown

View File

@@ -0,0 +1,7 @@
apiVersion: v1
kind: Namespace
metadata:
name: openshift-lifecycle-agent
annotations: {}
labels:
kubernetes.io/metadata.name: openshift-lifecycle-agent

View File

@@ -0,0 +1,9 @@
apiVersion: operators.coreos.com/v1
kind: OperatorGroup
metadata:
name: lifecycle-agent
namespace: openshift-lifecycle-agent
annotations: {}
spec:
targetNamespaces:
- openshift-lifecycle-agent

View File

@@ -3,7 +3,7 @@ kind: PerformanceProfile
metadata:
# if you change this name make sure the 'include' line in TunedPerformancePatch.yaml
# matches this name: include=openshift-node-performance-${PerformanceProfile.metadata.name}
# Also in file 'validatorCRs/informDuValidator.yaml':
# Also in file 'validatorCRs/informDuValidator.yaml':
# name: 50-performance-${PerformanceProfile.metadata.name}
name: openshift-node-performance-profile
annotations:

View File

@@ -0,0 +1,131 @@
apiVersion: ptp.openshift.io/v1
kind: PtpConfig
metadata:
name: boundary
namespace: openshift-ptp
annotations: {}
spec:
profile:
- name: "boundary"
ptp4lOpts: "-2 --summary_interval -4"
phc2sysOpts: "-a -r -m -n 24 -N 8 -R 16"
ptpSchedulingPolicy: SCHED_FIFO
ptpSchedulingPriority: 10
ptpSettings:
logReduce: "true"
ptp4lConf: |
# The interface name is hardware-specific
[$iface_slave]
masterOnly 0
[$iface_master_1]
masterOnly 1
[$iface_master_2]
masterOnly 1
[$iface_master_3]
masterOnly 1
[global]
#
# Default Data Set
#
twoStepFlag 1
slaveOnly 0
priority1 128
priority2 128
domainNumber 24
#utc_offset 37
clockClass 248
clockAccuracy 0xFE
offsetScaledLogVariance 0xFFFF
free_running 0
freq_est_interval 1
dscp_event 0
dscp_general 0
dataset_comparison G.8275.x
G.8275.defaultDS.localPriority 128
#
# Port Data Set
#
logAnnounceInterval -3
logSyncInterval -4
logMinDelayReqInterval -4
logMinPdelayReqInterval -4
announceReceiptTimeout 3
syncReceiptTimeout 0
delayAsymmetry 0
fault_reset_interval -4
neighborPropDelayThresh 20000000
masterOnly 0
G.8275.portDS.localPriority 128
#
# Run time options
#
assume_two_step 0
logging_level 6
path_trace_enabled 0
follow_up_info 0
hybrid_e2e 0
inhibit_multicast_service 0
net_sync_monitor 0
tc_spanning_tree 0
tx_timestamp_timeout 50
unicast_listen 0
unicast_master_table 0
unicast_req_duration 3600
use_syslog 1
verbose 0
summary_interval 0
kernel_leap 1
check_fup_sync 0
clock_class_threshold 135
#
# Servo Options
#
pi_proportional_const 0.0
pi_integral_const 0.0
pi_proportional_scale 0.0
pi_proportional_exponent -0.3
pi_proportional_norm_max 0.7
pi_integral_scale 0.0
pi_integral_exponent 0.4
pi_integral_norm_max 0.3
step_threshold 2.0
first_step_threshold 0.00002
max_frequency 900000000
clock_servo pi
sanity_freq_limit 200000000
ntpshm_segment 0
#
# Transport options
#
transportSpecific 0x0
ptp_dst_mac 01:1B:19:00:00:00
p2p_dst_mac 01:80:C2:00:00:0E
udp_ttl 1
udp6_scope 0x0E
uds_address /var/run/ptp4l
#
# Default interface options
#
clock_type BC
network_transport L2
delay_mechanism E2E
time_stamping hardware
tsproc_mode filter
delay_filter moving_median
delay_filter_length 10
egressLatency 0
ingressLatency 0
boundary_clock_jbod 0
#
# Clock description
#
productDescription ;;
revisionData ;;
manufacturerIdentity 00:00:00
userDescription ;
timeSource 0xA0
recommend:
- profile: "boundary"
priority: 4
match:
- nodeLabel: "node-role.kubernetes.io/$mcp"

View File

@@ -1,4 +1,7 @@
# 2 cards $iface_master and $iface_master_1 are connected via SMA1 ports by a cable and $iface_master_1 receives 1PPS signals from $iface_master
# The grandmaster profile is provided for testing only
# It is not installed on production clusters
# In this example two cards $iface_nic1 and $iface_nic2 are connected via
# SMA1 ports by a cable and $iface_nic2 receives 1PPS signals from $iface_nic1
apiVersion: ptp.openshift.io/v1
kind: PtpConfig
metadata:
@@ -9,7 +12,7 @@ spec:
profile:
- name: "grandmaster"
ptp4lOpts: "-2 --summary_interval -4"
phc2sysOpts: -r -u 0 -m -O -37 -N 8 -R 16 -s $iface_master -n 24
phc2sysOpts: -r -u 0 -m -w -N 8 -R 16 -s $iface_nic1 -n 24
ptpSchedulingPolicy: SCHED_FIFO
ptpSchedulingPriority: 10
ptpSettings:
@@ -22,12 +25,12 @@ spec:
LocalHoldoverTimeout: 14400
MaxInSpecOffset: 100
pins: $e810_pins
# "$iface_master":
# "$iface_nic1":
# "U.FL2": "0 2"
# "U.FL1": "0 1"
# "SMA2": "0 2"
# "SMA1": "2 1"
# "$iface_master_1":
# "$iface_nic2":
# "U.FL2": "0 2"
# "U.FL1": "0 1"
# "SMA2": "0 2"
@@ -86,6 +89,12 @@ spec:
- "-p"
- "MON-HW"
reportOutput: true
- args: #ubxtool -P 29.20 -p CFG-MSG,1,38,300
- "-P"
- "29.20"
- "-p"
- "CFG-MSG,1,38,300"
reportOutput: true
ts2phcOpts: " "
ts2phcConf: |
[nmea]
@@ -99,21 +108,30 @@ spec:
#example value of gnss_serialport is /dev/ttyGNSS_1700_0
ts2phc.nmea_serialport $gnss_serialport
leapfile /usr/share/zoneinfo/leap-seconds.list
[$iface_master]
[$iface_nic1]
ts2phc.extts_polarity rising
ts2phc.extts_correction 0
[$iface_master_1]
[$iface_nic2]
ts2phc.master 0
ts2phc.extts_polarity rising
#this is a measured value in nanoseconds to compensate for SMA cable delay
ts2phc.extts_correction -10
ptp4lConf: |
[$iface_master]
[$iface_nic1]
masterOnly 1
[$iface_master_1]
[$iface_nic1_1]
masterOnly 1
[$iface_master_1_1]
[$iface_nic1_2]
masterOnly 1
[$iface_master_1_2]
[$iface_nic1_3]
masterOnly 1
[$iface_nic2]
masterOnly 1
[$iface_nic2_1]
masterOnly 1
[$iface_nic2_2]
masterOnly 1
[$iface_nic2_3]
masterOnly 1
[global]
#

View File

@@ -3,18 +3,17 @@ kind: PtpConfig
metadata:
name: boundary-ha
namespace: openshift-ptp
annotations:
ran.openshift.io/ztp-deploy-wave: "10"
annotations: {}
spec:
profile:
- name: "boundary-ha"
ptp4lOpts: " "
ptp4lOpts: ""
phc2sysOpts: "-a -r -n 24"
ptpSchedulingPolicy: SCHED_FIFO
ptpSchedulingPriority: 10
ptpSettings:
logReduce: "true"
haProfiles: "ha-ptp-config-nic1,ha-ptp-config-nic2"
haProfiles: "$profile1,$profile2"
recommend:
- profile: "boundary-ha"
priority: 4

View File

@@ -0,0 +1,21 @@
apiVersion: ptp.openshift.io/v1
kind: PtpConfig
metadata:
name: boundary-ha
namespace: openshift-ptp
annotations: {}
spec:
profile:
- name: "boundary-ha"
ptp4lOpts: " "
phc2sysOpts: "-a -r -m -n 24 -N 8 -R 16"
ptpSchedulingPolicy: SCHED_FIFO
ptpSchedulingPriority: 10
ptpSettings:
logReduce: "true"
haProfiles: "$profile1,$profile2"
recommend:
- profile: "boundary-ha"
priority: 4
match:
- nodeLabel: "node-role.kubernetes.io/$mcp"

View File

@@ -1,3 +1,5 @@
# The grandmaster profile is provided for testing only
# It is not installed on production clusters
apiVersion: ptp.openshift.io/v1
kind: PtpConfig
metadata:
@@ -8,7 +10,7 @@ spec:
profile:
- name: "grandmaster"
ptp4lOpts: "-2 --summary_interval -4"
phc2sysOpts: -r -u 0 -m -O -37 -N 8 -R 16 -s $iface_master -n 24
phc2sysOpts: -r -u 0 -m -w -N 8 -R 16 -s $iface_master -n 24
ptpSchedulingPolicy: SCHED_FIFO
ptpSchedulingPriority: 10
ptpSettings:
@@ -80,6 +82,12 @@ spec:
- "-p"
- "MON-HW"
reportOutput: true
- args: #ubxtool -P 29.20 -p CFG-MSG,1,38,300
- "-P"
- "29.20"
- "-p"
- "CFG-MSG,1,38,300"
reportOutput: true
ts2phcOpts: " "
ts2phcConf: |
[nmea]

View File

@@ -0,0 +1,126 @@
# The grandmaster profile is provided for testing only
# It is not installed on production clusters
apiVersion: ptp.openshift.io/v1
kind: PtpConfig
metadata:
name: grandmaster
namespace: openshift-ptp
annotations: {}
spec:
profile:
- name: "grandmaster"
# The interface name is hardware-specific
interface: $interface
ptp4lOpts: "-2 --summary_interval -4"
phc2sysOpts: "-a -r -m -n 24 -N 8 -R 16"
ptpSchedulingPolicy: SCHED_FIFO
ptpSchedulingPriority: 10
ptpSettings:
logReduce: "true"
ptp4lConf: |
[global]
#
# Default Data Set
#
twoStepFlag 1
slaveOnly 0
priority1 128
priority2 128
domainNumber 24
#utc_offset 37
clockClass 255
clockAccuracy 0xFE
offsetScaledLogVariance 0xFFFF
free_running 0
freq_est_interval 1
dscp_event 0
dscp_general 0
dataset_comparison G.8275.x
G.8275.defaultDS.localPriority 128
#
# Port Data Set
#
logAnnounceInterval -3
logSyncInterval -4
logMinDelayReqInterval -4
logMinPdelayReqInterval -4
announceReceiptTimeout 3
syncReceiptTimeout 0
delayAsymmetry 0
fault_reset_interval -4
neighborPropDelayThresh 20000000
masterOnly 0
G.8275.portDS.localPriority 128
#
# Run time options
#
assume_two_step 0
logging_level 6
path_trace_enabled 0
follow_up_info 0
hybrid_e2e 0
inhibit_multicast_service 0
net_sync_monitor 0
tc_spanning_tree 0
tx_timestamp_timeout 50
unicast_listen 0
unicast_master_table 0
unicast_req_duration 3600
use_syslog 1
verbose 0
summary_interval 0
kernel_leap 1
check_fup_sync 0
clock_class_threshold 7
#
# Servo Options
#
pi_proportional_const 0.0
pi_integral_const 0.0
pi_proportional_scale 0.0
pi_proportional_exponent -0.3
pi_proportional_norm_max 0.7
pi_integral_scale 0.0
pi_integral_exponent 0.4
pi_integral_norm_max 0.3
step_threshold 2.0
first_step_threshold 0.00002
max_frequency 900000000
clock_servo pi
sanity_freq_limit 200000000
ntpshm_segment 0
#
# Transport options
#
transportSpecific 0x0
ptp_dst_mac 01:1B:19:00:00:00
p2p_dst_mac 01:80:C2:00:00:0E
udp_ttl 1
udp6_scope 0x0E
uds_address /var/run/ptp4l
#
# Default interface options
#
clock_type OC
network_transport L2
delay_mechanism E2E
time_stamping hardware
tsproc_mode filter
delay_filter moving_median
delay_filter_length 10
egressLatency 0
ingressLatency 0
boundary_clock_jbod 0
#
# Clock description
#
productDescription ;;
revisionData ;;
manufacturerIdentity 00:00:00
userDescription ;
timeSource 0xA0
recommend:
- profile: "grandmaster"
priority: 4
match:
- nodeLabel: "node-role.kubernetes.io/$mcp"

View File

@@ -1,7 +1,7 @@
apiVersion: ptp.openshift.io/v1
kind: PtpConfig
metadata:
name: slave
name: du-ptp-slave
namespace: openshift-ptp
annotations: {}
spec:

View File

@@ -0,0 +1,124 @@
apiVersion: ptp.openshift.io/v1
kind: PtpConfig
metadata:
name: du-ptp-slave
namespace: openshift-ptp
annotations: {}
spec:
profile:
- name: "slave"
# The interface name is hardware-specific
interface: $interface
ptp4lOpts: "-2 -s --summary_interval -4"
phc2sysOpts: "-a -r -m -n 24 -N 8 -R 16"
ptpSchedulingPolicy: SCHED_FIFO
ptpSchedulingPriority: 10
ptpSettings:
logReduce: "true"
ptp4lConf: |
[global]
#
# Default Data Set
#
twoStepFlag 1
slaveOnly 1
priority1 128
priority2 128
domainNumber 24
#utc_offset 37
clockClass 255
clockAccuracy 0xFE
offsetScaledLogVariance 0xFFFF
free_running 0
freq_est_interval 1
dscp_event 0
dscp_general 0
dataset_comparison G.8275.x
G.8275.defaultDS.localPriority 128
#
# Port Data Set
#
logAnnounceInterval -3
logSyncInterval -4
logMinDelayReqInterval -4
logMinPdelayReqInterval -4
announceReceiptTimeout 3
syncReceiptTimeout 0
delayAsymmetry 0
fault_reset_interval -4
neighborPropDelayThresh 20000000
masterOnly 0
G.8275.portDS.localPriority 128
#
# Run time options
#
assume_two_step 0
logging_level 6
path_trace_enabled 0
follow_up_info 0
hybrid_e2e 0
inhibit_multicast_service 0
net_sync_monitor 0
tc_spanning_tree 0
tx_timestamp_timeout 50
unicast_listen 0
unicast_master_table 0
unicast_req_duration 3600
use_syslog 1
verbose 0
summary_interval 0
kernel_leap 1
check_fup_sync 0
clock_class_threshold 7
#
# Servo Options
#
pi_proportional_const 0.0
pi_integral_const 0.0
pi_proportional_scale 0.0
pi_proportional_exponent -0.3
pi_proportional_norm_max 0.7
pi_integral_scale 0.0
pi_integral_exponent 0.4
pi_integral_norm_max 0.3
step_threshold 2.0
first_step_threshold 0.00002
max_frequency 900000000
clock_servo pi
sanity_freq_limit 200000000
ntpshm_segment 0
#
# Transport options
#
transportSpecific 0x0
ptp_dst_mac 01:1B:19:00:00:00
p2p_dst_mac 01:80:C2:00:00:0E
udp_ttl 1
udp6_scope 0x0E
uds_address /var/run/ptp4l
#
# Default interface options
#
clock_type OC
network_transport L2
delay_mechanism E2E
time_stamping hardware
tsproc_mode filter
delay_filter moving_median
delay_filter_length 10
egressLatency 0
ingressLatency 0
boundary_clock_jbod 0
#
# Clock description
#
productDescription ;;
revisionData ;;
manufacturerIdentity 00:00:00
userDescription ;
timeSource 0xA0
recommend:
- profile: "slave"
priority: 4
match:
- nodeLabel: "node-role.kubernetes.io/$mcp"

View File

@@ -11,7 +11,7 @@ spec:
deviceType: $deviceType
isRdma: $isRdma
nicSelector:
# The exact physical function name must match the hardware used
# The exact physical function name must match the hardware used
pfNames: [$pfNames]
nodeSelector:
node-role.kubernetes.io/$mcp: ""
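
A SriovNetworkNodePolicy only creates the host device resource; workloads typically attach through a companion SriovNetwork. A minimal sketch with hypothetical names, assuming the policy's resourceName is example_resource:

apiVersion: sriovnetwork.openshift.io/v1
kind: SriovNetwork
metadata:
  name: example-sriov-net # hypothetical network name
  namespace: openshift-sriov-network-operator
spec:
  resourceName: example_resource # must match the SriovNetworkNodePolicy resourceName
  networkNamespace: example-ns # hypothetical namespace for the generated NetworkAttachmentDefinition
  ipam: |
    {
      "type": "static"
    }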

View File

@@ -3,8 +3,7 @@ kind: SriovOperatorConfig
metadata:
name: default
namespace: openshift-sriov-network-operator
annotations:
ran.openshift.io/ztp-deploy-wave: "10"
annotations: {}
spec:
configDaemonNodeSelector:
"node-role.kubernetes.io/$mcp": ""
@@ -23,6 +22,4 @@ spec:
# openshift.io/<resource_name>: "1"
enableInjector: false
enableOperatorWebhook: false
# Disable drain is needed for single-node OpenShift.
disableDrain: true
logLevel: 0

View File

@@ -0,0 +1,27 @@
apiVersion: sriovnetwork.openshift.io/v1
kind: SriovOperatorConfig
metadata:
name: default
namespace: openshift-sriov-network-operator
annotations: {}
spec:
configDaemonNodeSelector:
"node-role.kubernetes.io/$mcp": ""
# Injector and OperatorWebhook pods can be disabled (set to "false") below
# to reduce the number of management pods. It is recommended to start with the
# webhook and injector pods enabled, and only disable them after verifying the
# correctness of user manifests.
  # If the injector is disabled, containers using SR-IOV resources must explicitly assign
# them in the "requests"/"limits" section of the container spec, for example:
# containers:
# - name: my-sriov-workload-container
# resources:
# limits:
# openshift.io/<resource_name>: "1"
# requests:
# openshift.io/<resource_name>: "1"
enableInjector: false
enableOperatorWebhook: false
  # Disabling drain is needed for single-node OpenShift
disableDrain: true
logLevel: 0

View File

@@ -1,16 +1,16 @@
apiVersion: lvm.topolvm.io/v1alpha1
kind: LVMCluster
metadata:
name: odf-lvmcluster
name: lvmcluster
namespace: openshift-storage
spec:
storage:
deviceClasses:
- name: vg1
deviceSelector:
paths:
- /usr/disk/by-path/pci-0000:11:00.0-nvme-1
thinPoolConfig:
name: thin-pool-1
overprovisionRatio: 10
sizePercent: 90
annotations: {}
spec: {}
# Example: creating a vg1 volume group that uses all available disks on the node,
# except the installation disk.
# storage:
# deviceClasses:
# - name: vg1
# thinPoolConfig:
# name: thin-pool-1
# sizePercent: 90
# overprovisionRatio: 10
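
LVM Storage exposes each device class through a storage class named lvms-<device-class-name>, so the commented vg1 example yields lvms-vg1. A minimal PersistentVolumeClaim sketch consuming it; the claim name and size are hypothetical:

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: example-lvms-pvc # hypothetical claim name
  namespace: default
spec:
  accessModes:
  - ReadWriteOnce
  volumeMode: Filesystem
  resources:
    requests:
      storage: 10Gi # hypothetical size
  storageClassName: lvms-vg1 # derived from the vg1 device class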

View File

@@ -0,0 +1,14 @@
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
name: lvms-operator
namespace: openshift-storage
annotations: {}
spec:
channel: "stable"
name: lvms-operator
source: redhat-operators-disconnected
sourceNamespace: openshift-marketplace
installPlanApproval: Manual
status:
state: AtLatestKnown

View File

@@ -0,0 +1,7 @@
apiVersion: v1
kind: Namespace
metadata:
name: openshift-storage
labels:
openshift.io/cluster-monitoring: "true"
annotations: {}

View File

@@ -0,0 +1,9 @@
apiVersion: operators.coreos.com/v1
kind: OperatorGroup
metadata:
name: lvms-operator-operatorgroup
namespace: openshift-storage
annotations: {}
spec:
targetNamespaces:
- openshift-storage

View File

@@ -3,8 +3,7 @@ kind: Tuned
metadata:
name: performance-patch
namespace: openshift-cluster-node-tuning-operator
annotations:
ran.openshift.io/ztp-deploy-wave: "10"
annotations: {}
spec:
profile:
- name: performance-patch

View File

@@ -9,145 +9,144 @@ spec:
baseDomain: "example.com"
pullSecretRef:
name: "assisted-deployment-pull-secret"
clusterImageSetNameRef: "openshift-4.10"
clusterImageSetNameRef: "openshift-4.16"
sshPublicKey: "ssh-rsa AAAA..."
clusters:
- clusterName: "example-sno"
networkType: "OVNKubernetes"
# installConfigOverrides is a generic way of passing install-config
# parameters through the siteConfig. The 'capabilities' field configures
# the composable openshift feature. In this 'capabilities' setting, we
# remove all but the marketplace component from the optional set of
# components.
# Notes:
# - OperatorLifecycleManager is needed for 4.15 and later
# - NodeTuning is needed for 4.13 and later, not for 4.12 and earlier
# - Ingress is needed for 4.16 and later
installConfigOverrides: |
{
"capabilities": {
"baselineCapabilitySet": "None",
"additionalEnabledCapabilities": [
"NodeTuning",
"OperatorLifecycleManager"
"Ingress"
]
- clusterName: "example-sno"
networkType: "OVNKubernetes"
# installConfigOverrides is a generic way of passing install-config
# parameters through the siteConfig. The 'capabilities' field configures
# the composable openshift feature. In this 'capabilities' setting, we
      # remove all of the optional components except those re-enabled below.
# Notes:
# - OperatorLifecycleManager is needed for 4.15 and later
# - NodeTuning is needed for 4.13 and later, not for 4.12 and earlier
# - Ingress is needed for 4.16 and later
installConfigOverrides: |
{
"capabilities": {
"baselineCapabilitySet": "None",
"additionalEnabledCapabilities": [
"NodeTuning",
"OperatorLifecycleManager",
"Ingress"
]
}
}
}
# It is strongly recommended to include crun manifests as part of the additional install-time manifests for 4.13+.
# The crun manifests can be obtained from source-crs/optional-extra-manifest/ and added to the git repo ie.sno-extra-manifest.
# extraManifestPath: sno-extra-manifest
clusterLabels:
# These example cluster labels correspond to the bindingRules in the PolicyGenTemplate examples
du-profile: "latest"
# These example cluster labels correspond to the bindingRules in the PolicyGenTemplate examples in ../policygentemplates:
# ../policygentemplates/common-ranGen.yaml will apply to all clusters with 'common: true'
common: true
# ../policygentemplates/group-du-sno-ranGen.yaml will apply to all clusters with 'group-du-sno: ""'
group-du-sno: ""
# ../policygentemplates/example-sno-site.yaml will apply to all clusters with 'sites: "example-sno"'
# Normally this should match or contain the cluster name so it only applies to a single cluster
sites : "example-sno"
clusterNetwork:
- cidr: 1001:1::/48
hostPrefix: 64
machineNetwork:
- cidr: 1111:2222:3333:4444::/64
serviceNetwork:
- 1001:2::/112
additionalNTPSources:
- 1111:2222:3333:4444::2
# Initiates the cluster for workload partitioning. Setting specific reserved/isolated CPUSets is done via PolicyTemplate
# please see Workload Partitioning Feature for a complete guide.
cpuPartitioningMode: AllNodes
# Optionally; This can be used to override the KlusterletAddonConfig that is created for this cluster:
#crTemplates:
# KlusterletAddonConfig: "KlusterletAddonConfigOverride.yaml"
nodes:
- hostName: "example-node1.example.com"
role: "master"
# Optionally; This can be used to configure desired BIOS setting on a host:
#biosConfigRef:
# filePath: "example-hw.profile"
bmcAddress: "idrac-virtualmedia+https://[1111:2222:3333:4444::bbbb:1]/redfish/v1/Systems/System.Embedded.1"
bmcCredentialsName:
name: "example-node1-bmh-secret"
bootMACAddress: "AA:BB:CC:DD:EE:11"
# Use UEFISecureBoot to enable secure boot
bootMode: "UEFI"
rootDeviceHints:
deviceName: "/dev/disk/by-path/pci-0000:01:00.0-scsi-0:2:0:0"
# disk partition at `/var/lib/containers` with ignitionConfigOverride. Some values must be updated. See DiskPartitionContainer.md for more details
ignitionConfigOverride: |
{
"ignition": {
"version": "3.2.0"
},
"storage": {
"disks": [
{
"device": "/dev/disk/by-id/wwn-0x6b07b250ebb9d0002a33509f24af1f62",
"partitions": [
{
"label": "var-lib-containers",
"sizeMiB": 0,
"startMiB": 250000
# It is strongly recommended to include crun manifests as part of the additional install-time manifests for 4.13+.
      # The crun manifests can be obtained from source-crs/optional-extra-manifest/ and added to the Git repo, for example: sno-extra-manifest.
# extraManifestPath: sno-extra-manifest
clusterLabels:
# These example cluster labels correspond to the bindingRules in the PolicyGenTemplate examples
du-profile: "latest"
# These example cluster labels correspond to the bindingRules in the PolicyGenTemplate examples in ../policygentemplates:
# ../policygentemplates/common-ranGen.yaml will apply to all clusters with 'common: true'
common: true
# ../policygentemplates/group-du-sno-ranGen.yaml will apply to all clusters with 'group-du-sno: ""'
group-du-sno: ""
# ../policygentemplates/example-sno-site.yaml will apply to all clusters with 'sites: "example-sno"'
# Normally this should match or contain the cluster name so it only applies to a single cluster
sites: "example-sno"
clusterNetwork:
- cidr: 1001:1::/48
hostPrefix: 64
machineNetwork:
- cidr: 1111:2222:3333:4444::/64
serviceNetwork:
- 1001:2::/112
additionalNTPSources:
- 1111:2222:3333:4444::2
      # Initiates the cluster for workload partitioning. Setting specific reserved/isolated CPUSets is done via PolicyTemplate;
      # see the Workload Partitioning feature for a complete guide.
cpuPartitioningMode: AllNodes
      # Optional: this can be used to override the KlusterletAddonConfig that is created for this cluster:
#crTemplates:
# KlusterletAddonConfig: "KlusterletAddonConfigOverride.yaml"
nodes:
- hostName: "example-node1.example.com"
role: "master"
        # Optional: this can be used to configure the desired BIOS settings on a host:
#biosConfigRef:
# filePath: "example-hw.profile"
bmcAddress: "idrac-virtualmedia+https://[1111:2222:3333:4444::bbbb:1]/redfish/v1/Systems/System.Embedded.1"
bmcCredentialsName:
name: "example-node1-bmh-secret"
bootMACAddress: "AA:BB:CC:DD:EE:11"
# Use UEFISecureBoot to enable secure boot
bootMode: "UEFI"
rootDeviceHints:
deviceName: "/dev/disk/by-path/pci-0000:01:00.0-scsi-0:2:0:0"
# disk partition at `/var/lib/containers` with ignitionConfigOverride. Some values must be updated. See DiskPartitionContainer.md for more details
ignitionConfigOverride: |
{
"ignition": {
"version": "3.2.0"
},
"storage": {
"disks": [
{
"device": "/dev/disk/by-id/wwn-0x6b07b250ebb9d0002a33509f24af1f62",
"partitions": [
{
"label": "var-lib-containers",
"sizeMiB": 0,
"startMiB": 250000
}
],
"wipeTable": false
}
],
"wipeTable": false
}
],
"filesystems": [
{
"device": "/dev/disk/by-partlabel/var-lib-containers",
"format": "xfs",
"mountOptions": [
"defaults",
"prjquota"
],
"path": "/var/lib/containers",
"wipeFilesystem": true
}
]
},
"systemd": {
"units": [
{
"contents": "# Generated by Butane\n[Unit]\nRequires=systemd-fsck@dev-disk-by\\x2dpartlabel-var\\x2dlib\\x2dcontainers.service\nAfter=systemd-fsck@dev-disk-by\\x2dpartlabel-var\\x2dlib\\x2dcontainers.service\n\n[Mount]\nWhere=/var/lib/containers\nWhat=/dev/disk/by-partlabel/var-lib-containers\nType=xfs\nOptions=defaults,prjquota\n\n[Install]\nRequiredBy=local-fs.target",
"enabled": true,
"name": "var-lib-containers.mount"
}
]
"filesystems": [
{
"device": "/dev/disk/by-partlabel/var-lib-containers",
"format": "xfs",
"mountOptions": [
"defaults",
"prjquota"
],
"path": "/var/lib/containers",
"wipeFilesystem": true
}
]
},
"systemd": {
"units": [
{
"contents": "# Generated by Butane\n[Unit]\nRequires=systemd-fsck@dev-disk-by\\x2dpartlabel-var\\x2dlib\\x2dcontainers.service\nAfter=systemd-fsck@dev-disk-by\\x2dpartlabel-var\\x2dlib\\x2dcontainers.service\n\n[Mount]\nWhere=/var/lib/containers\nWhat=/dev/disk/by-partlabel/var-lib-containers\nType=xfs\nOptions=defaults,prjquota\n\n[Install]\nRequiredBy=local-fs.target",
"enabled": true,
"name": "var-lib-containers.mount"
}
]
}
}
}
nodeNetwork:
interfaces:
- name: eno1
macAddress: "AA:BB:CC:DD:EE:11"
config:
nodeNetwork:
interfaces:
- name: eno1
type: ethernet
state: up
ipv4:
enabled: false
ipv6:
enabled: true
address:
# For SNO sites with static IP addresses, the node-specific,
# API and Ingress IPs should all be the same and configured on
# the interface
- ip: 1111:2222:3333:4444::aaaa:1
prefix-length: 64
dns-resolver:
config:
search:
- example.com
server:
- 1111:2222:3333:4444::2
routes:
config:
- destination: ::/0
next-hop-interface: eno1
next-hop-address: 1111:2222:3333:4444::1
table-id: 254
macAddress: "AA:BB:CC:DD:EE:11"
config:
interfaces:
- name: eno1
type: ethernet
state: up
ipv4:
enabled: false
ipv6:
enabled: true
address:
# For SNO sites with static IP addresses, the node-specific,
# API and Ingress IPs should all be the same and configured on
# the interface
- ip: 1111:2222:3333:4444::aaaa:1
prefix-length: 64
dns-resolver:
config:
search:
- example.com
server:
- 1111:2222:3333:4444::2
routes:
config:
- destination: ::/0
next-hop-interface: eno1
next-hop-address: 1111:2222:3333:4444::1
table-id: 254