From 618fae90ac7fc29391aca9e069e61e78dbedaab5 Mon Sep 17 00:00:00 2001 From: Shauna Diaz Date: Tue, 15 Nov 2022 16:29:46 -0500 Subject: [PATCH] OSDOCS-4566 MicroShift adding networking assembly and firewall module to config --- _topic_maps/_topic_map_ms.yml | 40 ++--- .../microshift-using-config-tools.adoc | 2 +- microshift_networking/_attributes | 1 + microshift_networking/images | 1 + .../ingress-operator-microshift.adoc | 87 ++++++++++ .../microshift-networking.adoc | 25 +++ microshift_networking/modules | 1 + microshift_networking/snippets | 1 + .../microshift-known-issues.adoc | 5 +- .../microshift-version.adoc | 1 + modules/microshift-cni.adoc | 107 ++++++++++++ modules/microshift-configuring-ovn.adoc | 72 ++++++++ .../microshift-cri-o-container-runtime.adoc | 31 ++++ modules/microshift-firewall-config.adoc | 157 ++++++++++++++++++ modules/microshift-http-proxy.adoc | 12 ++ modules/microshift-man-config-ovs-bridge.adoc | 38 +++++ modules/microshift-ovs-snapshot.adoc | 54 ++++++ modules/microshift-rpm-ostree.adoc | 32 ++++ .../microshift-troubleshooting-nodeport.adoc | 35 ++++ modules/microshift-version-api.adoc | 1 - 20 files changed, 680 insertions(+), 23 deletions(-) create mode 120000 microshift_networking/_attributes create mode 120000 microshift_networking/images create mode 100644 microshift_networking/ingress-operator-microshift.adoc create mode 100644 microshift_networking/microshift-networking.adoc create mode 120000 microshift_networking/modules create mode 120000 microshift_networking/snippets create mode 100644 modules/microshift-cni.adoc create mode 100644 modules/microshift-configuring-ovn.adoc create mode 100644 modules/microshift-cri-o-container-runtime.adoc create mode 100644 modules/microshift-firewall-config.adoc create mode 100644 modules/microshift-http-proxy.adoc create mode 100644 modules/microshift-man-config-ovs-bridge.adoc create mode 100644 modules/microshift-ovs-snapshot.adoc create mode 100644 modules/microshift-rpm-ostree.adoc 
create mode 100644 modules/microshift-troubleshooting-nodeport.adoc diff --git a/_topic_maps/_topic_map_ms.yml b/_topic_maps/_topic_map_ms.yml index 65b6085a1b..ac94368e93 100644 --- a/_topic_maps/_topic_map_ms.yml +++ b/_topic_maps/_topic_map_ms.yml @@ -51,9 +51,9 @@ Name: Installing Dir: microshift_install Distros: microshift Topics: -- Name: Installing MicroShift from RPM +- Name: Installing from RPM File: microshift-install-rpm -- Name: Installing MicroShift in a RHEL for Edge Image +- Name: Installing in a RHEL for Edge Image File: microshift-install-rhel-for-edge --- Name: API reference @@ -104,25 +104,17 @@ Name: Configuring Dir: microshift_configuring Distros: microshift Topics: -- Name: Configuring MicroShift +- Name: Configuring File: microshift-using-config-tools -# --- -# Name: Storage -# Dir: storage -# Distros: microshift -# Topics: -# - Name: Persistent storage -# Dir: persistent_storage -# Topics: -# - Name: Persistent storage using ODF-LVM -# File: microshift-persistent-storage-odf-lvm -# --- -# Name: Networking -# Dir: networking -# Distros: microshift -# Topics: +--- +Name: Networking +Dir: microshift_networking +Distros: microshift +Topics: +- Name: Understanding networking + File: microshift-networking # - Name: Understanding the Ingress Operator -# File: ingress-operator +# File: ingress-operator-microshift # - Name: Configuring a cluster-wide proxy during installation # File: configuring-cluster-wide-proxy # - Name: CIDR range definitions @@ -141,6 +133,16 @@ Topics: # - Name: Configuring multitenant isolation with network policy # File: multitenant-network-policy # --- +# Name: Storage +# Dir: storage +# Distros: microshift +# Topics: +# - Name: Persistent storage +# Dir: persistent_storage +# Topics: +# - Name: Persistent storage using ODF-LVM +# File: microshift-persistent-storage-odf-lvm +# --- # Name: Updating clusters # Dir: updating # Distros: microshift diff --git a/microshift_configuring/microshift-using-config-tools.adoc 
b/microshift_configuring/microshift-using-config-tools.adoc index d527312395..390291fd84 100644 --- a/microshift_configuring/microshift-using-config-tools.adoc +++ b/microshift_configuring/microshift-using-config-tools.adoc @@ -1,6 +1,6 @@ :_content-type: ASSEMBLY [id="microshift-using-config-tools"] -= Configuring {product-title} += Configuring include::_attributes/attributes-microshift.adoc[] :context: microshift-configuring toc::[] diff --git a/microshift_networking/_attributes b/microshift_networking/_attributes new file mode 120000 index 0000000000..93957f0227 --- /dev/null +++ b/microshift_networking/_attributes @@ -0,0 +1 @@ +../_attributes \ No newline at end of file diff --git a/microshift_networking/images b/microshift_networking/images new file mode 120000 index 0000000000..5e67573196 --- /dev/null +++ b/microshift_networking/images @@ -0,0 +1 @@ +../images \ No newline at end of file diff --git a/microshift_networking/ingress-operator-microshift.adoc b/microshift_networking/ingress-operator-microshift.adoc new file mode 100644 index 0000000000..0108033866 --- /dev/null +++ b/microshift_networking/ingress-operator-microshift.adoc @@ -0,0 +1,87 @@ +:_content-type: ASSEMBLY +[id="configuring-ingress-microshift"] += Ingress Operator in {product-title} +include::_attributes/attributes-microshift.adoc[] +:context: configuring-ingress + +toc::[] +include::modules/nw-ne-openshift-ingress.adoc[leveloffset=+1] +include::modules/nw-installation-ingress-config-asset.adoc[leveloffset=+1] +include::modules/nw-ingress-controller-configuration-parameters.adoc[leveloffset=+1] + +[id="configuring-ingress-controller-tls"] +=== Ingress Controller TLS security profiles + +TLS security profiles provide a way for servers to regulate which ciphers a connecting client can use when connecting to the server. 
+ +// Understanding TLS security profiles +include::modules/tls-profiles-understanding.adoc[leveloffset=+3] + +// Configuring the TLS profile for the Ingress Controller +include::modules/tls-profiles-ingress-configuring.adoc[leveloffset=+3] + +include::modules/nw-mutual-tls-auth.adoc[leveloffset=+3] + +include::modules/nw-ingress-view.adoc[leveloffset=+1] + +include::modules/nw-ingress-operator-status.adoc[leveloffset=+1] + +include::modules/nw-ingress-operator-logs.adoc[leveloffset=+1] + +include::modules/nw-ingress-controller-status.adoc[leveloffset=+1] + +[id="configuring-ingress-controller"] +== Configuring the Ingress Controller + +include::modules/nw-ingress-setting-a-custom-default-certificate.adoc[leveloffset=+2] + +include::modules/nw-ingress-custom-default-certificate-remove.adoc[leveloffset=+2] + +include::modules/nw-autoscaling-ingress-controller.adoc[leveloffset=+2] + +include::modules/nw-scaling-ingress-controller.adoc[leveloffset=+2] + +include::modules/nw-configure-ingress-access-logging.adoc[leveloffset=+2] + +include::modules/nw-ingress-setting-thread-count.adoc[leveloffset=+2] + +include::modules/nw-ingress-sharding.adoc[leveloffset=+2] + +include::modules/nw-ingress-sharding-route-labels.adoc[leveloffset=+3] + +include::modules/nw-ingress-sharding-namespace-labels.adoc[leveloffset=+3] + +include::modules/nw-ingress-setting-internal-lb.adoc[leveloffset=+2] + +include::modules/nw-ingress-controller-configuration-gcp-global-access.adoc[leveloffset=+2] + +include::modules/nw-ingress-controller-config-tuningoptions-healthcheckinterval.adoc[leveloffset=+2] + +include::modules/nw-ingress-default-internal.adoc[leveloffset=+2] + +include::modules/nw-route-admission-policy.adoc[leveloffset=+2] + +include::modules/using-wildcard-routes.adoc[leveloffset=+2] + +include::modules/nw-using-ingress-forwarded.adoc[leveloffset=+2] + +include::modules/nw-http2-haproxy.adoc[leveloffset=+2] + 
+include::modules/nw-ingress-controller-configuration-proxy-protocol.adoc[leveloffset=+2] + +include::modules/nw-ingress-configuring-application-domain.adoc[leveloffset=+2] + +include::modules/nw-ingress-converting-http-header-case.adoc[leveloffset=+2] + +include::modules/nw-configuring-router-compression.adoc[leveloffset=+2] + +include::modules/nw-customize-ingress-error-pages.adoc[leveloffset=+2] +//include::modules/nw-ingress-select-route.adoc[leveloffset=+2] + +include::modules/nw-ingress-setting-max-connections.adoc[leveloffset=+2] + +//[role="_additional-resources"] +//== Additional resources + +//* xref:../networking/configuring-a-custom-pki.adoc#configuring-a-custom-pki[Configuring a custom PKI] + diff --git a/microshift_networking/microshift-networking.adoc b/microshift_networking/microshift-networking.adoc new file mode 100644 index 0000000000..855bd6ce8f --- /dev/null +++ b/microshift_networking/microshift-networking.adoc @@ -0,0 +1,25 @@ +:_content-type: ASSEMBLY +[id="microshift-understanding-networking"] += Understanding networking +include::_attributes/attributes-microshift.adoc[] +:context: microshift-networking + +toc::[] + +Learn how to apply networking customization and default settings to {product-title} deployments. Each node is contained to a single machine and single {product-title}, so each deployment requires individual configuration, pods, and settings. + +Cluster Administrators have several options for exposing applications that run inside a cluster to external traffic and securing network connections: + +* A service such as NodePort + +* API resources, such as `Ingress` and `Route` + +By default, Kubernetes allocates each pod an internal IP address for applications running within the pod. Pods and their containers can have traffic between them, but clients outside the cluster do not have direct network access to pods except when exposed with a service such as NodePort. 
+ +include::modules/microshift-cni.adoc[leveloffset=+1] +include::modules/microshift-configuring-ovn.adoc[leveloffset=+1] +//include::modules/microshift-man-config-ovs-bridge.adoc[leveloffset=+1] +include::modules/microshift-http-proxy.adoc[leveloffset=+1] +include::modules/microshift-cri-o-container-runtime.adoc[leveloffset=+1] +include::modules/microshift-ovs-snapshot.adoc[leveloffset=+1] +include::modules/microshift-firewall-config.adoc[leveloffset=+1] diff --git a/microshift_networking/modules b/microshift_networking/modules new file mode 120000 index 0000000000..464b823aca --- /dev/null +++ b/microshift_networking/modules @@ -0,0 +1 @@ +../modules \ No newline at end of file diff --git a/microshift_networking/snippets b/microshift_networking/snippets new file mode 120000 index 0000000000..9d58b92e50 --- /dev/null +++ b/microshift_networking/snippets @@ -0,0 +1 @@ +../snippets/ \ No newline at end of file diff --git a/microshift_troubleshooting/microshift-known-issues.adoc b/microshift_troubleshooting/microshift-known-issues.adoc index 61dfff571a..93a105b714 100644 --- a/microshift_troubleshooting/microshift-known-issues.adoc +++ b/microshift_troubleshooting/microshift-known-issues.adoc @@ -1,10 +1,11 @@ :_content-type: ASSEMBLY [id="microshift-known-issues"] -= Troubleshoot known issues += Troubleshooting and known issues include::_attributes/attributes-microshift.adoc[] :context: microshift-known-issues toc::[] -Read about known issues and possible solutions. +Read about troubleshooting, known issues, and possible solutions. 
include::modules/microshift-ki-cni-iptables-deleted.adoc[leveloffset=+1] +include::modules/microshift-troubleshooting-nodeport.adoc[leveloffset=+1] diff --git a/microshift_troubleshooting/microshift-version.adoc b/microshift_troubleshooting/microshift-version.adoc index fafeb134c8..9d3e8d83ce 100644 --- a/microshift_troubleshooting/microshift-version.adoc +++ b/microshift_troubleshooting/microshift-version.adoc @@ -3,6 +3,7 @@ = Checking which version you have installed include::_attributes/attributes-microshift.adoc[] :context: microshift-version + toc::[] To begin troubleshooting, determine which version of {product-title} you have installed. diff --git a/modules/microshift-cni.adoc b/modules/microshift-cni.adoc new file mode 100644 index 0000000000..df38ffc9ee --- /dev/null +++ b/modules/microshift-cni.adoc @@ -0,0 +1,107 @@ +// Module included in the following assemblies: +// +// * microshift_networking/microshift-understanding networking.adoc + +:_content-type: CONCEPT +[id="microshift-cni_{context}"] += About the OVN-Kubernetes CNI network provider + +OVN-Kubernetes is the default networking solution for {product-title} deployments. OVN-Kubernetes is a virtualized network for pods and services that is based on Open Virtual Network (OVN). The OVN-Kubernetes Container Network Interface (CNI) plug-in is the network provider for the cluster. A cluster that uses the OVN-Kubernetes network provider also runs Open vSwitch (OVS) on the node. OVN configures OVS on the node to implement the declared network configuration. + +[id="microshift-network-topology_{context}"] +== Network topology +OVN-Kubernetes provides an overlay-based networking implementation. This overlay includes an OVS-based implementation of Service and NetworkPolicy. The overlay network uses the geneve tunnel, so the pod maximum transmission unit (MTU) is set to smaller than that of the physical interface on the host to remove the tunnel header. 
+ +OVS runs as a systemd service on the {product-title} node. The OVS RPM package is installed as a dependency to the `microshift-networking` RPM package. OVS is started immediately when the `microshift-networking` RPM is installed. + +=== IP forward +The host network `sysctl net.ipv4.ip_forward` is automatically enabled by the `ovnkube-master` container when started. This is required to forward incoming traffic to the CNI. For example, accessing the NodePort service from outside of a cluster fails if `ip_forward` is disabled. + +[id="microshift-network-performance_{context}"] +== Network performance optimizations +By default, three performance optimizations are applied to OVS services to minimize resource consumption: + +* CPU affinity to `ovs-vswitchd.service` and `ovsdb-server.service` +* `no-mlockall` to `openvswitch.service` +* Limit handler and `revalidator` threads to `ovs-vswitchd.service` + +[id="microshift-network-features_{context}"] +== Network features +Networking features available with {product-title} {product-version} include: + +* Kubernetes network policy +* Dynamic node IP +* Cluster network on specified host interface +* Secondary gateway interface +* Dual stack + +Networking features not available with {product-title} {product-version}: + +* Egress IP/firewall/qos: disabled +* Hybrid networking: not supported +* IPsec: not supported +* Hardware offload: not supported + +//watch USHIFT-640 for updates to architectural docs that will clarify features + +//Q: are there immutable network settings we should tell users about? +[id="microshift-network-comps-svcs_{context}"] +== {product-title} networking components and services overview +This brief overview describes networking components and their operation in {product-title}. The `microshift-networking` RPM is a package that automatically pulls in any networking-related dependencies and systemd services to initialize networking, for example, the `microshift-ovs-init` systemd service. 
+ +NetworkManager:: +NetworkManager is required to set up the initial gateway bridge on the {product-title} node. The NetworkManager and `NetworkManager-ovs` RPM packages are installed as dependencies to the `microshift-networking` RPM package, which contains the necessary configuration files. NetworkManager in {product-title} uses the `keyfile` plug-in and is restarted after installation of the `microshift-networking` RPM package. + +microshift-ovs-init:: +The `microshift-ovs-init.service` is installed by the `microshift-networking` RPM package as a dependent systemd service to microshift.service. It is responsible for setting up the OVS gateway bridge. + +OVN containers:: +Two OVN-Kubernetes daemon sets are rendered and applied by {product-title}. + +* *ovnkube-master* +Includes the `northd`, `nbdb`, `sbdb` and `ovnkube-master` containers. + +* *ovnkube-node* +The ovnkube-node includes the OVN-Controller container. ++ +After {product-title} boots, the OVN-Kubernetes daemon sets are deployed in the `openshift-ovn-kubernetes` namespace. + +Packaging:: +OVN-Kubernetes manifests and startup logic are built into {product-title}. The systemd services and configurations included in `microshift-networking` RPM are: + +* `/etc/NetworkManager/conf.d/microshift-nm.conf` for NetworkManager.service +* `/etc/systemd/system/ovs-vswitchd.service.d/microshift-cpuaffinity.conf` for ovs-vswitchd.service +* `/etc/systemd/system/ovsdb-server.service.d/microshift-cpuaffinity.conf` +* `/usr/bin/configure-ovs-microshift.sh` for microshift-ovs-init.service +* `/usr/bin/configure-ovs.sh` for microshift-ovs-init.service +* `/etc/crio/crio.conf.d/microshift-ovn.conf` for CRI-O service + +[id="microshift-bridge-mapping_{context}"] +== Bridge mappings +Bridge mappings allow provider network traffic to reach the physical network. Traffic leaves the provider network and arrives at the `br-int` bridge. 
A patch port between `br-int` and `br-ex` then allows the traffic to traverse to and from the provider network and the edge network. Kubernetes pods are connected to the `br-int` bridge through virtual ethernet pair: one end of the virtual ethernet pair is attached to the pod namespace, and the other end is attached to the `br-int` bridge. + +[id="microshift-primary-gateway-interface_{context}"] +=== Primary gateway interface +You can specify the desired host interface name in the `ovn.yaml` config file as `gatewayInterface`. The specified interface is added in OVS bridge br-ex which acts as gateway bridge for the CNI network. + +[id="microshift-secondary-gateway-interface_{context}"] +=== Secondary gateway interface +You can set up one additional host interface for cluster ingress and egress in the `ovn.yaml` config file. The additional interface is added in a second OVS bridge `br-ex1`. Cluster pod traffic directed to the additional host subnet is routed automatically based on the destination IP through br-ex1. + +Either two or three OVS bridges are created based on the CNI configuration: + +Default deployment:: +* The `externalGatewayInterface` is not specified in the `ovn.yaml` config file. +* Two OVS bridges, `br-ex` and `br-int`, are created. + +Customized deployment:: +* The `externalGatewayInterface` is user-specified in the `ovn.yaml` config file. +* Three OVS bridges are created: `br-ex`, `br-ex1` and `br-int`. + +//Q: We need to include the third bridge if we want to include this level of detail. +The br-ex bridge is created by `microshift-ovs-init.service` or manually. The br-ex bridge contains statically programmed openflow rules which distinguish traffic to and from the host network (underlay) and the OVN network (overlay). + +The `br-int` bridge is created by the `ovnkube-master` container. The `br-int` bridge contains dynamically programmed openflow rules which handle cluster network traffic. + +//The `br-ex1` bridge is created by... 
+//Q: need details. diff --git a/modules/microshift-configuring-ovn.adoc b/modules/microshift-configuring-ovn.adoc new file mode 100644 index 0000000000..98ffdf90d4 --- /dev/null +++ b/modules/microshift-configuring-ovn.adoc @@ -0,0 +1,72 @@ +// Module included in the following assemblies: +// +// * microshift_networking/microshift-networking.adoc + +:_content-type: PROCEDURE +[id="microshift-config-OVN-K_{context}"] +== Configuring OVN-Kubernetes +An OVN-Kubernetes config file can be written to `/etc/microshift/ovn.yaml`. {product-title} will use default OVN-Kubernetes configuration values if an OVN-Kubernetes config file is not customized. + +.Default `ovn.yaml` config values: +[source,yaml] +---- +ovsInit: + disableOVSInit: false + gatewayInterface: "" <1> + externalGatewayInterface: "" <2> +mtu: 1400 +---- +<1> Default value is an empty string, which means "not-specified." The CNI network provider auto-detects to interface with the default route. +<2> Default value is an empty string, which means disabled. + +To customize your configuration, use the following table to find valid values that you can use in your `ovn.yaml` config file. + +.Supported optional OVN-Kubernetes configurations for {product-title}. + +[cols="5",options="header"] +|=== +|Field +|Type +|Default +|Description +|Example + +|`ovsInit.disableOVSInit` +|bool +|false +|Skip configuring OVS bridge `br-ex` in `microshift-ovs-init.service` +|true <1> + +|`ovsInit.gatewayInterface` +|Alpha +|eth0 +|Ingress that is the API gateway +|eth0 + +|`ovsInit.externalGatewayInterface` +|Alpha +|eth1 +|Ingress routing external traffic to your services and pods inside the node +|eth1 + +|mtu +|uint32 +|1400 +|MTU value used for the pods +|1300 +|=== + +<1> The OVS bridge is required. When `disableOVSInit` is true, OVS bridge `br-ex` must be configured manually. 
+ +.Example `ovn.yaml` config file: +[source, yaml] +---- +ovsInit: + disableOVSInit: true + gatewayInterface: eth0 + externalGatewayInterface: eth1 +mtu: 1300 +---- + +[IMPORTANT] +When `disableOVSInit` is set to true in the `ovn.yaml` config file, the OVS bridge br-ex must be manually configured. diff --git a/modules/microshift-cri-o-container-runtime.adoc b/modules/microshift-cri-o-container-runtime.adoc new file mode 100644 index 0000000000..d7d30e0ab7 --- /dev/null +++ b/modules/microshift-cri-o-container-runtime.adoc @@ -0,0 +1,31 @@ +// Module included in the following assemblies: +// +// * microshift_networking/microshift-networking.adoc + +:_content-type: PROCEDURE +[id="microshift-CRI-O-container-engine_{context}"] +== CRI-O container runtime +To use an HTTP(S) proxy in `CRI-O`, you need to set the `HTTP_PROXY` and `HTTPS_PROXY` environment variables. You can also set the `NO_PROXY` variable to exclude a list of hosts from being proxied. + +.Procedure +. Add the following settings to the `/etc/systemd/system/crio.service.d/00-proxy.conf` file: ++ +[source, config] +---- +Environment=NO_PROXY="localhost,127.0.0.1" +Environment=HTTP_PROXY="http://$PROXY_USER:$PROXY_PASSWORD@$PROXY_SERVER:$PROXY_PORT/" +Environment=HTTPS_PROXY="http://$PROXY_USER:$PROXY_PASSWORD@$PROXY_SERVER:$PROXY_PORT/" +---- +//Q: was this file created during installation? if not, do we need to create it? +. Reload the configuration settings: ++ +[source, terminal] +---- +$ sudo systemctl daemon-reload +---- +. 
Restart the CRI-O service to apply the settings: ++ +[source, terminal] +---- +$ sudo systemctl restart crio +---- diff --git a/modules/microshift-firewall-config.adoc b/modules/microshift-firewall-config.adoc new file mode 100644 index 0000000000..6cbd91d8d8 --- /dev/null +++ b/modules/microshift-firewall-config.adoc @@ -0,0 +1,157 @@ +// Module included in the following assemblies: +// +// * microshift_configuring/microshift-networking.adoc + +:_content-type: CONCEPT +[id="microshift-firewall-config_{context}"] += Using a firewall +Firewalls are not required in {product-title}, but using a firewall can prevent undesired access to the {product-title} API. When using a firewall, you must explicitly allow the following OVN-Kubernetes traffic when the `firewalld` service is running: + +CNI pod to CNI pod:: +CNI pod to Host-Network pod +Host-Network pod to Host-Network pod + +CNI pod:: +The Kubernetes pod that uses the CNI network + +Host-Network pod:: +The Kubernetes pod that uses host network + Install and configure the `firewalld` service by using the following procedures. +//Q: Are there networking prerequisites for this procedure, such as having already installed the OpenShift DNS Operator? + +[IMPORTANT] +==== +{product-title} pods must have access to the internal CoreDNS component and API servers. +==== + +[id="microshift-firewall-install_{context}"] +== Installing the `firewalld` service +To install and run the `firewalld` service, run the following commands: + +.Procedure + +. To install the `firewalld` service: ++ +[source,terminal] +---- +$ sudo dnf install -y firewalld +---- + +. To initiate the firewall: ++ +[source,terminal] +---- +$ sudo systemctl enable firewalld --now +---- + +[id="microshift-required-settings_{context}"] +== Required settings +An IP address range for pods is a required part of the firewall configuration. You can use the default values or customize the IP address range. 
You must also configure pod access to the internal CoreDNS component. + +.Required settings +[cols="1,1",options="header"] +|=== +^| IP Range ^| Description + +|10.42.0.0/16 +|Host network pod access to CoreDNS and {product-title} API + +|169.254.169.1 +|Host network pod access to {product-title} API Server +|=== + +.Procedure + +. Run the following commands to allow network traffic by first configuring the IP address range with either default or custom values, then allow internal traffic from pods through the network gateway by inserting the DNS server. + +.. To use default values for the IP address range: ++ +[source,terminal] +---- +$ sudo firewall-offline-cmd --permanent --zone=trusted --add-source=10.42.0.0/16 +---- + +.. To allow internal traffic from pods through the network gateway: ++ +[source, terminal] +---- +$ sudo firewall-offline-cmd --permanent --zone=trusted --add-source=169.254.169.1 +---- + +. To use custom values for the IP address range: ++ +[source,terminal] +---- +$ sudo firewall-offline-cmd --permanent --zone=trusted --add-source= +---- + +. To allow internal traffic from pods through the network gateway: ++ +[source,terminal] +---- +$ sudo firewall-offline-cmd --permanent --zone=trusted --add-source=169.254.169.1 +---- + +. Reload the firewall rules: ++ +[source, terminal] +---- +$ sudo firewall-cmd --reload +---- + +[id="microshift-firewall-optional-settings_{context}"] +== Optional settings + +.Procedure + +. To add customized ports to your firewall configuration, use the following command syntax: ++ +[source,terminal] +---- +$ sudo firewall-offline-cmd --permanent --zone=public --add-port=/ +---- ++ +.Optional ports +[options="header"] +|=== +|Port(s)|Protocol(s)|Description + +|80 +|TCP +|HTTP port used to serve applications through the {ocp} router. + +|443 +|TCP +|HTTPS port used to serve applications through the {ocp} router. + +|5353 +|UDP +|mDNS service to respond for {ocp} route mDNS hosts. 
+ +|30000-32767 +|TCP +|Port range reserved for NodePort services; can be used to expose applications on the LAN. + +|30000-32767 +|UDP +|Port range reserved for NodePort services; can be used to expose applications on the LAN. + +|6443 +|TCP +|HTTPS API port for the {product-title} API. +|=== + +=== Known firewall issue +To avoid breaking traffic flows with a firewall restart, execute firewall commands before starting OVN-Kubernetes pods. OVN-Kubernetes makes use of iptable rules for some traffic flows, such as those using the NodePort service. The iptable rules are generated and inserted by the `ovnkube-master` container, but are deleted when the firewall restarts. The absence of the iptable rules breaks traffic flows. If firewall commands have to be executed after OVN-Kubernetes pods have started, manually restart the `ovnkube-master` pod to trigger a reconciliation of the iptable rules. +//See Troubleshooting for a detailed procedure. Need hard link to troubleshooting section + +[id="microshift-firewall-applying-settings_{context}"] +== Applying firewall settings +After you have finished configuring, run the following command to restart the firewall and apply settings: + +[source,terminal] +---- +$ sudo firewall-offline-cmd --reload +---- + +//Q: How do we verify? What should we see after running this command? diff --git a/modules/microshift-http-proxy.adoc b/modules/microshift-http-proxy.adoc new file mode 100644 index 0000000000..8b63980228 --- /dev/null +++ b/modules/microshift-http-proxy.adoc @@ -0,0 +1,12 @@ +// Module included in the following assemblies: +// +// * microshift_networking/microshift-networking.adoc + +:_content-type: CONCEPT +[id="microshift-http-proxy_{context}"] +== Deploying {product-title} behind an HTTP(S) proxy +Deploy a {product-title} cluster behind an HTTP(S) proxy when you want to add basic anonymity and security measures to your pods. 
+ +You must configure the host operating system to use the proxy service with all components initiating HTTP(S) requests when deploying {product-title} behind a proxy. + +All the user-specific workloads or pods with egress traffic, such as accessing cloud services, must be configured to use the proxy. There is no built-in transparent proxying of egress traffic in {product-title}. diff --git a/modules/microshift-man-config-ovs-bridge.adoc b/modules/microshift-man-config-ovs-bridge.adoc new file mode 100644 index 0000000000..85bc6ade19 --- /dev/null +++ b/modules/microshift-man-config-ovs-bridge.adoc @@ -0,0 +1,38 @@ +//FIXME: need updated config procedure for customers that will persist across reboots + +//=== Manually configuring OVS bridge br-ex +//.Procedure +//Manually configure the OVS bridge br-ex by running the following commands. + +//* Initiate OVS: +//+ +//[source, terminal] +//---- +//$ sudo systemctl enable openvswitch --now +//---- +//* Add the network bridge: +//+ +//[source, terminal] +//---- +//$ sudo ovs-vsctl add-br br-ex +//---- +//* Add the interface to the network bridge: +//+ +//[source, terminal] +//---- +//$ sudo ovs-vsctl add-port br-ex +//---- +//The `` is the network interface name where the node IP address is assigned. +//* Get the bridge up and running: +//+ +//[source, terminal] +//---- +//$ sudo ip link set br-ex up +//---- +//* After `br-ex up` is running, assign the node IP address to `br-ex` bridge: +//[source, terminal] +//---- +//$ sudo ... +//---- +//[NOTE] +//Adding a physical interface to `br-ex` bridge will disconnect the ssh connection on the node IP address. 
\ No newline at end of file diff --git a/modules/microshift-ovs-snapshot.adoc b/modules/microshift-ovs-snapshot.adoc new file mode 100644 index 0000000000..2bd67b02bb --- /dev/null +++ b/modules/microshift-ovs-snapshot.adoc @@ -0,0 +1,54 @@ +// Module included in the following assemblies: +// +// * microshift_networking/microshift-networking.adoc + +:_content-type: PROCEDURE +[id="microshift-OVS-snapshot_{context}"] +== Getting a snapshot of OVS interfaces from a running cluster +.Procedure +To see a snapshot of OVS interfaces from a running {product-title} cluster, use the following command: + +[source, terminal] +---- +$ sudo ovs-vsctl show +---- + +.Example OVS interfaces in a running cluster +[source, terminal] +---- +9d9f5ea2-9d9d-4e34-bbd2-dbac154fdc93 + Bridge br-ex + Port enp1s0 + Interface enp1s0 + type: system + Port br-ex + Interface br-ex + type: internal + Port patch-br-ex_localhost.localdomain-to-br-int <1> + Interface patch-br-ex_localhost.localdomain-to-br-int + type: patch + options: {peer=patch-br-int-to-br-ex_localhost.localdomain} <1> + Bridge br-int + fail_mode: secure + datapath_type: system + Port patch-br-int-to-br-ex_localhost.localdomain + Interface patch-br-int-to-br-ex_localhost.localdomain + type: patch + options: {peer=patch-br-ex_localhost.localdomain-to-br-int} + Port eebee1ce5568761 + Interface eebee1ce5568761 <2> + Port b47b1995ada84f4 + Interface b47b1995ada84f4 <2> + Port "3031f43d67c167f" + Interface "3031f43d67c167f" <2> + Port br-int + Interface br-int + type: internal + Port ovn-k8s-mp0 <3> + Interface ovn-k8s-mp0 + type: internal + ovs_version: "2.17.3" +---- +<1> The `patch-br-ex_localhost.localdomain-to-br-int` and `patch-br-int-to-br-ex_localhost.localdomain` are OVS patch ports that connect `br-ex` and `br-int`. +<2> The pod interfaces `eebee1ce5568761`, `b47b1995ada84f4` and `3031f43d67c167f` are named with the first 15 bits of pod sandbox ID and are plugged in the `br-int` bridge. 
+<3> The OVS internal port for hairpin traffic,`ovn-k8s-mp0` is created by the `ovnkube-master` container. diff --git a/modules/microshift-rpm-ostree.adoc b/modules/microshift-rpm-ostree.adoc new file mode 100644 index 0000000000..4d8371c299 --- /dev/null +++ b/modules/microshift-rpm-ostree.adoc @@ -0,0 +1,32 @@ +// Module included in the following assemblies: +// +// * microshift_networking/microshift-networking.adoc + +:_content-type: PROCEDURE +[id="microshift-rpm-ostree-package-system_{context}"] +== rpm-ostree image and package system +To use the HTTP(S) proxy in rpm-ostree, set the `http_proxy environment` variable for the `rpm-ostreed` service. + +.Procedure +. Add the following setting to the `/etc/systemd/system/rpm-ostreed.service.d/00-proxy.conf` file: ++ +[source, terminal] +---- +Environment="http_proxy=http://$PROXY_USER:$PROXY_PASSWORD@$PROXY_SERVER:$PROXY_PORT/" +---- + +. Next, reload the configuration settings and restart the service to apply your changes. + +.. Reload the configuration settings: ++ +[source, terminal] +---- +$ sudo systemctl daemon-reload +---- +.. Restart the rpm-ostree service: ++ +[source, terminal] +---- +$ sudo systemctl restart rpm-ostreed.service +---- +//Q: Instructions for how to test that the proxy works by booting the image, verifying that MicroShift starts, and that their application is accessible? diff --git a/modules/microshift-troubleshooting-nodeport.adoc b/modules/microshift-troubleshooting-nodeport.adoc new file mode 100644 index 0000000000..4ac7fd532b --- /dev/null +++ b/modules/microshift-troubleshooting-nodeport.adoc @@ -0,0 +1,35 @@ +// Module included in the following assemblies: +// +// * microshift_troubleshooting/microshift-known-issues.adoc + +:_content-type: PROCEDURE +[id="microshift-troubleshooting-nodeport_{context}"] += Troubleshooting the NodePort service +OVN-Kubernetes sets up an iptable chain in the NAT table to handle incoming traffic to the NodePort service. 
When the NodePort service is not reachable or the connection is refused, check the iptable rules on the host to make sure the relevant rules are properly inserted. +//procedure here + +Example iptable rules for the NodePort service: + +[source, terminal] +---- +$ iptables-save | grep NODEPORT +---- +.Example output +[source, terminal] +---- +-A OUTPUT -j OVN-KUBE-NODEPORT +-A OVN-KUBE-NODEPORT -p tcp -m addrtype --dst-type LOCAL -m tcp --dport 30326 -j DNAT --to-destination 10.43.95.170:80 +---- + +OVN-Kubernetes configures the OVN-KUBE-NODEPORT chain in iptable NAT table to match the destination port and DNATs the packet to the backend clusterIP service. The DNATed packet is then routed to the OVN network through gateway bridge br-ex via routing rules on the host: + +[source, terminal] +---- +$ ip route +---- +.Example output +[source, terminal] +---- +10.43.0.0/16 via 192.168.122.1 dev br-ex mtu 1400 +---- +This routing rule matches the Kubernetes service IP address range and forwards the packet to the gateway bridge `br-ex`. You must enable `ip_forward` on the host. After the packet is forwarded to the OVS bridge `br-ex`, it is handled by openflow rules in OVS which steers the packet to the OVN network and eventually to the pod. diff --git a/modules/microshift-version-api.adoc b/modules/microshift-version-api.adoc index b2873cfd33..ea000ee4ec 100644 --- a/modules/microshift-version-api.adoc +++ b/modules/microshift-version-api.adoc @@ -4,7 +4,6 @@ :_content-type: PROCEDURE [id="microshift-version-api_{context}"] - = Checking the {product-title} version using the API .Procedure