From 751b6a31d7070890efd96d6a9a90a9985247384f Mon Sep 17 00:00:00 2001 From: dfitzmau Date: Mon, 22 Dec 2025 16:29:00 +0000 Subject: [PATCH] OSDOCS-17013-bm-upi-4: CQAs bare metal UPI doc --- ...ing-bare-metal-network-customizations.adoc | 4 + .../upi/installing-bare-metal.adoc | 4 + ...alling-restricted-networks-bare-metal.adoc | 4 + .../installing-ibm-power.adoc | 4 + ...talling-restricted-networks-ibm-power.adoc | 4 + .../upi/installing-ibm-z-kvm.adoc | 2 + .../upi/installing-ibm-z-lpar.adoc | 2 + .../upi/installing-ibm-z-reqs.adoc | 2 + .../upi/installing-ibm-z.adoc | 2 + ...talling-restricted-networks-ibm-z-kvm.adoc | 2 + ...alling-restricted-networks-ibm-z-lpar.adoc | 2 + .../installing-restricted-networks-ibm-z.adoc | 2 + .../upi/upi-ibm-z-preparing-to-install.adoc | 3 - ...installing-openstack-installer-custom.adoc | 2 + .../installing-platform-agnostic.adoc | 4 + .../installing-two-node-fencing.adoc | 2 + .../upi/upi-vsphere-installation-reqs.adoc | 4 + .../upi/upi-vsphere-preparing-to-install.adoc | 2 + modules/cluster-entitlements.adoc | 1 + ...manifest-file-customized-br-ex-bridge.adoc | 1 + ...machine-sets-compute-nodes-networking.adoc | 9 +- modules/csr-management.adoc | 1 + modules/enabling-OVS-balance-slb-mode.adoc | 46 +++-- .../installation-dns-user-infra-example.adoc | 172 ++++++++++++++++++ modules/installation-dns-user-infra.adoc | 131 +------------ ...ion-load-balancing-user-infra-example.adoc | 118 ++++++++++++ ...nstallation-load-balancing-user-infra.adoc | 133 ++------------ .../installation-machine-requirements.adoc | 3 + ...llation-minimum-resource-requirements.adoc | 3 +- 29 files changed, 400 insertions(+), 269 deletions(-) create mode 100644 modules/installation-dns-user-infra-example.adoc create mode 100644 modules/installation-load-balancing-user-infra-example.adoc diff --git a/installing/installing_bare_metal/upi/installing-bare-metal-network-customizations.adoc b/installing/installing_bare_metal/upi/installing-bare-metal-network-customizations.adoc index d2cb45ef008e..119b2917b9d6 100644 --- a/installing/installing_bare_metal/upi/installing-bare-metal-network-customizations.adoc +++ b/installing/installing_bare_metal/upi/installing-bare-metal-network-customizations.adoc @@ -67,10 +67,14 @@ include::modules/installation-network-user-infra.adoc[leveloffset=+2] include::modules/installation-dns-user-infra.adoc[leveloffset=+2] +include::modules/installation-dns-user-infra-example.adoc[leveloffset=+3] + * xref:../../../installing/installing_bare_metal/upi/installing-bare-metal-network-customizations.adoc#installation-user-provisioned-validating-dns_installing-bare-metal-network-customizations[Validating DNS resolution for user-provisioned infrastructure] include::modules/installation-load-balancing-user-infra.adoc[leveloffset=+2] +include::modules/installation-load-balancing-user-infra-example.adoc[leveloffset=+3] + // Creating a manifest object that includes a customized `br-ex` bridge include::modules/creating-manifest-file-customized-br-ex-bridge.adoc[leveloffset=+1] diff --git a/installing/installing_bare_metal/upi/installing-bare-metal.adoc b/installing/installing_bare_metal/upi/installing-bare-metal.adoc index dc45a26f266b..c3ebda7ab080 100644 --- a/installing/installing_bare_metal/upi/installing-bare-metal.adoc +++ b/installing/installing_bare_metal/upi/installing-bare-metal.adoc @@ -69,6 +69,8 @@ include::modules/installation-network-user-infra.adoc[leveloffset=+2] include::modules/installation-dns-user-infra.adoc[leveloffset=+2] 
+include::modules/installation-dns-user-infra-example.adoc[leveloffset=+3] + [role="_additional-resources"] .Additional resources @@ -77,6 +79,8 @@ include::modules/installation-dns-user-infra.adoc[leveloffset=+2] // Load balancing requirements for user-provisioned infrastructure include::modules/installation-load-balancing-user-infra.adoc[leveloffset=+2] +include::modules/installation-load-balancing-user-infra-example.adoc[leveloffset=+3] + // Creating a manifest object that includes a customized `br-ex` bridge include::modules/creating-manifest-file-customized-br-ex-bridge.adoc[leveloffset=+1] diff --git a/installing/installing_bare_metal/upi/installing-restricted-networks-bare-metal.adoc b/installing/installing_bare_metal/upi/installing-restricted-networks-bare-metal.adoc index 43ddcc7ada69..1f67411923d6 100644 --- a/installing/installing_bare_metal/upi/installing-restricted-networks-bare-metal.adoc +++ b/installing/installing_bare_metal/upi/installing-restricted-networks-bare-metal.adoc @@ -79,6 +79,8 @@ include::modules/installation-network-user-infra.adoc[leveloffset=+2] include::modules/installation-dns-user-infra.adoc[leveloffset=+2] +include::modules/installation-dns-user-infra-example.adoc[leveloffset=+3] + [role="_additional-resources"] .Additional resources @@ -86,6 +88,8 @@ include::modules/installation-dns-user-infra.adoc[leveloffset=+2] include::modules/installation-load-balancing-user-infra.adoc[leveloffset=+2] +include::modules/installation-load-balancing-user-infra-example.adoc[leveloffset=+3] + // Creating a manifest object that includes a customized `br-ex` bridge include::modules/creating-manifest-file-customized-br-ex-bridge.adoc[leveloffset=+1] diff --git a/installing/installing_ibm_power/installing-ibm-power.adoc b/installing/installing_ibm_power/installing-ibm-power.adoc index 2d05bdb593e7..8f8a0e278258 100644 --- a/installing/installing_ibm_power/installing-ibm-power.adoc +++ b/installing/installing_ibm_power/installing-ibm-power.adoc @@ -62,8 +62,12 @@ include::modules/installation-network-user-infra.adoc[leveloffset=+2] include::modules/installation-dns-user-infra.adoc[leveloffset=+2] +include::modules/installation-dns-user-infra-example.adoc[leveloffset=+3] + include::modules/installation-load-balancing-user-infra.adoc[leveloffset=+2] +include::modules/installation-load-balancing-user-infra-example.adoc[leveloffset=+3] + include::modules/installation-infrastructure-user-infra.adoc[leveloffset=+1] include::modules/installation-user-provisioned-validating-dns.adoc[leveloffset=+1] diff --git a/installing/installing_ibm_power/installing-restricted-networks-ibm-power.adoc b/installing/installing_ibm_power/installing-restricted-networks-ibm-power.adoc index 05529ce9631c..690db0e05efd 100644 --- a/installing/installing_ibm_power/installing-restricted-networks-ibm-power.adoc +++ b/installing/installing_ibm_power/installing-restricted-networks-ibm-power.adoc @@ -70,8 +70,12 @@ include::modules/installation-network-user-infra.adoc[leveloffset=+2] include::modules/installation-dns-user-infra.adoc[leveloffset=+2] +include::modules/installation-dns-user-infra-example.adoc[leveloffset=+3] + include::modules/installation-load-balancing-user-infra.adoc[leveloffset=+2] +include::modules/installation-load-balancing-user-infra-example.adoc[leveloffset=+3] + include::modules/installation-infrastructure-user-infra.adoc[leveloffset=+1] include::modules/installation-user-provisioned-validating-dns.adoc[leveloffset=+1] diff --git 
a/installing/installing_ibm_z/upi/installing-ibm-z-kvm.adoc b/installing/installing_ibm_z/upi/installing-ibm-z-kvm.adoc index 9e6be9574b87..cf5ca2896135 100644 --- a/installing/installing_ibm_z/upi/installing-ibm-z-kvm.adoc +++ b/installing/installing_ibm_z/upi/installing-ibm-z-kvm.adoc @@ -33,6 +33,8 @@ Be sure to also review this site list if you are configuring a proxy. include::modules/installation-infrastructure-user-infra.adoc[leveloffset=+1] +include::modules/installation-load-balancing-user-infra-example.adoc[leveloffset=+2] + include::modules/installation-initializing-manual.adoc[leveloffset=+1] [role="_additional-resources"] diff --git a/installing/installing_ibm_z/upi/installing-ibm-z-lpar.adoc b/installing/installing_ibm_z/upi/installing-ibm-z-lpar.adoc index 3a88458d2c07..ca7ae0a56269 100644 --- a/installing/installing_ibm_z/upi/installing-ibm-z-lpar.adoc +++ b/installing/installing_ibm_z/upi/installing-ibm-z-lpar.adoc @@ -31,6 +31,8 @@ Be sure to also review this site list if you are configuring a proxy. include::modules/installation-infrastructure-user-infra.adoc[leveloffset=+1] +include::modules/installation-load-balancing-user-infra-example.adoc[leveloffset=+2] + include::modules/installation-initializing-manual.adoc[leveloffset=+1] [role="_additional-resources"] diff --git a/installing/installing_ibm_z/upi/installing-ibm-z-reqs.adoc b/installing/installing_ibm_z/upi/installing-ibm-z-reqs.adoc index 55f925fa204d..0b52f91542c6 100644 --- a/installing/installing_ibm_z/upi/installing-ibm-z-reqs.adoc +++ b/installing/installing_ibm_z/upi/installing-ibm-z-reqs.adoc @@ -49,4 +49,6 @@ include::modules/installation-network-user-infra.adoc[leveloffset=+2] include::modules/installation-dns-user-infra.adoc[leveloffset=+2] +include::modules/installation-dns-user-infra-example.adoc[leveloffset=+3] + include::modules/installation-load-balancing-user-infra.adoc[leveloffset=+2] \ No newline at end of file diff --git a/installing/installing_ibm_z/upi/installing-ibm-z.adoc b/installing/installing_ibm_z/upi/installing-ibm-z.adoc index 5f35725fc0d2..4d6d4a59ee19 100644 --- a/installing/installing_ibm_z/upi/installing-ibm-z.adoc +++ b/installing/installing_ibm_z/upi/installing-ibm-z.adoc @@ -32,6 +32,8 @@ Be sure to also review this site list if you are configuring a proxy. 
include::modules/installation-infrastructure-user-infra.adoc[leveloffset=+1] +include::modules/installation-load-balancing-user-infra-example.adoc[leveloffset=+2] + include::modules/installation-initializing-manual.adoc[leveloffset=+1] [role="_additional-resources"] diff --git a/installing/installing_ibm_z/upi/installing-restricted-networks-ibm-z-kvm.adoc b/installing/installing_ibm_z/upi/installing-restricted-networks-ibm-z-kvm.adoc index 19a3138d4b8c..4479e81182e2 100644 --- a/installing/installing_ibm_z/upi/installing-restricted-networks-ibm-z-kvm.adoc +++ b/installing/installing_ibm_z/upi/installing-restricted-networks-ibm-z-kvm.adoc @@ -41,6 +41,8 @@ include::modules/installation-about-restricted-network.adoc[leveloffset=+1] include::modules/installation-infrastructure-user-infra.adoc[leveloffset=+1] +include::modules/installation-load-balancing-user-infra-example.adoc[leveloffset=+2] + include::modules/installation-initializing-manual.adoc[leveloffset=+1] [role="_additional-resources"] diff --git a/installing/installing_ibm_z/upi/installing-restricted-networks-ibm-z-lpar.adoc b/installing/installing_ibm_z/upi/installing-restricted-networks-ibm-z-lpar.adoc index 7ce8b7f107e9..3ad02ef6ac75 100644 --- a/installing/installing_ibm_z/upi/installing-restricted-networks-ibm-z-lpar.adoc +++ b/installing/installing_ibm_z/upi/installing-restricted-networks-ibm-z-lpar.adoc @@ -39,6 +39,8 @@ include::modules/installation-about-restricted-network.adoc[leveloffset=+1] include::modules/installation-infrastructure-user-infra.adoc[leveloffset=+1] +include::modules/installation-load-balancing-user-infra-example.adoc[leveloffset=+2] + include::modules/installation-initializing-manual.adoc[leveloffset=+1] [role="_additional-resources"] diff --git a/installing/installing_ibm_z/upi/installing-restricted-networks-ibm-z.adoc b/installing/installing_ibm_z/upi/installing-restricted-networks-ibm-z.adoc index 67bb6699ba0e..ad0ef11e78b3 100644 --- a/installing/installing_ibm_z/upi/installing-restricted-networks-ibm-z.adoc +++ b/installing/installing_ibm_z/upi/installing-restricted-networks-ibm-z.adoc @@ -40,6 +40,8 @@ include::modules/installation-about-restricted-network.adoc[leveloffset=+1] include::modules/installation-infrastructure-user-infra.adoc[leveloffset=+1] +include::modules/installation-load-balancing-user-infra-example.adoc[leveloffset=+2] + include::modules/installation-initializing-manual.adoc[leveloffset=+1] [role="_additional-resources"] diff --git a/installing/installing_ibm_z/upi/upi-ibm-z-preparing-to-install.adoc b/installing/installing_ibm_z/upi/upi-ibm-z-preparing-to-install.adoc index 89cccfb542a8..f337ffd61a7f 100644 --- a/installing/installing_ibm_z/upi/upi-ibm-z-preparing-to-install.adoc +++ b/installing/installing_ibm_z/upi/upi-ibm-z-preparing-to-install.adoc @@ -45,9 +45,6 @@ include::modules/cli-installing-cli-macos.adoc[leveloffset=+1] include::modules/ssh-agent-using.adoc[leveloffset=+1] -//verify how to get in differences between kvm and zvm, lpar -// include::modules/installation-infrastructure-user-infra.adoc[leveloffset=+1] - include::modules/installation-user-provisioned-validating-dns.adoc[leveloffset=+1] //verify telemetry only connected do we want it here diff --git a/installing/installing_openstack/installing-openstack-installer-custom.adoc b/installing/installing_openstack/installing-openstack-installer-custom.adoc index cb2ed8749589..bd63f90eec87 100644 --- a/installing/installing_openstack/installing-openstack-installer-custom.adoc +++ 
b/installing/installing_openstack/installing-openstack-installer-custom.adoc @@ -26,6 +26,8 @@ include::modules/installation-osp-bootstrap-machine.adoc[leveloffset=+2] include::modules/installation-load-balancing-user-infra.adoc[leveloffset=+2] +include::modules/installation-load-balancing-user-infra-example.adoc[leveloffset=+3] + include::modules/cluster-entitlements.adoc[leveloffset=+1] include::modules/installation-osp-enabling-swift.adoc[leveloffset=+1] diff --git a/installing/installing_platform_agnostic/installing-platform-agnostic.adoc b/installing/installing_platform_agnostic/installing-platform-agnostic.adoc index 3f710d7cf122..591090cfe872 100644 --- a/installing/installing_platform_agnostic/installing-platform-agnostic.adoc +++ b/installing/installing_platform_agnostic/installing-platform-agnostic.adoc @@ -50,8 +50,12 @@ include::modules/installation-network-user-infra.adoc[leveloffset=+2] include::modules/installation-dns-user-infra.adoc[leveloffset=+2] +include::modules/installation-dns-user-infra-example.adoc[leveloffset=+3] + include::modules/installation-load-balancing-user-infra.adoc[leveloffset=+2] +include::modules/installation-load-balancing-user-infra-example.adoc[leveloffset=+3] + include::modules/installation-infrastructure-user-infra.adoc[leveloffset=+1] include::modules/installation-user-provisioned-validating-dns.adoc[leveloffset=+1] diff --git a/installing/installing_two_node_cluster/installing_tnf/installing-two-node-fencing.adoc b/installing/installing_two_node_cluster/installing_tnf/installing-two-node-fencing.adoc index f68cc641ae4d..b35b410d1c56 100644 --- a/installing/installing_two_node_cluster/installing_tnf/installing-two-node-fencing.adoc +++ b/installing/installing_two_node_cluster/installing_tnf/installing-two-node-fencing.adoc @@ -48,6 +48,8 @@ include::modules/installation-two-node-cluster-min-resource-reqs.adoc[leveloffse // Two-node-dns-requirements - user-provisioned infrastructure include::modules/installation-dns-user-infra.adoc[leveloffset=+1] +include::modules/installation-dns-user-infra-example.adoc[leveloffset=+2] + // Two-node-dns-requirements - installer-provisioned infrastructure include::modules/installation-dns-installer-infra.adoc[leveloffset=+1] diff --git a/installing/installing_vsphere/upi/upi-vsphere-installation-reqs.adoc b/installing/installing_vsphere/upi/upi-vsphere-installation-reqs.adoc index 88d46c330c10..8444e3a8b502 100644 --- a/installing/installing_vsphere/upi/upi-vsphere-installation-reqs.adoc +++ b/installing/installing_vsphere/upi/upi-vsphere-installation-reqs.adoc @@ -59,4 +59,8 @@ include::modules/installation-network-user-infra.adoc[leveloffset=+2] include::modules/installation-dns-user-infra.adoc[leveloffset=+2] +include::modules/installation-dns-user-infra-example.adoc[leveloffset=+3] + include::modules/installation-load-balancing-user-infra.adoc[leveloffset=+2] + +include::modules/installation-load-balancing-user-infra-example.adoc[leveloffset=+3] diff --git a/installing/installing_vsphere/upi/upi-vsphere-preparing-to-install.adoc b/installing/installing_vsphere/upi/upi-vsphere-preparing-to-install.adoc index 418375cee2d4..cf7cb78d6a78 100644 --- a/installing/installing_vsphere/upi/upi-vsphere-preparing-to-install.adoc +++ b/installing/installing_vsphere/upi/upi-vsphere-preparing-to-install.adoc @@ -39,4 +39,6 @@ include::modules/ssh-agent-using.adoc[leveloffset=+1] include::modules/installation-infrastructure-user-infra.adoc[leveloffset=+1] 
+include::modules/installation-load-balancing-user-infra-example.adoc[leveloffset=+2]
+
 include::modules/installation-user-provisioned-validating-dns.adoc[leveloffset=+1]
diff --git a/modules/cluster-entitlements.adoc b/modules/cluster-entitlements.adoc
index bf479e0350d2..1822f04928de 100644
--- a/modules/cluster-entitlements.adoc
+++ b/modules/cluster-entitlements.adoc
@@ -109,6 +109,7 @@ endif::[]
 ifndef::openshift-origin[]
 = Internet access for {product-title}
 
+[role="_abstract"]
 In {product-title} {product-version}, you require access to the internet to
 ifndef::restricted[]
 install
diff --git a/modules/creating-manifest-file-customized-br-ex-bridge.adoc b/modules/creating-manifest-file-customized-br-ex-bridge.adoc
index cb798690ccba..cd26d1636957 100644
--- a/modules/creating-manifest-file-customized-br-ex-bridge.adoc
+++ b/modules/creating-manifest-file-customized-br-ex-bridge.adoc
@@ -14,6 +14,7 @@ endif::[]
 [id="creating-manifest-file-customized-br-ex-bridge_{context}"]
 = Creating a manifest object that includes a customized `br-ex` bridge
 
+[role="_abstract"]
 ifndef::postinstall-bare-metal[]
 As an alternative to using the `configure-ovs.sh` shell script to set a `br-ex` bridge on a bare-metal platform, you can create a `MachineConfig` object that includes an NMState configuration file. The host `nmstate-configuration.service` and `nmstate.service` apply the NMState configuration file to each node that runs in your cluster.
 endif::postinstall-bare-metal[]
diff --git a/modules/creating-scaling-machine-sets-compute-nodes-networking.adoc b/modules/creating-scaling-machine-sets-compute-nodes-networking.adoc
index fd255ff9c9a4..2f7112295b8d 100644
--- a/modules/creating-scaling-machine-sets-compute-nodes-networking.adoc
+++ b/modules/creating-scaling-machine-sets-compute-nodes-networking.adoc
@@ -11,9 +11,10 @@
 [id="creating-scaling-machine-sets-compute-nodes-networking_{context}"]
 = Scaling each machine set to compute nodes
 
-To apply a customized `br-ex` bridge configuration to all compute nodes in your {product-title} cluster, you must edit your `MachineConfig` custom resource (CR) and modify its roles. Additionally, you must create a `BareMetalHost` CR that defines information for your bare-metal machine, such as hostname, credentials, and so on.
+[role="_abstract"]
+Before you scale each machine set to compute nodes, you must apply a customized `br-ex` bridge configuration to all compute nodes in your {product-title} cluster. To apply the configuration, you must edit your `MachineConfig` custom resource (CR) and modify its roles.
 
-After you configure these resources, you must scale machine sets, so that the machine sets can apply the resource configuration to each compute node and reboot the nodes.
+Additionally, you must create a `BareMetalHost` CR that defines information for your bare-metal machine, such as the hostname and credentials. After you configure these resources, you must scale the machine sets so that they can apply the resource configuration to each compute node and reboot the nodes.
 
 .Prerequisites
 
@@ -70,7 +71,7 @@ $ oc get machinesets
 +
 [source,terminal]
 ----
-$ oc scale machineset <machine_set_name> --replicas=<replica_count> <1>
+$ oc scale machineset <machine_set_name> --replicas=<replica_count>
----
-<1> Where `<machine_set_name>` is the name of the machine set and `<replica_count>` is the number of compute nodes.
++
+where:
++
+`<machine_set_name>`:: The name of the machine set.
+`<replica_count>`:: The number of compute nodes.
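+
+For example, the following command is a minimal sketch that assumes a machine set named `worker-0` (an illustrative name) and a target of three compute nodes; substitute the values from your `oc get machinesets` output:
+
+[source,terminal]
+----
+$ oc scale machineset worker-0 --replicas=3 -n openshift-machine-api
+----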
diff --git a/modules/csr-management.adoc b/modules/csr-management.adoc
index 54a563ca8fcd..448046995625 100644
--- a/modules/csr-management.adoc
+++ b/modules/csr-management.adoc
@@ -29,4 +29,5 @@
 [id="csr-management_{context}"]
 = Certificate signing requests management
 
+[role="_abstract"]
 Because your cluster has limited access to automatic machine management when you use infrastructure that you provision, you must provide a mechanism for approving cluster certificate signing requests (CSRs) after installation. The `kube-controller-manager` only approves the kubelet client CSRs. The `machine-approver` cannot guarantee the validity of a serving certificate that is requested by using kubelet credentials because it cannot confirm that the correct machine issued the request. You must determine and implement a method of verifying the validity of the kubelet serving certificate requests and approving them.
diff --git a/modules/enabling-OVS-balance-slb-mode.adoc b/modules/enabling-OVS-balance-slb-mode.adoc
index 74e8c15e7840..cbee530c3494 100644
--- a/modules/enabling-OVS-balance-slb-mode.adoc
+++ b/modules/enabling-OVS-balance-slb-mode.adoc
@@ -11,6 +11,7 @@
 [id="enabling-OVS-balance-slb-mode_{context}"]
 = Enabling OVS balance-slb mode for your cluster
 
+[role="_abstract"]
 You can enable the Open vSwitch (OVS) `balance-slb` mode so that two or more physical interfaces can share their network traffic. A `balance-slb` mode interface can give source load balancing (SLB) capabilities to a cluster that runs virtualization workloads, without requiring load balancing negotiation with the network switch.
 
 Currently, source load balancing runs on a bond interface, where the interface connects to an auxiliary bridge, such as `br-phy`. Source load balancing balances only across different Media Access Control (MAC) address and virtual local area network (VLAN) combinations. Note that all OVN-Kubernetes pod traffic uses the same MAC address and VLAN, so this traffic cannot be load balanced across many physical interfaces.
@@ -40,7 +41,7 @@ You can integrate the `balance-slb` mode interface into primary or secondary net
 # ...
 networkConfig:
   interfaces:
-  - name: enp1s0 <1>
+  - name: enp1s0
     type: ethernet
     state: up
     ipv4:
@@ -48,17 +49,17 @@ networkConfig:
       enabled: true
     ipv6:
       enabled: false
-  - name: enp2s0 <2>
+  - name: enp2s0
     type: ethernet
     state: up
-    mtu: 1500 <3>
+    mtu: 1500
     ipv4:
       dhcp: true
       enabled: true
     ipv6:
       dhcp: true
       enabled: true
-  - name: enp3s0 <4>
+  - name: enp3s0
     type: ethernet
     state: up
     mtu: 1500
@@ -68,10 +69,13 @@ networkConfig:
       enabled: false
 # ...
 ----
-<1> The interface for the provisioned network interface controller (NIC).
-<2> The first bonded interface that pulls in the Ignition config file for the bond interface.
-<3> Manually set the `br-ex` maximum transmission unit (MTU) on the bond ports.
-<4> The second bonded interface is part of a minimal configuration that pulls ignition during cluster installation.
++
+where:
++
+`enp1s0`:: The interface for the provisioned network interface controller (NIC).
+`enp2s0`:: The first bonded interface that pulls in the Ignition config file for the bond interface.
+`mtu`:: Manually set the `br-ex` maximum transmission unit (MTU) on the bond ports.
+`enp3s0`:: The second bonded interface is part of a minimal configuration that pulls in the Ignition config during cluster installation.
. Define each network interface in an NMState configuration file:
+
@@ -98,7 +102,7 @@ interfaces:
 - name: br-ex
   type: ovs-interface
   state: up
-  mtu: 1500 <1>
+  mtu: 1500
   ipv4:
     enabled: true
     dhcp: true
@@ -156,15 +160,18 @@ interfaces:
     enabled: false
 # ...
 ----
-<1> Manually set the `br-ex` MTU on the bond ports.
++
+where:
++
+`mtu`:: Manually set the `br-ex` MTU on the bond ports.
 
 . Use the `base64` command to encode the interface content of the NMState configuration file:
 +
 [source,terminal]
 ----
-$ base64 -w0 <nmstate_config>.yml <1>
+$ base64 -w0 <nmstate_config>.yml
 ----
-<1> Where the `-w0` option prevents line wrapping during the base64 encoding operation.
++
+where:
++
+`-w0`:: Prevents line wrapping during the base64 encoding operation.
 
 . Create `MachineConfig` manifest files for the `master` role and the `worker` role. Ensure that you embed the base64-encoded string from an earlier command into each `MachineConfig` manifest file. The following example manifest file configures the `master` role for all nodes that exist in a cluster. You can also create a manifest file for `master` and `worker` roles specific to a node.
 +
 [source,yaml]
 ----
 apiVersion: machineconfiguration.openshift.io/v1
 kind: MachineConfig
 metadata:
   labels:
     machineconfiguration.openshift.io/role: master
-  name: 10-br-ex-master <1>
+  name: 10-br-ex-master
 spec:
   config:
     ignition:
@@ -183,14 +190,17 @@ spec:
     storage:
       files:
       - contents:
-          source: data:text/plain;charset=utf-8;base64,<base64_encoded_content> <2>
+          source: data:text/plain;charset=utf-8;base64,<base64_encoded_content>
         mode: 0644
         overwrite: true
-        path: /etc/nmstate/openshift/cluster.yml <3>
+        path: /etc/nmstate/openshift/cluster.yml
----
-<1> The name of the policy.
-<2> Writes the encoded base64 information to the specified path.
-<3> Specify the path to the `cluster.yml` file. For each node in your cluster, you can specify the short hostname path to your node, such as `<node_short_hostname>.yml`.
++
+where:
++
+`name`:: The name of the policy.
+`source`:: Writes the encoded base64 information to the specified path.
+`path`:: Specify the path to the `cluster.yml` file. For each node in your cluster, you can specify the short hostname path to your node, such as `<node_short_hostname>.yml`.

. Save each `MachineConfig` manifest file to the `<installation_directory>/manifests` directory, where `<installation_directory>` is the directory in which the installation program creates files.
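+
+For example, the following commands are a minimal sketch of the encoding step that assumes an NMState configuration file named `cluster.yml` (an illustrative name). The first command produces the base64 string to embed in the manifest, and the second decodes it again so that you can confirm the round trip:
+
+[source,terminal]
+----
+$ base64 -w0 cluster.yml > cluster.yml.b64
+$ base64 -d cluster.yml.b64
+----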
+
diff --git a/modules/installation-dns-user-infra-example.adoc b/modules/installation-dns-user-infra-example.adoc
new file mode 100644
index 000000000000..8348611c591d
--- /dev/null
+++ b/modules/installation-dns-user-infra-example.adoc
@@ -0,0 +1,172 @@
+// Module included in the following assemblies:
+//
+// * installing/installing_bare_metal/upi/installing-bare-metal-network-customizations.adoc
+// * installing/installing_bare_metal/upi/installing-bare-metal.adoc
+// * installing/installing_bare_metal/upi/installing-restricted-networks-bare-metal.adoc
+// * installing/installing_ibm_power/installing-ibm-power.adoc
+// * installing/installing_ibm_power/installing-restricted-networks-ibm-power.adoc
+// * installing/installing_ibm_z/upi/installing-ibm-z-reqs.adoc
+// * installing/installing_ibm_z/upi/installing-restricted-networks-ibm-z-kvm.adoc
+// * installing/installing_ibm_z/upi/installing-ibm-z-kvm.adoc
+// * installing/installing_ibm_z/upi/installing-ibm-z.adoc
+// * installing/installing_ibm_z/upi/installing-restricted-networks-ibm-z.adoc
+// * installing/installing_ibm_z/upi/installing-ibm-z-lpar.adoc
+// * installing/installing_ibm_z/upi/installing-restricted-networks-ibm-z-lpar.adoc
+// * installing/installing_platform_agnostic/installing-platform-agnostic.adoc
+// * installing/installing_two_node_cluster/installing_tnf/installing-two-node-fencing.adoc
+// * installing/installing_vmc/installing-restricted-networks-vmc-user-infra.adoc
+// * installing/installing_vmc/installing-vmc-user-infra.adoc
+// * installing/installing_vmc/installing-vmc-network-customizations-user-infra.adoc
+// * installing/installing_vsphere/upi/upi-vsphere-installation-reqs.adoc
+
+ifeval::["{context}" == "installing-ibm-z"]
+:ibm-z:
+endif::[]
+ifeval::["{context}" == "installing-ibm-z-kvm"]
+:ibm-z-kvm:
+endif::[]
+ifeval::["{context}" == "installing-ibm-z-lpar"]
+:ibm-z:
+endif::[]
+ifeval::["{context}" == "installing-restricted-networks-ibm-z"]
+:ibm-z:
+endif::[]
+ifeval::["{context}" == "installing-restricted-networks-ibm-z-kvm"]
+:ibm-z-kvm:
+endif::[]
+ifeval::["{context}" == "installing-restricted-networks-ibm-z-lpar"]
+:ibm-z:
+endif::[]
+
+:_mod-docs-content-type: REFERENCE
+[id="installation-dns-user-infra-example_{context}"]
+= Example DNS configuration for user-provisioned clusters
+
+[role="_abstract"]
+Review the following example DNS configurations to understand how sample A and PTR records meet the DNS requirements for deploying {product-title} on user-provisioned infrastructure. The samples are not meant to provide advice for choosing one DNS solution over another.
+
+In the examples, the cluster name is `ocp4` and the base domain is `example.com`.
+
+ifeval::["{context}" == "installing-two-node-fencing"]
+[NOTE]
+====
+In a two-node cluster with fencing, the control plane machines are also schedulable worker nodes. The DNS configuration must therefore include only the two control plane nodes. If you later add compute machines, provide corresponding A and PTR records for them as in a standard user-provisioned installation.
+====
+endif::[]
+
+The following example is a BIND zone file that shows sample DNS A records for name resolution in a user-provisioned cluster:
+
+[source,text]
+----
+$TTL 1W
+@ IN SOA ns1.example.com. root (
+   2019070700  ; serial
+   3H          ; refresh (3 hours)
+   30M         ; retry (30 minutes)
+   2W          ; expiry (2 weeks)
+   1W )        ; minimum (1 week)
+  IN NS ns1.example.com.
+  IN MX 10 smtp.example.com.
+;
+;
+ns1.example.com.  IN A 192.168.1.5
+smtp.example.com. IN A 192.168.1.5
+;
+helper.example.com. IN A 192.168.1.5
+helper.ocp4.example.com. IN A 192.168.1.5
+;
+api.ocp4.example.com.     IN A 192.168.1.5
+api-int.ocp4.example.com. IN A 192.168.1.5
+;
+*.apps.ocp4.example.com.  IN A 192.168.1.5
+;
+bootstrap.ocp4.example.com. IN A 192.168.1.96
+;
+control-plane0.ocp4.example.com. IN A 192.168.1.97
+control-plane1.ocp4.example.com. IN A 192.168.1.98
+;
+ifeval::["{context}" != "installing-two-node-fencing"]
+control-plane2.ocp4.example.com. IN A 192.168.1.99
+;
+compute0.ocp4.example.com. IN A 192.168.1.11
+compute1.ocp4.example.com. IN A 192.168.1.7
+endif::[]
+;
+;EOF
+----
+
+* `api.ocp4.example.com.`: Provides name resolution for the Kubernetes API. The record refers to the IP address of the API load balancer.
+* `api-int.ocp4.example.com.`: Provides name resolution for the Kubernetes API. The record refers to the IP address of the API load balancer and is used for internal cluster communications.
+* `*.apps.ocp4.example.com.`: Provides name resolution for the wildcard routes. The record refers to the IP address of the application ingress load balancer. The application ingress load balancer targets the machines that run the Ingress Controller pods.
++
+[NOTE]
+====
+In the example, the same load balancer is used for the Kubernetes API and application ingress traffic. In production scenarios, you can deploy the API and application ingress load balancers separately so that you can scale the load balancer infrastructure for each in isolation.
+====
++
+* `bootstrap.ocp4.example.com.`: Provides name resolution for the bootstrap machine.
+* `control-plane0.ocp4.example.com.`: Provides name resolution for the control plane machines.
+ifeval::["{context}" != "installing-two-node-fencing"]
+* `compute0.ocp4.example.com.`: Provides name resolution for the compute machines.
+endif::[]
+
+The following example BIND zone file shows sample PTR records for reverse name resolution in a user-provisioned cluster:
+
+[source,text]
+----
+$TTL 1W
+@ IN SOA ns1.example.com. root (
+   2019070700  ; serial
+   3H          ; refresh (3 hours)
+   30M         ; retry (30 minutes)
+   2W          ; expiry (2 weeks)
+   1W )        ; minimum (1 week)
+  IN NS ns1.example.com.
+;
+5.1.168.192.in-addr.arpa.  IN PTR api.ocp4.example.com.
+5.1.168.192.in-addr.arpa.  IN PTR api-int.ocp4.example.com.
+;
+96.1.168.192.in-addr.arpa. IN PTR bootstrap.ocp4.example.com.
+;
+97.1.168.192.in-addr.arpa. IN PTR control-plane0.ocp4.example.com.
+98.1.168.192.in-addr.arpa. IN PTR control-plane1.ocp4.example.com.
+;
+ifeval::["{context}" != "installing-two-node-fencing"]
+99.1.168.192.in-addr.arpa. IN PTR control-plane2.ocp4.example.com.
+;
+11.1.168.192.in-addr.arpa. IN PTR compute0.ocp4.example.com.
+7.1.168.192.in-addr.arpa.  IN PTR compute1.ocp4.example.com.
+endif::[]
+;
+;EOF
+----
+
+* `api.ocp4.example.com.`: Provides reverse DNS resolution for the Kubernetes API. The PTR record refers to the record name of the API load balancer.
+* `api-int.ocp4.example.com.`: Provides reverse DNS resolution for the Kubernetes API. The PTR record refers to the record name of the API load balancer and is used for internal cluster communications.
+* `bootstrap.ocp4.example.com.`: Provides reverse DNS resolution for the bootstrap machine.
+* `control-plane0.ocp4.example.com.`: Provides reverse DNS resolution for the control plane machines.
+ifeval::["{context}" != "installing-two-node-fencing"]
+* `compute0.ocp4.example.com.`: Provides reverse DNS resolution for the compute machines.
+endif::[]
+
+[NOTE]
+====
+A PTR record is not required for the {product-title} application wildcard.
+====
+
+ifeval::["{context}" == "installing-ibm-z"]
+:!ibm-z:
+endif::[]
+ifeval::["{context}" == "installing-ibm-z-kvm"]
+:!ibm-z-kvm:
+endif::[]
+ifeval::["{context}" == "installing-ibm-z-lpar"]
+:!ibm-z:
+endif::[]
+ifeval::["{context}" == "installing-restricted-networks-ibm-z"]
+:!ibm-z:
+endif::[]
+ifeval::["{context}" == "installing-restricted-networks-ibm-z-kvm"]
+:!ibm-z-kvm:
+endif::[]
+ifeval::["{context}" == "installing-restricted-networks-ibm-z-lpar"]
+:!ibm-z:
+endif::[]
diff --git a/modules/installation-dns-user-infra.adoc b/modules/installation-dns-user-infra.adoc
index 0320d75734a0..87affb37c7db 100644
--- a/modules/installation-dns-user-infra.adoc
+++ b/modules/installation-dns-user-infra.adoc
@@ -42,7 +42,10 @@ endif::[]
 [id="installation-dns-user-infra_{context}"]
 = User-provisioned DNS requirements
 
-In {product-title} deployments, DNS name resolution is required for the following components:
+[role="_abstract"]
+In {product-title} deployments, you must ensure that cluster components meet certain DNS name resolution criteria for internal communication, certificate validation, and automated node discovery purposes.
+
+DNS name resolution is required for the following components:
 
 * The Kubernetes API
 * The {product-title} application wildcard
@@ -134,132 +137,6 @@ You can use the `dig` command to verify name and reverse name resolution. See the
 
 ====
 
-[id="installation-dns-user-infra-example_{context}"]
-== Example DNS configuration for user-provisioned clusters
-
-This section provides A and PTR record configuration samples that meet the DNS requirements for deploying {product-title} on user-provisioned infrastructure. The samples are not meant to provide advice for choosing one DNS solution over another.
-
-In the examples, the cluster name is `ocp4` and the base domain is `example.com`.
-
-ifeval::["{context}" == "installing-two-node-fencing"]
-[NOTE]
-====
-In a two-node cluster with fencing, the control plane machines are also schedulable worker nodes. The DNS configuration must therefore include only the two control plane nodes. If you later add compute machines, provide corresponding A and PTR records for them as in a standard user-provisioned installation.
-====
-endif::[]
-
-.Example DNS A record configuration for a user-provisioned cluster
-
-The following example is a BIND zone file that shows sample A records for name resolution in a user-provisioned cluster.
-
-.Sample DNS zone database
-[%collapsible]
-====
-[source,text]
-----
-$TTL 1W
-@ IN SOA ns1.example.com. root (
-   2019070700  ; serial
-   3H          ; refresh (3 hours)
-   30M         ; retry (30 minutes)
-   2W          ; expiry (2 weeks)
-   1W )        ; minimum (1 week)
-  IN NS ns1.example.com.
-  IN MX 10 smtp.example.com.
-;
-;
-ns1.example.com.  IN A 192.168.1.5
-smtp.example.com. IN A 192.168.1.5
-;
-helper.example.com. IN A 192.168.1.5
-helper.ocp4.example.com. IN A 192.168.1.5
-;
-api.ocp4.example.com.     IN A 192.168.1.5
-api-int.ocp4.example.com. IN A 192.168.1.5
-;
-*.apps.ocp4.example.com.  IN A 192.168.1.5
-;
-bootstrap.ocp4.example.com. IN A 192.168.1.96
-;
-control-plane0.ocp4.example.com. IN A 192.168.1.97
-control-plane1.ocp4.example.com. IN A 192.168.1.98
-;
-ifeval::["{context}" != "installing-two-node-fencing"]
-control-plane2.ocp4.example.com. IN A 192.168.1.99
-;
-compute0.ocp4.example.com. IN A 192.168.1.11
-compute1.ocp4.example.com. IN A 192.168.1.7
-endif::[]
-;
-;EOF
-----
-
-* `api.ocp4.example.com.`: Provides name resolution for the Kubernetes API. The record refers to the IP address of the API load balancer.
-* `api-int.ocp4.example.com.`: Provides name resolution for the Kubernetes API. The record refers to the IP address of the API load balancer and is used for internal cluster communications.
-* `*.apps.ocp4.example.com.`: Provides name resolution for the wildcard routes. The record refers to the IP address of the application ingress load balancer. The application ingress load balancer targets the machines that run the Ingress Controller pods.
-+
-[NOTE]
-=====
-In the example, the same load balancer is used for the Kubernetes API and application ingress traffic. In production scenarios, you can deploy the API and application ingress load balancers separately so that you can scale the load balancer infrastructure for each in isolation.
-=====
-+
-* `bootstrap.ocp4.example.com.`: Provides name resolution for the bootstrap machine.
-* `control-plane0.ocp4.example.com.`: Provides name resolution for the control plane machines.
-ifeval::["{context}" != "installing-two-node-fencing"]
-* `compute0.ocp4.example.com.`: Provides name resolution for the compute machines.
-endif::[]
-====
-
-.Example DNS PTR record configuration for a user-provisioned cluster
-
-The following example BIND zone file shows sample PTR records for reverse name resolution in a user-provisioned cluster.
-
-.Sample DNS zone database for reverse records
-[%collapsible]
-====
-[source,text]
-----
-$TTL 1W
-@ IN SOA ns1.example.com. root (
-   2019070700  ; serial
-   3H          ; refresh (3 hours)
-   30M         ; retry (30 minutes)
-   2W          ; expiry (2 weeks)
-   1W )        ; minimum (1 week)
-  IN NS ns1.example.com.
-;
-5.1.168.192.in-addr.arpa.  IN PTR api.ocp4.example.com.
-5.1.168.192.in-addr.arpa.  IN PTR api-int.ocp4.example.com.
-;
-96.1.168.192.in-addr.arpa. IN PTR bootstrap.ocp4.example.com.
-;
-97.1.168.192.in-addr.arpa. IN PTR control-plane0.ocp4.example.com.
-98.1.168.192.in-addr.arpa. IN PTR control-plane1.ocp4.example.com.
-;
-ifeval::["{context}" != "installing-two-node-fencing"]
-99.1.168.192.in-addr.arpa. IN PTR control-plane2.ocp4.example.com.
-;
-11.1.168.192.in-addr.arpa. IN PTR compute0.ocp4.example.com.
-7.1.168.192.in-addr.arpa.  IN PTR compute1.ocp4.example.com.
-endif::[]
-;
-;EOF
-----
-
-* `api.ocp4.example.com.`: Provides reverse DNS resolution for the Kubernetes API. The PTR record refers to the record name of the API load balancer.
-* `api-int.ocp4.example.com.`: Provides reverse DNS resolution for the Kubernetes API. The PTR record refers to the record name of the API load balancer and is used for internal cluster communications.
-* `bootstrap.ocp4.example.com.`: Provides reverse DNS resolution for the bootstrap machine.
-* `control-plane0.ocp4.example.com.`: Provides reverse DNS resolution for the control plane machines.
-ifeval::["{context}" != "installing-two-node-fencing"]
-* `compute0.ocp4.example.com.`: Provides reverse DNS resolution for the compute machines.
-endif::[]
-====
-
-[NOTE]
-====
-A PTR record is not required for the {product-title} application wildcard.
-====
-
 ifeval::["{context}" == "installing-ibm-z"]
 :!ibm-z:
 endif::[]
diff --git a/modules/installation-load-balancing-user-infra-example.adoc b/modules/installation-load-balancing-user-infra-example.adoc
new file mode 100644
index 000000000000..284f047321ef
--- /dev/null
+++ b/modules/installation-load-balancing-user-infra-example.adoc
@@ -0,0 +1,118 @@
+// Module included in the following assemblies:
+//
+// * installing/installing_bare_metal/upi/installing-bare-metal.adoc
+// * installing/installing_bare_metal/upi/installing-bare-metal-network-customizations.adoc
+// * installing/installing_bare_metal/upi/installing-restricted-networks-bare-metal.adoc
+// * installing/installing_platform_agnostic/installing-platform-agnostic.adoc
+// * installing/installing_ibm_z/upi/installing-ibm-z.adoc
+// * installing/installing_ibm_z/upi/installing-restricted-networks-ibm-z.adoc
+// * installing/installing_ibm_z/upi/installing-ibm-z-kvm.adoc
+// * installing/installing_ibm_z/upi/installing-restricted-networks-ibm-z-kvm.adoc
+// * installing/installing_ibm_z/upi/installing-ibm-z-lpar.adoc
+// * installing/installing_ibm_z/upi/installing-restricted-networks-ibm-z-lpar.adoc
+// * installing/installing_ibm_power/installing-ibm-power.adoc
+// * installing/installing_ibm_power/installing-restricted-networks-ibm-power.adoc
+// * installing/installing_openstack/installing-openstack-installer-custom.adoc
+// * installing/installing_vsphere/upi/upi-vsphere-installation-reqs.adoc
+// * installing/installing_vsphere/upi/upi-vsphere-preparing-to-install.adoc
+
+ifeval::["{context}" == "installing-openstack-installer-custom"]
+:user-managed-lb:
+endif::[]
+
+:_mod-docs-content-type: REFERENCE
+[id="installation-load-balancing-user-infra-example_{context}"]
+ifndef::user-managed-lb[]
+= Example load balancer configuration for user-provisioned clusters
+
+[role="_abstract"]
+Review the following example API and application Ingress load balancer configuration to understand how to meet the load balancing requirements for user-provisioned clusters.
+
+The sample is an `/etc/haproxy/haproxy.cfg` configuration for an HAProxy load balancer. The example is not meant to provide advice for choosing one load balancing solution over another.
+endif::user-managed-lb[]
+
+ifdef::user-managed-lb[]
+= Example load balancer configuration for clusters that are deployed with user-managed load balancers
+
+[role="_abstract"]
+Review the following example API and application Ingress load balancer configuration to understand how to meet the load balancing requirements for clusters that are deployed with user-managed load balancers.
+
+The sample is an `/etc/haproxy/haproxy.cfg` configuration for an HAProxy load balancer. The example is not meant to provide advice for choosing one load balancing solution over another.
+endif::user-managed-lb[]
+
+In the example, the same load balancer is used for the Kubernetes API and application ingress traffic. In production scenarios, you can deploy the API and application ingress load balancers separately so that you can scale the load balancer infrastructure for each in isolation.
+
+[NOTE]
+====
+If you are using HAProxy as a load balancer and SELinux is set to `enforcing`, you must ensure that the HAProxy service can bind to the configured TCP port by running `setsebool -P haproxy_connect_any=1`.
+==== + +.Sample API and application Ingress load balancer configuration +[source,text] +---- +global + log 127.0.0.1 local2 + pidfile /var/run/haproxy.pid + maxconn 4000 + daemon +defaults + mode http + log global + option dontlognull + option http-server-close + option redispatch + retries 3 + timeout http-request 10s + timeout queue 1m + timeout connect 10s + timeout client 1m + timeout server 1m + timeout http-keep-alive 10s + timeout check 10s + maxconn 3000 +listen api-server-6443 + bind *:6443 + mode tcp + option httpchk GET /readyz HTTP/1.0 + option log-health-checks + balance roundrobin + server bootstrap bootstrap.ocp4.example.com:6443 verify none check check-ssl inter 10s fall 2 rise 3 backup + server master0 master0.ocp4.example.com:6443 weight 1 verify none check check-ssl inter 10s fall 2 rise 3 + server master1 master1.ocp4.example.com:6443 weight 1 verify none check check-ssl inter 10s fall 2 rise 3 + server master2 master2.ocp4.example.com:6443 weight 1 verify none check check-ssl inter 10s fall 2 rise 3 +listen machine-config-server-22623 + bind *:22623 + mode tcp + server bootstrap bootstrap.ocp4.example.com:22623 check inter 1s backup + server master0 master0.ocp4.example.com:22623 check inter 1s + server master1 master1.ocp4.example.com:22623 check inter 1s + server master2 master2.ocp4.example.com:22623 check inter 1s +listen ingress-router-443 + bind *:443 + mode tcp + balance source + server compute0 compute0.ocp4.example.com:443 check inter 1s + server compute1 compute1.ocp4.example.com:443 check inter 1s +listen ingress-router-80 + bind *:80 + mode tcp + balance source + server compute0 compute0.ocp4.example.com:80 check inter 1s + server compute1 compute1.ocp4.example.com:80 check inter 1s +---- + +where: + +`listen api-server-6443`:: Port `6443` handles the Kubernetes API traffic and points to the control plane machines. +`server bootstrap bootstrap.ocp4.example.com`:: The bootstrap entries must be in place before the {product-title} cluster installation and they must be removed after the bootstrap process is complete. +`listen machine-config-server`:: Port `22623` handles the machine config server traffic and points to the control plane machines. +`listen ingress-router-443`:: Port `443` handles the HTTPS traffic and points to the machines that run the Ingress Controller pods. The Ingress Controller pods run on the compute machines by default. +`listen ingress-router-80`:: Port `80` handles the HTTP traffic and points to the machines that run the Ingress Controller pods. The Ingress Controller pods run on the compute machines by default. ++ +[NOTE] +==== +If you are deploying a three-node cluster with zero compute nodes, the Ingress Controller pods run on the control plane nodes. In three-node cluster deployments, you must configure your application Ingress load balancer to route HTTP and HTTPS traffic to the control plane nodes. +==== + +[TIP] +==== +If you are using HAProxy as a load balancer, you can check that the `haproxy` process is listening on ports `6443`, `22623`, `443`, and `80` by running `netstat -nltupe` on the HAProxy node. 
+====
+
+ifeval::["{context}" == "installing-openstack-installer-custom"]
+:!user-managed-lb:
+endif::[]
diff --git a/modules/installation-load-balancing-user-infra.adoc b/modules/installation-load-balancing-user-infra.adoc
index 3bf5b5957731..222f81cb0628 100644
--- a/modules/installation-load-balancing-user-infra.adoc
+++ b/modules/installation-load-balancing-user-infra.adoc
@@ -21,6 +21,7 @@ endif::[]
 [id="installation-load-balancing-user-infra_{context}"]
 = Load balancing requirements for user-provisioned infrastructure
 
+[role="_abstract"]
 ifndef::user-managed-lb[]
 Before you install {product-title}, you must provision the API and application Ingress load balancing infrastructure. In production scenarios, you can deploy the API and application Ingress load balancers separately so that you can scale the load balancer infrastructure for each in isolation.
 endif::user-managed-lb[]
@@ -36,21 +37,18 @@ If you want to deploy the API and application Ingress load balancers with a {op-
 
 The load balancing infrastructure must meet the following requirements:
 
-. *API load balancer*: Provides a common endpoint for users, both human and machine, to interact with and configure the platform. Configure the following conditions:
-+
---
- ** Layer 4 load balancing only. This can be referred to as Raw TCP or SSL Passthrough mode.
- ** A stateless load balancing algorithm. The options vary based on the load balancer implementation.
---
-+
+* API load balancer: Provides a common endpoint for users, both human and machine, to interact with and configure the platform. Configure the following conditions:
+
+** Layer 4 load balancing only. This can be referred to as Raw TCP or SSL Passthrough mode.
+** A stateless load balancing algorithm. The options vary based on the load balancer implementation.
+
 [IMPORTANT]
 ====
 Do not configure session persistence for an API load balancer. Configuring session persistence for a Kubernetes API server might cause performance issues from excess application traffic for your {product-title} cluster and the Kubernetes API that runs inside the cluster.
 ====
-+
-Configure the following ports on both the front and back of the load balancers:
-+
-.API load balancer
+
+Configure the following ports on both the front and back of the API load balancers:
+
+.API load balancer
 [cols="2,5,^2,^2,2",options="header"]
 |===
@@ -86,23 +84,19 @@ error or becomes healthy, the endpoint must have been removed or added. Probing
 every 5 or 10 seconds, with two successful requests to become healthy and three
 to become unhealthy, are well-tested values.
 ====
-+
-. *Application Ingress load balancer*: Provides an ingress point for application traffic flowing in from outside the cluster. A working configuration for the Ingress router is required for an {product-title} cluster.
-+
-Configure the following conditions:
-+
---
- ** Layer 4 load balancing only. This can be referred to as Raw TCP or SSL Passthrough mode.
- ** A connection-based or session-based persistence is recommended, based on the options available and types of applications that will be hosted on the platform.
---
-+
+* Application Ingress load balancer: Provides an ingress point for application traffic flowing in from outside the cluster. A working configuration for the Ingress router is required for an {product-title} cluster. Configure the following conditions:
+
+** Layer 4 load balancing only. This can be referred to as Raw TCP or SSL Passthrough mode.
+** A connection-based or session-based persistence is recommended, based on the options available and types of applications that will be hosted on the platform. + [TIP] ==== If the true IP address of the client can be seen by the application Ingress load balancer, enabling source IP-based session persistence can improve performance for applications that use end-to-end TLS encryption. ==== -+ + Configure the following ports on both the front and back of the load balancers: -+ + .Application Ingress load balancer [cols="2,5,^2,^2,2",options="header"] |=== @@ -126,103 +120,10 @@ Configure the following ports on both the front and back of the load balancers: |HTTP traffic |=== -+ -[NOTE] -==== -If you are deploying a three-node cluster with zero compute nodes, the Ingress Controller pods run on the control plane nodes. In three-node cluster deployments, you must configure your application Ingress load balancer to route HTTP and HTTPS traffic to the control plane nodes. -==== - -[id="installation-load-balancing-user-infra-example_{context}"] -ifndef::user-managed-lb[] -== Example load balancer configuration for user-provisioned clusters - -This section provides an example API and application Ingress load balancer configuration that meets the load balancing requirements for user-provisioned clusters. The sample is an `/etc/haproxy/haproxy.cfg` configuration for an HAProxy load balancer. The example is not meant to provide advice for choosing one load balancing solution over another. -endif::user-managed-lb[] - -ifdef::user-managed-lb[] -== Example load balancer configuration for clusters that are deployed with user-managed load balancers - -This section provides an example API and application Ingress load balancer configuration that meets the load balancing requirements for clusters that are deployed with user-managed load balancers. The sample is an `/etc/haproxy/haproxy.cfg` configuration for an HAProxy load balancer. The example is not meant to provide advice for choosing one load balancing solution over another. -endif::user-managed-lb[] - -In the example, the same load balancer is used for the Kubernetes API and application ingress traffic. In production scenarios, you can deploy the API and application ingress load balancers separately so that you can scale the load balancer infrastructure for each in isolation. [NOTE] ==== -If you are using HAProxy as a load balancer and SELinux is set to `enforcing`, you must ensure that the HAProxy service can bind to the configured TCP port by running `setsebool -P haproxy_connect_any=1`. 
-==== - -.Sample API and application Ingress load balancer configuration -[%collapsible] -==== -[source,text] ----- -global - log 127.0.0.1 local2 - pidfile /var/run/haproxy.pid - maxconn 4000 - daemon -defaults - mode http - log global - option dontlognull - option http-server-close - option redispatch - retries 3 - timeout http-request 10s - timeout queue 1m - timeout connect 10s - timeout client 1m - timeout server 1m - timeout http-keep-alive 10s - timeout check 10s - maxconn 3000 -listen api-server-6443 <1> - bind *:6443 - mode tcp - option httpchk GET /readyz HTTP/1.0 - option log-health-checks - balance roundrobin - server bootstrap bootstrap.ocp4.example.com:6443 verify none check check-ssl inter 10s fall 2 rise 3 backup <2> - server master0 master0.ocp4.example.com:6443 weight 1 verify none check check-ssl inter 10s fall 2 rise 3 - server master1 master1.ocp4.example.com:6443 weight 1 verify none check check-ssl inter 10s fall 2 rise 3 - server master2 master2.ocp4.example.com:6443 weight 1 verify none check check-ssl inter 10s fall 2 rise 3 -listen machine-config-server-22623 <3> - bind *:22623 - mode tcp - server bootstrap bootstrap.ocp4.example.com:22623 check inter 1s backup <2> - server master0 master0.ocp4.example.com:22623 check inter 1s - server master1 master1.ocp4.example.com:22623 check inter 1s - server master2 master2.ocp4.example.com:22623 check inter 1s -listen ingress-router-443 <4> - bind *:443 - mode tcp - balance source - server compute0 compute0.ocp4.example.com:443 check inter 1s - server compute1 compute1.ocp4.example.com:443 check inter 1s -listen ingress-router-80 <5> - bind *:80 - mode tcp - balance source - server compute0 compute0.ocp4.example.com:80 check inter 1s - server compute1 compute1.ocp4.example.com:80 check inter 1s ----- - -<1> Port `6443` handles the Kubernetes API traffic and points to the control plane machines. -<2> The bootstrap entries must be in place before the {product-title} cluster installation and they must be removed after the bootstrap process is complete. -<3> Port `22623` handles the machine config server traffic and points to the control plane machines. -<4> Port `443` handles the HTTPS traffic and points to the machines that run the Ingress Controller pods. The Ingress Controller pods run on the compute machines by default. -<5> Port `80` handles the HTTP traffic and points to the machines that run the Ingress Controller pods. The Ingress Controller pods run on the compute machines by default. -+ -[NOTE] -===== If you are deploying a three-node cluster with zero compute nodes, the Ingress Controller pods run on the control plane nodes. In three-node cluster deployments, you must configure your application Ingress load balancer to route HTTP and HTTPS traffic to the control plane nodes. -===== -==== - -[TIP] -==== -If you are using HAProxy as a load balancer, you can check that the `haproxy` process is listening on ports `6443`, `22623`, `443`, and `80` by running `netstat -nltupe` on the HAProxy node. 
====

ifeval::["{context}" == "installing-openstack-installer-custom"]

diff --git a/modules/installation-machine-requirements.adoc b/modules/installation-machine-requirements.adoc
index b091d8bf3ba1..a59e83817b0e 100644
--- a/modules/installation-machine-requirements.adoc
+++ b/modules/installation-machine-requirements.adoc
@@ -39,6 +39,9 @@ endif::[]
 [id="installation-machine-requirements_{context}"]
 = Required machines for cluster installation
 
+[role="_abstract"]
+You must provision the minimum required machines or hosts for your cluster so that your cluster remains stable if a node fails.
+
 The smallest {product-title} clusters require the following hosts:
 
 [IMPORTANT]
diff --git a/modules/installation-minimum-resource-requirements.adoc b/modules/installation-minimum-resource-requirements.adoc
index f8881fda51e0..c9a93f355909 100644
--- a/modules/installation-minimum-resource-requirements.adoc
+++ b/modules/installation-minimum-resource-requirements.adoc
@@ -115,7 +115,8 @@ endif::[]
 [id="installation-minimum-resource-requirements_{context}"]
 = Minimum resource requirements for cluster installation
 
-Each cluster machine must meet the following minimum requirements:
+[role="_abstract"]
+Each cluster machine must meet certain minimum resource requirements so that the cluster runs as expected.
 
 .Minimum resource requirements
 [cols="2,2,2,2,2,2",options="header"]