diff --git a/.gitignore b/.gitignore index d23661ad..83aae1f5 100644 --- a/.gitignore +++ b/.gitignore @@ -39,3 +39,8 @@ coverage.txt junit.xml .DS_Store + +# Local cluster artifacts +capl-cluster-manifests.yaml +*-kubeconfig.yaml +.opencode/ diff --git a/Makefile b/Makefile index 2ade9d75..fa146237 100644 --- a/Makefile +++ b/Makefile @@ -9,6 +9,8 @@ LOCALBIN ?= $(CACHE_BIN) DEVBOX_BIN ?= $(DEVBOX_PACKAGES_DIR)/bin HELM ?= $(LOCALBIN)/helm HELM_VERSION ?= v3.16.3 +CLUSTERCTL ?= $(LOCALBIN)/clusterctl +CLUSTERCTL_VERSION ?= v1.12.2 GOLANGCI_LINT ?= $(LOCALBIN)/golangci-lint GOLANGCI_LINT_NILAWAY ?= $(CACHE_BIN)/golangci-lint-nilaway @@ -26,13 +28,13 @@ SUBNET_MANIFEST_NAME ?= subnet-testing-manifests K8S_VERSION ?= "v1.31.2" # renovate: datasource=github-tags depName=kubernetes-sigs/cluster-api -CAPI_VERSION ?= "v1.8.5" +CAPI_VERSION ?= "v1.12.3" # renovate: datasource=github-tags depName=kubernetes-sigs/cluster-api-addon-provider-helm -CAAPH_VERSION ?= "v0.2.1" +CAAPH_VERSION ?= "v0.6.1" # renovate: datasource=github-tags depName=linode/cluster-api-provider-linode -CAPL_VERSION ?= "v0.8.5" +CAPL_VERSION ?= "v0.10.2" # renovate: datasource=github-tags depName=golangci/golangci-lint GOLANGCI_LINT_VERSION ?= "v2.11.3" @@ -163,23 +165,24 @@ run-debug: build mgmt-and-capl-cluster: docker-setup mgmt-cluster capl-cluster .PHONY: capl-cluster -capl-cluster: generate-capl-cluster-manifests create-capl-cluster patch-linode-ccm +capl-cluster: generate-capl-cluster-manifests create-capl-cluster .PHONY: generate-capl-cluster-manifests -generate-capl-cluster-manifests: +generate-capl-cluster-manifests: clusterctl # Create the CAPL cluster manifests without any CSI driver stuff - LINODE_FIREWALL_ENABLED=$(LINODE_FIREWALL_ENABLED) LINODE_OS=$(LINODE_OS) VPC_NAME=$(VPC_NAME) clusterctl generate cluster $(CLUSTER_NAME) \ + LINODE_FIREWALL_ENABLED=$(LINODE_FIREWALL_ENABLED) LINODE_OS=$(LINODE_OS) VPC_NAME=$(VPC_NAME) $(CLUSTERCTL) generate cluster $(CLUSTER_NAME) \ 
--kubernetes-version $(K8S_VERSION) --infrastructure linode-linode:$(CAPL_VERSION) \ - --control-plane-machine-count $(CONTROLPLANE_NODES) --worker-machine-count $(WORKER_NODES) > $(MANIFEST_NAME).yaml - yq -i e 'select(.kind == "LinodeVPC").spec.subnets = [{"ipv4": "10.0.0.0/8", "label": "default"}, {"ipv4": "172.16.0.0/16", "label": "testing"}]' $(MANIFEST_NAME).yaml + --control-plane-machine-count $(CONTROLPLANE_NODES) --worker-machine-count $(WORKER_NODES) --flavor kubeadm-dual-stack > $(MANIFEST_NAME).yaml + yq -i e 'select(.kind == "LinodeVPC").spec.ipv6Range = [{"range": "auto"}] | select(.kind == "LinodeVPC").spec.subnets = [{"ipv4": "10.0.0.0/8", "label": "default", "ipv6Range": [{"range": "auto"}]}, {"ipv4": "172.16.0.0/16", "label": "testing", "ipv6Range": [{"range": "auto"}]}]' $(MANIFEST_NAME).yaml + IMG_TAG=$${IMG##*:} CILIUM_HOST_FIREWALL=$$'hostFirewall:\n enabled: false' yq -i e '(select(.kind == "HelmChartProxy" and .spec.chartName == "ccm-linode").spec.valuesTemplate |= sub("tag: [^\\n]+"; "tag: " + strenv(IMG_TAG))) | (select(.kind == "HelmChartProxy" and .spec.chartName == "ccm-linode").spec.valuesTemplate |= sub("pullPolicy: [^\\n]+"; "pullPolicy: Always")) | (select(.kind == "HelmChartProxy" and .spec.chartName == "cilium").spec.valuesTemplate |= sub("hostFirewall:\\n enabled: true"; strenv(CILIUM_HOST_FIREWALL)))' $(MANIFEST_NAME).yaml .PHONY: create-capl-cluster -create-capl-cluster: +create-capl-cluster: clusterctl # Create a CAPL cluster with updated CCM and wait for it to be ready kubectl apply -f $(MANIFEST_NAME).yaml kubectl wait --for=condition=ControlPlaneReady cluster/$(CLUSTER_NAME) --timeout=600s || (kubectl get cluster -o yaml; kubectl get linodecluster -o yaml; kubectl get linodemachines -o yaml; kubectl logs -n capl-system deployments/capl-controller-manager --tail=50) kubectl wait --for=condition=NodeHealthy=true machines -l cluster.x-k8s.io/cluster-name=$(CLUSTER_NAME) --timeout=900s - clusterctl get kubeconfig 
$(CLUSTER_NAME) > $(KUBECONFIG_PATH) + $(CLUSTERCTL) get kubeconfig $(CLUSTER_NAME) > $(KUBECONFIG_PATH) KUBECONFIG=$(KUBECONFIG_PATH) kubectl wait --for=condition=Ready nodes --all --timeout=600s # Remove all taints from control plane node so that pods scheduled on it by tests can run (without this, some tests fail) KUBECONFIG=$(KUBECONFIG_PATH) kubectl taint nodes -l node-role.kubernetes.io/control-plane node-role.kubernetes.io/control-plane- @@ -187,15 +190,16 @@ create-capl-cluster: .PHONY: patch-linode-ccm patch-linode-ccm: KUBECONFIG=$(KUBECONFIG_PATH) kubectl patch -n kube-system daemonset ccm-linode --type='json' -p="[{'op': 'replace', 'path': '/spec/template/spec/containers/0/image', 'value': '${IMG}'}]" + KUBECONFIG=$(KUBECONFIG_PATH) kubectl patch -n kube-system daemonset ccm-linode --type='json' -p='[{"op": "replace", "path": "/spec/template/spec/containers/0/imagePullPolicy", "value": "Always"}]' KUBECONFIG=$(KUBECONFIG_PATH) kubectl patch -n kube-system daemonset ccm-linode --type='json' -p='[{"op": "add", "path": "/spec/template/spec/containers/0/env/-", "value": {"name": "LINODE_API_VERSION", "value": "v4beta"}}]' KUBECONFIG=$(KUBECONFIG_PATH) kubectl rollout status -n kube-system daemonset/ccm-linode --timeout=600s KUBECONFIG=$(KUBECONFIG_PATH) kubectl -n kube-system get daemonset/ccm-linode -o yaml .PHONY: mgmt-cluster -mgmt-cluster: +mgmt-cluster: clusterctl # Create a mgmt cluster ctlptl apply -f e2e/setup/ctlptl-config.yaml - clusterctl init \ + $(CLUSTERCTL) init \ --wait-providers \ --wait-provider-timeout 600 \ --core cluster-api:$(CAPI_VERSION) \ @@ -248,7 +252,6 @@ e2e-test-subnet: # Create the second cluster MANIFEST_NAME=$(SUBNET_MANIFEST_NAME) CLUSTER_NAME=$(SUBNET_CLUSTER_NAME) KUBECONFIG_PATH=$(SUBNET_KUBECONFIG_PATH) \ make create-capl-cluster - KUBECONFIG_PATH=$(SUBNET_KUBECONFIG_PATH) make patch-linode-ccm # Run chainsaw test LINODE_TOKEN=$(LINODE_TOKEN) \ LINODE_URL=$(LINODE_URL) \ @@ -295,13 +298,13 @@ helm-template: helm .PHONY: 
kubectl kubectl: $(KUBECTL) ## Download kubectl locally if necessary. $(KUBECTL): $(LOCALBIN) - curl -fsSL https://dl.k8s.io/release/$(KUBECTL_VERSION)/bin/$(OS)/$(ARCH_SHORT)/kubectl -o $(KUBECTL) + curl -fsSL https://dl.k8s.io/release/$(KUBECTL_VERSION)/bin/$(HOSTOS)/$(ARCH_SHORT)/kubectl -o $(KUBECTL) chmod +x $(KUBECTL) .PHONY: clusterctl clusterctl: $(CLUSTERCTL) ## Download clusterctl locally if necessary. $(CLUSTERCTL): $(LOCALBIN) - curl -fsSL https://github.com/kubernetes-sigs/cluster-api/releases/download/$(CLUSTERCTL_VERSION)/clusterctl-$(OS)-$(ARCH_SHORT) -o $(CLUSTERCTL) + curl -fsSL https://github.com/kubernetes-sigs/cluster-api/releases/download/$(CLUSTERCTL_VERSION)/clusterctl-$(HOSTOS)-$(ARCH_SHORT) -o $(CLUSTERCTL) chmod +x $(CLUSTERCTL) .phony: golangci-lint-nilaway diff --git a/cloud/annotations/annotations.go b/cloud/annotations/annotations.go index 197542bb..5e608294 100644 --- a/cloud/annotations/annotations.go +++ b/cloud/annotations/annotations.go @@ -41,6 +41,8 @@ const ( // AnnLinodeEnableIPv6Ingress is the annotation used to specify that a service should include both IPv4 and IPv6 // addresses for its LoadBalancer ingress. When set to "true", both addresses will be included in the status. AnnLinodeEnableIPv6Ingress = "service.beta.kubernetes.io/linode-loadbalancer-enable-ipv6-ingress" + // AnnLinodeEnableIPv6Backends controls whether a NodeBalancer service should use public IPv6 backend nodes. 
+ AnnLinodeEnableIPv6Backends = "service.beta.kubernetes.io/linode-loadbalancer-enable-ipv6-backends" AnnLinodeNodePrivateIP = "node.k8s.linode.com/private-ip" AnnLinodeHostUUID = "node.k8s.linode.com/host-uuid" diff --git a/cloud/linode/loadbalancers.go b/cloud/linode/loadbalancers.go index b97d6399..2750441b 100644 --- a/cloud/linode/loadbalancers.go +++ b/cloud/linode/loadbalancers.go @@ -469,9 +469,9 @@ func (l *loadbalancers) updateNodeBalancer( } } oldNBNodeIDs := make(map[string]int) + var currentNBNodes []linodego.NodeBalancerNode if currentNBCfg != nil { // Obtain list of current NB nodes and convert it to map of node IDs - var currentNBNodes []linodego.NodeBalancerNode currentNBNodes, err = l.client.ListNodeBalancerNodes(ctx, nb.ID, currentNBCfg.ID, nil) if err != nil { // This error can be ignored, because if we fail to get nodes we can anyway rebuild the config from scratch, @@ -485,42 +485,20 @@ func (l *loadbalancers) updateNodeBalancer( } else { klog.Infof("No preexisting nodebalancer for port %v found.", port.Port) } + + useIPv6Backends := resolveIPv6NodeBalancerBackendState(service) // Add all of the Nodes to the config - newNBNodes := make([]linodego.NodeBalancerConfigRebuildNodeOptions, 0, len(nodes)) - subnetID := 0 - if options.Options.NodeBalancerBackendIPv4SubnetID != 0 { - subnetID = options.Options.NodeBalancerBackendIPv4SubnetID - } - backendIPv4Range, ok := service.GetAnnotations()[annotations.NodeBalancerBackendIPv4Range] - if ok { - if err = validateNodeBalancerBackendIPv4Range(backendIPv4Range); err != nil { - return err - } - } - if len(options.Options.VPCNames) > 0 && !options.Options.DisableNodeBalancerVPCBackends { - var id int - id, err = l.getSubnetIDForSVC(ctx, service) - if err != nil { - sentry.CaptureError(ctx, err) - return fmt.Errorf("Error getting subnet ID for service %s: %w", service.Name, err) - } - subnetID = id + subnetID, err := l.getBackendSubnetID(ctx, service, useIPv6Backends) + if err != nil { + 
sentry.CaptureError(ctx, err) + return fmt.Errorf("Error getting subnet ID for service %s: %w", service.Name, err) } - for _, node := range nodes { - var newNodeOpts *linodego.NodeBalancerConfigRebuildNodeOptions - newNodeOpts, err = l.buildNodeBalancerNodeConfigRebuildOptions(node, port.NodePort, subnetID, newNBCfg.Protocol) - if err != nil { - sentry.CaptureError(ctx, err) - return fmt.Errorf("failed to build NodeBalancer node config options for node %s: %w", node.Name, err) - } - oldNodeID, ok := oldNBNodeIDs[newNodeOpts.Address] - if ok { - newNodeOpts.ID = oldNodeID - } else { - klog.Infof("No preexisting node id for %v found.", newNodeOpts.Address) - } - newNBNodes = append(newNBNodes, *newNodeOpts) + newNBNodes, err := l.buildNodeBalancerConfigNodes(service, nodes, port.NodePort, subnetID, useIPv6Backends, newNBCfg.Protocol, oldNBNodeIDs) + if err != nil { + sentry.CaptureError(ctx, err) + return fmt.Errorf("[port %d] error building NodeBalancer backend node configs: %w", int(port.Port), err) } + // If there's no existing config, create it var rebuildOpts linodego.NodeBalancerConfigRebuildOptions if currentNBCfg == nil { @@ -582,7 +560,8 @@ func (l *loadbalancers) UpdateLoadBalancer(ctx context.Context, clusterName stri serviceWithStatus := service.DeepCopy() serviceWithStatus.Status.LoadBalancer, err = l.getLatestServiceLoadBalancerStatus(ctx, service) if err != nil { - return fmt.Errorf("failed to get latest LoadBalancer status for service (%s): %w", getServiceNn(service), err) + klog.Warningf("failed to get latest LoadBalancer status for service (%s), using provided status instead: %v", getServiceNn(service), err) + serviceWithStatus.Status.LoadBalancer = service.Status.LoadBalancer } nb, err := l.getNodeBalancerForService(ctx, serviceWithStatus) @@ -945,6 +924,7 @@ func (l *loadbalancers) getSubnetIDByVPCAndSubnetNames(ctx context.Context, vpcN func (l *loadbalancers) createNodeBalancer(ctx context.Context, clusterName string, service *v1.Service, 
configs []*linodego.NodeBalancerConfigCreateOptions) (lb *linodego.NodeBalancer, err error) { connThrottle := getConnectionThrottle(service) + useIPv6Backends := resolveIPv6NodeBalancerBackendState(service) label := l.GetLoadBalancerName(ctx, clusterName, service) tags := l.GetLoadBalancerTags(ctx, clusterName, service) @@ -958,7 +938,7 @@ func (l *loadbalancers) createNodeBalancer(ctx context.Context, clusterName stri Type: nbType, } - if len(options.Options.VPCNames) > 0 && !options.Options.DisableNodeBalancerVPCBackends { + if !useIPv6Backends && len(options.Options.VPCNames) > 0 && !options.Options.DisableNodeBalancerVPCBackends { createOpts.VPCs, err = l.getVPCCreateOptions(ctx, service) if err != nil { return nil, err @@ -1157,24 +1137,11 @@ func (l *loadbalancers) buildLoadBalancerRequest(ctx context.Context, clusterNam } ports := service.Spec.Ports configs := make([]*linodego.NodeBalancerConfigCreateOptions, 0, len(ports)) + useIPv6Backends := resolveIPv6NodeBalancerBackendState(service) - subnetID := 0 - if options.Options.NodeBalancerBackendIPv4SubnetID != 0 { - subnetID = options.Options.NodeBalancerBackendIPv4SubnetID - } - // Check for the NodeBalancerBackendIPv4Range annotation - backendIPv4Range, ok := service.GetAnnotations()[annotations.NodeBalancerBackendIPv4Range] - if ok { - if err := validateNodeBalancerBackendIPv4Range(backendIPv4Range); err != nil { - return nil, err - } - } - if len(options.Options.VPCNames) > 0 && !options.Options.DisableNodeBalancerVPCBackends { - id, err := l.getSubnetIDForSVC(ctx, service) - if err != nil { - return nil, err - } - subnetID = id + subnetID, err := l.getBackendSubnetID(ctx, service, useIPv6Backends) + if err != nil { + return nil, err } for _, port := range ports { @@ -1185,7 +1152,7 @@ func (l *loadbalancers) buildLoadBalancerRequest(ctx context.Context, clusterNam createOpt := config.GetCreateOptions() for _, node := range nodes { - newNodeOpts, err := l.buildNodeBalancerNodeConfigRebuildOptions(node, 
port.NodePort, subnetID, config.Protocol) + newNodeOpts, err := l.buildNodeBalancerNodeConfigRebuildOptions(service, node, port.NodePort, subnetID, useIPv6Backends, config.Protocol) if err != nil { sentry.CaptureError(ctx, err) return nil, fmt.Errorf("failed to build NodeBalancer node config options for node %s: %w", node.Name, err) @@ -1210,14 +1177,14 @@ func coerceString(str string, minLen, maxLen int, padding string) string { return str } -func (l *loadbalancers) buildNodeBalancerNodeConfigRebuildOptions(node *v1.Node, nodePort int32, subnetID int, protocol linodego.ConfigProtocol) (*linodego.NodeBalancerConfigRebuildNodeOptions, error) { - nodeIP, err := getNodePrivateIP(node, subnetID) +func (l *loadbalancers) buildNodeBalancerNodeConfigRebuildOptions(service *v1.Service, node *v1.Node, nodePort int32, subnetID int, useIPv6Backends bool, protocol linodego.ConfigProtocol) (*linodego.NodeBalancerConfigRebuildNodeOptions, error) { + nodeIP, err := getNodeBackendIP(service, node, subnetID, useIPv6Backends) if err != nil { - return nil, fmt.Errorf("node %s does not have a private IP address: %w", node.Name, err) + return nil, err } nodeOptions := &linodego.NodeBalancerConfigRebuildNodeOptions{ NodeBalancerNodeCreateOptions: linodego.NodeBalancerNodeCreateOptions{ - Address: fmt.Sprintf("%v:%v", nodeIP, nodePort), + Address: formatNodeBalancerBackendAddress(nodeIP, nodePort), // NodeBalancer backends must be 3-32 chars in length // If < 3 chars, pad node name with "node-" prefix Label: coerceString(node.Name, 3, 32, "node-"), @@ -1228,12 +1195,80 @@ func (l *loadbalancers) buildNodeBalancerNodeConfigRebuildOptions(node *v1.Node, if protocol != linodego.ProtocolUDP { nodeOptions.Mode = "accept" } - if subnetID != 0 { + if !useIPv6Backends && subnetID != 0 { nodeOptions.SubnetID = subnetID } return nodeOptions, nil } +func (l *loadbalancers) getBackendSubnetID(ctx context.Context, service *v1.Service, useIPv6Backends bool) (int, error) { + if useIPv6Backends { + 
return 0, nil + } + + subnetID := 0 + if options.Options.NodeBalancerBackendIPv4SubnetID != 0 { + subnetID = options.Options.NodeBalancerBackendIPv4SubnetID + } + + backendIPv4Range, ok := service.GetAnnotations()[annotations.NodeBalancerBackendIPv4Range] + if ok { + if err := validateNodeBalancerBackendIPv4Range(backendIPv4Range); err != nil { + return 0, err + } + } + + if len(options.Options.VPCNames) > 0 && !options.Options.DisableNodeBalancerVPCBackends { + id, err := l.getSubnetIDForSVC(ctx, service) + if err != nil { + return 0, err + } + subnetID = id + } + + return subnetID, nil +} + +func resolveIPv6NodeBalancerBackendState(service *v1.Service) bool { + useIPv6 := getServiceBoolAnnotation(service, annotations.AnnLinodeEnableIPv6Backends) + if useIPv6 != nil { + return *useIPv6 + } + + return options.Options.EnableIPv6ForNodeBalancerBackends +} + +func formatNodeBalancerBackendAddress(ip string, nodePort int32) string { + return net.JoinHostPort(ip, strconv.Itoa(int(nodePort))) +} + +func (l *loadbalancers) buildNodeBalancerConfigNodes( + service *v1.Service, + nodes []*v1.Node, + nodePort int32, + subnetID int, + useIPv6Backends bool, + protocol linodego.ConfigProtocol, + oldNBNodeIDs map[string]int, +) ([]linodego.NodeBalancerConfigRebuildNodeOptions, error) { + newNBNodes := make([]linodego.NodeBalancerConfigRebuildNodeOptions, 0, len(nodes)) + for _, node := range nodes { + newNodeOpts, err := l.buildNodeBalancerNodeConfigRebuildOptions(service, node, nodePort, subnetID, useIPv6Backends, protocol) + if err != nil { + return nil, fmt.Errorf("failed to build NodeBalancer node config options for node %s: %w", node.Name, err) + } + oldNodeID, ok := oldNBNodeIDs[newNodeOpts.Address] + if ok { + newNodeOpts.ID = oldNodeID + } else { + klog.Infof("No preexisting node id for %v found.", newNodeOpts.Address) + } + newNBNodes = append(newNBNodes, *newNodeOpts) + } + + return newNBNodes, nil +} + func (l *loadbalancers) retrieveKubeClient() error { if 
l.kubeClient != nil { return nil @@ -1379,6 +1414,23 @@ func getNodePrivateIP(node *v1.Node, subnetID int) (string, error) { return "", fmt.Errorf("no internal IP found for node %s", node.Name) } +func getNodeBackendIP(service *v1.Service, node *v1.Node, subnetID int, useIPv6Backends bool) (string, error) { + if !useIPv6Backends { + return getNodePrivateIP(node, subnetID) + } + + if publicIPv6, exists := node.Annotations[annotations.AnnLinodeNodePublicIPv6]; exists { + if prefix, err := netip.ParsePrefix(publicIPv6); err == nil { + if addr := prefix.Addr(); addr.Is6() { + return addr.String(), nil + } + } + } + + klog.V(4).Infof("Service %s requested IPv6 backends but node %s does not have a public IPv6 address", getServiceNn(service), node.Name) + return "", fmt.Errorf("service %s requested IPv6 backends but node %s does not have a public IPv6 address", getServiceNn(service), node.Name) +} + func getTLSCertInfo(ctx context.Context, kubeClient kubernetes.Interface, namespace string, config portConfig) (string, string, error) { if config.TLSSecretName == "" { return "", "", fmt.Errorf("TLS secret name for port %v is not specified", config.Port) diff --git a/cloud/linode/loadbalancers_test.go b/cloud/linode/loadbalancers_test.go index 3351b9fa..9f01ab97 100644 --- a/cloud/linode/loadbalancers_test.go +++ b/cloud/linode/loadbalancers_test.go @@ -8,6 +8,7 @@ import ( stderrors "errors" "fmt" "math/rand" + "net" "net/http" "net/http/httptest" "os" @@ -4075,6 +4076,390 @@ func Test_getNodePrivateIP(t *testing.T) { } } +func Test_getNodeBackendIP(t *testing.T) { + service := &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "svc", + Namespace: "default", + }, + } + + testcases := []struct { + name string + node *v1.Node + subnetID int + useIPv6Backends bool + expectedIP string + expectErr bool + }{ + { + name: "uses existing IPv4 path for non-vpc services", + node: &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node-1", + Annotations: map[string]string{ + 
annotations.AnnLinodeNodePrivateIP: "192.168.10.10", + }, + }, + Status: v1.NodeStatus{ + Addresses: []v1.NodeAddress{ + {Type: v1.NodeInternalIP, Address: "10.0.0.2"}, + {Type: v1.NodeExternalIP, Address: "2600:3c06::1"}, + }, + }, + }, + expectedIP: "192.168.10.10", + }, + { + name: "uses public IPv6 annotation when IPv6 backends are enabled", + node: &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node-1", + Annotations: map[string]string{ + annotations.AnnLinodeNodePublicIPv6: "2600:3c06::1/128", + }, + }, + Status: v1.NodeStatus{ + Addresses: []v1.NodeAddress{ + {Type: v1.NodeExternalIP, Address: "172.232.0.2"}, + {Type: v1.NodeExternalIP, Address: "fd00::10"}, + }, + }, + }, + useIPv6Backends: true, + expectedIP: "2600:3c06::1", + }, + { + name: "uses public IPv6 annotation even when VPC backends are enabled", + node: &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node-1", + Annotations: map[string]string{ + annotations.AnnLinodeNodePrivateIP: "192.168.10.10", + annotations.AnnLinodeNodePublicIPv6: "2600:3c06::1/128", + }, + }, + Status: v1.NodeStatus{ + Addresses: []v1.NodeAddress{ + {Type: v1.NodeInternalIP, Address: "10.0.0.2"}, + {Type: v1.NodeExternalIP, Address: "fd00::20"}, + }, + }, + }, + subnetID: 100, + useIPv6Backends: true, + expectedIP: "2600:3c06::1", + }, + { + name: "errors when public IPv6 annotation is missing prefix length", + node: &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node-1", + Annotations: map[string]string{ + annotations.AnnLinodeNodePublicIPv6: "2600:3c06::2", + }, + }, + Status: v1.NodeStatus{ + Addresses: []v1.NodeAddress{ + {Type: v1.NodeExternalIP, Address: "172.232.0.2"}, + {Type: v1.NodeExternalIP, Address: "fd00::11"}, + }, + }, + }, + useIPv6Backends: true, + expectErr: true, + }, + { + name: "errors when public IPv6 annotation is not a valid IPv6 value", + node: &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node-1", + Annotations: map[string]string{ + annotations.AnnLinodeNodePublicIPv6: 
"not-an-ip", + }, + }, + Status: v1.NodeStatus{ + Addresses: []v1.NodeAddress{ + {Type: v1.NodeExternalIP, Address: "172.232.0.2"}, + {Type: v1.NodeExternalIP, Address: "2600:3c06::1"}, + }, + }, + }, + useIPv6Backends: true, + expectErr: true, + }, + { + name: "errors when IPv6 backends are requested and node lacks public IPv6 annotation", + node: &v1.Node{ + ObjectMeta: metav1.ObjectMeta{Name: "node-1"}, + Status: v1.NodeStatus{ + Addresses: []v1.NodeAddress{ + {Type: v1.NodeExternalIP, Address: "172.232.0.2"}, + {Type: v1.NodeExternalIP, Address: "2600:3c06::1"}, + }, + }, + }, + useIPv6Backends: true, + expectErr: true, + }, + } + + for _, test := range testcases { + t.Run(test.name, func(t *testing.T) { + ip, err := getNodeBackendIP(service, test.node, test.subnetID, test.useIPv6Backends) + if test.expectErr { + if err == nil { + t.Fatal("expected error") + } + return + } + + if err != nil { + t.Fatal(err) + } + + if ip != test.expectedIP { + t.Fatalf("expected backend address %q, got %q", test.expectedIP, ip) + } + }) + } +} + +func Test_resolveIPv6NodeBalancerBackendState(t *testing.T) { + prev := options.Options.EnableIPv6ForNodeBalancerBackends + defer func() { + options.Options.EnableIPv6ForNodeBalancerBackends = prev + }() + + testcases := []struct { + name string + globalFlag bool + service *v1.Service + expectedUseIPv6 bool + }{ + { + name: "service annotation enables IPv6", + globalFlag: false, + service: &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + annotations.AnnLinodeEnableIPv6Backends: "true", + }, + }, + }, + expectedUseIPv6: true, + }, + { + name: "service annotation disables IPv6 even when global flag is enabled", + globalFlag: true, + service: &v1.Service{ObjectMeta: metav1.ObjectMeta{Annotations: map[string]string{annotations.AnnLinodeEnableIPv6Backends: "false"}}}, + expectedUseIPv6: false, + }, + { + name: "services use global IPv6 backend flag when annotation is absent", + globalFlag: true, + service: 
&v1.Service{ObjectMeta: metav1.ObjectMeta{Annotations: map[string]string{}}}, + expectedUseIPv6: true, + }, + { + name: "services do not use IPv6 when annotation is absent and global flag is disabled", + globalFlag: false, + service: &v1.Service{ObjectMeta: metav1.ObjectMeta{Annotations: map[string]string{}}}, + expectedUseIPv6: false, + }, + } + + for _, test := range testcases { + t.Run(test.name, func(t *testing.T) { + options.Options.EnableIPv6ForNodeBalancerBackends = test.globalFlag + useIPv6Backends := resolveIPv6NodeBalancerBackendState(test.service) + if useIPv6Backends != test.expectedUseIPv6 { + t.Fatalf("expected useIPv6Backends=%t, got %t", test.expectedUseIPv6, useIPv6Backends) + } + }) + } +} + +func Test_buildLoadBalancerRequestOmitsVPCConfigForIPv6Backends(t *testing.T) { + prevVPCNames := options.Options.VPCNames + prevSubnetNames := options.Options.SubnetNames + prevDisableVPC := options.Options.DisableNodeBalancerVPCBackends + prevEnableIPv6Backends := options.Options.EnableIPv6ForNodeBalancerBackends + defer func() { + options.Options.VPCNames = prevVPCNames + options.Options.SubnetNames = prevSubnetNames + options.Options.DisableNodeBalancerVPCBackends = prevDisableVPC + options.Options.EnableIPv6ForNodeBalancerBackends = prevEnableIPv6Backends + }() + + options.Options.VPCNames = []string{"test-vpc"} + options.Options.SubnetNames = []string{"default"} + options.Options.DisableNodeBalancerVPCBackends = false + + testcases := []struct { + name string + globalFlag bool + annotations map[string]string + }{ + { + name: "service annotation omits VPC config for IPv6 backends", + globalFlag: false, + annotations: map[string]string{ + annotations.AnnLinodeDefaultProtocol: "tcp", + annotations.AnnLinodeEnableIPv6Backends: "true", + }, + }, + { + name: "global flag omits VPC config for IPv6 backends", + globalFlag: true, + annotations: map[string]string{ + annotations.AnnLinodeDefaultProtocol: "tcp", + }, + }, + } + + for _, test := range testcases { + 
t.Run(test.name, func(t *testing.T) { + options.Options.EnableIPv6ForNodeBalancerBackends = test.globalFlag + + fake := newFake(t) + ts := httptest.NewServer(fake) + defer ts.Close() + + client := linodego.NewClient(http.DefaultClient) + client.SetBaseURL(ts.URL) + lb, ok := newLoadbalancers(&client, "us-west").(*loadbalancers) + if !ok { + t.Fatal("type assertion failed") + } + + fake.vpc[1] = &linodego.VPC{ + ID: 1, + Label: "test-vpc", + Subnets: []linodego.VPCSubnet{ + { + ID: 101, + Label: "default", + IPv4: "10.0.0.0/8", + }, + }, + } + fake.subnet[101] = &linodego.VPCSubnet{ + ID: 101, + Label: "default", + IPv4: "10.0.0.0/8", + } + + svc := &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + UID: "foobar123", + Annotations: test.annotations, + }, + Spec: v1.ServiceSpec{ + Ports: []v1.ServicePort{ + { + Name: "test", + Protocol: "TCP", + Port: int32(80), + NodePort: int32(30000), + }, + }, + }, + } + nodes := []*v1.Node{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "node-1", + Annotations: map[string]string{ + annotations.AnnLinodeNodePublicIPv6: "2600:3c06:e727:1::1/128", + }, + }, + Status: v1.NodeStatus{ + Addresses: []v1.NodeAddress{ + {Type: v1.NodeInternalIP, Address: "10.0.0.2"}, + {Type: v1.NodeExternalIP, Address: "fd00::30"}, + }, + }, + }, + } + + _, err := lb.buildLoadBalancerRequest(t.Context(), "linodelb", svc, nodes) + if err != nil { + t.Fatal(err) + } + + var req *fakeRequest + for request := range fake.requests { + if request.Method == http.MethodPost && request.Path == "/nodebalancers" { + req = &request + break + } + } + if req == nil { + t.Fatal("expected nodebalancer create request") + } + + var createOpts linodego.NodeBalancerCreateOptions + if err := json.Unmarshal([]byte(req.Body), &createOpts); err != nil { + t.Fatalf("unable to unmarshal create request body %#v, error: %#v", req.Body, err) + } + if len(createOpts.VPCs) != 0 { + t.Fatalf("expected nodebalancer create request to omit VPC config for IPv6 backends, got 
%#v", createOpts.VPCs) + } + if len(createOpts.Configs) != 1 || len(createOpts.Configs[0].Nodes) != 1 { + t.Fatalf("expected a single nodebalancer config with one backend node, got %#v", createOpts.Configs) + } + if createOpts.Configs[0].Nodes[0].SubnetID != 0 { + t.Fatalf("expected IPv6 backend node to omit subnet ID, got %#v", createOpts.Configs[0].Nodes[0]) + } + host, _, hostPortErr := net.SplitHostPort(createOpts.Configs[0].Nodes[0].Address) + if hostPortErr != nil { + t.Fatal(hostPortErr) + } + if parsedIP := net.ParseIP(host); parsedIP == nil || parsedIP.To4() != nil { + t.Fatalf("expected IPv6 backend node address, got %q", createOpts.Configs[0].Nodes[0].Address) + } + }) + } +} + +func Test_buildNodeBalancerNodeConfigRebuildOptionsOmitsSubnetIDForIPv6Backends(t *testing.T) { + lb := &loadbalancers{} + service := &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "svc-test", Namespace: "default"}} + node := &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node-1", + Annotations: map[string]string{ + annotations.AnnLinodeNodePublicIPv6: "2600:3c06:e727:1::1/128", + }, + }, + } + + opts, err := lb.buildNodeBalancerNodeConfigRebuildOptions(service, node, 30000, 101, true, linodego.ProtocolTCP) + if err != nil { + t.Fatal(err) + } + if opts.SubnetID != 0 { + t.Fatalf("expected IPv6 backend rebuild options to omit subnet ID, got %#v", opts) + } +} + +func Test_formatNodeBalancerBackendAddress(t *testing.T) { + if got := formatNodeBalancerBackendAddress("192.168.0.10", 30000); got != "192.168.0.10:30000" { + t.Fatalf("unexpected IPv4 backend address format: %s", got) + } + + got := formatNodeBalancerBackendAddress("2600:3c06::1", 30000) + host, port, err := net.SplitHostPort(got) + if err != nil { + t.Fatal(err) + } + if host != "2600:3c06::1" || port != "30000" { + t.Fatalf("unexpected IPv6 backend address format: host=%s port=%s", host, port) + } +} + func testBuildLoadBalancerRequest(t *testing.T, client *linodego.Client, _ *fakeAPI) { t.Helper() diff --git 
a/cloud/linode/options/options.go b/cloud/linode/options/options.go index 9a72d5f2..aac562f9 100644 --- a/cloud/linode/options/options.go +++ b/cloud/linode/options/options.go @@ -30,6 +30,7 @@ var Options struct { DisableNodeBalancerVPCBackends bool GlobalStopChannel chan<- struct{} EnableIPv6ForLoadBalancers bool + EnableIPv6ForNodeBalancerBackends bool AllocateNodeCIDRs bool DisableIPv6NodeCIDRAllocation bool ClusterCIDRIPv4 string diff --git a/deploy/chart/templates/daemonset.yaml b/deploy/chart/templates/daemonset.yaml index fa9387b3..f1f37e4e 100644 --- a/deploy/chart/templates/daemonset.yaml +++ b/deploy/chart/templates/daemonset.yaml @@ -172,6 +172,9 @@ spec: {{- if .Values.enableIPv6ForLoadBalancers }} - --enable-ipv6-for-loadbalancers={{ .Values.enableIPv6ForLoadBalancers }} {{- end }} + {{- if .Values.enableIPv6ForNodeBalancerBackends }} + - --enable-ipv6-for-nodebalancer-backends={{ .Values.enableIPv6ForNodeBalancerBackends }} + {{- end }} {{- if .Values.nodeBalancerBackendIPv4Subnet }} - --nodebalancer-backend-ipv4-subnet={{ .Values.nodeBalancerBackendIPv4Subnet }} {{- end }} diff --git a/deploy/chart/values.yaml b/deploy/chart/values.yaml index 2f55f503..17c9d4ed 100644 --- a/deploy/chart/values.yaml +++ b/deploy/chart/values.yaml @@ -108,6 +108,13 @@ tolerations: # This can also be controlled per-service using the "service.beta.kubernetes.io/linode-loadbalancer-enable-ipv6-ingress" annotation # enableIPv6ForLoadBalancers: true +# Enable public IPv6 backend addresses for NodeBalancer services. +# VPC IPv6 backend addresses are not currently supported. +# When enabled globally, both newly created and existing eligible services may be reconciled to use IPv6 backends. +# Per-service behavior can be overridden with the "service.beta.kubernetes.io/linode-loadbalancer-enable-ipv6-backends" annotation; set it to "false" to keep a service on IPv4 backends only. 
+# If your cluster uses VPC-backed NodeBalancers, the nodes must still expose public IPv6 endpoints so CCM can read the node.k8s.linode.com/public-ipv6 annotation and program IPv6 backends. +# enableIPv6ForNodeBalancerBackends: false + # disableNodeBalancerVPCBackends is used to disable the use of VPC backends for NodeBalancers. # When set to true, NodeBalancers will use linode private IPs for backends instead of VPC IPs. # disableNodeBalancerVPCBackends: false diff --git a/devbox.json b/devbox.json index 0a62b2cb..e5b8284a 100644 --- a/devbox.json +++ b/devbox.json @@ -1,7 +1,6 @@ { "packages": [ "ctlptl@latest", - "clusterctl@latest", "docker@29.2.0", "envsubst@latest", "go@1.26.0", @@ -12,7 +11,8 @@ "kustomize@5.8.0", "kyverno-chainsaw@latest", "mockgen@1.6.0", - "yq-go@4.52.2" + "yq-go@4.52.2", + "clusterctl@1.12.2" ], "shell": { "init_hook": [ diff --git a/devbox.lock b/devbox.lock index 3a655772..1267727a 100644 --- a/devbox.lock +++ b/devbox.lock @@ -1,51 +1,51 @@ { "lockfile_version": "1", "packages": { - "clusterctl@latest": { - "last_modified": "2025-05-16T20:19:48Z", - "resolved": "github:NixOS/nixpkgs/12a55407652e04dcf2309436eb06fef0d3713ef3#clusterctl", + "clusterctl@1.12.2": { + "last_modified": "2026-02-23T15:40:43Z", + "resolved": "github:NixOS/nixpkgs/80d901ec0377e19ac3f7bb8c035201e2e098cc97#clusterctl", "source": "devbox-search", - "version": "1.10.1", + "version": "1.12.2", "systems": { "aarch64-darwin": { "outputs": [ { "name": "out", - "path": "/nix/store/yqjd06qrwmh64rfcrjsvhfkn555gnl77-clusterctl-1.10.1", + "path": "/nix/store/778jxvqkkq2417wf7khzfwm3w1w9z4n1-clusterctl-1.12.2", "default": true } ], - "store_path": "/nix/store/yqjd06qrwmh64rfcrjsvhfkn555gnl77-clusterctl-1.10.1" + "store_path": "/nix/store/778jxvqkkq2417wf7khzfwm3w1w9z4n1-clusterctl-1.12.2" }, "aarch64-linux": { "outputs": [ { "name": "out", - "path": "/nix/store/fkdlkd4gr4jj18g1zq87yj7whq2msn2a-clusterctl-1.10.1", + "path": 
"/nix/store/8dkagw89sav6kk19sfbvpjqa0il0pw6l-clusterctl-1.12.2", "default": true } ], - "store_path": "/nix/store/fkdlkd4gr4jj18g1zq87yj7whq2msn2a-clusterctl-1.10.1" + "store_path": "/nix/store/8dkagw89sav6kk19sfbvpjqa0il0pw6l-clusterctl-1.12.2" }, "x86_64-darwin": { "outputs": [ { "name": "out", - "path": "/nix/store/v66xr4x87ac25k0w9lvy0x56m2ldplvs-clusterctl-1.10.1", + "path": "/nix/store/8844gbxh2cfkpgszmnwhxplm43pg7z1r-clusterctl-1.12.2", "default": true } ], - "store_path": "/nix/store/v66xr4x87ac25k0w9lvy0x56m2ldplvs-clusterctl-1.10.1" + "store_path": "/nix/store/8844gbxh2cfkpgszmnwhxplm43pg7z1r-clusterctl-1.12.2" }, "x86_64-linux": { "outputs": [ { "name": "out", - "path": "/nix/store/zlds3iyclypjmkpbr88wkw69vg6pvmi7-clusterctl-1.10.1", + "path": "/nix/store/b9sid26w0lnafiyzxpyxcink9crcvzh6-clusterctl-1.12.2", "default": true } ], - "store_path": "/nix/store/zlds3iyclypjmkpbr88wkw69vg6pvmi7-clusterctl-1.10.1" + "store_path": "/nix/store/b9sid26w0lnafiyzxpyxcink9crcvzh6-clusterctl-1.12.2" } } }, diff --git a/docs/configuration/annotations.md b/docs/configuration/annotations.md index 69144cf3..dc708cfc 100644 --- a/docs/configuration/annotations.md +++ b/docs/configuration/annotations.md @@ -40,6 +40,7 @@ The keys and the values in [annotations must be strings](https://kubernetes.io/d | `firewall-acl` | string | | The Firewall rules to be applied to the NodeBalancer. See [Firewall Configuration](#firewall-configuration) | | `nodebalancer-type` | string | | The type of NodeBalancer to create (options: common, premium, premium_40gb). See [NodeBalancer Types](#nodebalancer-type). Note: NodeBalancer types should always be specified in lowercase. | | `enable-ipv6-ingress` | bool | `false` | When `true`, both IPv4 and IPv6 addresses will be included in the LoadBalancerStatus ingress | +| `enable-ipv6-backends` | bool | `false` | When `true`, NodeBalancer services use node public IPv6 addresses as backend targets. VPC IPv6 backend addresses are not supported. 
| | `backend-ipv4-range` | string | | The IPv4 range from VPC subnet to be applied to the NodeBalancer backend. See [Nodebalancer VPC Configuration](#nodebalancer-vpc-configuration) | | `backend-vpc-name` | string | | VPC which is connected to the NodeBalancer backend. See [Nodebalancer VPC Configuration](#nodebalancer-vpc-configuration) | | `backend-subnet-name` | string | | Subnet within VPC which is connected to the NodeBalancer backend. See [Nodebalancer VPC Configuration](#nodebalancer-vpc-configuration) | diff --git a/docs/configuration/environment.md b/docs/configuration/environment.md index cbed0124..31b5b4c3 100644 --- a/docs/configuration/environment.md +++ b/docs/configuration/environment.md @@ -53,6 +53,7 @@ The CCM supports the following flags: | `--nodebalancer-backend-ipv4-subnet-name` | String | `""` | ipv4 subnet name to use for NodeBalancer backends | | `--disable-nodebalancer-vpc-backends` | Boolean | `false` | don't use VPC specific ip-addresses for nodebalancer backend ips when running in VPC (set to `true` for backward compatibility if needed) | | `--enable-ipv6-for-loadbalancers` | Boolean | `false` | Set both IPv4 and IPv6 addresses for all LoadBalancer services (when disabled, only IPv4 is used). This can also be configured per-service using the `service.beta.kubernetes.io/linode-loadbalancer-enable-ipv6-ingress` annotation. | +| `--enable-ipv6-for-nodebalancer-backends` | Boolean | `false` | Use node public IPv6 addresses for NodeBalancer service backends. VPC IPv6 backend addresses are not supported. Can also be configured per-service using the `service.beta.kubernetes.io/linode-loadbalancer-enable-ipv6-backends` annotation. | | `--node-cidr-mask-size-ipv4` | Int | `24` | ipv4 cidr mask size for pod cidrs allocated to nodes | | `--node-cidr-mask-size-ipv6` | Int | `64` | ipv6 cidr mask size for pod cidrs allocated to nodes | | `--nodebalancer-prefix` | String | `ccm` | Name prefix for NoadBalancers. 
| diff --git a/docs/configuration/loadbalancer.md b/docs/configuration/loadbalancer.md index 742d3e37..21c93801 100644 --- a/docs/configuration/loadbalancer.md +++ b/docs/configuration/loadbalancer.md @@ -44,6 +44,68 @@ metadata: When IPv6 is enabled (either globally or per-service), both IPv4 and IPv6 addresses will be included in the service's LoadBalancer status. +### IPv6 Backend Support + +IPv6 frontends and IPv6 backends are configured independently. Frontend IPv6 controls what the Service publishes in `status.loadBalancer.ingress`, while backend IPv6 controls which node addresses a NodeBalancer targets. + +IPv6 backends require a dual-stack workload cluster. In practice, the cluster networking stack must support IPv6 NodePort traffic, and the Service itself should be created as dual-stack. A single-stack IPv4 `LoadBalancer` Service can still be annotated for IPv6 backends, but the NodeBalancer health checks and traffic path may fail because the backend NodePort is not exposed over IPv6. 
+ +You can enable IPv6 backends globally for NodeBalancer services: + +```yaml +spec: + template: + spec: + containers: + - name: ccm-linode + args: + - --enable-ipv6-for-nodebalancer-backends=true +``` + +Or per service: + +```yaml +metadata: + annotations: + service.beta.kubernetes.io/linode-loadbalancer-enable-ipv6-backends: "true" +``` + +When IPv6 backends are enabled: +- NodeBalancer backend targets use the node public IPv6 annotation `node.k8s.linode.com/public-ipv6` +- IPv6 NodeBalancer backends are currently supported only through node public IPv6 addresses, not VPC IPv6 backend addresses +- both VPC-backed and non-VPC-backed NodeBalancer services are affected +- when VPC-backed NodeBalancers are enabled, CCM preserves the NodeBalancer VPC configuration instead of dropping it +- enabling the global `--enable-ipv6-for-nodebalancer-backends` flag can migrate existing eligible NodeBalancer services from IPv4 to IPv6 backends during reconcile +- to keep an existing Service on IPv4 while the global flag is enabled, set `service.beta.kubernetes.io/linode-loadbalancer-enable-ipv6-backends: "false"` on that Service +- every selected backend node must have a public IPv6 address available +- if your cluster uses VPC backends, the nodes still need public IPv6 endpoints so CCM can program IPv6 NodeBalancer backends +- the workload cluster and Service must be configured for dual-stack networking +- reconciliation fails and CCM logs an error if a selected backend node does not have the required public IPv6 address + +Recommended Service configuration for IPv6 backends: + +```yaml +apiVersion: v1 +kind: Service +metadata: + name: my-service + annotations: + service.beta.kubernetes.io/linode-loadbalancer-enable-ipv6-backends: "true" +spec: + type: LoadBalancer + ipFamilyPolicy: RequireDualStack + ipFamilies: + - IPv4 + - IPv6 + ports: + - port: 80 + targetPort: 8080 + selector: + app: my-app +``` + +If your cluster does not provide IPv6-capable NodePort routing, the 
NodeBalancer may still be created with IPv6 backend addresses, but the backends will not become healthy. Likewise, if your cluster is using VPC backends but the nodes do not also have public IPv6 endpoints, IPv6 backend reconciliation will fail because CCM does not currently program VPC IPv6 backend addresses. + ### Basic Configuration Create a LoadBalancer service: diff --git a/e2e/test/lb-with-ipv6-backends/chainsaw-test.yaml b/e2e/test/lb-with-ipv6-backends/chainsaw-test.yaml new file mode 100644 index 00000000..e923db04 --- /dev/null +++ b/e2e/test/lb-with-ipv6-backends/chainsaw-test.yaml @@ -0,0 +1,157 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-with-ipv6-backends + labels: + all: +spec: + namespace: "lb-with-ipv6-backends" + catch: + - script: + content: | + set -euo pipefail + echo "Test failed. Fetching CCM logs..." + kubectl logs -n kube-system daemonsets/ccm-linode | grep "lb-with-ipv6-backends" | tail -100 || true + steps: + - name: Create pods and service + try: + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check that loadbalancer ip is assigned + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: svc-test + status: + (loadBalancer.ingress[0].ip != null): true + - name: Check NodeBalancer backend addresses are IPv6 + try: + - script: + content: | + set -euo pipefail + + nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + nbconfig=$(curl -s \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" --fail-early --retry 3 \ + "$LINODE_URL/v4/nodebalancers/$nbid/configs") + + config_id=$(echo "$nbconfig" | jq -r '.data[] | select(.port == 80) | .id') + + nodes=$(curl -s \ + -H 
"Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" --fail-early --retry 3 \ + "$LINODE_URL/v4/nodebalancers/$nbid/configs/$config_id/nodes") + + addresses=$(echo "$nodes" | jq -r '.data[].address') + + if [[ -z "$addresses" ]]; then + echo "NO_BACKEND_ADDRESSES" + fi + + for address in $addresses; do + if [[ $address =~ ^\[(.*)\]:([0-9]+)$ ]]; then + host="${BASH_REMATCH[1]}" + else + host="${address%:*}" + fi + + if [[ $host == *:* ]]; then + echo "$address is IPv6" + else + echo "$address is NOT IPv6" + fi + done + check: + ($error): ~ + (contains($stdout, 'NO_BACKEND_ADDRESSES')): false + (contains($stdout, 'is NOT IPv6')): false + - name: Wait for loadbalancer to start serving traffic + try: + - script: + timeout: 10m + content: | + bash -ce ' + TARGET_IP=$(kubectl get svc svc-test -n "$NAMESPACE" -o json | jq -r '"'"'.status.loadBalancer.ingress[]? | select(.ip != null and (.ip | contains(":") | not)) | .ip'"'"' | head -n1) + TARGET="http://$TARGET_IP:80" + + for i in {1..24}; do + output=$(curl -s --max-time 8 "$TARGET" | jq -r .podName 2>/dev/null || true) + if [[ "$output" == *"test-"* ]]; then + echo "loadbalancer ready" + exit 0 + fi + sleep 5 + done + + echo "loadbalancer not ready" + exit 1 + ' + check: + ($error == null): true + (contains($stdout, 'loadbalancer ready')): true + - name: Fetch loadbalancer ip and check both pods reachable + try: + - script: + timeout: 10m + content: | + bash -ce ' + TARGET_IP=$(kubectl get svc svc-test -n "$NAMESPACE" -o json | jq -r '"'"'.status.loadBalancer.ingress[]? 
| select(.ip != null and (.ip | contains(":") | not)) | .ip'"'"' | head -n1) + TARGET="http://$TARGET_IP:80" + podnames=() + + for i in {1..10}; do + if [[ ${#podnames[@]} -lt 2 ]]; then + output=$(curl -s --max-time 8 "$TARGET" | jq -e .podName || true) + + if [[ "$output" == *"test-"* ]]; then + unique=true + for existing in "${podnames[@]}"; do + if [[ "$existing" == "$output" ]]; then + unique=false + break + fi + done + if [[ "$unique" == true ]]; then + podnames+=("$output") + fi + fi + else + break + fi + sleep 10 + done + + if [[ ${#podnames[@]} -lt 2 ]]; then + echo "all pods failed to respond" + else + echo "all pods responded" + fi + ' + check: + ($error == null): true + (contains($stdout, 'all pods responded')): true + - name: Delete Pods + try: + - delete: + ref: + apiVersion: v1 + kind: Pod + - name: Delete Service + try: + - delete: + ref: + apiVersion: v1 + kind: Service diff --git a/e2e/test/lb-with-ipv6-backends/create-pods-services.yaml b/e2e/test/lb-with-ipv6-backends/create-pods-services.yaml new file mode 100644 index 00000000..dcbb1962 --- /dev/null +++ b/e2e/test/lb-with-ipv6-backends/create-pods-services.yaml @@ -0,0 +1,53 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: lb-ipv6-backends + name: test +spec: + replicas: 2 + selector: + matchLabels: + app: lb-ipv6-backends + template: + metadata: + labels: + app: lb-ipv6-backends + spec: + containers: + - image: appscode/test-server:2.3 + name: test + ports: + - name: http-1 + containerPort: 8080 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + name: svc-test + annotations: + service.beta.kubernetes.io/linode-loadbalancer-enable-ipv6-backends: "true" + labels: + app: lb-ipv6-backends +spec: + type: LoadBalancer + ipFamilyPolicy: RequireDualStack + ipFamilies: + - IPv4 + - IPv6 + selector: + app: lb-ipv6-backends + ports: + - name: http-1 + protocol: TCP + 
port: 80 + targetPort: 8080 + sessionAffinity: None diff --git a/main.go b/main.go index d477ea80..afe5941c 100644 --- a/main.go +++ b/main.go @@ -95,6 +95,7 @@ func main() { command.Flags().StringVar(&ccmOptions.Options.NodeBalancerBackendIPv4Subnet, "nodebalancer-backend-ipv4-subnet", "", "ipv4 subnet to use for NodeBalancer backends") command.Flags().StringSliceVar(&ccmOptions.Options.NodeBalancerTags, "nodebalancer-tags", []string{}, "Linode tags to apply to all NodeBalancers") command.Flags().BoolVar(&ccmOptions.Options.EnableIPv6ForLoadBalancers, "enable-ipv6-for-loadbalancers", false, "set both IPv4 and IPv6 addresses for all LoadBalancer services (when disabled, only IPv4 is used)") + command.Flags().BoolVar(&ccmOptions.Options.EnableIPv6ForNodeBalancerBackends, "enable-ipv6-for-nodebalancer-backends", false, "use public IPv6 addresses for NodeBalancer service backends, including VPC-backed NodeBalancers (when enabled, may update existing services during reconciliation and all selected backend nodes must have public IPv6)") command.Flags().IntVar(&ccmOptions.Options.NodeCIDRMaskSizeIPv4, "node-cidr-mask-size-ipv4", 0, "ipv4 cidr mask size for pod cidrs allocated to nodes") command.Flags().IntVar(&ccmOptions.Options.NodeCIDRMaskSizeIPv6, "node-cidr-mask-size-ipv6", 0, "ipv6 cidr mask size for pod cidrs allocated to nodes") command.Flags().IntVar(&ccmOptions.Options.NodeBalancerBackendIPv4SubnetID, "nodebalancer-backend-ipv4-subnet-id", 0, "ipv4 subnet id to use for NodeBalancer backends")