diff --git a/go.mod b/go.mod index 14c3d54d1d..aab1676d03 100644 --- a/go.mod +++ b/go.mod @@ -85,6 +85,7 @@ require ( k8s.io/kops v1.33.1 k8s.io/kubelet v0.35.0 k8s.io/utils v0.0.0-20260108192941-914a6e750570 + sigs.k8s.io/gateway-api v1.4.1 sigs.k8s.io/mdtoc v1.4.0 sigs.k8s.io/yaml v1.6.0 ) @@ -193,7 +194,7 @@ require ( github.com/docker/distribution v2.8.3+incompatible // indirect github.com/docker/docker-credential-helpers v0.9.3 // indirect github.com/dustin/go-humanize v1.0.1 // indirect - github.com/emicklei/go-restful/v3 v3.12.2 // indirect + github.com/emicklei/go-restful/v3 v3.13.0 // indirect github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect github.com/ettle/strcase v0.2.0 // indirect github.com/evanphx/json-patch v5.9.11+incompatible // indirect @@ -212,9 +213,9 @@ require ( github.com/go-ini/ini v1.67.0 // indirect github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-openapi/jsonpointer v0.21.0 // indirect + github.com/go-openapi/jsonpointer v0.21.2 // indirect github.com/go-openapi/jsonreference v0.21.0 // indirect - github.com/go-openapi/swag v0.23.0 // indirect + github.com/go-openapi/swag v0.23.1 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/go-toolsmith/astcast v1.1.0 // indirect github.com/go-toolsmith/astcopy v1.1.0 // indirect @@ -346,7 +347,7 @@ require ( github.com/prometheus/client_golang v1.23.2 // indirect github.com/prometheus/client_model v0.6.2 // indirect github.com/prometheus/common v0.66.1 // indirect - github.com/prometheus/procfs v0.16.1 // indirect + github.com/prometheus/procfs v0.17.0 // indirect github.com/quasilyte/go-ruleguard v0.4.5 // indirect github.com/quasilyte/go-ruleguard/dsl v0.3.23 // indirect github.com/quasilyte/gogrep v0.5.0 // indirect @@ -430,8 +431,8 @@ require ( golang.org/x/time v0.12.0 // indirect golang.org/x/tools v0.40.0 // indirect google.golang.org/api v0.239.0 // indirect - 
google.golang.org/genproto/googleapis/rpc v0.0.0-20250818200422-3122310a409c // indirect - google.golang.org/grpc v1.75.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250826171959-ef028d996bc1 // indirect + google.golang.org/grpc v1.75.1 // indirect google.golang.org/protobuf v1.36.10 // indirect gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect @@ -441,7 +442,7 @@ require ( honnef.co/go/tools v0.6.1 // indirect k8s.io/apiserver v0.35.0 // indirect k8s.io/component-base v0.35.0 // indirect - k8s.io/gengo/v2 v2.0.0-20250604051438-85fd79dbfd9f // indirect + k8s.io/gengo/v2 v2.0.0-20250820003526-c297c0c1eb9d // indirect k8s.io/klog/v2 v2.130.1 // indirect k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 // indirect k8s.io/kubectl v0.34.0 // indirect diff --git a/go.sum b/go.sum index 8ad2ff4c5d..60d3489e0c 100644 --- a/go.sum +++ b/go.sum @@ -313,8 +313,9 @@ github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHz github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU= github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.13.0 h1:C4Bl2xDndpU6nJ4bc1jXd+uTmYPVUwkD6bFY/oTyCes= +github.com/emicklei/go-restful/v3 v3.13.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6baUTXGLOoWe4PQhGxaX0KpnayAqC48p4= github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f/go.mod h1:vw97MGsxSvLiUE2X8qFplwetxpGLQrlU1Q9AUEIzCaM= github.com/ettle/strcase v0.2.0 h1:fGNiVF21fHXpX1niBgk0aROov1LagYsOwV/xqKDKR/Q= @@ -379,15 +380,17 @@ 
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ4 github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= -github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= +github.com/go-openapi/jsonpointer v0.21.2 h1:AqQaNADVwq/VnkCmQg6ogE+M3FOsKTytwges0JdwVuA= +github.com/go-openapi/jsonpointer v0.21.2/go.mod h1:50I1STOfbY1ycR8jGz8DaMeLCdXiI6aDteEdRNNzpdk= github.com/go-openapi/jsonreference v0.20.1/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= -github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= +github.com/go-openapi/swag v0.23.1 h1:lpsStH0n2ittzTnbaSloVZLuB5+fvSY/+hnagBjSNZU= +github.com/go-openapi/swag v0.23.1/go.mod h1:STZs8TbRvEQQKUA+JZNAm3EWlgaOBGpyFDqQnDHMef0= github.com/go-quicktest/qt v1.101.0 h1:O1K29Txy5P2OK0dGo59b7b0LR6wKfIhttaAhHUyn7eI= github.com/go-quicktest/qt v1.101.0/go.mod h1:14Bz/f7NwaXPtdYEgzsx46kqSxVwTbzVZsDC26tQJow= github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y= @@ -695,8 +698,8 @@ github.com/mfridman/tparse v0.18.0 h1:wh6dzOKaIwkUGyKgOntDW4liXSo37qg5AXbIhkMV3v github.com/mfridman/tparse v0.18.0/go.mod h1:gEvqZTuCgEhPbYk/2lS3Kcxg1GmTxxU7kTC8DvP0i/A= 
github.com/mgechev/revive v1.12.0 h1:Q+/kkbbwerrVYPv9d9efaPGmAO/NsxwW/nE6ahpQaCU= github.com/mgechev/revive v1.12.0/go.mod h1:VXsY2LsTigk8XU9BpZauVLjVrhICMOV3k1lpB3CXrp8= -github.com/miekg/dns v1.1.62 h1:cN8OuEF1/x5Rq6Np+h1epln8OiyPWV+lROx9LxcGgIQ= -github.com/miekg/dns v1.1.62/go.mod h1:mvDlcItzm+br7MToIKqkglaGhlFMHJ9DTNNWONWXbNQ= +github.com/miekg/dns v1.1.68 h1:jsSRkNozw7G/mnmXULynzMNIsgY2dHC8LO6U6Ij2JEA= +github.com/miekg/dns v1.1.68/go.mod h1:fujopn7TB3Pu3JM69XaawiU0wqjpL9/8xGop5UrTPps= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= @@ -851,8 +854,8 @@ github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNw github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs= github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA= -github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg= -github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= +github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0= +github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw= github.com/quasilyte/go-ruleguard v0.4.5 h1:AGY0tiOT5hJX9BTdx/xBdoCubQUAE2grkqY2lSwvZcA= github.com/quasilyte/go-ruleguard v0.4.5/go.mod h1:Vl05zJ538vcEEwu16V/Hdu7IYZWyKSwIy4c88Ro1kRE= github.com/quasilyte/go-ruleguard/dsl v0.3.23 h1:lxjt5B6ZCiBeeNO8/oQsegE6fLeCzuMRoVWSkXC4uvY= @@ -1349,10 +1352,10 @@ google.golang.org/genproto v0.0.0-20250603155806-513f23925822 h1:rHWScKit0gvAPuO google.golang.org/genproto v0.0.0-20250603155806-513f23925822/go.mod 
h1:HubltRL7rMh0LfnQPkMH4NPDFEWp0jw3vixw7jEM53s= google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7 h1:FiusG7LWj+4byqhbvmB+Q93B/mOxJLN2DTozDuZm4EU= google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7/go.mod h1:kXqgZtrWaf6qS3jZOCnCH7WYfrvFjkC51bM8fz3RsCA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250818200422-3122310a409c h1:qXWI/sQtv5UKboZ/zUk7h+mrf/lXORyI+n9DKDAusdg= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250818200422-3122310a409c/go.mod h1:gw1tLEfykwDz2ET4a12jcXt4couGAm7IwsVaTy0Sflo= -google.golang.org/grpc v1.75.0 h1:+TW+dqTd2Biwe6KKfhE5JpiYIBWq865PhKGSXiivqt4= -google.golang.org/grpc v1.75.0/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250826171959-ef028d996bc1 h1:pmJpJEvT846VzausCQ5d7KreSROcDqmO388w5YbnltA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250826171959-ef028d996bc1/go.mod h1:GmFNa4BdJZ2a8G+wCe9Bg3wwThLrJun751XstdJt5Og= +google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI= +google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1416,8 +1419,9 @@ k8s.io/code-generator v0.34.1 h1:WpphT26E+j7tEgIUfFr5WfbJrktCGzB3JoJH9149xYc= k8s.io/code-generator v0.34.1/go.mod h1:DeWjekbDnJWRwpw3s0Jat87c+e0TgkxoR4ar608yqvg= k8s.io/component-base v0.34.1 h1:v7xFgG+ONhytZNFpIz5/kecwD+sUhVE6HU7qQUiRM4A= k8s.io/component-base v0.34.1/go.mod h1:mknCpLlTSKHzAQJJnnHVKqjxR7gBeHRv0rPXA7gdtQ0= -k8s.io/gengo/v2 v2.0.0-20250604051438-85fd79dbfd9f h1:SLb+kxmzfA87x4E4brQzB33VBbT2+x7Zq9ROIHmGn9Q= k8s.io/gengo/v2 
v2.0.0-20250604051438-85fd79dbfd9f/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU= +k8s.io/gengo/v2 v2.0.0-20250820003526-c297c0c1eb9d h1:qUrYOinhdAUL0xxhA4gPqogPBaS9nIq2l2kTb6pmeB0= +k8s.io/gengo/v2 v2.0.0-20250820003526-c297c0c1eb9d/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= @@ -1443,6 +1447,8 @@ oras.land/oras-go/v2 v2.6.0 h1:X4ELRsiGkrbeox69+9tzTu492FMUu7zJQW6eJU+I2oc= oras.land/oras-go/v2 v2.6.0/go.mod h1:magiQDfG6H1O9APp+rOsvCPcW1GD2MM7vgnKY0Y+u1o= sigs.k8s.io/controller-runtime v0.23.1 h1:TjJSM80Nf43Mg21+RCy3J70aj/W6KyvDtOlpKf+PupE= sigs.k8s.io/controller-runtime v0.23.1/go.mod h1:B6COOxKptp+YaUT5q4l6LqUJTRpizbgf9KSRNdQGns0= +sigs.k8s.io/gateway-api v1.4.1 h1:NPxFutNkKNa8UfLd2CMlEuhIPMQgDQ6DXNKG9sHbJU8= +sigs.k8s.io/gateway-api v1.4.1/go.mod h1:AR5RSqciWP98OPckEjOjh2XJhAe2Na4LHyXD2FUY7Qk= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg= diff --git a/pkg/actions/cluster/delete.go b/pkg/actions/cluster/delete.go index 781b205fbf..1d52535eb7 100644 --- a/pkg/actions/cluster/delete.go +++ b/pkg/actions/cluster/delete.go @@ -13,6 +13,7 @@ import ( ekstypes "github.com/aws/aws-sdk-go-v2/service/eks/types" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/rest" "github.com/kris-nova/logger" @@ -59,8 +60,20 @@ func deleteSharedResources(ctx context.Context, cfg *api.ClusterConfig, ctl *eks cfg.Metadata.Version = *ctl.Status.ClusterInfo.Cluster.Version - logger.Info("cleaning up AWS load balancers created by Kubernetes objects of Kind Service or 
Ingress") - if err := elb.Cleanup(ctx, ctl.AWSProvider.EC2(), ctl.AWSProvider.ELB(), ctl.AWSProvider.ELBV2(), clientSet, cfg); err != nil { + // Create rest.Config for Gateway API cleanup + // If this fails, we'll pass nil and skip Gateway cleanup but continue with Service/Ingress + var restConfig *rest.Config + if ctl.KubeProvider != nil { + rawClient, err := ctl.NewRawClient(cfg) + if err != nil { + logger.Warning("failed to create Kubernetes client for Gateway API cleanup, will skip Gateway resources: %v", err) + } else if rawClient != nil { + restConfig = rawClient.RestConfig() + } + } + + logger.Info("cleaning up AWS load balancers created by Kubernetes objects of Kind Service, Ingress, or Gateway") + if err := elb.Cleanup(ctx, ctl.AWSProvider.EC2(), ctl.AWSProvider.ELB(), ctl.AWSProvider.ELBV2(), clientSet, restConfig, cfg); err != nil { return err } } diff --git a/pkg/actions/cluster/export_test.go b/pkg/actions/cluster/export_test.go index f931f0452f..6b8ef890f9 100644 --- a/pkg/actions/cluster/export_test.go +++ b/pkg/actions/cluster/export_test.go @@ -1,6 +1,7 @@ package cluster import ( + "github.com/weaveworks/eksctl/pkg/eks" "github.com/weaveworks/eksctl/pkg/kubernetes" ) @@ -24,6 +25,14 @@ func (c *OwnedCluster) SetNewNodeGroupDrainer(newNodeGroupDrainer func(kubernete c.newNodeGroupDrainer = newNodeGroupDrainer } +func (c *OwnedCluster) MockKubeProvider(k eks.KubeProvider) { + c.ctl.KubeProvider = k +} + +func (c *UnownedCluster) MockKubeProvider(k eks.KubeProvider) { + c.ctl.KubeProvider = k +} + func SetProviderConstructor(f ProviderConstructor) { newClusterProvider = f } diff --git a/pkg/actions/cluster/owned_test.go b/pkg/actions/cluster/owned_test.go index 01ddccf3e8..04b1968eb3 100644 --- a/pkg/actions/cluster/owned_test.go +++ b/pkg/actions/cluster/owned_test.go @@ -23,6 +23,7 @@ import ( "github.com/weaveworks/eksctl/pkg/cfn/manager/fakes" "github.com/weaveworks/eksctl/pkg/ctl/cmdutils" "github.com/weaveworks/eksctl/pkg/eks" + eksfakes 
"github.com/weaveworks/eksctl/pkg/eks/fakes" "github.com/weaveworks/eksctl/pkg/kubernetes" "github.com/weaveworks/eksctl/pkg/testutils" "github.com/weaveworks/eksctl/pkg/testutils/mockprovider" @@ -119,6 +120,11 @@ var _ = Describe("Delete", func() { c := cluster.NewOwnedCluster(cfg, ctl, nil, fakeStackManager, autoModeDeleter) fakeClientSet = fake.NewSimpleClientset() + // Mock KubeProvider to return nil for NewRawClient (Gateway API not available) + fakeKubeProvider := &eksfakes.FakeKubeProvider{} + fakeKubeProvider.NewRawClientReturns(nil, nil) + c.MockKubeProvider(fakeKubeProvider) + c.SetNewClientSet(func() (kubernetes.Interface, error) { return fakeClientSet, nil }) @@ -187,6 +193,11 @@ var _ = Describe("Delete", func() { c := cluster.NewOwnedCluster(cfg, ctl, nil, fakeStackManager, autoModeDeleter) fakeClientSet = fake.NewSimpleClientset() + // Mock KubeProvider to return nil for NewRawClient (Gateway API not available) + fakeKubeProvider := &eksfakes.FakeKubeProvider{} + fakeKubeProvider.NewRawClientReturns(nil, nil) + c.MockKubeProvider(fakeKubeProvider) + c.SetNewClientSet(func() (kubernetes.Interface, error) { return fakeClientSet, nil }) @@ -252,6 +263,11 @@ var _ = Describe("Delete", func() { c := cluster.NewOwnedCluster(cfg, ctl, nil, fakeStackManager, autoModeDeleter) fakeClientSet = fake.NewSimpleClientset() + // Mock KubeProvider to return nil for NewRawClient (Gateway API not available) + fakeKubeProvider := &eksfakes.FakeKubeProvider{} + fakeKubeProvider.NewRawClientReturns(nil, nil) + c.MockKubeProvider(fakeKubeProvider) + c.SetNewClientSet(func() (kubernetes.Interface, error) { return fakeClientSet, nil }) @@ -320,6 +336,12 @@ var _ = Describe("Delete", func() { }, nil) c := cluster.NewOwnedCluster(cfg, ctl, nil, fakeStackManager, autoModeDeleter) + + // Mock KubeProvider to return nil for NewRawClient (Gateway API not available) + fakeKubeProvider := &eksfakes.FakeKubeProvider{} + fakeKubeProvider.NewRawClientReturns(nil, nil) + 
c.MockKubeProvider(fakeKubeProvider) + c.SetNewNodeGroupDrainer(func(clientSet kubernetes.Interface) cluster.NodeGroupDrainer { mockedDrainer := &drainerMockOwned{} mockedDrainer.On("Drain", mock.Anything).Return(nil) diff --git a/pkg/actions/cluster/unowned_test.go b/pkg/actions/cluster/unowned_test.go index 904023fd95..29ff02a5e7 100644 --- a/pkg/actions/cluster/unowned_test.go +++ b/pkg/actions/cluster/unowned_test.go @@ -25,6 +25,7 @@ import ( "github.com/weaveworks/eksctl/pkg/cfn/manager/fakes" "github.com/weaveworks/eksctl/pkg/ctl/cmdutils" "github.com/weaveworks/eksctl/pkg/eks" + eksfakes "github.com/weaveworks/eksctl/pkg/eks/fakes" "github.com/weaveworks/eksctl/pkg/kubernetes" "github.com/weaveworks/eksctl/pkg/testutils" "github.com/weaveworks/eksctl/pkg/testutils/mockprovider" @@ -154,6 +155,11 @@ var _ = Describe("Delete", func() { c := cluster.NewUnownedCluster(cfg, ctl, fakeStackManager, autoModeDeleter) fakeClientSet := fake.NewSimpleClientset() + // Mock KubeProvider to return nil for NewRawClient (Gateway API not available) + fakeKubeProvider := &eksfakes.FakeKubeProvider{} + fakeKubeProvider.NewRawClientReturns(nil, nil) + c.MockKubeProvider(fakeKubeProvider) + c.SetNewClientSet(func() (kubernetes.Interface, error) { return fakeClientSet, nil }) @@ -245,6 +251,11 @@ var _ = Describe("Delete", func() { c := cluster.NewUnownedCluster(cfg, ctl, fakeStackManager, autoModeDeleter) fakeClientSet := fake.NewSimpleClientset() + // Mock KubeProvider to return nil for NewRawClient (Gateway API not available) + fakeKubeProvider := &eksfakes.FakeKubeProvider{} + fakeKubeProvider.NewRawClientReturns(nil, nil) + c.MockKubeProvider(fakeKubeProvider) + c.SetNewClientSet(func() (kubernetes.Interface, error) { return fakeClientSet, nil }) @@ -340,6 +351,11 @@ var _ = Describe("Delete", func() { c := cluster.NewUnownedCluster(cfg, ctl, fakeStackManager, autoModeDeleter) fakeClientSet := fake.NewSimpleClientset() + // Mock KubeProvider to return nil for NewRawClient 
(Gateway API not available) + fakeKubeProvider := &eksfakes.FakeKubeProvider{} + fakeKubeProvider.NewRawClientReturns(nil, nil) + c.MockKubeProvider(fakeKubeProvider) + c.SetNewClientSet(func() (kubernetes.Interface, error) { return fakeClientSet, nil }) @@ -428,6 +444,12 @@ var _ = Describe("Delete", func() { p.MockEKS().On("DeleteCluster", mock.Anything, mock.Anything).Return(&awseks.DeleteClusterOutput{}, nil) c := cluster.NewUnownedCluster(cfg, ctl, fakeStackManager, autoModeDeleter) + + // Mock KubeProvider to return nil for NewRawClient (Gateway API not available) + fakeKubeProvider := &eksfakes.FakeKubeProvider{} + fakeKubeProvider.NewRawClientReturns(nil, nil) + c.MockKubeProvider(fakeKubeProvider) + err := c.Delete(context.Background(), time.Microsecond, time.Second*0, false, false, false, 1) Expect(err).NotTo(HaveOccurred()) Expect(fakeStackManager.DeleteTasksForDeprecatedStacksCallCount()).To(Equal(1)) diff --git a/pkg/awsapi/autoscaling.go b/pkg/awsapi/autoscaling.go index 5c5bcae79a..846b71754f 100644 --- a/pkg/awsapi/autoscaling.go +++ b/pkg/awsapi/autoscaling.go @@ -64,7 +64,7 @@ type ASG interface { // [AttachTrafficSources]: https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_AttachTrafficSources.html // [Use Elastic Load Balancing to distribute traffic across the instances in your Auto Scaling group]: https://docs.aws.amazon.com/autoscaling/ec2/userguide/autoscaling-load-balancer.html AttachLoadBalancerTargetGroups(ctx context.Context, params *autoscaling.AttachLoadBalancerTargetGroupsInput, optFns ...func(*Options)) (*autoscaling.AttachLoadBalancerTargetGroupsOutput, error) - // This API operation is superseded by [https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_AttachTrafficSources.html], which can attach multiple traffic sources + // This API operation is superseded by [AttachTrafficSources], which can attach multiple traffic sources // types. 
We recommend using AttachTrafficSources to simplify how you manage // traffic sources. However, we continue to support AttachLoadBalancers . You can // use both the original AttachLoadBalancers API operation and AttachTrafficSources @@ -84,7 +84,7 @@ type ASG interface { // // [DetachLoadBalancers]: https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_DetachLoadBalancers.html // [DescribeLoadBalancers]: https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_DescribeLoadBalancers.html - // [https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_AttachTrafficSources.html]: https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_AttachTrafficSources.html + // [AttachTrafficSources]: https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_AttachTrafficSources.html // [Use Elastic Load Balancing to distribute traffic across the instances in your Auto Scaling group]: https://docs.aws.amazon.com/autoscaling/ec2/userguide/autoscaling-load-balancer.html AttachLoadBalancers(ctx context.Context, params *autoscaling.AttachLoadBalancersInput, optFns ...func(*Options)) (*autoscaling.AttachLoadBalancersOutput, error) // Attaches one or more traffic sources to the specified Auto Scaling group. 
@@ -519,7 +519,7 @@ type ASG interface { // // [AttachLoadBalancerTargetGroups]: https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_AttachLoadBalancerTargetGroups.html // [DescribeLoadBalancerTargetGroups]: https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_DescribeLoadBalancerTargetGroups.html - // [DetachTrafficSources]: https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_DescribeTrafficSources.html + // [DetachTrafficSources]: https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_DetachTrafficSources.html // [AttachTrafficSources]: https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_AttachTrafficSources.html DetachLoadBalancerTargetGroups(ctx context.Context, params *autoscaling.DetachLoadBalancerTargetGroupsInput, optFns ...func(*Options)) (*autoscaling.DetachLoadBalancerTargetGroupsOutput, error) // This API operation is superseded by [DetachTrafficSources], which can detach multiple traffic sources diff --git a/pkg/awsapi/cloudwatchlogs.go b/pkg/awsapi/cloudwatchlogs.go index 13b1dcfe46..d9baaab20d 100644 --- a/pkg/awsapi/cloudwatchlogs.go +++ b/pkg/awsapi/cloudwatchlogs.go @@ -74,10 +74,17 @@ type CloudWatchLogs interface { // [StartQuery]: https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_StartQuery.html // [GetQueryResults]: https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_GetQueryResults.html AssociateKmsKey(ctx context.Context, params *cloudwatchlogs.AssociateKmsKeyInput, optFns ...func(*Options)) (*cloudwatchlogs.AssociateKmsKeyOutput, error) + // Associates a data source with an S3 Table Integration for query access in the + // 'logs' namespace. This enables querying log data using analytics engines that + // support Iceberg such as Amazon Athena, Amazon Redshift, and Apache Spark. 
+ AssociateSourceToS3TableIntegration(ctx context.Context, params *cloudwatchlogs.AssociateSourceToS3TableIntegrationInput, optFns ...func(*Options)) (*cloudwatchlogs.AssociateSourceToS3TableIntegrationOutput, error) // Cancels the specified export task. // // The task must be in the PENDING or RUNNING state. CancelExportTask(ctx context.Context, params *cloudwatchlogs.CancelExportTaskInput, optFns ...func(*Options)) (*cloudwatchlogs.CancelExportTaskOutput, error) + // Cancels an active import task and stops importing data from the CloudTrail Lake + // Event Data Store. + CancelImportTask(ctx context.Context, params *cloudwatchlogs.CancelImportTaskInput, optFns ...func(*Options)) (*cloudwatchlogs.CancelImportTaskOutput, error) // Creates a delivery. A delivery is a connection between a logical delivery // source and a logical delivery destination that you have already created. // @@ -149,6 +156,64 @@ type CloudWatchLogs interface { // [DescribeExportTasks]: https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DescribeExportTasks.html // [Real-time processing of log data with subscriptions]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/Subscriptions.html CreateExportTask(ctx context.Context, params *cloudwatchlogs.CreateExportTaskInput, optFns ...func(*Options)) (*cloudwatchlogs.CreateExportTaskOutput, error) + // Starts an import from a data source to CloudWatch Log and creates a managed log + // group as the destination for the imported data. Currently, [CloudTrail Event Data Store]is the only + // supported data source. + // + // The import task must satisfy the following constraints: + // + // - The specified source must be in an ACTIVE state. + // + // - The API caller must have permissions to access the data in the provided + // source and to perform iam:PassRole on the provided import role which has the + // same permissions, as described below. 
+ // + // - The provided IAM role must trust the "cloudtrail.amazonaws.com" principal + // and have the following permissions: + // + // - cloudtrail:GetEventDataStoreData + // + // - logs:CreateLogGroup + // + // - logs:CreateLogStream + // + // - logs:PutResourcePolicy + // + // - (If source has an associated AWS KMS Key) kms:Decrypt + // + // - (If source has an associated AWS KMS Key) kms:GenerateDataKey + // + // Example IAM policy for provided import role: + // + // [ { "Effect": "Allow", "Action": "iam:PassRole", "Resource": + // + // "arn:aws:iam::123456789012:role/apiCallerCredentials", "Condition": { + // "StringLike": { "iam:AssociatedResourceARN": + // "arn:aws:logs:us-east-1:123456789012:log-group:aws/cloudtrail/f1d45bff-d0e3-4868-b5d9-2eb678aa32fb:*" + // } } }, { "Effect": "Allow", "Action": [ "cloudtrail:GetEventDataStoreData" ], + // "Resource": [ + // "arn:aws:cloudtrail:us-east-1:123456789012:eventdatastore/f1d45bff-d0e3-4868-b5d9-2eb678aa32fb" + // ] }, { "Effect": "Allow", "Action": [ "logs:CreateImportTask", + // "logs:CreateLogGroup", "logs:CreateLogStream", "logs:PutResourcePolicy" ], + // "Resource": [ "arn:aws:logs:us-east-1:123456789012:log-group:/aws/cloudtrail/*" + // ] }, { "Effect": "Allow", "Action": [ "kms:Decrypt", "kms:GenerateDataKey" ], + // "Resource": [ + // "arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012" ] + // } ] + // + // - If the import source has a customer managed key, the + // "cloudtrail.amazonaws.com" principal needs permissions to perform kms:Decrypt + // and kms:GenerateDataKey. + // + // - There can be no more than 3 active imports per account at a given time. + // + // - The startEventTime must be less than or equal to endEventTime. + // + // - The data being imported must be within the specified source's retention + // period. 
+ // + // [CloudTrail Event Data Store]: https://docs.aws.amazon.com/awscloudtrail/latest/userguide/query-event-data-store.html + CreateImportTask(ctx context.Context, params *cloudwatchlogs.CreateImportTaskInput, optFns ...func(*Options)) (*cloudwatchlogs.CreateImportTaskOutput, error) // Creates an anomaly detector that regularly scans one or more log groups and // look for patterns and anomalies in the logs. // @@ -231,13 +296,17 @@ type CloudWatchLogs interface { // // - Don't use ':' (colon) or '*' (asterisk) characters. CreateLogStream(ctx context.Context, params *cloudwatchlogs.CreateLogStreamInput, optFns ...func(*Options)) (*cloudwatchlogs.CreateLogStreamOutput, error) - // Creates a new Scheduled Query that runs CloudWatch Logs Insights queries on a - // schedule and delivers results to specified destinations. + // Creates a scheduled query that runs CloudWatch Logs Insights queries at regular + // intervals. Scheduled queries enable proactive monitoring by automatically + // executing queries to detect patterns and anomalies in your log data. Query + // results can be delivered to Amazon S3 for analysis or further processing. CreateScheduledQuery(ctx context.Context, params *cloudwatchlogs.CreateScheduledQueryInput, optFns ...func(*Options)) (*cloudwatchlogs.CreateScheduledQueryOutput, error) // Deletes a CloudWatch Logs account policy. This stops the account-wide policy - // from applying to log groups in the account. If you delete a data protection - // policy or subscription filter policy, any log-group level policies of those - // types remain in effect. + // from applying to log groups or data sources in the account. If you delete a data + // protection policy or subscription filter policy, any log-group level policies of + // those types remain in effect. This operation supports deletion of data + // source-based field index policies, including facet configurations, in addition + // to log group-based policies. 
// // To use this operation, you must be signed on with the correct permissions // depending on the type of policy that you are deleting. @@ -254,6 +323,11 @@ type CloudWatchLogs interface { // - To delete a field index policy, you must have the logs:DeleteIndexPolicy and // logs:DeleteAccountPolicy permissions. // + // If you delete a field index policy that included facet configurations, those + // + // facets will no longer be available for interactive exploration in the CloudWatch + // Logs Insights console. However, facet data is retained for up to 30 days. + // // If you delete a field index policy, the indexing of the log events that // happened before you deleted the policy will still be used for up to 30 days to // improve CloudWatch Logs Insights queries. @@ -302,14 +376,21 @@ type CloudWatchLogs interface { // will still be used for as many as 30 days to improve CloudWatch Logs Insights // queries. // + // If the deleted policy included facet configurations, those facets will no + // longer be available for interactive exploration in the CloudWatch Logs Insights + // console for this log group. However, facet data is retained for up to 30 days. + // // You can't use this operation to delete an account-level index policy. Instead, - // use [DeletAccountPolicy]. + // use [DeleteAccountPolicy]. // // If you delete a log-group level field index policy and there is an // account-level field index policy, in a few minutes the log group begins using - // that account-wide policy to index new incoming log events. + // that account-wide policy to index new incoming log events. This operation only + // affects log group-level policies, including any facet configurations, and + // preserves any data source-based account policies that may apply to the log + // group. 
// - // [DeletAccountPolicy]: https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DeleteAccountPolicy.html + // [DeleteAccountPolicy]: https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DeleteAccountPolicy.html DeleteIndexPolicy(ctx context.Context, params *cloudwatchlogs.DeleteIndexPolicyInput, optFns ...func(*Options)) (*cloudwatchlogs.DeleteIndexPolicyOutput, error) // Deletes the integration between CloudWatch Logs and OpenSearch Service. If your // integration has active vended logs dashboards, you must specify true for the @@ -344,8 +425,8 @@ type CloudWatchLogs interface { // Log events do not expire if they belong to log groups without a retention // policy. DeleteRetentionPolicy(ctx context.Context, params *cloudwatchlogs.DeleteRetentionPolicyInput, optFns ...func(*Options)) (*cloudwatchlogs.DeleteRetentionPolicyOutput, error) - // Deletes an existing scheduled query and all its associated configurations. This - // operation permanently removes the scheduled query and cannot be undone. + // Deletes a scheduled query and stops all future executions. This operation also + // removes any configured actions and associated resources. DeleteScheduledQuery(ctx context.Context, params *cloudwatchlogs.DeleteScheduledQueryInput, optFns ...func(*Options)) (*cloudwatchlogs.DeleteScheduledQueryOutput, error) // Deletes the specified subscription filter. 
DeleteSubscriptionFilter(ctx context.Context, params *cloudwatchlogs.DeleteSubscriptionFilterInput, optFns ...func(*Options)) (*cloudwatchlogs.DeleteSubscriptionFilterOutput, error) @@ -409,6 +490,13 @@ type CloudWatchLogs interface { // // [PutIndexPolicy]: https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutIndexPolicy.html DescribeFieldIndexes(ctx context.Context, params *cloudwatchlogs.DescribeFieldIndexesInput, optFns ...func(*Options)) (*cloudwatchlogs.DescribeFieldIndexesOutput, error) + // Gets detailed information about the individual batches within an import task, + // including their status and any error messages. For CloudTrail Event Data Store + // sources, a batch refers to a subset of stored events grouped by their eventTime. + DescribeImportTaskBatches(ctx context.Context, params *cloudwatchlogs.DescribeImportTaskBatchesInput, optFns ...func(*Options)) (*cloudwatchlogs.DescribeImportTaskBatchesOutput, error) + // Lists and describes import tasks, with optional filtering by import status and + // source ARN. + DescribeImportTasks(ctx context.Context, params *cloudwatchlogs.DescribeImportTasksInput, optFns ...func(*Options)) (*cloudwatchlogs.DescribeImportTasksOutput, error) // Returns the field index policies of the specified log group. For more // information about field index policies, see [PutIndexPolicy]. // @@ -424,8 +512,9 @@ type CloudWatchLogs interface { // [PutIndexPolicy]: https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutIndexPolicy.html // [DescribeAccountPolicies]: https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DescribeAccountPolicies.html DescribeIndexPolicies(ctx context.Context, params *cloudwatchlogs.DescribeIndexPoliciesInput, optFns ...func(*Options)) (*cloudwatchlogs.DescribeIndexPoliciesOutput, error) - // Returns information about log groups. You can return all your log groups or - // filter the results by prefix. 
The results are ASCII-sorted by log group name. + // Returns information about log groups, including data sources that ingest into + // each log group. You can return all your log groups or filter the results by + // prefix. The results are ASCII-sorted by log group name. // // CloudWatch Logs doesn't support IAM policies that control access to the // DescribeLogGroups action by using the aws:ResourceTag/key-name condition key. @@ -464,6 +553,11 @@ type CloudWatchLogs interface { // Returns a list of CloudWatch Logs Insights queries that are scheduled, running, // or have been run recently in this account. You can request all queries or limit // it to queries of a specific log group or queries with a certain status. + // + // This operation includes both interactive queries started directly by users and + // automated queries executed by scheduled query configurations. Scheduled query + // executions appear in the results alongside manually initiated queries, providing + // visibility into all query activity in your account. DescribeQueries(ctx context.Context, params *cloudwatchlogs.DescribeQueriesInput, optFns ...func(*Options)) (*cloudwatchlogs.DescribeQueriesOutput, error) // This operation returns a paginated list of your saved CloudWatch Logs Insights // query definitions. You can retrieve query definitions from the current account @@ -503,6 +597,9 @@ type CloudWatchLogs interface { // // [StartQuery]: https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_StartQuery.html DisassociateKmsKey(ctx context.Context, params *cloudwatchlogs.DisassociateKmsKeyInput, optFns ...func(*Options)) (*cloudwatchlogs.DisassociateKmsKeyOutput, error) + // Disassociates a data source from an S3 Table Integration, removing query access + // and deleting all associated data from the integration. 
+ DisassociateSourceFromS3TableIntegration(ctx context.Context, params *cloudwatchlogs.DisassociateSourceFromS3TableIntegrationInput, optFns ...func(*Options)) (*cloudwatchlogs.DisassociateSourceFromS3TableIntegrationOutput, error) // Lists log events from the specified log group. You can list all the log events // or filter the results using one or more of the following: // @@ -622,10 +719,17 @@ type CloudWatchLogs interface { // [CloudWatch Logs query.]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/AnalyzingLogData.html // [Live Tail]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CloudWatchLogs_LiveTail.html GetLogEvents(ctx context.Context, params *cloudwatchlogs.GetLogEventsInput, optFns ...func(*Options)) (*cloudwatchlogs.GetLogEventsOutput, error) + // Discovers available fields for a specific data source and type. The response + // includes any field modifications introduced through pipelines, such as new + // fields or changed field types. + GetLogFields(ctx context.Context, params *cloudwatchlogs.GetLogFieldsInput, optFns ...func(*Options)) (*cloudwatchlogs.GetLogFieldsOutput, error) // Returns a list of the fields that are included in log events in the specified // log group. Includes the percentage of log events that contain each field. The // search is limited to a time period that you specify. // + // This operation is used for discovering fields within log group events. For + // discovering fields across data sources, use the GetLogFields operation. + // // You can specify the log group to search by using either logGroupIdentifier or // logGroupName . You must specify one of these parameters, but you can't specify // both. @@ -677,6 +781,11 @@ type CloudWatchLogs interface { // returns only partial results. If you see a value of Scheduled or Running for // the status, you can retry the operation later to see the final results. 
// + // This operation is used both for retrieving results from interactive queries and + // from automated scheduled query executions. Scheduled queries use GetQueryResults + // internally to retrieve query results for processing and delivery to configured + // destinations. + // // If you are using CloudWatch cross-account observability, you can use this // operation in a monitoring account to start queries in linked source accounts. // For more information, see [CloudWatch cross-account observability]. @@ -686,11 +795,11 @@ type CloudWatchLogs interface { // [CloudWatch Logs quotas]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/cloudwatch_limits_cwl.html // [StartQuery]: https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_StartQuery.html GetQueryResults(ctx context.Context, params *cloudwatchlogs.GetQueryResultsInput, optFns ...func(*Options)) (*cloudwatchlogs.GetQueryResultsOutput, error) - // Returns detailed information about a specified scheduled query, including its - // configuration, current state, and execution history. + // Retrieves details about a specific scheduled query, including its + // configuration, execution status, and metadata. GetScheduledQuery(ctx context.Context, params *cloudwatchlogs.GetScheduledQueryInput, optFns ...func(*Options)) (*cloudwatchlogs.GetScheduledQueryOutput, error) // Retrieves the execution history of a scheduled query within a specified time - // range, including execution status and destination processing metadata. + // range, including query results and destination processing status. GetScheduledQueryHistory(ctx context.Context, params *cloudwatchlogs.GetScheduledQueryHistoryInput, optFns ...func(*Options)) (*cloudwatchlogs.GetScheduledQueryHistoryOutput, error) // Returns the information about the log transformer associated with this log // group. 
@@ -700,6 +809,21 @@ type CloudWatchLogs interface { // // [DescribeAccountPolicies]: https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DescribeAccountPolicies.html GetTransformer(ctx context.Context, params *cloudwatchlogs.GetTransformerInput, optFns ...func(*Options)) (*cloudwatchlogs.GetTransformerOutput, error) + // Returns an aggregate summary of all log groups in the Region grouped by + // specified data source characteristics. Supports optional filtering by log group + // class, name patterns, and data sources. If you perform this action in a + // monitoring account, you can also return aggregated summaries of log groups from + // source accounts that are linked to the monitoring account. For more information + // about using cross-account observability to set up monitoring accounts and source + // accounts, see [CloudWatch cross-account observability]. + // + // The operation aggregates log groups by data source name and type and optionally + // format, providing counts of log groups that share these characteristics. The + // operation paginates results. By default, it returns up to 50 results and + // includes a token to retrieve more results. + // + // [CloudWatch cross-account observability]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Unified-Cross-Account.html + ListAggregateLogGroupSummaries(ctx context.Context, params *cloudwatchlogs.ListAggregateLogGroupSummariesInput, optFns ...func(*Options)) (*cloudwatchlogs.ListAggregateLogGroupSummariesOutput, error) // Returns a list of anomalies that log anomaly detectors have found. For details // about the structure format of each anomaly object that is returned, see the // example in this section. @@ -716,8 +840,12 @@ type CloudWatchLogs interface { // more information about using cross-account observability to set up monitoring // accounts and source accounts, see [CloudWatch cross-account observability]. 
// - // You can optionally filter the list by log group class and by using regular - // expressions in your request to match strings in the log group names. + // You can optionally filter the list by log group class, by using regular + // expressions in your request to match strings in the log group names, by using + // the fieldIndexes parameter to filter log groups based on which field indexes are + // configured, by using the dataSources parameter to filter log groups by data + // source types, and by using the fieldIndexNames parameter to filter by specific + // field index names. // // This operation is paginated. By default, your first use of this operation // returns 50 results, and includes a token to use in a subsequent operation to @@ -734,9 +862,13 @@ type CloudWatchLogs interface { // // [Create field indexes to improve query performance and reduce costs]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CloudWatchLogs-Field-Indexing.html ListLogGroupsForQuery(ctx context.Context, params *cloudwatchlogs.ListLogGroupsForQueryInput, optFns ...func(*Options)) (*cloudwatchlogs.ListLogGroupsForQueryOutput, error) - // Lists all scheduled queries in the current AWS account and region with optional - // filtering by state. + // Lists all scheduled queries in your account and region. You can filter results + // by state to show only enabled or disabled queries. ListScheduledQueries(ctx context.Context, params *cloudwatchlogs.ListScheduledQueriesInput, optFns ...func(*Options)) (*cloudwatchlogs.ListScheduledQueriesOutput, error) + // Returns a list of data source associations for a specified S3 Table + // Integration, showing which data sources are currently associated for query + // access. 
+ ListSourcesForS3TableIntegration(ctx context.Context, params *cloudwatchlogs.ListSourcesForS3TableIntegrationInput, optFns ...func(*Options)) (*cloudwatchlogs.ListSourcesForS3TableIntegrationOutput, error) // Displays the tags associated with a CloudWatch Logs resource. Currently, log // groups and destinations support tagging. ListTagsForResource(ctx context.Context, params *cloudwatchlogs.ListTagsForResourceInput, optFns ...func(*Options)) (*cloudwatchlogs.ListTagsForResourceOutput, error) @@ -751,7 +883,13 @@ type CloudWatchLogs interface { ListTagsLogGroup(ctx context.Context, params *cloudwatchlogs.ListTagsLogGroupInput, optFns ...func(*Options)) (*cloudwatchlogs.ListTagsLogGroupOutput, error) // Creates an account-level data protection policy, subscription filter policy, // field index policy, transformer policy, or metric extraction policy that applies - // to all log groups or a subset of log groups in the account. + // to all log groups, a subset of log groups, or a data source name and type + // combination in the account. + // + // For field index policies, you can configure indexed fields as facets to enable + // interactive exploration of your logs. Facets provide value distributions and + // counts for indexed fields in the CloudWatch Logs Insights console without + // requiring query execution. For more information, see [Use facets to group and explore logs]. // // To use this operation, you must be signed on with the correct permissions // depending on the type of policy that you are creating. @@ -768,6 +906,9 @@ type CloudWatchLogs interface { // - To create a field index policy, you must have the logs:PutIndexPolicy and // logs:PutAccountPolicy permissions. // + // - To configure facets for field index policies, you must have the + // logs:PutIndexPolicy and logs:PutAccountPolicy permissions. + // // - To create a metric extraction policy, you must have the // logs:PutMetricExtractionPolicy and logs:PutAccountPolicy permissions. 
// @@ -872,27 +1013,9 @@ type CloudWatchLogs interface { // selectionCriteria parameter. If you have multiple account-level transformer // policies with selection criteria, no two of them can use the same or overlapping // log group name prefixes. For example, if you have one policy filtered to log - // groups that start with my-log , you can't have another field index policy + // groups that start with my-log , you can't have another transformer policy // filtered to my-logpprod or my-logging . // - // CloudWatch Logs provides default field indexes for all log groups in the - // Standard log class. Default field indexes are automatically available for the - // following fields: - // - // - @logStream - // - // - @aws.region - // - // - @aws.account - // - // - @source.log - // - // - traceId - // - // Default field indexes are in addition to any custom field indexes you define - // within your policy. Default field indexes are not counted towards your field - // index quota. - // // You can also set up a transformer at the log-group level. For more information, // see [PutTransformer]. If there is both a log-group level transformer created with PutTransformer // and an account-level transformer that could apply to the same log group, the log @@ -902,15 +1025,16 @@ type CloudWatchLogs interface { // # Field index policy // // You can use field index policies to create indexes on fields found in log - // events in the log group. Creating field indexes can help lower the scan volume - // for CloudWatch Logs Insights queries that reference those fields, because these - // queries attempt to skip the processing of log events that are known to not match - // the indexed field. Good fields to index are fields that you often need to query - // for and fields or values that match only a small fraction of the total log - // events. Common examples of indexes include request ID, session ID, user IDs, or - // instance IDs. 
For more information, see [Create field indexes to improve query performance and reduce costs] + // events for a log group or data source name and type combination. Creating field + // indexes can help lower the scan volume for CloudWatch Logs Insights queries that + // reference those fields, because these queries attempt to skip the processing of + // log events that are known to not match the indexed field. Good fields to index + // are fields that you often need to query for and fields or values that match only + // a small fraction of the total log events. Common examples of indexes include + // request ID, session ID, user IDs, or instance IDs. For more information, see [Create field indexes to improve query performance and reduce costs] // - // To find the fields that are in your log group events, use the [GetLogGroupFields] operation. + // To find the fields that are in your log group events, use the [GetLogGroupFields] operation. To + // find the fields for a data source use the [GetLogFields]operation. // // For example, suppose you have created a field index for requestId . Then, any // CloudWatch Logs Insights query on that log group that includes requestId = @@ -923,21 +1047,108 @@ type CloudWatchLogs interface { // // You can have one account-level field index policy that applies to all log // groups in the account. Or you can create as many as 20 account-level field index - // policies that are each scoped to a subset of log groups with the + // policies that are each scoped to a subset of log groups using LogGroupNamePrefix + // with the selectionCriteria parameter. You can have another 20 account-level + // field index policies using DataSourceName and DataSourceType for the // selectionCriteria parameter. If you have multiple account-level index policies - // with selection criteria, no two of them can use the same or overlapping log - // group name prefixes. 
For example, if you have one policy filtered to log groups - // that start with my-log , you can't have another field index policy filtered to - // my-logpprod or my-logging . + // with LogGroupNamePrefix selection criteria, no two of them can use the same or + // overlapping log group name prefixes. For example, if you have one policy + // filtered to log groups that start with my-log, you can't have another field + // index policy filtered to my-logpprod or my-logging. Similarly, if you have + // multiple account-level index policies with DataSourceName and DataSourceType + // selection criteria, no two of them can use the same data source name and type + // combination. For example, if you have one policy filtered to the data source + // name amazon_vpc and data source type flow you cannot create another policy with + // this combination. // // If you create an account-level field index policy in a monitoring account in // cross-account observability, the policy is applied only to the monitoring // account and not to any source accounts. // + // CloudWatch Logs provides default field indexes for all log groups in the + // Standard log class. Default field indexes are automatically available for the + // following fields: + // + // - @logStream + // + // - @aws.region + // + // - @aws.account + // + // - @source.log + // + // - @data_source_name + // + // - @data_source_type + // + // - @data_format + // + // - traceId + // + // - severityText + // + // - attributes.session.id + // + // CloudWatch Logs provides default field indexes for certain data source name and + // type combinations as well. 
Default field indexes are automatically available for + // the following data source name and type combinations as identified in the + // following list: + // + // amazon_vpc.flow + // + // - action + // + // - logStatus + // + // - region + // + // - flowDirection + // + // - type + // + // amazon_route53.resolver_query + // + // - transport + // + // - rcode + // + // aws_waf.access + // + // - action + // + // - httpRequest.country + // + // aws_cloudtrail.data , aws_cloudtrail.management + // + // - eventSource + // + // - eventName + // + // - awsRegion + // + // - userAgent + // + // - errorCode + // + // - eventType + // + // - managementEvent + // + // - readOnly + // + // - eventCategory + // + // - requestId + // + // Default field indexes are in addition to any custom field indexes you define + // within your policy. Default field indexes are not counted towards your [field index quota]. + // // If you want to create a field index policy for a single log group, you can use [PutIndexPolicy] - // instead of PutAccountPolicy . If you do so, that log group will use only that - // log-group level policy, and will ignore the account-level policy that you create - // with [PutAccountPolicy]. + // instead of PutAccountPolicy . If you do so, that log group will use that + // log-group level policy and any account-level policies that match at the data + // source level; any account-level policy that matches at the log group level (for + // example, no selection criteria or log group name prefix selection criteria) will + // be ignored. 
// // # Metric extraction policy // @@ -995,15 +1206,17 @@ type CloudWatchLogs interface { // // [PutDestination]: https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDestination.html // [PutTransformer]: https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutTransformer.html - // [PutIndexPolicy]: https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutIndexPolicy.html + // [GetLogFields]: https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_GetLogFields.html // [PutDataProtectionPolicy]: https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDataProtectionPolicy.html - // [Protect sensitive log data with masking]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/mask-sensitive-log-data.html // [FilterLogEvents]: https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_FilterLogEvents.html - // [GetLogGroupFields]: https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_GetLogGroupFields.html // [Processors that you can use]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CloudWatch-Logs-Transformation.html#CloudWatch-Logs-Transformation-Processors - // [PutAccountPolicy]: https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutAccountPolicy.html - // [Create field indexes to improve query performance and reduce costs]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CloudWatchLogs-Field-Indexing.html // [GetLogEvents]: https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_GetLogEvents.html + // [field index quota]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CloudWatchLogs-Field-Indexing-Syntax + // [PutIndexPolicy]: https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutIndexPolicy.html + // [Protect sensitive log data with masking]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/mask-sensitive-log-data.html + // 
[GetLogGroupFields]: https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_GetLogGroupFields.html + // [Use facets to group and explore logs]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CloudWatchLogs-Facets.html + // [Create field indexes to improve query performance and reduce costs]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CloudWatchLogs-Field-Indexing.html PutAccountPolicy(ctx context.Context, params *cloudwatchlogs.PutAccountPolicyInput, optFns ...func(*Options)) (*cloudwatchlogs.PutAccountPolicyOutput, error) // Creates a data protection policy for the specified log group. A data protection // policy can help safeguard sensitive data that's ingested by the log group by @@ -1183,6 +1396,12 @@ type CloudWatchLogs interface { // events. Common examples of indexes include request ID, session ID, userID, and // instance IDs. For more information, see [Create field indexes to improve query performance and reduce costs]. // + // You can configure indexed fields as facets to enable interactive exploration + // and filtering of your logs in the CloudWatch Logs Insights console. Facets allow + // you to view value distributions and counts for indexed fields without running + // queries. When you create a field index, you can optionally set it as a facet to + // enable this interactive analysis capability. For more information, see [Use facets to group and explore logs]. + // // To find the fields that are in your log group events, use the [GetLogGroupFields] operation. // // For example, suppose you have created a field index for requestId . Then, any @@ -1219,15 +1438,17 @@ type CloudWatchLogs interface { // . // // Log group-level field index policies created with PutIndexPolicy override - // account-level field index policies created with [PutAccountPolicy]. If you use PutIndexPolicy to - // create a field index policy for a log group, that log group uses only that - // policy. 
The log group ignores any account-wide field index policy that you might - // have created. + // account-level field index policies created with [PutAccountPolicy]that apply to log groups. If + // you use PutIndexPolicy to create a field index policy for a log group, that log + // group uses only that policy for log group-level indexing, including any facet + // configurations. The log group ignores any account-wide field index policy that + // applies to log groups, but data source-based account policies may still apply. // // [Log classes]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CloudWatch_Logs_Log_Classes.html // [GetLogGroupFields]: https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_GetLogGroupFields.html // [PutAccountPolicy]: https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutAccountPolicy.html // [Create field indexes to improve query performance and reduce costs]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CloudWatchLogs-Field-Indexing.html + // [Use facets to group and explore logs]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CloudWatchLogs-Facets.html PutIndexPolicy(ctx context.Context, params *cloudwatchlogs.PutIndexPolicyInput, optFns ...func(*Options)) (*cloudwatchlogs.PutIndexPolicyOutput, error) // Creates an integration between CloudWatch Logs and another service in this // account. 
Currently, only integrations with OpenSearch Service are supported, and @@ -1512,8 +1733,9 @@ type CloudWatchLogs interface { // [SessionTimeoutException]: https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_StartLiveTailResponseStream.html#CWL-Type-StartLiveTailResponseStream-SessionTimeoutException // [SessionStreamingException]: https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_StartLiveTailResponseStream.html#CWL-Type-StartLiveTailResponseStream-SessionStreamingException StartLiveTail(ctx context.Context, params *cloudwatchlogs.StartLiveTailInput, optFns ...func(*Options)) (*cloudwatchlogs.StartLiveTailOutput, error) - // Starts a query of one or more log groups using CloudWatch Logs Insights. You - // specify the log groups and time range to query and the query string to use. + // Starts a query of one or more log groups or data sources using CloudWatch Logs + // Insights. You specify the log groups or data sources and time range to query and + // the query string to use. You can query up to 10 data sources in a single query. // // For more information, see [CloudWatch Logs Insights Query Syntax]. // @@ -1521,6 +1743,11 @@ type CloudWatchLogs interface { // CloudWatch Logs. You can use [GetQueryResults]to retrieve the results of a query, using the // queryId that StartQuery returns. // + // Interactive queries started with StartQuery share concurrency limits with + // automated scheduled query executions. Both types of queries count toward the + // same regional concurrent query quota, so high scheduled query activity may + // affect the availability of concurrent slots for interactive queries. + // // To specify the log groups to query, a StartQuery operation must include one of // the following: // @@ -1529,7 +1756,8 @@ type CloudWatchLogs interface { // // - Or the queryString must include a SOURCE command to select log groups for // the query. 
The SOURCE command can select log groups based on log group name - // prefix, account ID, and log class. + // prefix, account ID, and log class, or select data sources using dataSource + // syntax in LogsQL, PPL, and SQL. // // For more information about the SOURCE command, see [SOURCE]. // @@ -1559,6 +1787,11 @@ type CloudWatchLogs interface { // Stops a CloudWatch Logs Insights query that is in progress. If the query has // already ended, the operation returns an error indicating that the specified // query is not running. + // + // This operation can be used to cancel both interactive queries and individual + // scheduled query executions. When used with scheduled queries, StopQuery cancels + // only the specific execution identified by the query ID, not the scheduled query + // configuration itself. StopQuery(ctx context.Context, params *cloudwatchlogs.StopQueryInput, optFns ...func(*Options)) (*cloudwatchlogs.StopQueryOutput, error) // The TagLogGroup operation is on the path to deprecation. We recommend that you // use [TagResource]instead. @@ -1650,9 +1883,9 @@ type CloudWatchLogs interface { UpdateDeliveryConfiguration(ctx context.Context, params *cloudwatchlogs.UpdateDeliveryConfigurationInput, optFns ...func(*Options)) (*cloudwatchlogs.UpdateDeliveryConfigurationOutput, error) // Updates an existing log anomaly detector. UpdateLogAnomalyDetector(ctx context.Context, params *cloudwatchlogs.UpdateLogAnomalyDetectorInput, optFns ...func(*Options)) (*cloudwatchlogs.UpdateLogAnomalyDetectorOutput, error) - // Updates the configuration of an existing scheduled query. This operation - // follows PUT semantics, replacing the existing configuration with the provided - // values. + // Updates an existing scheduled query with new configuration. This operation uses + // PUT semantics, allowing modification of query parameters, schedule, and + // destinations. 
UpdateScheduledQuery(ctx context.Context, params *cloudwatchlogs.UpdateScheduledQueryInput, optFns ...func(*Options)) (*cloudwatchlogs.UpdateScheduledQueryOutput, error) } diff --git a/pkg/awsapi/ec2.go b/pkg/awsapi/ec2.go index 0be2ea7d3d..d712418275 100644 --- a/pkg/awsapi/ec2.go +++ b/pkg/awsapi/ec2.go @@ -1392,6 +1392,22 @@ type EC2 interface { // // [Route tables]: https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Route_Tables.html CreateRouteTable(ctx context.Context, params *ec2.CreateRouteTableInput, optFns ...func(*Options)) (*ec2.CreateRouteTableOutput, error) + // Creates an Amazon secondary network. + // + // The allowed size for a secondary network CIDR block is between /28 netmask (16 + // IP addresses) and /12 netmask (1,048,576 IP addresses). + CreateSecondaryNetwork(ctx context.Context, params *ec2.CreateSecondaryNetworkInput, optFns ...func(*Options)) (*ec2.CreateSecondaryNetworkOutput, error) + // Creates a secondary subnet in a secondary network. + // + // A secondary subnet CIDR block must not overlap with the CIDR block of an + // existing secondary subnet in the secondary network. After you create a secondary + // subnet, you can't change its CIDR block. + // + // The allowed size for a secondary subnet CIDR block is between /28 netmask (16 + // IP addresses) and /12 netmask (1,048,576 IP addresses). Amazon reserves the + // first four IP addresses and the last IP address in each secondary subnet for + // internal use. + CreateSecondarySubnet(ctx context.Context, params *ec2.CreateSecondarySubnetInput, optFns ...func(*Options)) (*ec2.CreateSecondarySubnetOutput, error) // Creates a security group. // // A security group acts as a virtual firewall for your instance to control @@ -2124,6 +2140,12 @@ type EC2 interface { // Deletes the specified route table. You must disassociate the route table from // any subnets before you can delete it. You can't delete the main route table. 
DeleteRouteTable(ctx context.Context, params *ec2.DeleteRouteTableInput, optFns ...func(*Options)) (*ec2.DeleteRouteTableOutput, error) + // Deletes a secondary network. You must delete all secondary subnets in the + // secondary network before you can delete the secondary network. + DeleteSecondaryNetwork(ctx context.Context, params *ec2.DeleteSecondaryNetworkInput, optFns ...func(*Options)) (*ec2.DeleteSecondaryNetworkOutput, error) + // Deletes a secondary subnet. A secondary subnet must not contain any secondary + // interfaces prior to deletion. + DeleteSecondarySubnet(ctx context.Context, params *ec2.DeleteSecondarySubnetInput, optFns ...func(*Options)) (*ec2.DeleteSecondarySubnetOutput, error) // Deletes a security group. // // If you attempt to delete a security group that is associated with an instance @@ -2861,10 +2883,6 @@ type EC2 interface { // them through their termination. For more information, see [Instance lifecycle]in the Amazon EC2 // User Guide. // - // - SQL license exemption monitoring - For instances registered with the SQL LE - // service, status includes SQL license exemption monitoring health and processing - // status to provide operational visibility into license exemption functionality. - // // The Amazon EC2 API follows an eventual consistency model. This means that the // result of an API command you run that creates or modifies resources might not be // immediately available to all subsequent commands you run. For guidance on how to @@ -3289,6 +3307,12 @@ type EC2 interface { DescribeScheduledInstanceAvailability(ctx context.Context, params *ec2.DescribeScheduledInstanceAvailabilityInput, optFns ...func(*Options)) (*ec2.DescribeScheduledInstanceAvailabilityOutput, error) // Describes the specified Scheduled Instances or all your Scheduled Instances. 
DescribeScheduledInstances(ctx context.Context, params *ec2.DescribeScheduledInstancesInput, optFns ...func(*Options)) (*ec2.DescribeScheduledInstancesOutput, error) + // Describes one or more of your secondary interfaces. + DescribeSecondaryInterfaces(ctx context.Context, params *ec2.DescribeSecondaryInterfacesInput, optFns ...func(*Options)) (*ec2.DescribeSecondaryInterfacesOutput, error) + // Describes one or more secondary networks. + DescribeSecondaryNetworks(ctx context.Context, params *ec2.DescribeSecondaryNetworksInput, optFns ...func(*Options)) (*ec2.DescribeSecondaryNetworksOutput, error) + // Describes one or more of your secondary subnets. + DescribeSecondarySubnets(ctx context.Context, params *ec2.DescribeSecondarySubnetsInput, optFns ...func(*Options)) (*ec2.DescribeSecondarySubnetsOutput, error) // Describes the VPCs on the other side of a VPC peering or Transit Gateway // connection that are referencing the security groups you've specified in this // request. @@ -5412,9 +5436,13 @@ type EC2 interface { // With previous-generation instance types, resizing an EBS volume might require // detaching and reattaching the volume or stopping and restarting the instance. // - // After modifying a volume, you must wait at least six hours and ensure that the - // volume is in the in-use or available state before you can modify the same - // volume. This is sometimes referred to as a cooldown period. + // After you initiate a volume modification, you must wait for that modification + // to reach the completed state before you can initiate another modification for + // the same volume. You can modify a volume up to four times within a rolling + // 24-hour period, as long as the volume is in the in-use or available state, and + // all previous modifications for that volume are completed . If you exceed this + // limit, you get an error message that indicates when you can perform your next + // modification. 
// // [Monitor the progress of volume modifications]: https://docs.aws.amazon.com/ebs/latest/userguide/monitoring-volume-modifications.html // [Amazon EBS Elastic Volumes]: https://docs.aws.amazon.com/ebs/latest/userguide/ebs-modify-volume.html diff --git a/pkg/awsapi/eks.go b/pkg/awsapi/eks.go index ccbbf52ca4..2274e354e7 100644 --- a/pkg/awsapi/eks.go +++ b/pkg/awsapi/eks.go @@ -88,10 +88,10 @@ type EKS interface { // single tenant and unique. It runs on its own set of Amazon EC2 instances. // // The cluster control plane is provisioned across multiple Availability Zones and - // fronted by an ELB Network Load Balancer. Amazon EKS also provisions elastic - // network interfaces in your VPC subnets to provide connectivity from the control - // plane instances to the nodes (for example, to support kubectl exec , logs , and - // proxy data flows). + // fronted by an Elastic Load Balancing Network Load Balancer. Amazon EKS also + // provisions elastic network interfaces in your VPC subnets to provide + // connectivity from the control plane instances to the nodes (for example, to + // support kubectl exec , logs , and proxy data flows). // // Amazon EKS nodes run in your Amazon Web Services account and connect to your // cluster's control plane over the Kubernetes API server endpoint and a @@ -242,11 +242,11 @@ type EKS interface { DeleteCapability(ctx context.Context, params *eks.DeleteCapabilityInput, optFns ...func(*Options)) (*eks.DeleteCapabilityOutput, error) // Deletes an Amazon EKS cluster control plane. // - // If you have active services in your cluster that are associated with a load - // balancer, you must delete those services before deleting the cluster so that the - // load balancers are deleted properly. Otherwise, you can have orphaned resources - // in your VPC that prevent you from being able to delete the VPC. For more - // information, see [Deleting a cluster]in the Amazon EKS User Guide. 
+ // If you have active services and ingress resources in your cluster that are + // associated with a load balancer, you must delete those services before deleting + // the cluster so that the load balancers are deleted properly. Otherwise, you can + // have orphaned resources in your VPC that prevent you from being able to delete + // the VPC. For more information, see [Deleting a cluster]in the Amazon EKS User Guide. // // If you have managed node groups or Fargate profiles attached to the cluster, // you must delete them first. For more information, see DeleteNodgroup and diff --git a/pkg/eks/mocksv2/CloudWatchLogs.go b/pkg/eks/mocksv2/CloudWatchLogs.go index 56d749dc9b..26790ebdc2 100644 --- a/pkg/eks/mocksv2/CloudWatchLogs.go +++ b/pkg/eks/mocksv2/CloudWatchLogs.go @@ -97,6 +97,80 @@ func (_c *CloudWatchLogs_AssociateKmsKey_Call) RunAndReturn(run func(context.Con return _c } +// AssociateSourceToS3TableIntegration provides a mock function with given fields: ctx, params, optFns +func (_m *CloudWatchLogs) AssociateSourceToS3TableIntegration(ctx context.Context, params *cloudwatchlogs.AssociateSourceToS3TableIntegrationInput, optFns ...func(*cloudwatchlogs.Options)) (*cloudwatchlogs.AssociateSourceToS3TableIntegrationOutput, error) { + _va := make([]interface{}, len(optFns)) + for _i := range optFns { + _va[_i] = optFns[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, params) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for AssociateSourceToS3TableIntegration") + } + + var r0 *cloudwatchlogs.AssociateSourceToS3TableIntegrationOutput + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *cloudwatchlogs.AssociateSourceToS3TableIntegrationInput, ...func(*cloudwatchlogs.Options)) (*cloudwatchlogs.AssociateSourceToS3TableIntegrationOutput, error)); ok { + return rf(ctx, params, optFns...) 
+ } + if rf, ok := ret.Get(0).(func(context.Context, *cloudwatchlogs.AssociateSourceToS3TableIntegrationInput, ...func(*cloudwatchlogs.Options)) *cloudwatchlogs.AssociateSourceToS3TableIntegrationOutput); ok { + r0 = rf(ctx, params, optFns...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*cloudwatchlogs.AssociateSourceToS3TableIntegrationOutput) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *cloudwatchlogs.AssociateSourceToS3TableIntegrationInput, ...func(*cloudwatchlogs.Options)) error); ok { + r1 = rf(ctx, params, optFns...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CloudWatchLogs_AssociateSourceToS3TableIntegration_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AssociateSourceToS3TableIntegration' +type CloudWatchLogs_AssociateSourceToS3TableIntegration_Call struct { + *mock.Call +} + +// AssociateSourceToS3TableIntegration is a helper method to define mock.On call +// - ctx context.Context +// - params *cloudwatchlogs.AssociateSourceToS3TableIntegrationInput +// - optFns ...func(*cloudwatchlogs.Options) +func (_e *CloudWatchLogs_Expecter) AssociateSourceToS3TableIntegration(ctx interface{}, params interface{}, optFns ...interface{}) *CloudWatchLogs_AssociateSourceToS3TableIntegration_Call { + return &CloudWatchLogs_AssociateSourceToS3TableIntegration_Call{Call: _e.mock.On("AssociateSourceToS3TableIntegration", + append([]interface{}{ctx, params}, optFns...)...)} +} + +func (_c *CloudWatchLogs_AssociateSourceToS3TableIntegration_Call) Run(run func(ctx context.Context, params *cloudwatchlogs.AssociateSourceToS3TableIntegrationInput, optFns ...func(*cloudwatchlogs.Options))) *CloudWatchLogs_AssociateSourceToS3TableIntegration_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]func(*cloudwatchlogs.Options), len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(func(*cloudwatchlogs.Options)) + } + } + 
run(args[0].(context.Context), args[1].(*cloudwatchlogs.AssociateSourceToS3TableIntegrationInput), variadicArgs...) + }) + return _c +} + +func (_c *CloudWatchLogs_AssociateSourceToS3TableIntegration_Call) Return(_a0 *cloudwatchlogs.AssociateSourceToS3TableIntegrationOutput, _a1 error) *CloudWatchLogs_AssociateSourceToS3TableIntegration_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *CloudWatchLogs_AssociateSourceToS3TableIntegration_Call) RunAndReturn(run func(context.Context, *cloudwatchlogs.AssociateSourceToS3TableIntegrationInput, ...func(*cloudwatchlogs.Options)) (*cloudwatchlogs.AssociateSourceToS3TableIntegrationOutput, error)) *CloudWatchLogs_AssociateSourceToS3TableIntegration_Call { + _c.Call.Return(run) + return _c +} + // CancelExportTask provides a mock function with given fields: ctx, params, optFns func (_m *CloudWatchLogs) CancelExportTask(ctx context.Context, params *cloudwatchlogs.CancelExportTaskInput, optFns ...func(*cloudwatchlogs.Options)) (*cloudwatchlogs.CancelExportTaskOutput, error) { _va := make([]interface{}, len(optFns)) @@ -171,6 +245,80 @@ func (_c *CloudWatchLogs_CancelExportTask_Call) RunAndReturn(run func(context.Co return _c } +// CancelImportTask provides a mock function with given fields: ctx, params, optFns +func (_m *CloudWatchLogs) CancelImportTask(ctx context.Context, params *cloudwatchlogs.CancelImportTaskInput, optFns ...func(*cloudwatchlogs.Options)) (*cloudwatchlogs.CancelImportTaskOutput, error) { + _va := make([]interface{}, len(optFns)) + for _i := range optFns { + _va[_i] = optFns[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, params) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) 
+ + if len(ret) == 0 { + panic("no return value specified for CancelImportTask") + } + + var r0 *cloudwatchlogs.CancelImportTaskOutput + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *cloudwatchlogs.CancelImportTaskInput, ...func(*cloudwatchlogs.Options)) (*cloudwatchlogs.CancelImportTaskOutput, error)); ok { + return rf(ctx, params, optFns...) + } + if rf, ok := ret.Get(0).(func(context.Context, *cloudwatchlogs.CancelImportTaskInput, ...func(*cloudwatchlogs.Options)) *cloudwatchlogs.CancelImportTaskOutput); ok { + r0 = rf(ctx, params, optFns...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*cloudwatchlogs.CancelImportTaskOutput) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *cloudwatchlogs.CancelImportTaskInput, ...func(*cloudwatchlogs.Options)) error); ok { + r1 = rf(ctx, params, optFns...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CloudWatchLogs_CancelImportTask_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CancelImportTask' +type CloudWatchLogs_CancelImportTask_Call struct { + *mock.Call +} + +// CancelImportTask is a helper method to define mock.On call +// - ctx context.Context +// - params *cloudwatchlogs.CancelImportTaskInput +// - optFns ...func(*cloudwatchlogs.Options) +func (_e *CloudWatchLogs_Expecter) CancelImportTask(ctx interface{}, params interface{}, optFns ...interface{}) *CloudWatchLogs_CancelImportTask_Call { + return &CloudWatchLogs_CancelImportTask_Call{Call: _e.mock.On("CancelImportTask", + append([]interface{}{ctx, params}, optFns...)...)} +} + +func (_c *CloudWatchLogs_CancelImportTask_Call) Run(run func(ctx context.Context, params *cloudwatchlogs.CancelImportTaskInput, optFns ...func(*cloudwatchlogs.Options))) *CloudWatchLogs_CancelImportTask_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]func(*cloudwatchlogs.Options), len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = 
a.(func(*cloudwatchlogs.Options)) + } + } + run(args[0].(context.Context), args[1].(*cloudwatchlogs.CancelImportTaskInput), variadicArgs...) + }) + return _c +} + +func (_c *CloudWatchLogs_CancelImportTask_Call) Return(_a0 *cloudwatchlogs.CancelImportTaskOutput, _a1 error) *CloudWatchLogs_CancelImportTask_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *CloudWatchLogs_CancelImportTask_Call) RunAndReturn(run func(context.Context, *cloudwatchlogs.CancelImportTaskInput, ...func(*cloudwatchlogs.Options)) (*cloudwatchlogs.CancelImportTaskOutput, error)) *CloudWatchLogs_CancelImportTask_Call { + _c.Call.Return(run) + return _c +} + // CreateDelivery provides a mock function with given fields: ctx, params, optFns func (_m *CloudWatchLogs) CreateDelivery(ctx context.Context, params *cloudwatchlogs.CreateDeliveryInput, optFns ...func(*cloudwatchlogs.Options)) (*cloudwatchlogs.CreateDeliveryOutput, error) { _va := make([]interface{}, len(optFns)) @@ -319,6 +467,80 @@ func (_c *CloudWatchLogs_CreateExportTask_Call) RunAndReturn(run func(context.Co return _c } +// CreateImportTask provides a mock function with given fields: ctx, params, optFns +func (_m *CloudWatchLogs) CreateImportTask(ctx context.Context, params *cloudwatchlogs.CreateImportTaskInput, optFns ...func(*cloudwatchlogs.Options)) (*cloudwatchlogs.CreateImportTaskOutput, error) { + _va := make([]interface{}, len(optFns)) + for _i := range optFns { + _va[_i] = optFns[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, params) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for CreateImportTask") + } + + var r0 *cloudwatchlogs.CreateImportTaskOutput + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *cloudwatchlogs.CreateImportTaskInput, ...func(*cloudwatchlogs.Options)) (*cloudwatchlogs.CreateImportTaskOutput, error)); ok { + return rf(ctx, params, optFns...) 
+ } + if rf, ok := ret.Get(0).(func(context.Context, *cloudwatchlogs.CreateImportTaskInput, ...func(*cloudwatchlogs.Options)) *cloudwatchlogs.CreateImportTaskOutput); ok { + r0 = rf(ctx, params, optFns...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*cloudwatchlogs.CreateImportTaskOutput) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *cloudwatchlogs.CreateImportTaskInput, ...func(*cloudwatchlogs.Options)) error); ok { + r1 = rf(ctx, params, optFns...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CloudWatchLogs_CreateImportTask_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateImportTask' +type CloudWatchLogs_CreateImportTask_Call struct { + *mock.Call +} + +// CreateImportTask is a helper method to define mock.On call +// - ctx context.Context +// - params *cloudwatchlogs.CreateImportTaskInput +// - optFns ...func(*cloudwatchlogs.Options) +func (_e *CloudWatchLogs_Expecter) CreateImportTask(ctx interface{}, params interface{}, optFns ...interface{}) *CloudWatchLogs_CreateImportTask_Call { + return &CloudWatchLogs_CreateImportTask_Call{Call: _e.mock.On("CreateImportTask", + append([]interface{}{ctx, params}, optFns...)...)} +} + +func (_c *CloudWatchLogs_CreateImportTask_Call) Run(run func(ctx context.Context, params *cloudwatchlogs.CreateImportTaskInput, optFns ...func(*cloudwatchlogs.Options))) *CloudWatchLogs_CreateImportTask_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]func(*cloudwatchlogs.Options), len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(func(*cloudwatchlogs.Options)) + } + } + run(args[0].(context.Context), args[1].(*cloudwatchlogs.CreateImportTaskInput), variadicArgs...) 
+ }) + return _c +} + +func (_c *CloudWatchLogs_CreateImportTask_Call) Return(_a0 *cloudwatchlogs.CreateImportTaskOutput, _a1 error) *CloudWatchLogs_CreateImportTask_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *CloudWatchLogs_CreateImportTask_Call) RunAndReturn(run func(context.Context, *cloudwatchlogs.CreateImportTaskInput, ...func(*cloudwatchlogs.Options)) (*cloudwatchlogs.CreateImportTaskOutput, error)) *CloudWatchLogs_CreateImportTask_Call { + _c.Call.Return(run) + return _c +} + // CreateLogAnomalyDetector provides a mock function with given fields: ctx, params, optFns func (_m *CloudWatchLogs) CreateLogAnomalyDetector(ctx context.Context, params *cloudwatchlogs.CreateLogAnomalyDetectorInput, optFns ...func(*cloudwatchlogs.Options)) (*cloudwatchlogs.CreateLogAnomalyDetectorOutput, error) { _va := make([]interface{}, len(optFns)) @@ -2613,6 +2835,154 @@ func (_c *CloudWatchLogs_DescribeFieldIndexes_Call) RunAndReturn(run func(contex return _c } +// DescribeImportTaskBatches provides a mock function with given fields: ctx, params, optFns +func (_m *CloudWatchLogs) DescribeImportTaskBatches(ctx context.Context, params *cloudwatchlogs.DescribeImportTaskBatchesInput, optFns ...func(*cloudwatchlogs.Options)) (*cloudwatchlogs.DescribeImportTaskBatchesOutput, error) { + _va := make([]interface{}, len(optFns)) + for _i := range optFns { + _va[_i] = optFns[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, params) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for DescribeImportTaskBatches") + } + + var r0 *cloudwatchlogs.DescribeImportTaskBatchesOutput + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *cloudwatchlogs.DescribeImportTaskBatchesInput, ...func(*cloudwatchlogs.Options)) (*cloudwatchlogs.DescribeImportTaskBatchesOutput, error)); ok { + return rf(ctx, params, optFns...) 
+ } + if rf, ok := ret.Get(0).(func(context.Context, *cloudwatchlogs.DescribeImportTaskBatchesInput, ...func(*cloudwatchlogs.Options)) *cloudwatchlogs.DescribeImportTaskBatchesOutput); ok { + r0 = rf(ctx, params, optFns...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*cloudwatchlogs.DescribeImportTaskBatchesOutput) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *cloudwatchlogs.DescribeImportTaskBatchesInput, ...func(*cloudwatchlogs.Options)) error); ok { + r1 = rf(ctx, params, optFns...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CloudWatchLogs_DescribeImportTaskBatches_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DescribeImportTaskBatches' +type CloudWatchLogs_DescribeImportTaskBatches_Call struct { + *mock.Call +} + +// DescribeImportTaskBatches is a helper method to define mock.On call +// - ctx context.Context +// - params *cloudwatchlogs.DescribeImportTaskBatchesInput +// - optFns ...func(*cloudwatchlogs.Options) +func (_e *CloudWatchLogs_Expecter) DescribeImportTaskBatches(ctx interface{}, params interface{}, optFns ...interface{}) *CloudWatchLogs_DescribeImportTaskBatches_Call { + return &CloudWatchLogs_DescribeImportTaskBatches_Call{Call: _e.mock.On("DescribeImportTaskBatches", + append([]interface{}{ctx, params}, optFns...)...)} +} + +func (_c *CloudWatchLogs_DescribeImportTaskBatches_Call) Run(run func(ctx context.Context, params *cloudwatchlogs.DescribeImportTaskBatchesInput, optFns ...func(*cloudwatchlogs.Options))) *CloudWatchLogs_DescribeImportTaskBatches_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]func(*cloudwatchlogs.Options), len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(func(*cloudwatchlogs.Options)) + } + } + run(args[0].(context.Context), args[1].(*cloudwatchlogs.DescribeImportTaskBatchesInput), variadicArgs...) 
+ }) + return _c +} + +func (_c *CloudWatchLogs_DescribeImportTaskBatches_Call) Return(_a0 *cloudwatchlogs.DescribeImportTaskBatchesOutput, _a1 error) *CloudWatchLogs_DescribeImportTaskBatches_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *CloudWatchLogs_DescribeImportTaskBatches_Call) RunAndReturn(run func(context.Context, *cloudwatchlogs.DescribeImportTaskBatchesInput, ...func(*cloudwatchlogs.Options)) (*cloudwatchlogs.DescribeImportTaskBatchesOutput, error)) *CloudWatchLogs_DescribeImportTaskBatches_Call { + _c.Call.Return(run) + return _c +} + +// DescribeImportTasks provides a mock function with given fields: ctx, params, optFns +func (_m *CloudWatchLogs) DescribeImportTasks(ctx context.Context, params *cloudwatchlogs.DescribeImportTasksInput, optFns ...func(*cloudwatchlogs.Options)) (*cloudwatchlogs.DescribeImportTasksOutput, error) { + _va := make([]interface{}, len(optFns)) + for _i := range optFns { + _va[_i] = optFns[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, params) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for DescribeImportTasks") + } + + var r0 *cloudwatchlogs.DescribeImportTasksOutput + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *cloudwatchlogs.DescribeImportTasksInput, ...func(*cloudwatchlogs.Options)) (*cloudwatchlogs.DescribeImportTasksOutput, error)); ok { + return rf(ctx, params, optFns...) + } + if rf, ok := ret.Get(0).(func(context.Context, *cloudwatchlogs.DescribeImportTasksInput, ...func(*cloudwatchlogs.Options)) *cloudwatchlogs.DescribeImportTasksOutput); ok { + r0 = rf(ctx, params, optFns...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*cloudwatchlogs.DescribeImportTasksOutput) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *cloudwatchlogs.DescribeImportTasksInput, ...func(*cloudwatchlogs.Options)) error); ok { + r1 = rf(ctx, params, optFns...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CloudWatchLogs_DescribeImportTasks_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DescribeImportTasks' +type CloudWatchLogs_DescribeImportTasks_Call struct { + *mock.Call +} + +// DescribeImportTasks is a helper method to define mock.On call +// - ctx context.Context +// - params *cloudwatchlogs.DescribeImportTasksInput +// - optFns ...func(*cloudwatchlogs.Options) +func (_e *CloudWatchLogs_Expecter) DescribeImportTasks(ctx interface{}, params interface{}, optFns ...interface{}) *CloudWatchLogs_DescribeImportTasks_Call { + return &CloudWatchLogs_DescribeImportTasks_Call{Call: _e.mock.On("DescribeImportTasks", + append([]interface{}{ctx, params}, optFns...)...)} +} + +func (_c *CloudWatchLogs_DescribeImportTasks_Call) Run(run func(ctx context.Context, params *cloudwatchlogs.DescribeImportTasksInput, optFns ...func(*cloudwatchlogs.Options))) *CloudWatchLogs_DescribeImportTasks_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]func(*cloudwatchlogs.Options), len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(func(*cloudwatchlogs.Options)) + } + } + run(args[0].(context.Context), args[1].(*cloudwatchlogs.DescribeImportTasksInput), variadicArgs...) 
+ }) + return _c +} + +func (_c *CloudWatchLogs_DescribeImportTasks_Call) Return(_a0 *cloudwatchlogs.DescribeImportTasksOutput, _a1 error) *CloudWatchLogs_DescribeImportTasks_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *CloudWatchLogs_DescribeImportTasks_Call) RunAndReturn(run func(context.Context, *cloudwatchlogs.DescribeImportTasksInput, ...func(*cloudwatchlogs.Options)) (*cloudwatchlogs.DescribeImportTasksOutput, error)) *CloudWatchLogs_DescribeImportTasks_Call { + _c.Call.Return(run) + return _c +} + // DescribeIndexPolicies provides a mock function with given fields: ctx, params, optFns func (_m *CloudWatchLogs) DescribeIndexPolicies(ctx context.Context, params *cloudwatchlogs.DescribeIndexPoliciesInput, optFns ...func(*cloudwatchlogs.Options)) (*cloudwatchlogs.DescribeIndexPoliciesOutput, error) { _va := make([]interface{}, len(optFns)) @@ -3279,6 +3649,80 @@ func (_c *CloudWatchLogs_DisassociateKmsKey_Call) RunAndReturn(run func(context. return _c } +// DisassociateSourceFromS3TableIntegration provides a mock function with given fields: ctx, params, optFns +func (_m *CloudWatchLogs) DisassociateSourceFromS3TableIntegration(ctx context.Context, params *cloudwatchlogs.DisassociateSourceFromS3TableIntegrationInput, optFns ...func(*cloudwatchlogs.Options)) (*cloudwatchlogs.DisassociateSourceFromS3TableIntegrationOutput, error) { + _va := make([]interface{}, len(optFns)) + for _i := range optFns { + _va[_i] = optFns[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, params) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) 
+ + if len(ret) == 0 { + panic("no return value specified for DisassociateSourceFromS3TableIntegration") + } + + var r0 *cloudwatchlogs.DisassociateSourceFromS3TableIntegrationOutput + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *cloudwatchlogs.DisassociateSourceFromS3TableIntegrationInput, ...func(*cloudwatchlogs.Options)) (*cloudwatchlogs.DisassociateSourceFromS3TableIntegrationOutput, error)); ok { + return rf(ctx, params, optFns...) + } + if rf, ok := ret.Get(0).(func(context.Context, *cloudwatchlogs.DisassociateSourceFromS3TableIntegrationInput, ...func(*cloudwatchlogs.Options)) *cloudwatchlogs.DisassociateSourceFromS3TableIntegrationOutput); ok { + r0 = rf(ctx, params, optFns...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*cloudwatchlogs.DisassociateSourceFromS3TableIntegrationOutput) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *cloudwatchlogs.DisassociateSourceFromS3TableIntegrationInput, ...func(*cloudwatchlogs.Options)) error); ok { + r1 = rf(ctx, params, optFns...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CloudWatchLogs_DisassociateSourceFromS3TableIntegration_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DisassociateSourceFromS3TableIntegration' +type CloudWatchLogs_DisassociateSourceFromS3TableIntegration_Call struct { + *mock.Call +} + +// DisassociateSourceFromS3TableIntegration is a helper method to define mock.On call +// - ctx context.Context +// - params *cloudwatchlogs.DisassociateSourceFromS3TableIntegrationInput +// - optFns ...func(*cloudwatchlogs.Options) +func (_e *CloudWatchLogs_Expecter) DisassociateSourceFromS3TableIntegration(ctx interface{}, params interface{}, optFns ...interface{}) *CloudWatchLogs_DisassociateSourceFromS3TableIntegration_Call { + return &CloudWatchLogs_DisassociateSourceFromS3TableIntegration_Call{Call: _e.mock.On("DisassociateSourceFromS3TableIntegration", + append([]interface{}{ctx, params}, optFns...)...)} +} + +func (_c *CloudWatchLogs_DisassociateSourceFromS3TableIntegration_Call) Run(run func(ctx context.Context, params *cloudwatchlogs.DisassociateSourceFromS3TableIntegrationInput, optFns ...func(*cloudwatchlogs.Options))) *CloudWatchLogs_DisassociateSourceFromS3TableIntegration_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]func(*cloudwatchlogs.Options), len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(func(*cloudwatchlogs.Options)) + } + } + run(args[0].(context.Context), args[1].(*cloudwatchlogs.DisassociateSourceFromS3TableIntegrationInput), variadicArgs...) 
+ }) + return _c +} + +func (_c *CloudWatchLogs_DisassociateSourceFromS3TableIntegration_Call) Return(_a0 *cloudwatchlogs.DisassociateSourceFromS3TableIntegrationOutput, _a1 error) *CloudWatchLogs_DisassociateSourceFromS3TableIntegration_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *CloudWatchLogs_DisassociateSourceFromS3TableIntegration_Call) RunAndReturn(run func(context.Context, *cloudwatchlogs.DisassociateSourceFromS3TableIntegrationInput, ...func(*cloudwatchlogs.Options)) (*cloudwatchlogs.DisassociateSourceFromS3TableIntegrationOutput, error)) *CloudWatchLogs_DisassociateSourceFromS3TableIntegration_Call { + _c.Call.Return(run) + return _c +} + // FilterLogEvents provides a mock function with given fields: ctx, params, optFns func (_m *CloudWatchLogs) FilterLogEvents(ctx context.Context, params *cloudwatchlogs.FilterLogEventsInput, optFns ...func(*cloudwatchlogs.Options)) (*cloudwatchlogs.FilterLogEventsOutput, error) { _va := make([]interface{}, len(optFns)) @@ -3945,6 +4389,80 @@ func (_c *CloudWatchLogs_GetLogEvents_Call) RunAndReturn(run func(context.Contex return _c } +// GetLogFields provides a mock function with given fields: ctx, params, optFns +func (_m *CloudWatchLogs) GetLogFields(ctx context.Context, params *cloudwatchlogs.GetLogFieldsInput, optFns ...func(*cloudwatchlogs.Options)) (*cloudwatchlogs.GetLogFieldsOutput, error) { + _va := make([]interface{}, len(optFns)) + for _i := range optFns { + _va[_i] = optFns[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, params) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for GetLogFields") + } + + var r0 *cloudwatchlogs.GetLogFieldsOutput + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *cloudwatchlogs.GetLogFieldsInput, ...func(*cloudwatchlogs.Options)) (*cloudwatchlogs.GetLogFieldsOutput, error)); ok { + return rf(ctx, params, optFns...) 
+ } + if rf, ok := ret.Get(0).(func(context.Context, *cloudwatchlogs.GetLogFieldsInput, ...func(*cloudwatchlogs.Options)) *cloudwatchlogs.GetLogFieldsOutput); ok { + r0 = rf(ctx, params, optFns...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*cloudwatchlogs.GetLogFieldsOutput) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *cloudwatchlogs.GetLogFieldsInput, ...func(*cloudwatchlogs.Options)) error); ok { + r1 = rf(ctx, params, optFns...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CloudWatchLogs_GetLogFields_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLogFields' +type CloudWatchLogs_GetLogFields_Call struct { + *mock.Call +} + +// GetLogFields is a helper method to define mock.On call +// - ctx context.Context +// - params *cloudwatchlogs.GetLogFieldsInput +// - optFns ...func(*cloudwatchlogs.Options) +func (_e *CloudWatchLogs_Expecter) GetLogFields(ctx interface{}, params interface{}, optFns ...interface{}) *CloudWatchLogs_GetLogFields_Call { + return &CloudWatchLogs_GetLogFields_Call{Call: _e.mock.On("GetLogFields", + append([]interface{}{ctx, params}, optFns...)...)} +} + +func (_c *CloudWatchLogs_GetLogFields_Call) Run(run func(ctx context.Context, params *cloudwatchlogs.GetLogFieldsInput, optFns ...func(*cloudwatchlogs.Options))) *CloudWatchLogs_GetLogFields_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]func(*cloudwatchlogs.Options), len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(func(*cloudwatchlogs.Options)) + } + } + run(args[0].(context.Context), args[1].(*cloudwatchlogs.GetLogFieldsInput), variadicArgs...) 
+ }) + return _c +} + +func (_c *CloudWatchLogs_GetLogFields_Call) Return(_a0 *cloudwatchlogs.GetLogFieldsOutput, _a1 error) *CloudWatchLogs_GetLogFields_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *CloudWatchLogs_GetLogFields_Call) RunAndReturn(run func(context.Context, *cloudwatchlogs.GetLogFieldsInput, ...func(*cloudwatchlogs.Options)) (*cloudwatchlogs.GetLogFieldsOutput, error)) *CloudWatchLogs_GetLogFields_Call { + _c.Call.Return(run) + return _c +} + // GetLogGroupFields provides a mock function with given fields: ctx, params, optFns func (_m *CloudWatchLogs) GetLogGroupFields(ctx context.Context, params *cloudwatchlogs.GetLogGroupFieldsInput, optFns ...func(*cloudwatchlogs.Options)) (*cloudwatchlogs.GetLogGroupFieldsOutput, error) { _va := make([]interface{}, len(optFns)) @@ -4463,6 +4981,80 @@ func (_c *CloudWatchLogs_GetTransformer_Call) RunAndReturn(run func(context.Cont return _c } +// ListAggregateLogGroupSummaries provides a mock function with given fields: ctx, params, optFns +func (_m *CloudWatchLogs) ListAggregateLogGroupSummaries(ctx context.Context, params *cloudwatchlogs.ListAggregateLogGroupSummariesInput, optFns ...func(*cloudwatchlogs.Options)) (*cloudwatchlogs.ListAggregateLogGroupSummariesOutput, error) { + _va := make([]interface{}, len(optFns)) + for _i := range optFns { + _va[_i] = optFns[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, params) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for ListAggregateLogGroupSummaries") + } + + var r0 *cloudwatchlogs.ListAggregateLogGroupSummariesOutput + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *cloudwatchlogs.ListAggregateLogGroupSummariesInput, ...func(*cloudwatchlogs.Options)) (*cloudwatchlogs.ListAggregateLogGroupSummariesOutput, error)); ok { + return rf(ctx, params, optFns...) 
+ } + if rf, ok := ret.Get(0).(func(context.Context, *cloudwatchlogs.ListAggregateLogGroupSummariesInput, ...func(*cloudwatchlogs.Options)) *cloudwatchlogs.ListAggregateLogGroupSummariesOutput); ok { + r0 = rf(ctx, params, optFns...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*cloudwatchlogs.ListAggregateLogGroupSummariesOutput) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *cloudwatchlogs.ListAggregateLogGroupSummariesInput, ...func(*cloudwatchlogs.Options)) error); ok { + r1 = rf(ctx, params, optFns...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CloudWatchLogs_ListAggregateLogGroupSummaries_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListAggregateLogGroupSummaries' +type CloudWatchLogs_ListAggregateLogGroupSummaries_Call struct { + *mock.Call +} + +// ListAggregateLogGroupSummaries is a helper method to define mock.On call +// - ctx context.Context +// - params *cloudwatchlogs.ListAggregateLogGroupSummariesInput +// - optFns ...func(*cloudwatchlogs.Options) +func (_e *CloudWatchLogs_Expecter) ListAggregateLogGroupSummaries(ctx interface{}, params interface{}, optFns ...interface{}) *CloudWatchLogs_ListAggregateLogGroupSummaries_Call { + return &CloudWatchLogs_ListAggregateLogGroupSummaries_Call{Call: _e.mock.On("ListAggregateLogGroupSummaries", + append([]interface{}{ctx, params}, optFns...)...)} +} + +func (_c *CloudWatchLogs_ListAggregateLogGroupSummaries_Call) Run(run func(ctx context.Context, params *cloudwatchlogs.ListAggregateLogGroupSummariesInput, optFns ...func(*cloudwatchlogs.Options))) *CloudWatchLogs_ListAggregateLogGroupSummaries_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]func(*cloudwatchlogs.Options), len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(func(*cloudwatchlogs.Options)) + } + } + run(args[0].(context.Context), args[1].(*cloudwatchlogs.ListAggregateLogGroupSummariesInput), 
variadicArgs...) + }) + return _c +} + +func (_c *CloudWatchLogs_ListAggregateLogGroupSummaries_Call) Return(_a0 *cloudwatchlogs.ListAggregateLogGroupSummariesOutput, _a1 error) *CloudWatchLogs_ListAggregateLogGroupSummaries_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *CloudWatchLogs_ListAggregateLogGroupSummaries_Call) RunAndReturn(run func(context.Context, *cloudwatchlogs.ListAggregateLogGroupSummariesInput, ...func(*cloudwatchlogs.Options)) (*cloudwatchlogs.ListAggregateLogGroupSummariesOutput, error)) *CloudWatchLogs_ListAggregateLogGroupSummaries_Call { + _c.Call.Return(run) + return _c +} + // ListAnomalies provides a mock function with given fields: ctx, params, optFns func (_m *CloudWatchLogs) ListAnomalies(ctx context.Context, params *cloudwatchlogs.ListAnomaliesInput, optFns ...func(*cloudwatchlogs.Options)) (*cloudwatchlogs.ListAnomaliesOutput, error) { _va := make([]interface{}, len(optFns)) @@ -4907,6 +5499,80 @@ func (_c *CloudWatchLogs_ListScheduledQueries_Call) RunAndReturn(run func(contex return _c } +// ListSourcesForS3TableIntegration provides a mock function with given fields: ctx, params, optFns +func (_m *CloudWatchLogs) ListSourcesForS3TableIntegration(ctx context.Context, params *cloudwatchlogs.ListSourcesForS3TableIntegrationInput, optFns ...func(*cloudwatchlogs.Options)) (*cloudwatchlogs.ListSourcesForS3TableIntegrationOutput, error) { + _va := make([]interface{}, len(optFns)) + for _i := range optFns { + _va[_i] = optFns[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, params) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) 
+ + if len(ret) == 0 { + panic("no return value specified for ListSourcesForS3TableIntegration") + } + + var r0 *cloudwatchlogs.ListSourcesForS3TableIntegrationOutput + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *cloudwatchlogs.ListSourcesForS3TableIntegrationInput, ...func(*cloudwatchlogs.Options)) (*cloudwatchlogs.ListSourcesForS3TableIntegrationOutput, error)); ok { + return rf(ctx, params, optFns...) + } + if rf, ok := ret.Get(0).(func(context.Context, *cloudwatchlogs.ListSourcesForS3TableIntegrationInput, ...func(*cloudwatchlogs.Options)) *cloudwatchlogs.ListSourcesForS3TableIntegrationOutput); ok { + r0 = rf(ctx, params, optFns...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*cloudwatchlogs.ListSourcesForS3TableIntegrationOutput) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *cloudwatchlogs.ListSourcesForS3TableIntegrationInput, ...func(*cloudwatchlogs.Options)) error); ok { + r1 = rf(ctx, params, optFns...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CloudWatchLogs_ListSourcesForS3TableIntegration_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListSourcesForS3TableIntegration' +type CloudWatchLogs_ListSourcesForS3TableIntegration_Call struct { + *mock.Call +} + +// ListSourcesForS3TableIntegration is a helper method to define mock.On call +// - ctx context.Context +// - params *cloudwatchlogs.ListSourcesForS3TableIntegrationInput +// - optFns ...func(*cloudwatchlogs.Options) +func (_e *CloudWatchLogs_Expecter) ListSourcesForS3TableIntegration(ctx interface{}, params interface{}, optFns ...interface{}) *CloudWatchLogs_ListSourcesForS3TableIntegration_Call { + return &CloudWatchLogs_ListSourcesForS3TableIntegration_Call{Call: _e.mock.On("ListSourcesForS3TableIntegration", + append([]interface{}{ctx, params}, optFns...)...)} +} + +func (_c *CloudWatchLogs_ListSourcesForS3TableIntegration_Call) Run(run func(ctx context.Context, params 
*cloudwatchlogs.ListSourcesForS3TableIntegrationInput, optFns ...func(*cloudwatchlogs.Options))) *CloudWatchLogs_ListSourcesForS3TableIntegration_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]func(*cloudwatchlogs.Options), len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(func(*cloudwatchlogs.Options)) + } + } + run(args[0].(context.Context), args[1].(*cloudwatchlogs.ListSourcesForS3TableIntegrationInput), variadicArgs...) + }) + return _c +} + +func (_c *CloudWatchLogs_ListSourcesForS3TableIntegration_Call) Return(_a0 *cloudwatchlogs.ListSourcesForS3TableIntegrationOutput, _a1 error) *CloudWatchLogs_ListSourcesForS3TableIntegration_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *CloudWatchLogs_ListSourcesForS3TableIntegration_Call) RunAndReturn(run func(context.Context, *cloudwatchlogs.ListSourcesForS3TableIntegrationInput, ...func(*cloudwatchlogs.Options)) (*cloudwatchlogs.ListSourcesForS3TableIntegrationOutput, error)) *CloudWatchLogs_ListSourcesForS3TableIntegration_Call { + _c.Call.Return(run) + return _c +} + // ListTagsForResource provides a mock function with given fields: ctx, params, optFns func (_m *CloudWatchLogs) ListTagsForResource(ctx context.Context, params *cloudwatchlogs.ListTagsForResourceInput, optFns ...func(*cloudwatchlogs.Options)) (*cloudwatchlogs.ListTagsForResourceOutput, error) { _va := make([]interface{}, len(optFns)) diff --git a/pkg/eks/mocksv2/EC2.go b/pkg/eks/mocksv2/EC2.go index 912ff2f6d9..13f97757b7 100644 --- a/pkg/eks/mocksv2/EC2.go +++ b/pkg/eks/mocksv2/EC2.go @@ -9050,6 +9050,154 @@ func (_c *EC2_CreateRouteTable_Call) RunAndReturn(run func(context.Context, *ec2 return _c } +// CreateSecondaryNetwork provides a mock function with given fields: ctx, params, optFns +func (_m *EC2) CreateSecondaryNetwork(ctx context.Context, params *ec2.CreateSecondaryNetworkInput, optFns ...func(*ec2.Options)) (*ec2.CreateSecondaryNetworkOutput, error) { + _va := 
make([]interface{}, len(optFns)) + for _i := range optFns { + _va[_i] = optFns[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, params) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for CreateSecondaryNetwork") + } + + var r0 *ec2.CreateSecondaryNetworkOutput + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *ec2.CreateSecondaryNetworkInput, ...func(*ec2.Options)) (*ec2.CreateSecondaryNetworkOutput, error)); ok { + return rf(ctx, params, optFns...) + } + if rf, ok := ret.Get(0).(func(context.Context, *ec2.CreateSecondaryNetworkInput, ...func(*ec2.Options)) *ec2.CreateSecondaryNetworkOutput); ok { + r0 = rf(ctx, params, optFns...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*ec2.CreateSecondaryNetworkOutput) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *ec2.CreateSecondaryNetworkInput, ...func(*ec2.Options)) error); ok { + r1 = rf(ctx, params, optFns...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EC2_CreateSecondaryNetwork_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateSecondaryNetwork' +type EC2_CreateSecondaryNetwork_Call struct { + *mock.Call +} + +// CreateSecondaryNetwork is a helper method to define mock.On call +// - ctx context.Context +// - params *ec2.CreateSecondaryNetworkInput +// - optFns ...func(*ec2.Options) +func (_e *EC2_Expecter) CreateSecondaryNetwork(ctx interface{}, params interface{}, optFns ...interface{}) *EC2_CreateSecondaryNetwork_Call { + return &EC2_CreateSecondaryNetwork_Call{Call: _e.mock.On("CreateSecondaryNetwork", + append([]interface{}{ctx, params}, optFns...)...)} +} + +func (_c *EC2_CreateSecondaryNetwork_Call) Run(run func(ctx context.Context, params *ec2.CreateSecondaryNetworkInput, optFns ...func(*ec2.Options))) *EC2_CreateSecondaryNetwork_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]func(*ec2.Options), len(args)-2) + 
for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(func(*ec2.Options)) + } + } + run(args[0].(context.Context), args[1].(*ec2.CreateSecondaryNetworkInput), variadicArgs...) + }) + return _c +} + +func (_c *EC2_CreateSecondaryNetwork_Call) Return(_a0 *ec2.CreateSecondaryNetworkOutput, _a1 error) *EC2_CreateSecondaryNetwork_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EC2_CreateSecondaryNetwork_Call) RunAndReturn(run func(context.Context, *ec2.CreateSecondaryNetworkInput, ...func(*ec2.Options)) (*ec2.CreateSecondaryNetworkOutput, error)) *EC2_CreateSecondaryNetwork_Call { + _c.Call.Return(run) + return _c +} + +// CreateSecondarySubnet provides a mock function with given fields: ctx, params, optFns +func (_m *EC2) CreateSecondarySubnet(ctx context.Context, params *ec2.CreateSecondarySubnetInput, optFns ...func(*ec2.Options)) (*ec2.CreateSecondarySubnetOutput, error) { + _va := make([]interface{}, len(optFns)) + for _i := range optFns { + _va[_i] = optFns[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, params) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for CreateSecondarySubnet") + } + + var r0 *ec2.CreateSecondarySubnetOutput + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *ec2.CreateSecondarySubnetInput, ...func(*ec2.Options)) (*ec2.CreateSecondarySubnetOutput, error)); ok { + return rf(ctx, params, optFns...) + } + if rf, ok := ret.Get(0).(func(context.Context, *ec2.CreateSecondarySubnetInput, ...func(*ec2.Options)) *ec2.CreateSecondarySubnetOutput); ok { + r0 = rf(ctx, params, optFns...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*ec2.CreateSecondarySubnetOutput) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *ec2.CreateSecondarySubnetInput, ...func(*ec2.Options)) error); ok { + r1 = rf(ctx, params, optFns...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EC2_CreateSecondarySubnet_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateSecondarySubnet' +type EC2_CreateSecondarySubnet_Call struct { + *mock.Call +} + +// CreateSecondarySubnet is a helper method to define mock.On call +// - ctx context.Context +// - params *ec2.CreateSecondarySubnetInput +// - optFns ...func(*ec2.Options) +func (_e *EC2_Expecter) CreateSecondarySubnet(ctx interface{}, params interface{}, optFns ...interface{}) *EC2_CreateSecondarySubnet_Call { + return &EC2_CreateSecondarySubnet_Call{Call: _e.mock.On("CreateSecondarySubnet", + append([]interface{}{ctx, params}, optFns...)...)} +} + +func (_c *EC2_CreateSecondarySubnet_Call) Run(run func(ctx context.Context, params *ec2.CreateSecondarySubnetInput, optFns ...func(*ec2.Options))) *EC2_CreateSecondarySubnet_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]func(*ec2.Options), len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(func(*ec2.Options)) + } + } + run(args[0].(context.Context), args[1].(*ec2.CreateSecondarySubnetInput), variadicArgs...) 
+ }) + return _c +} + +func (_c *EC2_CreateSecondarySubnet_Call) Return(_a0 *ec2.CreateSecondarySubnetOutput, _a1 error) *EC2_CreateSecondarySubnet_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EC2_CreateSecondarySubnet_Call) RunAndReturn(run func(context.Context, *ec2.CreateSecondarySubnetInput, ...func(*ec2.Options)) (*ec2.CreateSecondarySubnetOutput, error)) *EC2_CreateSecondarySubnet_Call { + _c.Call.Return(run) + return _c +} + // CreateSecurityGroup provides a mock function with given fields: ctx, params, optFns func (_m *EC2) CreateSecurityGroup(ctx context.Context, params *ec2.CreateSecurityGroupInput, optFns ...func(*ec2.Options)) (*ec2.CreateSecurityGroupOutput, error) { _va := make([]interface{}, len(optFns)) @@ -15858,6 +16006,154 @@ func (_c *EC2_DeleteRouteTable_Call) RunAndReturn(run func(context.Context, *ec2 return _c } +// DeleteSecondaryNetwork provides a mock function with given fields: ctx, params, optFns +func (_m *EC2) DeleteSecondaryNetwork(ctx context.Context, params *ec2.DeleteSecondaryNetworkInput, optFns ...func(*ec2.Options)) (*ec2.DeleteSecondaryNetworkOutput, error) { + _va := make([]interface{}, len(optFns)) + for _i := range optFns { + _va[_i] = optFns[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, params) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for DeleteSecondaryNetwork") + } + + var r0 *ec2.DeleteSecondaryNetworkOutput + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *ec2.DeleteSecondaryNetworkInput, ...func(*ec2.Options)) (*ec2.DeleteSecondaryNetworkOutput, error)); ok { + return rf(ctx, params, optFns...) + } + if rf, ok := ret.Get(0).(func(context.Context, *ec2.DeleteSecondaryNetworkInput, ...func(*ec2.Options)) *ec2.DeleteSecondaryNetworkOutput); ok { + r0 = rf(ctx, params, optFns...) 
+ } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*ec2.DeleteSecondaryNetworkOutput) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *ec2.DeleteSecondaryNetworkInput, ...func(*ec2.Options)) error); ok { + r1 = rf(ctx, params, optFns...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EC2_DeleteSecondaryNetwork_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteSecondaryNetwork' +type EC2_DeleteSecondaryNetwork_Call struct { + *mock.Call +} + +// DeleteSecondaryNetwork is a helper method to define mock.On call +// - ctx context.Context +// - params *ec2.DeleteSecondaryNetworkInput +// - optFns ...func(*ec2.Options) +func (_e *EC2_Expecter) DeleteSecondaryNetwork(ctx interface{}, params interface{}, optFns ...interface{}) *EC2_DeleteSecondaryNetwork_Call { + return &EC2_DeleteSecondaryNetwork_Call{Call: _e.mock.On("DeleteSecondaryNetwork", + append([]interface{}{ctx, params}, optFns...)...)} +} + +func (_c *EC2_DeleteSecondaryNetwork_Call) Run(run func(ctx context.Context, params *ec2.DeleteSecondaryNetworkInput, optFns ...func(*ec2.Options))) *EC2_DeleteSecondaryNetwork_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]func(*ec2.Options), len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(func(*ec2.Options)) + } + } + run(args[0].(context.Context), args[1].(*ec2.DeleteSecondaryNetworkInput), variadicArgs...) 
+ }) + return _c +} + +func (_c *EC2_DeleteSecondaryNetwork_Call) Return(_a0 *ec2.DeleteSecondaryNetworkOutput, _a1 error) *EC2_DeleteSecondaryNetwork_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EC2_DeleteSecondaryNetwork_Call) RunAndReturn(run func(context.Context, *ec2.DeleteSecondaryNetworkInput, ...func(*ec2.Options)) (*ec2.DeleteSecondaryNetworkOutput, error)) *EC2_DeleteSecondaryNetwork_Call { + _c.Call.Return(run) + return _c +} + +// DeleteSecondarySubnet provides a mock function with given fields: ctx, params, optFns +func (_m *EC2) DeleteSecondarySubnet(ctx context.Context, params *ec2.DeleteSecondarySubnetInput, optFns ...func(*ec2.Options)) (*ec2.DeleteSecondarySubnetOutput, error) { + _va := make([]interface{}, len(optFns)) + for _i := range optFns { + _va[_i] = optFns[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, params) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for DeleteSecondarySubnet") + } + + var r0 *ec2.DeleteSecondarySubnetOutput + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *ec2.DeleteSecondarySubnetInput, ...func(*ec2.Options)) (*ec2.DeleteSecondarySubnetOutput, error)); ok { + return rf(ctx, params, optFns...) + } + if rf, ok := ret.Get(0).(func(context.Context, *ec2.DeleteSecondarySubnetInput, ...func(*ec2.Options)) *ec2.DeleteSecondarySubnetOutput); ok { + r0 = rf(ctx, params, optFns...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*ec2.DeleteSecondarySubnetOutput) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *ec2.DeleteSecondarySubnetInput, ...func(*ec2.Options)) error); ok { + r1 = rf(ctx, params, optFns...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EC2_DeleteSecondarySubnet_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteSecondarySubnet' +type EC2_DeleteSecondarySubnet_Call struct { + *mock.Call +} + +// DeleteSecondarySubnet is a helper method to define mock.On call +// - ctx context.Context +// - params *ec2.DeleteSecondarySubnetInput +// - optFns ...func(*ec2.Options) +func (_e *EC2_Expecter) DeleteSecondarySubnet(ctx interface{}, params interface{}, optFns ...interface{}) *EC2_DeleteSecondarySubnet_Call { + return &EC2_DeleteSecondarySubnet_Call{Call: _e.mock.On("DeleteSecondarySubnet", + append([]interface{}{ctx, params}, optFns...)...)} +} + +func (_c *EC2_DeleteSecondarySubnet_Call) Run(run func(ctx context.Context, params *ec2.DeleteSecondarySubnetInput, optFns ...func(*ec2.Options))) *EC2_DeleteSecondarySubnet_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]func(*ec2.Options), len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(func(*ec2.Options)) + } + } + run(args[0].(context.Context), args[1].(*ec2.DeleteSecondarySubnetInput), variadicArgs...) 
+ }) + return _c +} + +func (_c *EC2_DeleteSecondarySubnet_Call) Return(_a0 *ec2.DeleteSecondarySubnetOutput, _a1 error) *EC2_DeleteSecondarySubnet_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EC2_DeleteSecondarySubnet_Call) RunAndReturn(run func(context.Context, *ec2.DeleteSecondarySubnetInput, ...func(*ec2.Options)) (*ec2.DeleteSecondarySubnetOutput, error)) *EC2_DeleteSecondarySubnet_Call { + _c.Call.Return(run) + return _c +} + // DeleteSecurityGroup provides a mock function with given fields: ctx, params, optFns func (_m *EC2) DeleteSecurityGroup(ctx context.Context, params *ec2.DeleteSecurityGroupInput, optFns ...func(*ec2.Options)) (*ec2.DeleteSecurityGroupOutput, error) { _va := make([]interface{}, len(optFns)) @@ -28290,6 +28586,228 @@ func (_c *EC2_DescribeScheduledInstances_Call) RunAndReturn(run func(context.Con return _c } +// DescribeSecondaryInterfaces provides a mock function with given fields: ctx, params, optFns +func (_m *EC2) DescribeSecondaryInterfaces(ctx context.Context, params *ec2.DescribeSecondaryInterfacesInput, optFns ...func(*ec2.Options)) (*ec2.DescribeSecondaryInterfacesOutput, error) { + _va := make([]interface{}, len(optFns)) + for _i := range optFns { + _va[_i] = optFns[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, params) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for DescribeSecondaryInterfaces") + } + + var r0 *ec2.DescribeSecondaryInterfacesOutput + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *ec2.DescribeSecondaryInterfacesInput, ...func(*ec2.Options)) (*ec2.DescribeSecondaryInterfacesOutput, error)); ok { + return rf(ctx, params, optFns...) + } + if rf, ok := ret.Get(0).(func(context.Context, *ec2.DescribeSecondaryInterfacesInput, ...func(*ec2.Options)) *ec2.DescribeSecondaryInterfacesOutput); ok { + r0 = rf(ctx, params, optFns...) 
+ } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*ec2.DescribeSecondaryInterfacesOutput) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *ec2.DescribeSecondaryInterfacesInput, ...func(*ec2.Options)) error); ok { + r1 = rf(ctx, params, optFns...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EC2_DescribeSecondaryInterfaces_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DescribeSecondaryInterfaces' +type EC2_DescribeSecondaryInterfaces_Call struct { + *mock.Call +} + +// DescribeSecondaryInterfaces is a helper method to define mock.On call +// - ctx context.Context +// - params *ec2.DescribeSecondaryInterfacesInput +// - optFns ...func(*ec2.Options) +func (_e *EC2_Expecter) DescribeSecondaryInterfaces(ctx interface{}, params interface{}, optFns ...interface{}) *EC2_DescribeSecondaryInterfaces_Call { + return &EC2_DescribeSecondaryInterfaces_Call{Call: _e.mock.On("DescribeSecondaryInterfaces", + append([]interface{}{ctx, params}, optFns...)...)} +} + +func (_c *EC2_DescribeSecondaryInterfaces_Call) Run(run func(ctx context.Context, params *ec2.DescribeSecondaryInterfacesInput, optFns ...func(*ec2.Options))) *EC2_DescribeSecondaryInterfaces_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]func(*ec2.Options), len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(func(*ec2.Options)) + } + } + run(args[0].(context.Context), args[1].(*ec2.DescribeSecondaryInterfacesInput), variadicArgs...) 
+ }) + return _c +} + +func (_c *EC2_DescribeSecondaryInterfaces_Call) Return(_a0 *ec2.DescribeSecondaryInterfacesOutput, _a1 error) *EC2_DescribeSecondaryInterfaces_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EC2_DescribeSecondaryInterfaces_Call) RunAndReturn(run func(context.Context, *ec2.DescribeSecondaryInterfacesInput, ...func(*ec2.Options)) (*ec2.DescribeSecondaryInterfacesOutput, error)) *EC2_DescribeSecondaryInterfaces_Call { + _c.Call.Return(run) + return _c +} + +// DescribeSecondaryNetworks provides a mock function with given fields: ctx, params, optFns +func (_m *EC2) DescribeSecondaryNetworks(ctx context.Context, params *ec2.DescribeSecondaryNetworksInput, optFns ...func(*ec2.Options)) (*ec2.DescribeSecondaryNetworksOutput, error) { + _va := make([]interface{}, len(optFns)) + for _i := range optFns { + _va[_i] = optFns[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, params) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for DescribeSecondaryNetworks") + } + + var r0 *ec2.DescribeSecondaryNetworksOutput + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *ec2.DescribeSecondaryNetworksInput, ...func(*ec2.Options)) (*ec2.DescribeSecondaryNetworksOutput, error)); ok { + return rf(ctx, params, optFns...) + } + if rf, ok := ret.Get(0).(func(context.Context, *ec2.DescribeSecondaryNetworksInput, ...func(*ec2.Options)) *ec2.DescribeSecondaryNetworksOutput); ok { + r0 = rf(ctx, params, optFns...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*ec2.DescribeSecondaryNetworksOutput) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *ec2.DescribeSecondaryNetworksInput, ...func(*ec2.Options)) error); ok { + r1 = rf(ctx, params, optFns...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EC2_DescribeSecondaryNetworks_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DescribeSecondaryNetworks' +type EC2_DescribeSecondaryNetworks_Call struct { + *mock.Call +} + +// DescribeSecondaryNetworks is a helper method to define mock.On call +// - ctx context.Context +// - params *ec2.DescribeSecondaryNetworksInput +// - optFns ...func(*ec2.Options) +func (_e *EC2_Expecter) DescribeSecondaryNetworks(ctx interface{}, params interface{}, optFns ...interface{}) *EC2_DescribeSecondaryNetworks_Call { + return &EC2_DescribeSecondaryNetworks_Call{Call: _e.mock.On("DescribeSecondaryNetworks", + append([]interface{}{ctx, params}, optFns...)...)} +} + +func (_c *EC2_DescribeSecondaryNetworks_Call) Run(run func(ctx context.Context, params *ec2.DescribeSecondaryNetworksInput, optFns ...func(*ec2.Options))) *EC2_DescribeSecondaryNetworks_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]func(*ec2.Options), len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(func(*ec2.Options)) + } + } + run(args[0].(context.Context), args[1].(*ec2.DescribeSecondaryNetworksInput), variadicArgs...) 
+ }) + return _c +} + +func (_c *EC2_DescribeSecondaryNetworks_Call) Return(_a0 *ec2.DescribeSecondaryNetworksOutput, _a1 error) *EC2_DescribeSecondaryNetworks_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EC2_DescribeSecondaryNetworks_Call) RunAndReturn(run func(context.Context, *ec2.DescribeSecondaryNetworksInput, ...func(*ec2.Options)) (*ec2.DescribeSecondaryNetworksOutput, error)) *EC2_DescribeSecondaryNetworks_Call { + _c.Call.Return(run) + return _c +} + +// DescribeSecondarySubnets provides a mock function with given fields: ctx, params, optFns +func (_m *EC2) DescribeSecondarySubnets(ctx context.Context, params *ec2.DescribeSecondarySubnetsInput, optFns ...func(*ec2.Options)) (*ec2.DescribeSecondarySubnetsOutput, error) { + _va := make([]interface{}, len(optFns)) + for _i := range optFns { + _va[_i] = optFns[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, params) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for DescribeSecondarySubnets") + } + + var r0 *ec2.DescribeSecondarySubnetsOutput + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *ec2.DescribeSecondarySubnetsInput, ...func(*ec2.Options)) (*ec2.DescribeSecondarySubnetsOutput, error)); ok { + return rf(ctx, params, optFns...) + } + if rf, ok := ret.Get(0).(func(context.Context, *ec2.DescribeSecondarySubnetsInput, ...func(*ec2.Options)) *ec2.DescribeSecondarySubnetsOutput); ok { + r0 = rf(ctx, params, optFns...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*ec2.DescribeSecondarySubnetsOutput) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *ec2.DescribeSecondarySubnetsInput, ...func(*ec2.Options)) error); ok { + r1 = rf(ctx, params, optFns...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EC2_DescribeSecondarySubnets_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DescribeSecondarySubnets' +type EC2_DescribeSecondarySubnets_Call struct { + *mock.Call +} + +// DescribeSecondarySubnets is a helper method to define mock.On call +// - ctx context.Context +// - params *ec2.DescribeSecondarySubnetsInput +// - optFns ...func(*ec2.Options) +func (_e *EC2_Expecter) DescribeSecondarySubnets(ctx interface{}, params interface{}, optFns ...interface{}) *EC2_DescribeSecondarySubnets_Call { + return &EC2_DescribeSecondarySubnets_Call{Call: _e.mock.On("DescribeSecondarySubnets", + append([]interface{}{ctx, params}, optFns...)...)} +} + +func (_c *EC2_DescribeSecondarySubnets_Call) Run(run func(ctx context.Context, params *ec2.DescribeSecondarySubnetsInput, optFns ...func(*ec2.Options))) *EC2_DescribeSecondarySubnets_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]func(*ec2.Options), len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(func(*ec2.Options)) + } + } + run(args[0].(context.Context), args[1].(*ec2.DescribeSecondarySubnetsInput), variadicArgs...) 
+ }) + return _c +} + +func (_c *EC2_DescribeSecondarySubnets_Call) Return(_a0 *ec2.DescribeSecondarySubnetsOutput, _a1 error) *EC2_DescribeSecondarySubnets_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EC2_DescribeSecondarySubnets_Call) RunAndReturn(run func(context.Context, *ec2.DescribeSecondarySubnetsInput, ...func(*ec2.Options)) (*ec2.DescribeSecondarySubnetsOutput, error)) *EC2_DescribeSecondarySubnets_Call { + _c.Call.Return(run) + return _c +} + // DescribeSecurityGroupReferences provides a mock function with given fields: ctx, params, optFns func (_m *EC2) DescribeSecurityGroupReferences(ctx context.Context, params *ec2.DescribeSecurityGroupReferencesInput, optFns ...func(*ec2.Options)) (*ec2.DescribeSecurityGroupReferencesOutput, error) { _va := make([]interface{}, len(optFns)) diff --git a/pkg/elb/cleanup.go b/pkg/elb/cleanup.go index 10d1b6b086..7fd31b0960 100644 --- a/pkg/elb/cleanup.go +++ b/pkg/elb/cleanup.go @@ -25,7 +25,9 @@ import ( k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" cloudprovider "k8s.io/cloud-provider" + gatewayclient "sigs.k8s.io/gateway-api/pkg/client/clientset/versioned" api "github.com/weaveworks/eksctl/pkg/apis/eksctl.io/v1alpha5" "github.com/weaveworks/eksctl/pkg/awsapi" @@ -63,9 +65,9 @@ type DescribeLoadBalancersAPIV2 interface { DescribeLoadBalancers(ctx context.Context, params *elasticloadbalancingv2.DescribeLoadBalancersInput, optFns ...func(*elasticloadbalancingv2.Options)) (*elasticloadbalancingv2.DescribeLoadBalancersOutput, error) } -// Cleanup finds and deletes any dangling ELBs associated to a Kubernetes Service +// Cleanup finds and deletes any dangling ELBs associated to a Kubernetes Service, Ingress, or Gateway func Cleanup(ctx context.Context, ec2API awsapi.EC2, elbAPI DescribeLoadBalancersAPI, elbv2API DescribeLoadBalancersAPIV2, - kubernetesCS kubernetes.Interface, clusterConfig 
*api.ClusterConfig) error { + kubernetesCS kubernetes.Interface, restConfig *rest.Config, clusterConfig *api.ClusterConfig) error { deadline, ok := ctx.Deadline() if !ok { @@ -90,6 +92,22 @@ func Cleanup(ctx context.Context, ec2API awsapi.EC2, elbAPI DescribeLoadBalancer return errors.New(errStr) } + // List Gateways only if restConfig is available (Gateway API cleanup requires gateway client) + var gateways []Gateway + var gwClient gatewayclient.Interface + if restConfig != nil { + gateways, gwClient, err = listGateway(ctx, restConfig) + if err != nil { + errStr := fmt.Sprintf("cannot list Kubernetes Gateways: %s", err) + if k8serrors.IsForbidden(err) { + errStr = fmt.Sprintf("%s (deleting a cluster requires permission to list Kubernetes Gateways)", errStr) + } + return errors.New(errStr) + } + } else { + logger.Debug("skipping Gateway API cleanup (restConfig not available)") + } + // Delete Services of type 'LoadBalancer' and Ingresses with IngressClass of alb // collecting their ELBs, NLBs and ALBs to wait for them to be deleted later on. 
awsLoadBalancers := map[string]loadBalancer{} @@ -143,6 +161,32 @@ func Cleanup(ctx context.Context, ec2API awsapi.EC2, elbAPI DescribeLoadBalancer return errors.New(errStr) } } + // For k8s Kind Gateway + for _, g := range gateways { + gatewayMetadata := g.GetMetadata() + + lb, err := getGatewayLoadBalancer(ctx, ec2API, elbAPI, elbv2API, gwClient, clusterConfig.Metadata.Name, g) + if err != nil { + return fmt.Errorf("cannot obtain information for load balancer from Gateway %s/%s: %w", + gatewayMetadata.Namespace, gatewayMetadata.Name, err) + } + if lb == nil { + continue + } + logger.Debug( + "tracking deletion of load balancer %s of kind %d with security groups %v", + lb.name, lb.kind, convertStringSetToSlice(lb.ownedSecurityGroupIDs), + ) + awsLoadBalancers[lb.name] = *lb + logger.Debug("deleting Gateway %s/%s", gatewayMetadata.Namespace, gatewayMetadata.Name) + if err := g.Delete(ctx, gwClient); err != nil { + errStr := fmt.Sprintf("cannot delete Kubernetes Gateway %s/%s: %s", gatewayMetadata.Namespace, gatewayMetadata.Name, err) + if k8serrors.IsForbidden(err) { + errStr = fmt.Sprintf("%s (deleting a cluster requires permission to delete Kubernetes Gateway)", errStr) + } + return errors.New(errStr) + } + } // Wait for all the load balancers backing the LoadBalancer services to disappear pollInterval := 2 * time.Second @@ -406,13 +450,22 @@ func getSecurityGroupsOwnedByLoadBalancer(ctx context.Context, ec2API awsapi.EC2 switch loadBalancerKind { case network: - // V2 ELBs just use the Security Group of the EC2 instances - return map[string]struct{}{}, nil + // NLBs can now have security groups attached + nlb, err := describeELBv2LoadBalancer(ctx, elbv2API, loadBalancerName) + if err != nil { + return nil, fmt.Errorf("cannot describe NLB: %w", err) + } + if nlb == nil { + // The load balancer wasn't found + return map[string]struct{}{}, nil + } + groupIDs = nlb.SecurityGroups + case application: - alb, err := describeApplicationLoadBalancer(ctx, elbv2API, 
loadBalancerName) + alb, err := describeELBv2LoadBalancer(ctx, elbv2API, loadBalancerName) if err != nil { - return nil, fmt.Errorf("cannot describe ELB: %w", err) + return nil, fmt.Errorf("cannot describe ALB: %w", err) } if alb == nil { // The load balancer wasn't found @@ -503,7 +556,7 @@ func elbExists(ctx context.Context, elbAPI DescribeLoadBalancersAPI, elbv2API De return desc != nil, err } -func describeApplicationLoadBalancer(ctx context.Context, elbv2API DescribeLoadBalancersAPIV2, +func describeELBv2LoadBalancer(ctx context.Context, elbv2API DescribeLoadBalancersAPIV2, name string) (*elbv2types.LoadBalancer, error) { response, err := elbv2API.DescribeLoadBalancers(ctx, &elasticloadbalancingv2.DescribeLoadBalancersInput{ diff --git a/pkg/elb/cleanup_test.go b/pkg/elb/cleanup_test.go index 127b84ef40..78473cb6f0 100644 --- a/pkg/elb/cleanup_test.go +++ b/pkg/elb/cleanup_test.go @@ -115,4 +115,45 @@ var _ = Describe("ELB Cleanup", func() { } }) }) + + When("Verifying security group cleanup works for Gateway API load balancers", func() { + It("should handle Gateway API load balancers the same as Ingress ALBs", func() { + // Gateway API ALBs use the same naming pattern as Ingress ALBs + + testCases := []struct { + description string + hostname string + expected string + }{ + { + description: "Gateway LB with standard format", + hostname: "k8s-default-mygateway-abc123-1234567890.us-west-2.elb.amazonaws.com", + expected: "k8s-default-mygateway-abc123", + }, + { + description: "Internal Gateway LB", + hostname: "internal-k8s-default-gateway-xyz789-987654321.us-west-2.elb.amazonaws.com", + expected: "k8s-default-gateway-xyz789", + }, + { + description: "Gateway LB with namespace and gateway name", + hostname: "k8s-prod-api-gw-hash1234-1111111111.eu-central-1.elb.amazonaws.com", + expected: "k8s-prod-api-gw-hash1234", + }, + } + + for _, tc := range testCases { + // Test that getGatewayLBName produces the expected name + name, err := 
getGatewayLBName([]string{tc.hostname}) + Expect(err).NotTo(HaveOccurred(), "Failed for: %s", tc.description) + Expect(name).To(Equal(tc.expected), "Failed for: %s", tc.description) + + // Verify the name is also compatible with getIngressELBName + // This demonstrates that Gateway and Ingress ALBs use the same naming pattern + ingressName, err := getIngressELBName([]string{tc.hostname}) + Expect(err).NotTo(HaveOccurred(), "Failed for: %s", tc.description) + Expect(ingressName).To(Equal(name), "Gateway and Ingress name parsing should produce identical results for: %s", tc.description) + } + }) + }) }) diff --git a/pkg/elb/gateway.go b/pkg/elb/gateway.go new file mode 100644 index 0000000000..0c8f8d6f63 --- /dev/null +++ b/pkg/elb/gateway.go @@ -0,0 +1,229 @@ +package elb + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/kris-nova/logger" + + k8serrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/rest" + + gatewayv1 "sigs.k8s.io/gateway-api/apis/v1" + gatewayclient "sigs.k8s.io/gateway-api/pkg/client/clientset/versioned" + + "github.com/weaveworks/eksctl/pkg/awsapi" +) + +const ( + // AWS Load Balancer Controller names for Gateway API + awsLBCALBController = "gateway.k8s.aws/alb" + awsLBCNLBController = "gateway.k8s.aws/nlb" +) + +// Gateway interface abstracts over different Gateway API versions +type Gateway interface { + Delete(ctx context.Context, gwClient gatewayclient.Interface) error + GetGatewayClassName() string + GetMetadata() metav1.ObjectMeta + GetLoadBalancerAddresses() []string +} + +// v1Gateway wraps Gateway API v1 Gateway +type v1Gateway struct { + gateway gatewayv1.Gateway +} + +func (g *v1Gateway) Delete(ctx context.Context, gwClient gatewayclient.Interface) error { + return gwClient.GatewayV1().Gateways(g.gateway.Namespace). 
+ Delete(ctx, g.gateway.Name, metav1.DeleteOptions{}) +} + +func (g *v1Gateway) GetGatewayClassName() string { + return string(g.gateway.Spec.GatewayClassName) +} + +func (g *v1Gateway) GetMetadata() metav1.ObjectMeta { + return g.gateway.ObjectMeta +} + +func (g *v1Gateway) GetLoadBalancerAddresses() []string { + var addresses []string + for _, addr := range g.gateway.Status.Addresses { + if addr.Type != nil && *addr.Type == gatewayv1.HostnameAddressType { + addresses = append(addresses, addr.Value) + } + } + return addresses +} + +// listGateway lists all Gateway resources across all namespaces using Gateway API v1 +// Returns an empty list if Gateway API CRDs are not installed (no error) +func listGateway(ctx context.Context, restConfig *rest.Config) ([]Gateway, gatewayclient.Interface, error) { + gwClient, err := gatewayclient.NewForConfig(restConfig) + if err != nil { + return nil, nil, fmt.Errorf("failed to create gateway client: %w", err) + } + + logger.Debug("using v1 Gateway API") + + gateways, err := gwClient.GatewayV1().Gateways(metav1.NamespaceAll).List(ctx, metav1.ListOptions{}) + if err != nil { + // Check if this is a "not found" error (CRDs not installed) + if isGatewayAPINotFoundErr(err) { + logger.Debug("Gateway API v1 CRDs not found, skipping Gateway cleanup") + return []Gateway{}, gwClient, nil + } + return nil, nil, err + } + + var gatewayList []Gateway + for i := range gateways.Items { + gatewayList = append(gatewayList, &v1Gateway{gateway: gateways.Items[i]}) + } + return gatewayList, gwClient, nil +} + +// isGatewayAPINotFoundErr checks if the error indicates Gateway API CRDs are not installed +func isGatewayAPINotFoundErr(err error) bool { + if err == nil { + return false + } + + // Check for NotFound status error + if k8serrors.IsNotFound(err) { + return true + } + + // Check for common error messages when CRDs are not installed + errStr := err.Error() + return strings.Contains(errStr, "not found") || + strings.Contains(errStr, "could not 
find the requested resource") || + strings.Contains(errStr, "no matches for kind") || + strings.Contains(errStr, "the server could not find the requested resource") +} + +// getGatewayClass retrieves a GatewayClass by name and returns its controller name +// Returns empty string if the GatewayClass is not found (no error) +func getGatewayClass(ctx context.Context, gwClient gatewayclient.Interface, gatewayClassName string) (string, error) { + if gatewayClassName == "" { + return "", nil + } + + gatewayClass, err := gwClient.GatewayV1().GatewayClasses().Get(ctx, gatewayClassName, metav1.GetOptions{}) + if err != nil { + // If GatewayClass not found, return empty string (no error) + if k8serrors.IsNotFound(err) { + logger.Debug("GatewayClass %q not found", gatewayClassName) + return "", nil + } + return "", fmt.Errorf("failed to get GatewayClass %q: %w", gatewayClassName, err) + } + return string(gatewayClass.Spec.ControllerName), nil +} + +// isAWSLoadBalancerController checks if the controller name matches AWS LBC patterns +func isAWSLoadBalancerController(controllerName string) bool { + return controllerName == awsLBCALBController || controllerName == awsLBCNLBController +} + +// getGatewayLBName parses the load balancer name from Gateway DNS addresses +// Gateway load balancers follow a similar naming pattern to Ingress ALBs: +// k8s-<namespace>-<gateway-name>-<hash>.<region>.elb.amazonaws.com +// The load balancer name is extracted by removing the region and domain suffix, and the hash suffix +func getGatewayLBName(addresses []string) (string, error) { + if len(addresses) == 0 { + return "", fmt.Errorf("no addresses provided") + } + + // Expected format: k8s-namespace-gateway-hash.region.elb.amazonaws.com + // or internal-k8s-namespace-gateway-hash.region.elb.amazonaws.com for internal load balancers + hostNameParts := strings.Split(addresses[0], ".") + if len(hostNameParts) == 0 || len(hostNameParts[0]) == 0 { + return "", fmt.Errorf("cannot get the hostname: %v", hostNameParts) + } + + name := 
strings.TrimPrefix(hostNameParts[0], "internal-") + + idIdx := strings.LastIndex(name, "-") + if idIdx != -1 { + name = name[:idIdx] + } + + // AWS load balancer names cannot exceed 32 characters + if len(name) > 32 { + return "", fmt.Errorf("parsed name exceeds maximum of 32 characters: %s", name) + } + + return name, nil +} + +// getGatewayLoadBalancer extracts load balancer information from a Gateway resource +// Returns nil if the Gateway is not managed by AWS LBC or has not been provisioned +func getGatewayLoadBalancer(ctx context.Context, ec2API awsapi.EC2, elbAPI DescribeLoadBalancersAPI, + elbv2API DescribeLoadBalancersAPIV2, gwClient gatewayclient.Interface, clusterName string, + gateway Gateway) (*loadBalancer, error) { + + metadata := gateway.GetMetadata() + gatewayClassName := gateway.GetGatewayClassName() + + // Get the GatewayClass to check if it's managed by AWS LBC + controllerName, err := getGatewayClass(ctx, gwClient, gatewayClassName) + if err != nil { + return nil, fmt.Errorf("cannot get GatewayClass %q: %w", gatewayClassName, err) + } + + // Skip Gateways not managed by AWS Load Balancer Controller + if !isAWSLoadBalancerController(controllerName) { + logger.Debug("Gateway %s/%s uses controller %q, not AWS LBC, skip", + metadata.Namespace, metadata.Name, controllerName) + return nil, nil + } + + // Check if the Gateway has been provisioned (status.addresses populated) + addresses := gateway.GetLoadBalancerAddresses() + if len(addresses) == 0 { + logger.Debug("Gateway %s/%s is managed by AWS LBC, but not provisioned yet, skip", + metadata.Namespace, metadata.Name) + return nil, nil + } + + // Parse the load balancer name from the DNS address + name, err := getGatewayLBName(addresses) + if err != nil { + logger.Debug("Gateway %s/%s is managed by AWS LBC, but cannot parse load balancer name, skip: %s", + metadata.Namespace, metadata.Name, err) + return nil, nil + } + + logger.Debug("Gateway load balancer resource name: %s", name) + + // Determine 
load balancer kind based on controller name + var kind loadBalancerKind + switch controllerName { + case awsLBCALBController: + kind = application + case awsLBCNLBController: + kind = network + default: + // This should not happen due to isAWSLoadBalancerController check above + return nil, fmt.Errorf("unexpected AWS LBC controller name: %s", controllerName) + } + + // Retrieve security groups owned by the load balancer + ctx, cleanup := context.WithTimeout(ctx, 30*time.Second) + defer cleanup() + securityGroupIDs, err := getSecurityGroupsOwnedByLoadBalancer(ctx, ec2API, elbAPI, elbv2API, clusterName, name, kind) + if err != nil { + return nil, fmt.Errorf("cannot obtain security groups for Gateway load balancer %s: %w", name, err) + } + + return &loadBalancer{ + name: name, + kind: kind, + ownedSecurityGroupIDs: securityGroupIDs, + }, nil +} diff --git a/pkg/elb/gateway_test.go b/pkg/elb/gateway_test.go new file mode 100644 index 0000000000..389e5943d9 --- /dev/null +++ b/pkg/elb/gateway_test.go @@ -0,0 +1,287 @@ +package elb + +import ( + "context" + "fmt" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + gatewayv1 "sigs.k8s.io/gateway-api/apis/v1" + gatewayfake "sigs.k8s.io/gateway-api/pkg/client/clientset/versioned/fake" +) + +var _ = Describe("Gateway API", func() { + Describe("Gateway Interface", func() { + Describe("v1Gateway", func() { + It("should return correct gateway class name", func() { + className := gatewayv1.ObjectName("test-class") + gateway := &v1Gateway{ + gateway: gatewayv1.Gateway{ + Spec: gatewayv1.GatewaySpec{ + GatewayClassName: className, + }, + }, + } + Expect(gateway.GetGatewayClassName()).To(Equal("test-class")) + }) + + It("should return correct metadata", func() { + gateway := &v1Gateway{ + gateway: gatewayv1.Gateway{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-gateway", + Namespace: "default", + }, + }, + } + meta := gateway.GetMetadata() + Expect(meta.Name).To(Equal("test-gateway")) + Expect(meta.Namespace).To(Equal("default")) + }) + + It("should return load balancer addresses for hostname type", func() { + addrType := gatewayv1.HostnameAddressType + gateway := &v1Gateway{ + gateway: gatewayv1.Gateway{ + Status: gatewayv1.GatewayStatus{ + Addresses: []gatewayv1.GatewayStatusAddress{ + { + Type: &addrType, + Value: "test.elb.amazonaws.com", + }, + }, + }, + }, + } + addresses := gateway.GetLoadBalancerAddresses() + Expect(addresses).To(HaveLen(1)) + Expect(addresses[0]).To(Equal("test.elb.amazonaws.com")) + }) + + It("should filter out non-hostname address types", func() { + hostnameType := gatewayv1.HostnameAddressType + ipType := gatewayv1.IPAddressType + gateway := &v1Gateway{ + gateway: gatewayv1.Gateway{ + Status: gatewayv1.GatewayStatus{ + Addresses: []gatewayv1.GatewayStatusAddress{ + { + Type: &hostnameType, + Value: "test.elb.amazonaws.com", + }, + { + Type: &ipType, + Value: "192.168.1.1", + }, + }, + }, + }, + } + addresses := gateway.GetLoadBalancerAddresses() + Expect(addresses).To(HaveLen(1)) + 
Expect(addresses[0]).To(Equal("test.elb.amazonaws.com")) + }) + + It("should return empty slice when no hostname addresses exist", func() { + gateway := &v1Gateway{ + gateway: gatewayv1.Gateway{ + Status: gatewayv1.GatewayStatus{ + Addresses: []gatewayv1.GatewayStatusAddress{}, + }, + }, + } + addresses := gateway.GetLoadBalancerAddresses() + Expect(addresses).To(BeEmpty()) + }) + }) + }) + + Describe("listGateway", func() { + Context("graceful handling of missing CRDs", func() { + It("should detect CRD not found errors", func() { + testCases := []struct { + errorMsg string + expected bool + }{ + {"not found", true}, + {"could not find the requested resource", true}, + {"no matches for kind", true}, + {"the server could not find the requested resource", true}, + {"some other error", false}, + {"", false}, + } + + for _, tc := range testCases { + var err error + if tc.errorMsg != "" { + err = fmt.Errorf("%s", tc.errorMsg) + } + result := isGatewayAPINotFoundErr(err) + Expect(result).To(Equal(tc.expected), "Error message: %s", tc.errorMsg) + } + }) + + It("should return false for nil error", func() { + Expect(isGatewayAPINotFoundErr(nil)).To(BeFalse()) + }) + }) + }) + + Describe("getGatewayClass", func() { + var ( + ctx context.Context + gwClient *gatewayfake.Clientset + ) + + BeforeEach(func() { + ctx = context.Background() + }) + + It("should return AWS LBC ALB controller name", func() { + gatewayClass := &gatewayv1.GatewayClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "aws-alb", + }, + Spec: gatewayv1.GatewayClassSpec{ + ControllerName: awsLBCALBController, + }, + } + gwClient = gatewayfake.NewSimpleClientset(gatewayClass) + + controllerName, err := getGatewayClass(ctx, gwClient, "aws-alb") + Expect(err).NotTo(HaveOccurred()) + Expect(controllerName).To(Equal(awsLBCALBController)) + }) + + It("should return AWS LBC NLB controller name", func() { + gatewayClass := &gatewayv1.GatewayClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "aws-nlb", + }, + Spec: 
gatewayv1.GatewayClassSpec{ + ControllerName: awsLBCNLBController, + }, + } + gwClient = gatewayfake.NewSimpleClientset(gatewayClass) + + controllerName, err := getGatewayClass(ctx, gwClient, "aws-nlb") + Expect(err).NotTo(HaveOccurred()) + Expect(controllerName).To(Equal(awsLBCNLBController)) + }) + + It("should return non-AWS controller name", func() { + gatewayClass := &gatewayv1.GatewayClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "istio", + }, + Spec: gatewayv1.GatewayClassSpec{ + ControllerName: "istio.io/gateway-controller", + }, + } + gwClient = gatewayfake.NewSimpleClientset(gatewayClass) + + controllerName, err := getGatewayClass(ctx, gwClient, "istio") + Expect(err).NotTo(HaveOccurred()) + Expect(controllerName).To(Equal("istio.io/gateway-controller")) + }) + + It("should return empty string when GatewayClass is not found", func() { + gwClient = gatewayfake.NewSimpleClientset() + + controllerName, err := getGatewayClass(ctx, gwClient, "non-existent") + Expect(err).NotTo(HaveOccurred()) + Expect(controllerName).To(BeEmpty()) + }) + + It("should return empty string when gatewayClassName is empty", func() { + gwClient = gatewayfake.NewSimpleClientset() + + controllerName, err := getGatewayClass(ctx, gwClient, "") + Expect(err).NotTo(HaveOccurred()) + Expect(controllerName).To(BeEmpty()) + }) + }) + + Describe("isAWSLoadBalancerController", func() { + It("should return true for AWS LBC ALB controller", func() { + Expect(isAWSLoadBalancerController(awsLBCALBController)).To(BeTrue()) + }) + + It("should return true for AWS LBC NLB controller", func() { + Expect(isAWSLoadBalancerController(awsLBCNLBController)).To(BeTrue()) + }) + + It("should return false for non-AWS controllers", func() { + testCases := []string{ + "istio.io/gateway-controller", + "nginx.org/gateway-controller", + "traefik.io/gateway-controller", + "kong.io/gateway-controller", + "", + "gateway.k8s.aws/other", + "gateway.k8s.aws", + } + + for _, controllerName := range testCases { + 
Expect(isAWSLoadBalancerController(controllerName)).To(BeFalse(), + "Controller name: %s should not be identified as AWS LBC", controllerName) + } + }) + }) + + Describe("getGatewayLBName", func() { + Context("valid Gateway DNS names", func() { + It("should parse external ALB DNS name", func() { + addresses := []string{"k8s-default-testgw-abc123.us-west-2.elb.amazonaws.com"} + name, err := getGatewayLBName(addresses) + Expect(err).NotTo(HaveOccurred()) + Expect(name).To(Equal("k8s-default-testgw")) + }) + + It("should parse internal ALB DNS name", func() { + addresses := []string{"internal-k8s-default-testgw-xyz789.eu-central-1.elb.amazonaws.com"} + name, err := getGatewayLBName(addresses) + Expect(err).NotTo(HaveOccurred()) + Expect(name).To(Equal("k8s-default-testgw")) + }) + + It("should parse NLB DNS name", func() { + addresses := []string{"k8s-kube-system-gateway-def456.us-east-1.elb.amazonaws.com"} + name, err := getGatewayLBName(addresses) + Expect(err).NotTo(HaveOccurred()) + Expect(name).To(Equal("k8s-kube-system-gateway")) + }) + + It("should parse internal NLB DNS name", func() { + addresses := []string{"internal-k8s-prod-api-gw-hash123.ap-southeast-1.elb.amazonaws.com"} + name, err := getGatewayLBName(addresses) + Expect(err).NotTo(HaveOccurred()) + Expect(name).To(Equal("k8s-prod-api-gw")) + }) + }) + + Context("error cases", func() { + It("should return error for empty addresses", func() { + _, err := getGatewayLBName([]string{}) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("no addresses provided")) + }) + + It("should return error for invalid format", func() { + _, err := getGatewayLBName([]string{""}) + Expect(err).To(HaveOccurred()) + }) + + It("should return error for name exceeding 32 characters", func() { + // Create a very long hostname that would result in >32 char name + longName := "k8s-verylongnamespace-verylonggatewayname-hash123.us-west-2.elb.amazonaws.com" + _, err := getGatewayLBName([]string{longName}) + 
Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("exceeds maximum of 32 characters")) + }) + }) + }) +}) diff --git a/pkg/kubernetes/client.go b/pkg/kubernetes/client.go index d4a192150b..c86db5a3d1 100644 --- a/pkg/kubernetes/client.go +++ b/pkg/kubernetes/client.go @@ -162,6 +162,9 @@ func (c *RawClient) ServerVersion() (string, error) { // ClientSet returns the underlying ClientSet func (c *RawClient) ClientSet() Interface { return c.clientSet } +// RestConfig returns the underlying REST config +func (c *RawClient) RestConfig() *restclient.Config { return c.config } + // NewHelperFor construct a raw client helper instance for a give gvk // (it's based on k8s.io/kubernetes/pkg/kubectl/cmd/util/factory_client_access.go) func (c *RawClient) NewHelperFor(gvk schema.GroupVersionKind) (*resource.Helper, error) {