Skip to content

Commit ee4d2d4

Browse files
committed
feat(build): replace kustomize with helm
1 parent 560737a commit ee4d2d4

File tree

3 files changed

+45
-193
lines changed

3 files changed

+45
-193
lines changed

Makefile

Lines changed: 33 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -61,10 +61,6 @@ vet: ## Run go vet against code.
6161
test: manifests generate fmt vet setup-envtest ## Run tests.
6262
KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" go test -v $$(go list ./... | grep -v /e2e) -coverprofile cover.out
6363

64-
# TODO(user): To use a different vendor for e2e tests, modify the setup under 'tests/e2e'.
65-
# The default setup assumes Kind is pre-installed and builds/loads the Manager Docker image locally.
66-
# CertManager is installed by default; skip with:
67-
# - CERT_MANAGER_INSTALL_SKIP=true
6864
.PHONY: test-e2e
6965
test-e2e: manifests generate fmt vet ## Run the e2e tests. Expected an isolated environment using Kind.
7066
@command -v kind >/dev/null 2>&1 || { \
@@ -75,7 +71,20 @@ test-e2e: manifests generate fmt vet ## Run the e2e tests. Expected an isolated
7571
echo "No Kind cluster is running. Please start a Kind cluster before running the e2e tests."; \
7672
exit 1; \
7773
}
78-
go test ./test/e2e/ -v -ginkgo.v
74+
CERT_MANAGER_INSTALL_SKIP=true go test ./test/e2e/ -v -ginkgo.v
75+
76+
.PHONY: e2e-deploy
77+
e2e-deploy: manifests helm
78+
$(HELM) template -n managed-postgres-operator-system managed-postgres-operator-controller-manager deploy/charts/managed-postgres-operator \
79+
--set podLabels.control-plane=controller-manager \
80+
--set image.repository=$(firstword $(subst :, ,$(IMG))) \
81+
--set image.tag=$(lastword $(subst :, ,$(IMG))) \
82+
| $(KUBECTL) apply -n managed-postgres-operator-system -f -
83+
84+
.PHONY: e2e-undeploy
85+
e2e-undeploy: manifests helm
86+
$(HELM) template -n managed-postgres-operator-system managed-postgres-operator \
87+
| $(KUBECTL) delete -n managed-postgres-operator-system -f -
7988

8089
.PHONY: lint
8190
lint: golangci-lint ## Run golangci-lint linter
@@ -104,11 +113,11 @@ run: manifests generate fmt vet ## Run a controller from your host.
104113
# More info: https://docs.docker.com/develop/develop-images/build_enhancements/
105114
.PHONY: docker-build
106115
docker-build: ## Build docker image with the manager.
107-
$(CONTAINER_TOOL) build -t ${IMG} .
116+
$(CONTAINER_TOOL) build -t $(IMG) .
108117

109118
.PHONY: docker-push
110119
docker-push: ## Push docker image with the manager.
111-
$(CONTAINER_TOOL) push ${IMG}
120+
$(CONTAINER_TOOL) push $(IMG)
112121

113122
# PLATFORMS defines the target platforms for the manager image be built to provide support to multiple
114123
# architectures. (i.e. make docker-buildx IMG=myregistry/mypoperator:0.0.1). To use this option you need to:
@@ -123,38 +132,33 @@ docker-buildx: ## Build and push docker image for the manager for cross-platform
123132
sed -e '1 s/\(^FROM\)/FROM --platform=\$$\{BUILDPLATFORM\}/; t' -e ' 1,// s//FROM --platform=\$$\{BUILDPLATFORM\}/' Dockerfile > Dockerfile.cross
124133
- $(CONTAINER_TOOL) buildx create --name managed-postgres-operator-builder
125134
$(CONTAINER_TOOL) buildx use managed-postgres-operator-builder
126-
- $(CONTAINER_TOOL) buildx build --push --platform=$(PLATFORMS) --tag ${IMG} -f Dockerfile.cross .
135+
- $(CONTAINER_TOOL) buildx build --push --platform=$(PLATFORMS) --tag $(IMG) -f Dockerfile.cross .
127136
- $(CONTAINER_TOOL) buildx rm managed-postgres-operator-builder
128137
rm Dockerfile.cross
129138

130-
.PHONY: build-installer
131-
build-installer: manifests generate kustomize ## Generate a consolidated YAML with CRDs and deployment.
132-
mkdir -p dist
133-
cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
134-
$(KUSTOMIZE) build config/default > dist/install.yaml
135-
136139
##@ Deployment
137140

138141
ifndef ignore-not-found
139142
ignore-not-found = false
140143
endif
141144

142145
.PHONY: install
143-
install: manifests kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config.
144-
$(KUSTOMIZE) build config/crd | $(KUBECTL) apply -f -
146+
install: manifests ## Install CRDs into the K8s cluster specified in ~/.kube/config.
147+
$(KUBECTL) apply -f deploy/crds/
145148

146149
.PHONY: uninstall
147-
uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
148-
$(KUSTOMIZE) build config/crd | $(KUBECTL) delete --ignore-not-found=$(ignore-not-found) -f -
150+
uninstall: manifests ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
151+
$(KUBECTL) delete --ignore-not-found=$(ignore-not-found) -f deploy/crds/
149152

150153
.PHONY: deploy
151-
deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config.
152-
cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
153-
$(KUSTOMIZE) build config/default | $(KUBECTL) apply -f -
154+
deploy: manifests helm ## Deploy controller to the K8s cluster specified in ~/.kube/config.
155+
$(HELM) install managed-postgres-operator-controller-manager deploy/charts/managed-postgres-operator \
156+
| $(KUBECTL) apply -n managed-postgres-operator-system -f -
154157

155158
.PHONY: undeploy
156-
undeploy: kustomize ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
157-
$(KUSTOMIZE) build config/default | $(KUBECTL) delete --ignore-not-found=$(ignore-not-found) -f -
159+
undeploy: helm ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
160+
$(HELM) template managed-postgres-operator deploy/charts/managed-postgres-operator \
161+
| $(KUBECTL) delete -n managed-postgres-operator-system -f -
158162

159163
##@ Dependencies
160164

@@ -165,24 +169,24 @@ $(LOCALBIN):
165169

166170
## Tool Binaries
167171
KUBECTL ?= kubectl
168-
KUSTOMIZE ?= $(LOCALBIN)/kustomize
172+
HELM ?= $(LOCALBIN)/helm
169173
CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen
170174
ENVTEST ?= $(LOCALBIN)/setup-envtest
171175
GOLANGCI_LINT = $(LOCALBIN)/golangci-lint
172176

173177
## Tool Versions
174-
KUSTOMIZE_VERSION ?= v5.5.0
178+
HELM_VERSION ?= v4.0.0
175179
CONTROLLER_TOOLS_VERSION ?= v0.17.2
176180
#ENVTEST_VERSION is the version of controller-runtime release branch to fetch the envtest setup script (i.e. release-0.20)
177181
ENVTEST_VERSION ?= $(shell go list -m -f "{{ .Version }}" sigs.k8s.io/controller-runtime | awk -F'[v.]' '{printf "release-%d.%d", $$2, $$3}')
178182
#ENVTEST_K8S_VERSION is the version of Kubernetes to use for setting up ENVTEST binaries (i.e. 1.31)
179183
ENVTEST_K8S_VERSION ?= $(shell go list -m -f "{{ .Version }}" k8s.io/api | awk -F'[v.]' '{printf "1.%d", $$3}')
180184
GOLANGCI_LINT_VERSION ?= v1.63.4
181185

182-
.PHONY: kustomize
183-
kustomize: $(KUSTOMIZE) ## Download kustomize locally if necessary.
184-
$(KUSTOMIZE): $(LOCALBIN)
185-
$(call go-install-tool,$(KUSTOMIZE),sigs.k8s.io/kustomize/kustomize/v5,$(KUSTOMIZE_VERSION))
186+
.PHONY: helm
187+
helm: $(HELM) ## Download helm locally if necessary.
188+
$(HELM): $(LOCALBIN)
189+
$(call go-install-tool,$(HELM),helm.sh/helm/v4/cmd/helm,$(HELM_VERSION))
186190

187191
.PHONY: controller-gen
188192
controller-gen: $(CONTROLLER_GEN) ## Download controller-gen locally if necessary.

deploy/charts/managed-postgres-operator/values.yaml

Lines changed: 10 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -22,9 +22,16 @@ serviceAccount:
2222
podAnnotations: {}
2323
podLabels: {}
2424

25-
podSecurityContext: {}
26-
27-
securityContext: {}
25+
podSecurityContext:
26+
runAsNonRoot: true
27+
runAsUser: 1000
28+
seccompProfile:
29+
type: RuntimeDefault
30+
31+
securityContext:
32+
allowPrivilegeEscalation: false
33+
capabilities:
34+
drop: ["ALL"]
2835

2936
resources: {}
3037

test/e2e/e2e_test.go

Lines changed: 2 additions & 161 deletions
Original file line numberDiff line numberDiff line change
@@ -17,11 +17,8 @@ limitations under the License.
1717
package e2e
1818

1919
import (
20-
"encoding/json"
2120
"fmt"
22-
"os"
2321
"os/exec"
24-
"path/filepath"
2522
"time"
2623

2724
. "github.com/onsi/ginkgo/v2"
@@ -66,7 +63,7 @@ var _ = Describe("Manager", Ordered, func() {
6663
Expect(err).NotTo(HaveOccurred(), "Failed to install CRDs")
6764

6865
By("deploying the controller-manager")
69-
cmd = exec.Command("make", "deploy", fmt.Sprintf("IMG=%s", projectImage))
66+
cmd = exec.Command("make", "e2e-deploy", fmt.Sprintf("IMG=%s", projectImage))
7067
_, err = utils.Run(cmd)
7168
Expect(err).NotTo(HaveOccurred(), "Failed to deploy the controller-manager")
7269
})
@@ -79,7 +76,7 @@ var _ = Describe("Manager", Ordered, func() {
7976
_, _ = utils.Run(cmd)
8077

8178
By("undeploying the controller-manager")
82-
cmd = exec.Command("make", "undeploy")
79+
cmd = exec.Command("make", "e2e-undeploy")
8380
_, _ = utils.Run(cmd)
8481

8582
By("uninstalling CRDs")
@@ -169,161 +166,5 @@ var _ = Describe("Manager", Ordered, func() {
169166
}
170167
Eventually(verifyControllerUp).Should(Succeed())
171168
})
172-
173-
It("should ensure the metrics endpoint is serving metrics", func() {
174-
By("creating a ClusterRoleBinding for the service account to allow access to metrics")
175-
cmd := exec.Command("kubectl", "create", "clusterrolebinding", metricsRoleBindingName,
176-
"--clusterrole=managed-postgres-operator-metrics-reader",
177-
fmt.Sprintf("--serviceaccount=%s:%s", namespace, serviceAccountName),
178-
)
179-
_, err := utils.Run(cmd)
180-
Expect(err).NotTo(HaveOccurred(), "Failed to create ClusterRoleBinding")
181-
182-
By("validating that the metrics service is available")
183-
cmd = exec.Command("kubectl", "get", "service", metricsServiceName, "-n", namespace)
184-
_, err = utils.Run(cmd)
185-
Expect(err).NotTo(HaveOccurred(), "Metrics service should exist")
186-
187-
By("getting the service account token")
188-
token, err := serviceAccountToken()
189-
Expect(err).NotTo(HaveOccurred())
190-
Expect(token).NotTo(BeEmpty())
191-
192-
By("waiting for the metrics endpoint to be ready")
193-
verifyMetricsEndpointReady := func(g Gomega) {
194-
cmd := exec.Command("kubectl", "get", "endpoints", metricsServiceName, "-n", namespace)
195-
output, err := utils.Run(cmd)
196-
g.Expect(err).NotTo(HaveOccurred())
197-
g.Expect(output).To(ContainSubstring("8443"), "Metrics endpoint is not ready")
198-
}
199-
Eventually(verifyMetricsEndpointReady).Should(Succeed())
200-
201-
By("verifying that the controller manager is serving the metrics server")
202-
verifyMetricsServerStarted := func(g Gomega) {
203-
cmd := exec.Command("kubectl", "logs", controllerPodName, "-n", namespace)
204-
output, err := utils.Run(cmd)
205-
g.Expect(err).NotTo(HaveOccurred())
206-
g.Expect(output).To(ContainSubstring("controller-runtime.metrics\tServing metrics server"),
207-
"Metrics server not yet started")
208-
}
209-
Eventually(verifyMetricsServerStarted).Should(Succeed())
210-
211-
By("creating the curl-metrics pod to access the metrics endpoint")
212-
cmd = exec.Command("kubectl", "run", "curl-metrics", "--restart=Never",
213-
"--namespace", namespace,
214-
"--image=curlimages/curl:latest",
215-
"--overrides",
216-
fmt.Sprintf(`{
217-
"spec": {
218-
"containers": [{
219-
"name": "curl",
220-
"image": "curlimages/curl:latest",
221-
"command": ["/bin/sh", "-c"],
222-
"args": ["curl -v -k -H 'Authorization: Bearer %s' https://%s.%s.svc.cluster.local:8443/metrics"],
223-
"securityContext": {
224-
"allowPrivilegeEscalation": false,
225-
"capabilities": {
226-
"drop": ["ALL"]
227-
},
228-
"runAsNonRoot": true,
229-
"runAsUser": 1000,
230-
"seccompProfile": {
231-
"type": "RuntimeDefault"
232-
}
233-
}
234-
}],
235-
"serviceAccount": "%s"
236-
}
237-
}`, token, metricsServiceName, namespace, serviceAccountName))
238-
_, err = utils.Run(cmd)
239-
Expect(err).NotTo(HaveOccurred(), "Failed to create curl-metrics pod")
240-
241-
By("waiting for the curl-metrics pod to complete.")
242-
verifyCurlUp := func(g Gomega) {
243-
cmd := exec.Command("kubectl", "get", "pods", "curl-metrics",
244-
"-o", "jsonpath={.status.phase}",
245-
"-n", namespace)
246-
output, err := utils.Run(cmd)
247-
g.Expect(err).NotTo(HaveOccurred())
248-
g.Expect(output).To(Equal("Succeeded"), "curl pod in wrong status")
249-
}
250-
Eventually(verifyCurlUp, 5*time.Minute).Should(Succeed())
251-
252-
By("getting the metrics by checking curl-metrics logs")
253-
metricsOutput := getMetricsOutput()
254-
Expect(metricsOutput).To(ContainSubstring(
255-
"controller_runtime_reconcile_total",
256-
))
257-
})
258-
259-
// +kubebuilder:scaffold:e2e-webhooks-checks
260-
261-
// TODO: Customize the e2e test suite with scenarios specific to your project.
262-
// Consider applying sample/CR(s) and check their status and/or verifying
263-
// the reconciliation by using the metrics, i.e.:
264-
// metricsOutput := getMetricsOutput()
265-
// Expect(metricsOutput).To(ContainSubstring(
266-
// fmt.Sprintf(`controller_runtime_reconcile_total{controller="%s",result="success"} 1`,
267-
// strings.ToLower(<Kind>),
268-
// ))
269169
})
270170
})
271-
272-
// serviceAccountToken returns a token for the specified service account in the given namespace.
273-
// It uses the Kubernetes TokenRequest API to generate a token by directly sending a request
274-
// and parsing the resulting token from the API response.
275-
func serviceAccountToken() (string, error) {
276-
const tokenRequestRawString = `{
277-
"apiVersion": "authentication.k8s.io/v1",
278-
"kind": "TokenRequest"
279-
}`
280-
281-
// Temporary file to store the token request
282-
secretName := fmt.Sprintf("%s-token-request", serviceAccountName)
283-
tokenRequestFile := filepath.Join("/tmp", secretName)
284-
err := os.WriteFile(tokenRequestFile, []byte(tokenRequestRawString), os.FileMode(0o644))
285-
if err != nil {
286-
return "", err
287-
}
288-
289-
var out string
290-
verifyTokenCreation := func(g Gomega) {
291-
// Execute kubectl command to create the token
292-
cmd := exec.Command("kubectl", "create", "--raw", fmt.Sprintf(
293-
"/api/v1/namespaces/%s/serviceaccounts/%s/token",
294-
namespace,
295-
serviceAccountName,
296-
), "-f", tokenRequestFile)
297-
298-
output, err := cmd.CombinedOutput()
299-
g.Expect(err).NotTo(HaveOccurred())
300-
301-
// Parse the JSON output to extract the token
302-
var token tokenRequest
303-
err = json.Unmarshal(output, &token)
304-
g.Expect(err).NotTo(HaveOccurred())
305-
306-
out = token.Status.Token
307-
}
308-
Eventually(verifyTokenCreation).Should(Succeed())
309-
310-
return out, err
311-
}
312-
313-
// getMetricsOutput retrieves and returns the logs from the curl pod used to access the metrics endpoint.
314-
func getMetricsOutput() string {
315-
By("getting the curl-metrics logs")
316-
cmd := exec.Command("kubectl", "logs", "curl-metrics", "-n", namespace)
317-
metricsOutput, err := utils.Run(cmd)
318-
Expect(err).NotTo(HaveOccurred(), "Failed to retrieve logs from curl pod")
319-
Expect(metricsOutput).To(ContainSubstring("< HTTP/1.1 200 OK"))
320-
return metricsOutput
321-
}
322-
323-
// tokenRequest is a simplified representation of the Kubernetes TokenRequest API response,
324-
// containing only the token field that we need to extract.
325-
type tokenRequest struct {
326-
Status struct {
327-
Token string `json:"token"`
328-
} `json:"status"`
329-
}

0 commit comments

Comments (0)