diff --git a/.github/workflows/deploy.yml b/.github/workflows/deploy.yml index fdf26617a18..338531881a2 100644 --- a/.github/workflows/deploy.yml +++ b/.github/workflows/deploy.yml @@ -72,7 +72,7 @@ jobs: environment: deploy strategy: matrix: - id: ["operator-sdk", "helm-operator", "scorecard-test", "ansible-operator", "ansible-operator-2.11-preview", "scorecard-storage", "scorecard-untar"] + id: ["operator-sdk", "helm-operator", "scorecard-test", "ansible-operator", "ansible-operator-2.11-preview"] steps: - name: set up qemu diff --git a/.github/workflows/test-ansible.yml b/.github/workflows/test-ansible.yml index e0becd5efe0..c712a38f8a8 100644 --- a/.github/workflows/test-ansible.yml +++ b/.github/workflows/test-ansible.yml @@ -48,10 +48,12 @@ jobs: with: fetch-depth: 0 - run: sudo rm -rf /usr/local/bin/kustomize - - run: | + - uses: actions/setup-python@v4 + with: + python-version: '3.8' + - name: Run test e2e ansible molecule + run: | env - export PATH=/opt/python/3.8.12/bin:${PATH} - sudo apt-get install python3 python3-pip - sudo pip3 install --upgrade setuptools pip - sudo pip3 install ansible~=2.9.13 + pip3 install --user --upgrade setuptools pip + pip3 install --user ansible~=2.9.13 make test-e2e-ansible-molecule diff --git a/.golangci.yml b/.golangci.yml index 05de4e55484..53c9dc6c40d 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -5,6 +5,7 @@ linters: - nakedret - misspell - ineffassign + - ginkgolinter - goconst - goimports - errcheck diff --git a/Makefile b/Makefile index ff8e52c96be..cdeffaad6f0 100644 --- a/Makefile +++ b/Makefile @@ -4,7 +4,7 @@ SHELL = /bin/bash # This value must be updated to the release tag of the most recent release, a change that must # occur in the release commit. IMAGE_VERSION will be removed once each subproject that uses this # version is moved to a separate repo and release process. 
-export IMAGE_VERSION = v1.28.0 +export IMAGE_VERSION = v1.29.0 # Build-time variables to inject into binaries export SIMPLE_VERSION = $(shell (test "$(shell git describe --tags)" = "$(shell git describe --tags --abbrev=0)" && echo $(shell git describe --tags)) || echo $(shell git describe --tags --abbrev=0)+git) export GIT_VERSION = $(shell git describe --dirty --tags --always) @@ -92,7 +92,7 @@ build/scorecard-test build/scorecard-test-kuttl build/custom-scorecard-tests: # Convenience wrapper for building all remotely hosted images. .PHONY: image-build -IMAGE_TARGET_LIST = operator-sdk helm-operator ansible-operator ansible-operator-2.11-preview scorecard-test scorecard-test-kuttl scorecard-untar scorecard-storage +IMAGE_TARGET_LIST = operator-sdk helm-operator ansible-operator ansible-operator-2.11-preview scorecard-test scorecard-test-kuttl image-build: $(foreach i,$(IMAGE_TARGET_LIST),image/$(i)) ## Build all images. # Convenience wrapper for building dependency base images. diff --git a/changelog/fragments/10-mark-unsafe.yaml b/changelog/fragments/10-mark-unsafe.yaml new file mode 100644 index 00000000000..1be431ff0a0 --- /dev/null +++ b/changelog/fragments/10-mark-unsafe.yaml @@ -0,0 +1,25 @@ +# entries is a list of entries to include in +# release notes and/or the migration guide +entries: + - description: > + markUnsafe now correctly marks as unsafe the spec extra variable. + + # kind is one of: + # - addition + # - change + # - deprecation + # - removal + # - bugfix + kind: "bugfix" + + # Is this a breaking change? + breaking: false + + # NOTE: ONLY USE `pull_request_override` WHEN ADDING THIS + # FILE FOR A PREVIOUSLY MERGED PULL_REQUEST! + # + # The generator auto-detects the PR number from the commit + # message in which this file was originally added. + # + # What is the pull request number (without the "#")? 
+ # pull_request_override: 0 diff --git a/changelog/generated/v1.29.0.md b/changelog/generated/v1.29.0.md new file mode 100644 index 00000000000..ec8cd0c2e47 --- /dev/null +++ b/changelog/generated/v1.29.0.md @@ -0,0 +1,13 @@ +## v1.29.0 + +### Changes + +- (scorecard): Update kuttl to v0.15.0 in the scorecard-test-kuttl image. ([#6401](https://github.com/operator-framework/operator-sdk/pull/6401)) +- (ansible/v1): Bump the golang base image version in the ansible-operator Dockerfiles from 1.18 to 1.19. ([#6398](https://github.com/operator-framework/operator-sdk/pull/6398)) +- (operator-sdk run bundle): Compress the bundle content, to avoid the configMap exceed max length error. The error will look like this: +`... ConfigMap ... is invalid: []: Too long: must have at most 1048576 bytes`. +Fixes issue [#6323](https://github.com/operator-framework/operator-sdk/issues/6323). ([#6408](https://github.com/operator-framework/operator-sdk/pull/6408)) + +### Bug Fixes + +- (docs): Update the go version in the developer guide. The documentation wasn't updated when the go version was bumped to v1.19. 
([#6101](https://github.com/operator-framework/operator-sdk/pull/6101)) diff --git a/go.mod b/go.mod index a5660d7a405..2313248b685 100644 --- a/go.mod +++ b/go.mod @@ -14,7 +14,7 @@ require ( github.com/onsi/ginkgo/v2 v2.7.0 github.com/onsi/gomega v1.24.2 github.com/operator-framework/api v0.17.4-0.20230223191600-0131a6301e42 - github.com/operator-framework/helm-operator-plugins v0.0.12-0.20230307164205-6e30bde28688 + github.com/operator-framework/helm-operator-plugins v0.0.12-0.20230413193425-4632388adc61 github.com/operator-framework/java-operator-plugins v0.7.1-0.20230306190439-0eed476d2b75 github.com/operator-framework/operator-lib v0.11.1-0.20230306195046-28cadc6b6055 github.com/operator-framework/operator-manifest-tools v0.2.3-0.20230227155221-caa8b9e1ab12 diff --git a/go.sum b/go.sum index 88cae94217d..48871937300 100644 --- a/go.sum +++ b/go.sum @@ -807,8 +807,8 @@ github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xA github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= github.com/operator-framework/api v0.17.4-0.20230223191600-0131a6301e42 h1:d/Pnr19TnmIq3zQ6ebewC+5jt5zqYbRkvYd37YZENQY= github.com/operator-framework/api v0.17.4-0.20230223191600-0131a6301e42/go.mod h1:l/cuwtPxkVUY7fzYgdust2m9tlmb8I4pOvbsUufRb24= -github.com/operator-framework/helm-operator-plugins v0.0.12-0.20230307164205-6e30bde28688 h1:cT4lGfZjYupUkxzieyfC/7RadF7Vk1FybgWCnViutnM= -github.com/operator-framework/helm-operator-plugins v0.0.12-0.20230307164205-6e30bde28688/go.mod h1:QpVyiSOKGbWADyNRl7LvMlRuuMGrWXJQdEYyHPQWMUg= +github.com/operator-framework/helm-operator-plugins v0.0.12-0.20230413193425-4632388adc61 h1:FPO2hS4HNIU2pzWeX2KusKxqDFeGIURRMkxRtn/i570= +github.com/operator-framework/helm-operator-plugins v0.0.12-0.20230413193425-4632388adc61/go.mod h1:QpVyiSOKGbWADyNRl7LvMlRuuMGrWXJQdEYyHPQWMUg= github.com/operator-framework/java-operator-plugins v0.7.1-0.20230306190439-0eed476d2b75 
h1:mjMid39qs1lEXpIldVmj7sa1wtuZvYge8oHkT0qOY0Y= github.com/operator-framework/java-operator-plugins v0.7.1-0.20230306190439-0eed476d2b75/go.mod h1:oQTt35EEUrDY8ca/kRWYz5omWsVhk9Sj78vKlHFqxjM= github.com/operator-framework/operator-lib v0.11.1-0.20230306195046-28cadc6b6055 h1:G9N8wEf9qDZ/4Fj5cbIejKUoFOYta0v72Yg8tPAdvc0= diff --git a/hack/check-links.sh b/hack/check-links.sh index 83651e8ee51..b7b5a84824e 100755 --- a/hack/check-links.sh +++ b/hack/check-links.sh @@ -16,4 +16,4 @@ docker run --rm -v sdk-html:/target klakegg/html-proofer:3.18.8 /target \ --http-status-ignore 429 \ --allow_hash_href \ --typhoeus '{"followlocation":true,"connecttimeout":600,"timeout":600}' \ - --url-ignore "/github.com\/operator-framework\/operator-sdk\/edit\/master\//,https://docs.github.com/en/get-started/quickstart/fork-a-repo" + --url-ignore "/github.com\/operator-framework\/operator-sdk\/edit\/master\//,https://docs.github.com/en/get-started/quickstart/fork-a-repo,https://github.com/operator-framework/operator-sdk/settings/access" diff --git a/hack/generate/samples/internal/go/memcached-with-customization/memcached_with_customization.go b/hack/generate/samples/internal/go/memcached-with-customization/memcached_with_customization.go index 69c48546f94..03637208f86 100644 --- a/hack/generate/samples/internal/go/memcached-with-customization/memcached_with_customization.go +++ b/hack/generate/samples/internal/go/memcached-with-customization/memcached_with_customization.go @@ -144,6 +144,8 @@ func (mh *Memcached) Run() { mh.uncommentManifestsKustomizationv3() } + mh.customizingMain() + mh.implementingE2ETests() cmd := exec.Command("go", "mod", "tidy") @@ -480,7 +482,7 @@ func (mh *Memcached) implementingMonitoring() { mh.customizingController() log.Infof("customizing Main") - mh.customizingMain() + mh.customizingMainMonitoring() log.Infof("customizing Dockerfile") mh.customizingDockerfile() @@ -784,10 +786,26 @@ func (mh *Memcached) customizingController() { pkg.CheckError("adding metric 
incrementation", err) } -// customizingMain will customize main.go to register metrics +// customizingMain will add comments to main func (mh *Memcached) customizingMain() { var mainPath string + if mh.isV3() { + mainPath = filepath.Join(mh.ctx.Dir, "main.go") + } else { + mainPath = filepath.Join(mh.ctx.Dir, "cmd", "main.go") + } + + err := kbutil.InsertCode(mainPath, + "Scheme: mgr.GetScheme(),", + mainRecorderFragment) + pkg.CheckError("adding recorder fragment", err) +} + +// customizingMainMonitoring will customize main.go to register metrics +func (mh *Memcached) customizingMainMonitoring() { + var mainPath string + marker := "\"github.com/example/memcached-operator/" if mh.isV3() { mainPath = filepath.Join(mh.ctx.Dir, "main.go") @@ -1385,6 +1403,10 @@ const controllerPrometheusRuleFragment = ` memcached := &cachev1alpha1.Memcached{} err = r.Get(ctx, req.NamespacedName, memcached)` +const mainRecorderFragment = ` +// Add a Recorder to the reconciler. +// This allows the operator author to emit events during reconcilliation.` + const monitoringv1ImportFragment = ` monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" diff --git a/images/ansible-operator-2.11-preview/Dockerfile b/images/ansible-operator-2.11-preview/Dockerfile index bb7daa368b0..c59d8925530 100644 --- a/images/ansible-operator-2.11-preview/Dockerfile +++ b/images/ansible-operator-2.11-preview/Dockerfile @@ -1,5 +1,5 @@ # Build the manager binary -FROM --platform=$BUILDPLATFORM golang:1.18 as builder +FROM --platform=$BUILDPLATFORM golang:1.19 as builder ARG TARGETARCH WORKDIR /workspace diff --git a/images/ansible-operator/Dockerfile b/images/ansible-operator/Dockerfile index e8c1898567f..0ad97ad9375 100644 --- a/images/ansible-operator/Dockerfile +++ b/images/ansible-operator/Dockerfile @@ -1,5 +1,5 @@ # Build the manager binary -FROM --platform=$BUILDPLATFORM golang:1.18 as builder +FROM --platform=$BUILDPLATFORM golang:1.19 as builder ARG TARGETARCH WORKDIR 
/workspace diff --git a/images/scorecard-test-kuttl/Dockerfile b/images/scorecard-test-kuttl/Dockerfile index 9a83cd8362a..559c7a73365 100644 --- a/images/scorecard-test-kuttl/Dockerfile +++ b/images/scorecard-test-kuttl/Dockerfile @@ -18,8 +18,8 @@ COPY . . RUN GOOS=linux GOARCH=$TARGETARCH make build/scorecard-test-kuttl # Final image. -#FROM kudobuilder/kuttl@sha256:924a709a1d2c6bede8815415ea5d5be640b506ec5aeaddc68acb443ae8ee7926 -FROM kudobuilder/kuttl:v0.12.1 +#FROM kudobuilder/kuttl@sha256:8d4dad161521450db95f88fe0e62487cc6587c5818df2a4e750fb9e54c082170 +FROM kudobuilder/kuttl:v0.15.0 ENV HOME=/opt/scorecard-test-kuttl \ USER_NAME=scorecard-test-kuttl \ @@ -29,6 +29,8 @@ ENV HOME=/opt/scorecard-test-kuttl \ RUN echo "${USER_NAME}:x:${USER_UID}:0:${USER_NAME} user:${HOME}:/sbin/nologin" >> /etc/passwd WORKDIR ${HOME} +# kuttl writes a kubeconfig file in the current working directory +RUN chmod g+w "${HOME}" COPY --from=builder /workspace/build/scorecard-test-kuttl /usr/local/bin/scorecard-test-kuttl COPY --from=builder /workspace/images/scorecard-test-kuttl/entrypoint /usr/local/bin/entrypoint diff --git a/images/scorecard-test-kuttl/main.go b/images/scorecard-test-kuttl/main.go index 3a0080133ff..46841a5bff6 100644 --- a/images/scorecard-test-kuttl/main.go +++ b/images/scorecard-test-kuttl/main.go @@ -31,10 +31,10 @@ import ( // scorecard v1alpha3.TestStatus json format. // // The kuttl output is expected to be produced by kubectl-kuttl -// at /tmp/kuttl-test.json. +// at /tmp/kuttl-report.json. 
func main() { - jsonFile, err := os.Open("/tmp/kuttl-test.json") + jsonFile, err := os.Open("/tmp/kuttl-report.json") if err != nil { printErrorStatus(fmt.Errorf("could not open kuttl report %v", err)) return diff --git a/internal/ansible/handler/logging_enqueue_annotation_test.go b/internal/ansible/handler/logging_enqueue_annotation_test.go index 38ae5ef563a..4d7944a64b1 100644 --- a/internal/ansible/handler/logging_enqueue_annotation_test.go +++ b/internal/ansible/handler/logging_enqueue_annotation_test.go @@ -95,8 +95,7 @@ var _ = Describe("LoggingEnqueueRequestForAnnotation", func() { } repl.SetGroupVersionKind(schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "ReplicaSet"}) - err := handler.SetOwnerAnnotations(podOwner, repl) - Expect(err).To(BeNil()) + Expect(handler.SetOwnerAnnotations(podOwner, repl)).To(Succeed()) evt := event.CreateEvent{ Object: repl, @@ -280,8 +279,7 @@ var _ = Describe("LoggingEnqueueRequestForAnnotation", func() { newPod.Name = pod.Name + "2" newPod.Namespace = pod.Namespace + "2" - err := handler.SetOwnerAnnotations(podOwner, pod) - Expect(err).To(BeNil()) + Expect(handler.SetOwnerAnnotations(podOwner, pod)).To(Succeed()) evt := event.UpdateEvent{ ObjectOld: pod, @@ -395,8 +393,7 @@ var _ = Describe("LoggingEnqueueRequestForAnnotation", func() { newPod.Name = pod.Name + "2" newPod.Namespace = pod.Namespace + "2" - err := handler.SetOwnerAnnotations(podOwner, pod) - Expect(err).To(BeNil()) + Expect(handler.SetOwnerAnnotations(podOwner, pod)).To(Succeed()) var podOwner2 = &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -406,8 +403,7 @@ var _ = Describe("LoggingEnqueueRequestForAnnotation", func() { } podOwner2.SetGroupVersionKind(schema.GroupVersionKind{Group: "", Kind: "Pod"}) - err = handler.SetOwnerAnnotations(podOwner2, newPod) - Expect(err).To(BeNil()) + Expect(handler.SetOwnerAnnotations(podOwner2, newPod)).To(Succeed()) evt := event.UpdateEvent{ ObjectOld: pod, diff --git 
a/internal/ansible/handler/logging_enqueue_object_test.go b/internal/ansible/handler/logging_enqueue_object_test.go index b1141e95167..00420d6ddb5 100644 --- a/internal/ansible/handler/logging_enqueue_object_test.go +++ b/internal/ansible/handler/logging_enqueue_object_test.go @@ -76,7 +76,7 @@ var _ = Describe("LoggingEnqueueRequestForObject", func() { // verify metrics gauges, err := metrics.Registry.Gather() Expect(err).NotTo(HaveOccurred()) - Expect(len(gauges)).To(Equal(1)) + Expect(gauges).To(HaveLen(1)) assertMetrics(gauges[0], 1, []*corev1.Pod{pod}) }) }) @@ -119,7 +119,7 @@ var _ = Describe("LoggingEnqueueRequestForObject", func() { // verify metrics gauges, err := metrics.Registry.Gather() Expect(err).NotTo(HaveOccurred()) - Expect(len(gauges)).To(Equal(0)) + Expect(gauges).To(BeEmpty()) }) }) Context("when a gauge does not exist", func() { @@ -148,7 +148,7 @@ var _ = Describe("LoggingEnqueueRequestForObject", func() { // verify metrics gauges, err := metrics.Registry.Gather() Expect(err).NotTo(HaveOccurred()) - Expect(len(gauges)).To(Equal(0)) + Expect(gauges).To(BeEmpty()) }) }) @@ -187,36 +187,31 @@ var _ = Describe("LoggingEnqueueRequestForObject", func() { // verify metrics gauges, err := metrics.Registry.Gather() Expect(err).NotTo(HaveOccurred()) - Expect(len(gauges)).To(Equal(1)) + Expect(gauges).To(HaveLen(1)) assertMetrics(gauges[0], 2, []*corev1.Pod{newpod, pod}) }) }) }) func assertMetrics(gauge *dto.MetricFamily, count int, pods []*corev1.Pod) { - // need variables to compare the pointers - name := "name" - namespace := "namespace" - g := "group" - v := "version" - k := "kind" - - Expect(len(gauge.Metric)).To(Equal(count)) + Expect(gauge.Metric).To(HaveLen(count)) for i := 0; i < count; i++ { Expect(*gauge.Metric[i].Gauge.Value).To(Equal(float64(pods[i].GetObjectMeta().GetCreationTimestamp().UTC().Unix()))) for _, l := range gauge.Metric[i].Label { - switch l.Name { - case &name: - Expect(l.Value).To(Equal(pods[i].GetObjectMeta().GetName())) - 
case &namespace: - Expect(l.Value).To(Equal(pods[i].GetObjectMeta().GetNamespace())) - case &g: - Expect(l.Value).To(Equal(pods[i].GetObjectKind().GroupVersionKind().Group)) - case &v: - Expect(l.Value).To(Equal(pods[i].GetObjectKind().GroupVersionKind().Version)) - case &k: - Expect(l.Value).To(Equal(pods[i].GetObjectKind().GroupVersionKind().Kind)) + if l.Name != nil { + switch *l.Name { + case "name": + Expect(l.Value).To(HaveValue(Equal(pods[i].GetObjectMeta().GetName()))) + case "namespace": + Expect(l.Value).To(HaveValue(Equal(pods[i].GetObjectMeta().GetNamespace()))) + case "group": + Expect(l.Value).To(HaveValue(Equal(pods[i].GetObjectKind().GroupVersionKind().Group))) + case "version": + Expect(l.Value).To(HaveValue(Equal(pods[i].GetObjectKind().GroupVersionKind().Version))) + case "kind": + Expect(l.Value).To(HaveValue(Equal(pods[i].GetObjectKind().GroupVersionKind().Kind))) + } } } } diff --git a/internal/ansible/proxy/inject_owner_test.go b/internal/ansible/proxy/inject_owner_test.go index fe00e2deb14..741a816f344 100644 --- a/internal/ansible/proxy/inject_owner_test.go +++ b/internal/ansible/proxy/inject_owner_test.go @@ -112,7 +112,7 @@ var _ = Describe("injectOwnerReferenceHandler", func() { } ownerRefs := modifiedCM.ObjectMeta.OwnerReferences - Expect(len(ownerRefs)).To(Equal(1)) + Expect(ownerRefs).To(HaveLen(1)) ownerRef := ownerRefs[0] diff --git a/internal/ansible/runner/runner.go b/internal/ansible/runner/runner.go index 06660ec3b61..1f2db21117a 100644 --- a/internal/ansible/runner/runner.go +++ b/internal/ansible/runner/runner.go @@ -366,6 +366,9 @@ func (r *runner) makeParameters(u *unstructured.Unstructured) map[string]interfa specKey := fmt.Sprintf("%s_spec", objKey) parameters[specKey] = spec + if r.markUnsafe { + parameters[specKey] = markUnsafe(spec) + } for k, v := range r.Vars { parameters[k] = v @@ -391,7 +394,7 @@ func (r *runner) makeParameters(u *unstructured.Unstructured) map[string]interfa func markUnsafe(values interface{}) 
interface{} { switch v := values.(type) { case []interface{}: - var p []interface{} + p := make([]interface{}, 0) for _, n := range v { p = append(p, markUnsafe(n)) } diff --git a/internal/cmd/ansible-operator/version/cmd_test.go b/internal/cmd/ansible-operator/version/cmd_test.go index da079e8028e..ec65551013b 100644 --- a/internal/cmd/ansible-operator/version/cmd_test.go +++ b/internal/cmd/ansible-operator/version/cmd_test.go @@ -48,7 +48,7 @@ var _ = Describe("Running a version command", func() { w.Close() }() stdout, err := io.ReadAll(r) - Expect(err).To(BeNil()) + Expect(err).ToNot(HaveOccurred()) stdoutString := string(stdout) version := ver.GitVersion if version == "unknown" { diff --git a/internal/cmd/helm-operator/version/cmd_test.go b/internal/cmd/helm-operator/version/cmd_test.go index da079e8028e..ec65551013b 100644 --- a/internal/cmd/helm-operator/version/cmd_test.go +++ b/internal/cmd/helm-operator/version/cmd_test.go @@ -48,7 +48,7 @@ var _ = Describe("Running a version command", func() { w.Close() }() stdout, err := io.ReadAll(r) - Expect(err).To(BeNil()) + Expect(err).ToNot(HaveOccurred()) stdoutString := string(stdout) version := ver.GitVersion if version == "unknown" { diff --git a/internal/cmd/operator-sdk/bundle/cmd_test.go b/internal/cmd/operator-sdk/bundle/cmd_test.go index dca11c4d959..b31448c74dd 100644 --- a/internal/cmd/operator-sdk/bundle/cmd_test.go +++ b/internal/cmd/operator-sdk/bundle/cmd_test.go @@ -26,7 +26,7 @@ var _ = Describe("Running a bundle command", func() { Expect(cmd).NotTo(BeNil()) subcommands := cmd.Commands() - Expect(len(subcommands)).To(Equal(1)) + Expect(subcommands).To(HaveLen(1)) Expect(subcommands[0].Use).To(Equal("validate")) }) }) diff --git a/internal/cmd/operator-sdk/bundle/validate/optional_test.go b/internal/cmd/operator-sdk/bundle/validate/optional_test.go index f76c0e75b44..288d2f9aa95 100644 --- a/internal/cmd/operator-sdk/bundle/validate/optional_test.go +++ 
b/internal/cmd/operator-sdk/bundle/validate/optional_test.go @@ -46,7 +46,7 @@ var _ = Describe("Running optional validators", func() { It("runs no validators for an empty selector", func() { bundle = &apimanifests.Bundle{} sel = labels.SelectorFromSet(map[string]string{}) - Expect(vals.run(bundle, sel, nil)).To(HaveLen(0)) + Expect(vals.run(bundle, sel, nil)).To(BeEmpty()) }) It("runs a validator for one selector on an empty bundle", func() { bundle = &apimanifests.Bundle{} @@ -80,14 +80,14 @@ var _ = Describe("Running optional validators", func() { It("returns an error for an empty selector with no validators", func() { sel = labels.SelectorFromSet(map[string]string{}) err = vals.checkMatches(sel) - Expect(err).NotTo(BeNil()) + Expect(err).To(HaveOccurred()) }) It("returns an error for an unmatched selector with no validators", func() { sel = labels.SelectorFromSet(map[string]string{ nameKey: "operatorhub", }) err = vals.checkMatches(sel) - Expect(err).NotTo(BeNil()) + Expect(err).To(HaveOccurred()) }) It("returns no error for an unmatched selector with all optional validators", func() { sel = labels.SelectorFromSet(map[string]string{ @@ -95,7 +95,7 @@ var _ = Describe("Running optional validators", func() { }) vals = optionalValidators err = vals.checkMatches(sel) - Expect(err).To(BeNil()) + Expect(err).ToNot(HaveOccurred()) }) }) diff --git a/internal/cmd/operator-sdk/olm/cmd_test.go b/internal/cmd/operator-sdk/olm/cmd_test.go index cff5ab905f9..f7fcfa75841 100644 --- a/internal/cmd/operator-sdk/olm/cmd_test.go +++ b/internal/cmd/operator-sdk/olm/cmd_test.go @@ -28,7 +28,7 @@ var _ = Describe("Running an olm command", func() { Expect(cmd.Short).NotTo(BeNil()) subcommands := cmd.Commands() - Expect(len(subcommands)).To(Equal(3)) + Expect(subcommands).To(HaveLen(3)) Expect(subcommands[0].Use).To(Equal("install")) Expect(subcommands[1].Use).To(Equal("status")) Expect(subcommands[2].Use).To(Equal("uninstall")) diff --git a/internal/cmd/operator-sdk/run/cmd_test.go 
b/internal/cmd/operator-sdk/run/cmd_test.go index 59fe6eaf24d..5519b7a06a0 100644 --- a/internal/cmd/operator-sdk/run/cmd_test.go +++ b/internal/cmd/operator-sdk/run/cmd_test.go @@ -29,7 +29,7 @@ var _ = Describe("Running a run command", func() { Expect(cmd.Long).NotTo(BeNil()) subcommands := cmd.Commands() - Expect(len(subcommands)).To(Equal(3)) + Expect(subcommands).To(HaveLen(3)) Expect(subcommands[0].Use).To(Equal("bundle ")) Expect(subcommands[1].Use).To(Equal("bundle-upgrade ")) Expect(subcommands[2].Use).To(Equal("packagemanifests [packagemanifests-root-dir]")) diff --git a/internal/cmd/operator-sdk/run/packagemanifests/packagemanifests_test.go b/internal/cmd/operator-sdk/run/packagemanifests/packagemanifests_test.go index 8aba2fddaeb..d320ced6b7b 100644 --- a/internal/cmd/operator-sdk/run/packagemanifests/packagemanifests_test.go +++ b/internal/cmd/operator-sdk/run/packagemanifests/packagemanifests_test.go @@ -31,7 +31,7 @@ var _ = Describe("Running a run packagemanifests command", func() { Expect(cmd.Short).NotTo(BeNil()) Expect(cmd.Long).NotTo(BeNil()) aliases := cmd.Aliases - Expect(len(aliases)).To(Equal(1)) + Expect(aliases).To(HaveLen(1)) Expect(aliases[0]).To(Equal("pm")) }) }) diff --git a/internal/cmd/operator-sdk/scorecard/cmd.go b/internal/cmd/operator-sdk/scorecard/cmd.go index 7265ac4055a..d81a869d056 100644 --- a/internal/cmd/operator-sdk/scorecard/cmd.go +++ b/internal/cmd/operator-sdk/scorecard/cmd.go @@ -90,11 +90,15 @@ If the argument holds an image tag, it must be present remotely.`, "Disable resource cleanup after tests are run") scorecardCmd.Flags().DurationVarP(&c.waitTime, "wait-time", "w", 30*time.Second, "seconds to wait for tests to complete. Example: 35s") + // Please note that for Operator-sdk + Preflight + DCI integration in disconnected environments, + // it is necessary to refer to storage-image and untar-image using their digests instead of tags. 
+ // If you need to make changes to these images, please ensure that you always use the digests. scorecardCmd.Flags().StringVarP(&c.storageImage, "storage-image", "b", - "quay.io/operator-framework/scorecard-storage:latest", + "quay.io/operator-framework/scorecard-storage@sha256:a3bfda71281393c7794cabdd39c563fb050d3020fd0b642ea164646bdd39a0e2", "Storage image to be used by the Scorecard pod") + // Use the digest of the latest scorecard-untar image scorecardCmd.Flags().StringVarP(&c.untarImage, "untar-image", "u", - "quay.io/operator-framework/scorecard-untar:latest", + "quay.io/operator-framework/scorecard-untar@sha256:2e728c5e67a7f4dec0df157a322dd5671212e8ae60f69137463bd4fdfbff8747", "Untar image to be used by the Scorecard pod") scorecardCmd.Flags().StringVarP(&c.testOutput, "test-output", "t", "test-output", "Test output directory.") diff --git a/internal/cmd/operator-sdk/scorecard/cmd_test.go b/internal/cmd/operator-sdk/scorecard/cmd_test.go index 9f68f072f8e..a34f0cfe81d 100644 --- a/internal/cmd/operator-sdk/scorecard/cmd_test.go +++ b/internal/cmd/operator-sdk/scorecard/cmd_test.go @@ -69,12 +69,14 @@ var _ = Describe("Running the scorecard command", func() { flag = cmd.Flags().Lookup("storage-image") Expect(flag).NotTo(BeNil()) Expect(flag.Shorthand).To(Equal("b")) - Expect(flag.DefValue).To(Equal("quay.io/operator-framework/scorecard-storage:latest")) + // Use the digest of the latest scorecard-storage image + Expect(flag.DefValue).To(Equal("quay.io/operator-framework/scorecard-storage@sha256:a3bfda71281393c7794cabdd39c563fb050d3020fd0b642ea164646bdd39a0e2")) flag = cmd.Flags().Lookup("untar-image") Expect(flag).NotTo(BeNil()) Expect(flag.Shorthand).To(Equal("u")) - Expect(flag.DefValue).To(Equal("quay.io/operator-framework/scorecard-untar:latest")) + // Use the digest of the latest scorecard-untar image + Expect(flag.DefValue).To(Equal("quay.io/operator-framework/scorecard-untar@sha256:2e728c5e67a7f4dec0df157a322dd5671212e8ae60f69137463bd4fdfbff8747")) }) 
}) diff --git a/internal/generate/clusterserviceversion/bases/definitions/crd_test.go b/internal/generate/clusterserviceversion/bases/definitions/crd_test.go index 60c642072eb..925c7fa84b0 100644 --- a/internal/generate/clusterserviceversion/bases/definitions/crd_test.go +++ b/internal/generate/clusterserviceversion/bases/definitions/crd_test.go @@ -43,7 +43,7 @@ var _ = Describe("getTypedDescriptors", func() { It("handles an empty set of marked fields", func() { out = getTypedDescriptors(markedFields, reflect.TypeOf(v1alpha1.SpecDescriptor{}), spec) - Expect(out).To(HaveLen(0)) + Expect(out).To(BeEmpty()) }) It("returns one spec descriptor for one spec marker on a field", func() { markedFields[crd.TypeIdent{}] = []*fieldInfo{ @@ -79,7 +79,7 @@ var _ = Describe("getTypedDescriptors", func() { }, } out = getTypedDescriptors(markedFields, reflect.TypeOf(v1alpha1.SpecDescriptor{}), spec) - Expect(out).To(HaveLen(0)) + Expect(out).To(BeEmpty()) }) It("returns one status descriptor for one status marker on a field", func() { markedFields[crd.TypeIdent{}] = []*fieldInfo{ diff --git a/internal/generate/clusterserviceversion/clusterserviceversion_test.go b/internal/generate/clusterserviceversion/clusterserviceversion_test.go index 91b435f5edf..21ce1c79cf5 100644 --- a/internal/generate/clusterserviceversion/clusterserviceversion_test.go +++ b/internal/generate/clusterserviceversion/clusterserviceversion_test.go @@ -259,7 +259,7 @@ var _ = Describe("Testing CRDs with single version", func() { } // Update the input's and expected CSV's Deployment image. 
collectManifestsFromFileHelper(g.Collector, goBasicOperatorPath) - Expect(len(g.Collector.Deployments)).To(BeNumerically(">=", 1)) + Expect(g.Collector.Deployments).ToNot(BeEmpty()) imageTag := "controller:v" + g.Version modifyDepImageHelper(&g.Collector.Deployments[0].Spec, imageTag) updatedCSV := updateCSV(newCSVUIMeta, modifyCSVDepImageHelper(imageTag)) @@ -269,7 +269,7 @@ var _ = Describe("Testing CRDs with single version", func() { Expect(csv).To(Equal(updatedCSV)) // verify if conversion webhooks are added - Expect(len(csv.Spec.WebhookDefinitions)).NotTo(Equal(0)) + Expect(csv.Spec.WebhookDefinitions).NotTo(BeEmpty()) Expect(containsConversionWebhookDefinition(csv.Spec.WebhookDefinitions)).To(BeTrue()) }) }) @@ -424,14 +424,14 @@ func readFileHelper(path string) string { func modifyCSVDepImageHelper(tag string) func(csv *v1alpha1.ClusterServiceVersion) { return func(csv *v1alpha1.ClusterServiceVersion) { depSpecs := csv.Spec.InstallStrategy.StrategySpec.DeploymentSpecs - ExpectWithOffset(2, len(depSpecs)).To(BeNumerically(">=", 1)) + ExpectWithOffset(2, depSpecs).ToNot(BeEmpty()) modifyDepImageHelper(&depSpecs[0].Spec, tag) } } func modifyDepImageHelper(depSpec *appsv1.DeploymentSpec, tag string) { containers := depSpec.Template.Spec.Containers - ExpectWithOffset(1, len(containers)).To(BeNumerically(">=", 1)) + ExpectWithOffset(1, containers).ToNot(BeEmpty()) containers[0].Image = tag } diff --git a/internal/generate/clusterserviceversion/clusterserviceversion_updaters_test.go b/internal/generate/clusterserviceversion/clusterserviceversion_updaters_test.go index 96026496ab7..212e32a6e0d 100644 --- a/internal/generate/clusterserviceversion/clusterserviceversion_updaters_test.go +++ b/internal/generate/clusterserviceversion/clusterserviceversion_updaters_test.go @@ -53,7 +53,7 @@ var _ = Describe("apply functions", func() { c.Deployments = []appsv1.Deployment{newDeploymentWithLabels(depName, labels)} applyDeployments(c, strategy) - 
Expect(len(strategy.DeploymentSpecs)).To(Equal(1)) + Expect(strategy.DeploymentSpecs).To(HaveLen(1)) Expect(strategy.DeploymentSpecs[0].Label).To(Equal(labels)) }) }) diff --git a/internal/generate/collector/clusterserviceversion_test.go b/internal/generate/collector/clusterserviceversion_test.go index 7a2b668df5a..dd62b2c0de4 100644 --- a/internal/generate/collector/clusterserviceversion_test.go +++ b/internal/generate/collector/clusterserviceversion_test.go @@ -37,16 +37,16 @@ var _ = Describe("SplitCSVPermissionsObjects", func() { It("returns empty lists for an empty Manifests", func() { c.Roles = []rbacv1.Role{} inPerm, inCPerm, out = c.SplitCSVPermissionsObjects(nil) - Expect(inPerm).To(HaveLen(0)) - Expect(inCPerm).To(HaveLen(0)) - Expect(out).To(HaveLen(0)) + Expect(inPerm).To(BeEmpty()) + Expect(inCPerm).To(BeEmpty()) + Expect(out).To(BeEmpty()) }) It("splitting 1 Role no RoleBinding", func() { c.Roles = []rbacv1.Role{newRole("my-role")} inPerm, inCPerm, out = c.SplitCSVPermissionsObjects(nil) - Expect(inPerm).To(HaveLen(0)) - Expect(inCPerm).To(HaveLen(0)) + Expect(inPerm).To(BeEmpty()) + Expect(inCPerm).To(BeEmpty()) Expect(out).To(HaveLen(1)) Expect(getRoleNames(out)).To(Equal([]string{"my-role"})) }) @@ -58,8 +58,8 @@ var _ = Describe("SplitCSVPermissionsObjects", func() { newRoleBinding("my-role-binding", newRoleRef("my-role"), newServiceAccountSubject("my-other-account")), } inPerm, inCPerm, out = c.SplitCSVPermissionsObjects(nil) - Expect(inPerm).To(HaveLen(0)) - Expect(inCPerm).To(HaveLen(0)) + Expect(inPerm).To(BeEmpty()) + Expect(inCPerm).To(BeEmpty()) Expect(out).To(HaveLen(2)) Expect(getRoleNames(out)).To(Equal([]string{"my-role"})) Expect(getRoleBindingNames(out)).To(Equal([]string{"my-role-binding"})) @@ -72,8 +72,8 @@ var _ = Describe("SplitCSVPermissionsObjects", func() { newRoleBinding("my-role-binding", newClusterRoleRef("my-role"), newServiceAccountSubject("my-other-account")), } inPerm, inCPerm, out = c.SplitCSVPermissionsObjects(nil) - 
Expect(inPerm).To(HaveLen(0)) - Expect(inCPerm).To(HaveLen(0)) + Expect(inPerm).To(BeEmpty()) + Expect(inCPerm).To(BeEmpty()) Expect(out).To(HaveLen(2)) Expect(getClusterRoleNames(out)).To(Equal([]string{"my-role"})) Expect(getRoleBindingNames(out)).To(Equal([]string{"my-role-binding"})) @@ -88,8 +88,8 @@ var _ = Describe("SplitCSVPermissionsObjects", func() { newRoleBinding("my-role-binding-2", newClusterRoleRef("my-role"), newServiceAccountSubject("my-other-account")), } inPerm, inCPerm, out = c.SplitCSVPermissionsObjects(nil) - Expect(inPerm).To(HaveLen(0)) - Expect(inCPerm).To(HaveLen(0)) + Expect(inPerm).To(BeEmpty()) + Expect(inCPerm).To(BeEmpty()) Expect(out).To(HaveLen(4)) Expect(getRoleNames(out)).To(Equal([]string{"my-role"})) Expect(getClusterRoleNames(out)).To(Equal([]string{"my-role"})) @@ -105,8 +105,8 @@ var _ = Describe("SplitCSVPermissionsObjects", func() { inPerm, inCPerm, out = c.SplitCSVPermissionsObjects(nil) Expect(inPerm).To(HaveLen(1)) Expect(getRoleNames(inPerm)).To(Equal([]string{"my-role"})) - Expect(inCPerm).To(HaveLen(0)) - Expect(out).To(HaveLen(0)) + Expect(inCPerm).To(BeEmpty()) + Expect(out).To(BeEmpty()) }) It("splitting 1 ClusterRole 1 RoleBinding with 1 Subject containing a Deployment serviceAccountName", func() { @@ -118,8 +118,8 @@ var _ = Describe("SplitCSVPermissionsObjects", func() { inPerm, inCPerm, out = c.SplitCSVPermissionsObjects(nil) Expect(inPerm).To(HaveLen(1)) Expect(getClusterRoleNames(inPerm)).To(Equal([]string{"my-role"})) - Expect(inCPerm).To(HaveLen(0)) - Expect(out).To(HaveLen(0)) + Expect(inCPerm).To(BeEmpty()) + Expect(out).To(BeEmpty()) }) It("splitting 1 Role 1 ClusterRole 1 RoleBinding with 1 Subject containing a Deployment serviceAccountName", func() { @@ -134,8 +134,8 @@ var _ = Describe("SplitCSVPermissionsObjects", func() { Expect(inPerm).To(HaveLen(2)) Expect(getRoleNames(inPerm)).To(Equal([]string{"my-role"})) Expect(getClusterRoleNames(inPerm)).To(Equal([]string{"my-role"})) - 
Expect(inCPerm).To(HaveLen(0)) - Expect(out).To(HaveLen(0)) + Expect(inCPerm).To(BeEmpty()) + Expect(out).To(BeEmpty()) }) It("splitting 1 Role 1 ClusterRole 1 RoleBinding with 2 Subjects containing a Deployment serviceAccountName", func() { @@ -219,7 +219,7 @@ var _ = Describe("SplitCSVPermissionsObjects", func() { Expect(getClusterRoleNames(inPerm)).To(Equal([]string{roleName1})) Expect(inCPerm).To(HaveLen(2)) Expect(getClusterRoleNames(inCPerm)).To(Equal([]string{roleName1, roleName2})) - Expect(out).To(HaveLen(0)) + Expect(out).To(BeEmpty()) }) }) }) diff --git a/internal/generate/packagemanifest/packagemanifest_test.go b/internal/generate/packagemanifest/packagemanifest_test.go index 69a8719753c..3fc612f8204 100644 --- a/internal/generate/packagemanifest/packagemanifest_test.go +++ b/internal/generate/packagemanifest/packagemanifest_test.go @@ -195,7 +195,7 @@ packageName: memcached-operator Expect(err).NotTo(HaveOccurred()) Expect(pm).NotTo(BeNil()) Expect(pm.PackageName).To(Equal("memcached-operator")) - Expect(len(pm.Channels)).To(Equal(1)) + Expect(pm.Channels).To(HaveLen(1)) Expect(pm.Channels[0].Name).To(Equal("alpha")) Expect(pm.Channels[0].CurrentCSVName).To(Equal("memcached-operator.v0.0.1")) Expect(pm.DefaultChannelName).To(Equal("alpha")) diff --git a/internal/olm/operator/registry/configmap/configmap_test.go b/internal/olm/operator/registry/configmap/configmap_test.go index a3173e57b06..cdbb00dda42 100644 --- a/internal/olm/operator/registry/configmap/configmap_test.go +++ b/internal/olm/operator/registry/configmap/configmap_test.go @@ -100,10 +100,10 @@ var _ = Describe("ConfigMap", func() { binaryData := make(map[string][]byte) expected, err := yaml.Marshal(obj) - Expect(err).Should(BeNil()) + Expect(err).ShouldNot(HaveOccurred()) binaryData[makeObjectFileName(expected, userInput...)] = expected // Test and verify function - Expect(addObjectToBinaryData(b, obj, userInput...)).Should(BeNil()) + Expect(addObjectToBinaryData(b, obj, 
userInput...)).Should(Succeed()) Expect(b).Should(Equal(binaryData)) }) @@ -122,10 +122,9 @@ var _ = Describe("ConfigMap", func() { userInput := []string{"userInput", "userInput2"} b, e := makeObjectBinaryData(obj, userInput...) - Expect(e).Should(BeNil()) + Expect(e).ShouldNot(HaveOccurred()) // Test and verify function - e = addObjectToBinaryData(binaryData, obj, userInput...) - Expect(e).Should(BeNil()) + Expect(addObjectToBinaryData(binaryData, obj, userInput...)).Should(Succeed()) Expect(b).Should(Equal(binaryData)) }) @@ -133,7 +132,6 @@ var _ = Describe("ConfigMap", func() { Describe("makeBundleBinaryData", func() { It("should serialize bundle to binary data", func() { - var e error b := apimanifests.Bundle{ Name: "testbundle", Objects: []*unstructured.Unstructured{ @@ -147,11 +145,10 @@ var _ = Describe("ConfigMap", func() { } binaryData, err := makeBundleBinaryData(&b) - Expect(err).Should(BeNil()) + Expect(err).ShouldNot(HaveOccurred()) val := make(map[string][]byte) for _, obj := range b.Objects { - e = addObjectToBinaryData(val, obj, obj.GetName(), obj.GetKind()) - Expect(e).Should(BeNil()) + Expect(addObjectToBinaryData(val, obj, obj.GetName(), obj.GetKind())).Should(Succeed()) } Expect(binaryData).Should(Equal(val)) @@ -218,17 +215,17 @@ var _ = Describe("ConfigMap", func() { }) It("should serialize packagemanifest to binary data", func() { binaryDataByConfigMap, err := makeConfigMapsForPackageManifests(&p, b) - Expect(err).Should(BeNil()) + Expect(err).ShouldNot(HaveOccurred()) val := make(map[string]map[string][]byte) cmName := getRegistryConfigMapName(p.PackageName) + "-package" val[cmName], err = makeObjectBinaryData(p) - Expect(err).Should(BeNil()) + Expect(err).ShouldNot(HaveOccurred()) for _, bundle := range b { version := bundle.CSV.Spec.Version.String() cmName := getRegistryConfigMapName(p.PackageName) + "-" + k8sutil.FormatOperatorNameDNS1123(version) val[cmName], e = makeBundleBinaryData(bundle) - Expect(e).Should(BeNil()) + 
Expect(e).ShouldNot(HaveOccurred()) } Expect(binaryDataByConfigMap).Should(Equal(val)) @@ -240,7 +237,6 @@ var _ = Describe("ConfigMap", func() { var ( rr RegistryResources list corev1.ConfigMapList - e error ) BeforeEach(func() { fakeclient := fake.NewClientBuilder().WithObjects( @@ -274,10 +270,9 @@ var _ = Describe("ConfigMap", func() { client_cr.MatchingLabels(makeRegistryLabels(rr.Pkg.PackageName)), client_cr.InNamespace("testns"), } - e = rr.Client.KubeClient.List(context.TODO(), &list, opts...) - Expect(e).Should(BeNil()) + Expect(rr.Client.KubeClient.List(context.TODO(), &list, opts...)).Should(Succeed()) configmaps, err := rr.getRegistryConfigMaps(context.TODO(), "testns") - Expect(err).Should(BeNil()) + Expect(err).ShouldNot(HaveOccurred()) Expect(configmaps).Should(Equal(list.Items)) }) diff --git a/internal/olm/operator/registry/configmap/registry_test.go b/internal/olm/operator/registry/configmap/registry_test.go index ccaeb9bffde..d2ba484dfae 100644 --- a/internal/olm/operator/registry/configmap/registry_test.go +++ b/internal/olm/operator/registry/configmap/registry_test.go @@ -105,13 +105,12 @@ var _ = Describe("Registry", func() { }, } dep := appsv1.Deployment{} - err := rr.Client.KubeClient.Get(context.TODO(), types.NamespacedName{Name: getRegistryServerName("pkgName"), Namespace: "testns"}, &dep) - Expect(err).Should(BeNil()) + Expect( + rr.Client.KubeClient.Get(context.TODO(), types.NamespacedName{Name: getRegistryServerName("pkgName"), Namespace: "testns"}, &dep), + ).Should(Succeed()) + Expect(rr.DeletePackageManifestsRegistry(context.TODO(), "testns")).Should(Succeed()) - err = rr.DeletePackageManifestsRegistry(context.TODO(), "testns") - Expect(err).Should(BeNil()) - - err = rr.Client.KubeClient.Get(context.TODO(), types.NamespacedName{Name: "pkgName-registry-server", Namespace: "testns"}, &dep) + err := rr.Client.KubeClient.Get(context.TODO(), types.NamespacedName{Name: "pkgName-registry-server", Namespace: "testns"}, &dep) 
Expect(apierrors.IsNotFound(err)).Should(BeTrue()) }) }) @@ -170,7 +169,7 @@ var _ = Describe("Registry", func() { It("should return true if a deployment exitsts in the registry", func() { temp, err := rr.IsRegistryExist(context.TODO(), testns) - Expect(err).Should(BeNil()) + Expect(err).ShouldNot(HaveOccurred()) Expect(temp).Should(BeTrue()) }) @@ -180,11 +179,10 @@ var _ = Describe("Registry", func() { temp bool ) - err = rr.DeletePackageManifestsRegistry(context.TODO(), testns) - Expect(err).Should(BeNil()) + Expect(rr.DeletePackageManifestsRegistry(context.TODO(), testns)).Should(Succeed()) temp, err = rr.IsRegistryExist(context.TODO(), testns) - Expect(err).Should(BeNil()) + Expect(err).ShouldNot(HaveOccurred()) Expect(temp).Should(BeFalse()) }) }) @@ -254,14 +252,14 @@ var _ = Describe("Registry", func() { rr.Client.KubeClient = fake.NewClientBuilder().Build() temp, err := rr.IsRegistryDataStale(context.TODO(), testns) - Expect(err).Should(BeNil()) + Expect(err).ShouldNot(HaveOccurred()) Expect(temp).Should(BeTrue()) }) It("should return true if the configmap does not exist", func() { temp, err := rr.IsRegistryDataStale(context.TODO(), testns) - Expect(err).Should(BeNil()) + Expect(err).ShouldNot(HaveOccurred()) Expect(temp).Should(BeTrue()) }) @@ -286,7 +284,7 @@ var _ = Describe("Registry", func() { ).Build() temp, err := rr.IsRegistryDataStale(context.TODO(), testns) - Expect(err).Should(BeNil()) + Expect(err).ShouldNot(HaveOccurred()) Expect(temp).Should(BeTrue()) }) @@ -319,7 +317,7 @@ var _ = Describe("Registry", func() { ).Build() temp, err := rr.IsRegistryDataStale(context.TODO(), testns) - Expect(err).Should(BeNil()) + Expect(err).ShouldNot(HaveOccurred()) Expect(temp).Should(BeTrue()) }) @@ -353,7 +351,7 @@ var _ = Describe("Registry", func() { ).Build() temp, err := rr.IsRegistryDataStale(context.TODO(), testns) - Expect(err).Should(BeNil()) + Expect(err).ShouldNot(HaveOccurred()) Expect(temp).Should(BeTrue()) }) }) diff --git 
a/internal/olm/operator/registry/configmap_test.go b/internal/olm/operator/registry/configmap_test.go index 99b7b7f4739..56e1ef99ec3 100644 --- a/internal/olm/operator/registry/configmap_test.go +++ b/internal/olm/operator/registry/configmap_test.go @@ -70,9 +70,7 @@ var _ = Describe("Configmap", func() { }, } expected := cs.DeepCopy() - err := ctlog.updateCatalogSource(context.TODO(), cs) - - Expect(err).Should(BeNil()) + Expect(ctlog.updateCatalogSource(context.TODO(), cs)).Should(Succeed()) Expect(expected.Spec.Address).ShouldNot(Equal(cs.Spec.Address)) Expect(expected.Spec.SourceType).ShouldNot(Equal(cs.Spec.SourceType)) }) diff --git a/internal/olm/operator/registry/fbcindex/configMapWriter.go b/internal/olm/operator/registry/fbcindex/configMapWriter.go new file mode 100644 index 00000000000..b1511e9bc01 --- /dev/null +++ b/internal/olm/operator/registry/fbcindex/configMapWriter.go @@ -0,0 +1,173 @@ +// Copyright 2023 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fbcindex + +import ( + "bytes" + "compress/gzip" + "fmt" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + yamlSeparator = "\n---\n" + gzipSuffixLength = 13 + maxGZIPLength = maxConfigMapSize - gzipSuffixLength + + ConfigMapEncodingAnnotationKey = "olm.contentEncoding" + ConfigMapEncodingAnnotationGzip = "gzip+base64" +) + +/* +This file implements the actual building of the CM list. 
It uses the template method design pattern to implement both +regular string VM, and compressed binary CM. + +The method itself is FBCRegistryPod.getConfigMaps. This file contains the actual implementation of the writing actions, +used by the method. +*/ + +type configMapWriter interface { + reset() + newConfigMap(string) *corev1.ConfigMap + getFilePath() string + isEmpty() bool + exceedMaxLength(cmSize int, data string) (bool, error) + closeCM(cm *corev1.ConfigMap) error + addData(data string) error + continueAddData(data string) error + writeLastFragment(cm *corev1.ConfigMap) error +} + +type gzipCMWriter struct { + actualBuff *bytes.Buffer + helperBuff *bytes.Buffer + actualWriter *gzip.Writer + helperWriter *gzip.Writer + cmName string + namespace string +} + +func newGZIPWriter(name, namespace string) *gzipCMWriter { + actualBuff := &bytes.Buffer{} + helperBuff := &bytes.Buffer{} + + return &gzipCMWriter{ + actualBuff: actualBuff, + helperBuff: helperBuff, + actualWriter: gzip.NewWriter(actualBuff), + helperWriter: gzip.NewWriter(helperBuff), + cmName: name, + namespace: namespace, + } +} + +func (cmw *gzipCMWriter) reset() { + cmw.actualBuff.Reset() + cmw.actualWriter.Reset(cmw.actualBuff) + cmw.helperBuff.Reset() + cmw.helperWriter.Reset(cmw.helperBuff) +} + +func (cmw *gzipCMWriter) newConfigMap(name string) *corev1.ConfigMap { + return &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + APIVersion: corev1.SchemeGroupVersion.String(), + Kind: "ConfigMap", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: cmw.namespace, + Name: name, + Annotations: map[string]string{ + ConfigMapEncodingAnnotationKey: ConfigMapEncodingAnnotationGzip, + }, + }, + BinaryData: map[string][]byte{}, + } +} + +func (cmw *gzipCMWriter) getFilePath() string { + return fmt.Sprintf("%s.yaml.gz", defaultConfigMapKey) +} + +func (cmw *gzipCMWriter) isEmpty() bool { + return cmw.actualBuff.Len() > 0 +} + +func (cmw *gzipCMWriter) exceedMaxLength(cmSize int, data string) (bool, error) { + 
_, err := cmw.helperWriter.Write([]byte(data)) + if err != nil { + return false, err + } + + err = cmw.helperWriter.Flush() + if err != nil { + return false, err + } + + return cmSize+cmw.helperBuff.Len() > maxGZIPLength, nil +} + +func (cmw *gzipCMWriter) closeCM(cm *corev1.ConfigMap) error { + err := cmw.actualWriter.Close() + if err != nil { + return err + } + + err = cmw.actualWriter.Flush() + if err != nil { + return err + } + + cm.BinaryData[defaultConfigMapKey] = make([]byte, cmw.actualBuff.Len()) + copy(cm.BinaryData[defaultConfigMapKey], cmw.actualBuff.Bytes()) + + cmw.reset() + + return nil +} + +func (cmw *gzipCMWriter) addData(data string) error { + dataBytes := []byte(data) + _, err := cmw.helperWriter.Write(dataBytes) + if err != nil { + return err + } + _, err = cmw.actualWriter.Write(dataBytes) + if err != nil { + return err + } + return nil +} + +// continueAddData completes adding the data after starting adding it in exceedMaxLength +func (cmw *gzipCMWriter) continueAddData(data string) error { + _, err := cmw.actualWriter.Write([]byte(data)) + if err != nil { + return err + } + return nil +} + +func (cmw *gzipCMWriter) writeLastFragment(cm *corev1.ConfigMap) error { + err := cmw.actualWriter.Close() + if err != nil { + return err + } + + cm.BinaryData[defaultConfigMapKey] = cmw.actualBuff.Bytes() + return nil +} diff --git a/internal/olm/operator/registry/fbcindex/fbc_registry_pod.go b/internal/olm/operator/registry/fbcindex/fbc_registry_pod.go index 32fd1e2cf1c..9da6c2414e3 100644 --- a/internal/olm/operator/registry/fbcindex/fbc_registry_pod.go +++ b/internal/olm/operator/registry/fbcindex/fbc_registry_pod.go @@ -15,13 +15,11 @@ package fbcindex import ( - "bytes" "context" "errors" "fmt" "path" "strings" - "text/template" "time" "github.com/operator-framework/api/pkg/operators/v1alpha1" @@ -45,6 +43,7 @@ const ( defaultGRPCPort = 50051 defaultContainerName = "registry-grpc" + defaultInitContainerName = "registry-grpc-init" 
defaultContainerPortName = "grpc" defaultConfigMapKey = "extraFBC" @@ -80,6 +79,8 @@ type FBCRegistryPod struct { //nolint:maligned configMapName string + cmWriter configMapWriter + cfg *operator.Configuration } @@ -99,6 +100,8 @@ func (f *FBCRegistryPod) init(cfg *operator.Configuration, cs *v1alpha1.CatalogS f.cfg = cfg + f.cmWriter = newGZIPWriter(f.configMapName, cfg.Namespace) + // validate the FBCRegistryPod struct and ensure required fields are set if err := f.validate(); err != nil { return fmt.Errorf("invalid FBC registry pod: %v", err) @@ -221,10 +224,7 @@ func (f *FBCRegistryPod) podForBundleRegistry(cs *v1alpha1.CatalogSource) (*core bundleImage := f.BundleItems[len(f.BundleItems)-1].ImageTag // construct the container command for pod spec - containerCmd, err := f.getContainerCmd() - if err != nil { - return nil, err - } + containerCmd := fmt.Sprintf(`opm serve %s -p %d`, f.FBCIndexRootDir, f.GRPCPort) //f.getContainerCmd() // create ConfigMap if it does not exist, // if it exists, then update it with new content. 
@@ -233,8 +233,11 @@ func (f *FBCRegistryPod) podForBundleRegistry(cs *v1alpha1.CatalogSource) (*core return nil, fmt.Errorf("configMap error: %w", err) } - volumes := []corev1.Volume{} - volumeMounts := []corev1.VolumeMount{} + var ( + volumes []corev1.Volume + sharedVolumeMounts []corev1.VolumeMount + gzipVolumeMount []corev1.VolumeMount + ) for _, cm := range cms { volumes = append(volumes, corev1.Volume{ @@ -244,7 +247,7 @@ func (f *FBCRegistryPod) podForBundleRegistry(cs *v1alpha1.CatalogSource) (*core Items: []corev1.KeyToPath{ { Key: defaultConfigMapKey, - Path: path.Join(cm.Name, fmt.Sprintf("%s.yaml", defaultConfigMapKey)), + Path: path.Join(cm.Name, f.cmWriter.getFilePath()), }, }, LocalObjectReference: corev1.LocalObjectReference{ @@ -254,10 +257,25 @@ func (f *FBCRegistryPod) podForBundleRegistry(cs *v1alpha1.CatalogSource) (*core }, }) - volumeMounts = append(volumeMounts, corev1.VolumeMount{ - Name: k8sutil.TrimDNS1123Label(cm.Name + "-volume"), + volumes = append(volumes, corev1.Volume{ + Name: k8sutil.TrimDNS1123Label(cm.Name + "-unzip"), + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + }) + + vm := corev1.VolumeMount{ + Name: k8sutil.TrimDNS1123Label(cm.Name + "-unzip"), MountPath: path.Join(f.FBCIndexRootDir, cm.Name), SubPath: cm.Name, + } + + sharedVolumeMounts = append(sharedVolumeMounts, vm) + + gzipVolumeMount = append(gzipVolumeMount, corev1.VolumeMount{ + Name: k8sutil.TrimDNS1123Label(cm.Name + "-volume"), + MountPath: path.Join("/compressed", f.FBCIndexRootDir, cm.Name), + SubPath: cm.Name, }) } @@ -315,25 +333,47 @@ func (f *FBCRegistryPod) podForBundleRegistry(cs *v1alpha1.CatalogSource) (*core Ports: []corev1.ContainerPort{ {Name: defaultContainerPortName, ContainerPort: f.GRPCPort}, }, - VolumeMounts: volumeMounts, + VolumeMounts: sharedVolumeMounts, }, }, ServiceAccountName: f.cfg.ServiceAccount, }, } + f.addGZIPInitContainer(sharedVolumeMounts, gzipVolumeMount) + return f.pod, nil } -// 
container creation command for FBC type images. -const fbcCmdTemplate = `opm serve {{ .FBCIndexRootDir}} -p {{ .GRPCPort }}` +func (f *FBCRegistryPod) addGZIPInitContainer(containerVolumeMount []corev1.VolumeMount, gzipVolumeMount []corev1.VolumeMount) { + initContainerVolumeMount := append(containerVolumeMount, gzipVolumeMount...) + f.pod.Spec.InitContainers = append(f.pod.Spec.InitContainers, corev1.Container{ + Name: defaultInitContainerName, + Image: "docker.io/library/busybox:1.36.0", + Command: []string{ + "sh", + "-c", + fmt.Sprintf(`for dir in /compressed%s/*configmap-partition*; do `, f.FBCIndexRootDir) + + `for f in ${dir}/*; do ` + + `file="${f%.*}";` + + `file="${file#/compressed}";` + + `cat ${f} | gzip -d -c > "${file}";` + + "done;" + + "done;", + }, + VolumeMounts: initContainerVolumeMount, + }) +} // createConfigMap creates a ConfigMap if it does not exist and if it does, then update it with new content. // Also, sets the owner reference by making CatalogSource the owner of ConfigMap object for cleanup purposes. func (f *FBCRegistryPod) createConfigMaps(cs *v1alpha1.CatalogSource) ([]*corev1.ConfigMap, error) { // By default just use the partitioning logic. // If the entire FBC contents can fit in one ConfigMap it will. - cms := f.partitionedConfigMaps() + cms, err := f.partitionedConfigMaps() + if err != nil { + return nil, err + } // Loop through all the ConfigMaps and set the OwnerReference and try to create them for _, cm := range cms { @@ -354,81 +394,79 @@ func (f *FBCRegistryPod) createConfigMaps(cs *v1alpha1.CatalogSource) ([]*corev1 // partitionedConfigMaps will create and return a list of *corev1.ConfigMap // that represents all the ConfigMaps that will need to be created to // properly have all the FBC contents rendered in the registry pod. 
-func (f *FBCRegistryPod) partitionedConfigMaps() []*corev1.ConfigMap { +func (f *FBCRegistryPod) partitionedConfigMaps() ([]*corev1.ConfigMap, error) { + var err error // Split on the YAML separator `---` - yamlDefs := strings.Split(f.FBCContent, "---")[1:] - configMaps := []*corev1.ConfigMap{} + yamlDefs := strings.Split(f.FBCContent, "---") + + configMaps, err := f.getConfigMaps(yamlDefs) + if err != nil { + return nil, err + } + + return configMaps, nil +} + +// getConfigMaps builds a list of configMaps, to contain the bundle. +func (f *FBCRegistryPod) getConfigMaps(yamlDefs []string) ([]*corev1.ConfigMap, error) { + defer f.cmWriter.reset() + + cm := f.cmWriter.newConfigMap(fmt.Sprintf("%s-partition-1", f.configMapName)) + configMaps := []*corev1.ConfigMap{cm} + cmSize := cm.Size() - // Keep the number of ConfigMaps that are created to a minimum by - // stuffing them as full as possible. partitionCount := 1 - cm := f.makeBaseConfigMap() + // for each chunk of yaml see if it can be added to the ConfigMap partition for _, yamlDef := range yamlDefs { - // If the ConfigMap has data then lets attempt to add to it - if len(cm.Data) != 0 { - // Create a copy to use to verify that adding the data doesn't - // exceed the max ConfigMap size of 1 MiB. - tempCm := cm.DeepCopy() - tempCm.Data[defaultConfigMapKey] = tempCm.Data[defaultConfigMapKey] + "\n---\n" + yamlDef - - // if it would be too large adding the data then partition it. 
- if tempCm.Size() >= maxConfigMapSize { - // Set the ConfigMap name based on the partition it is - cm.SetName(fmt.Sprintf("%s-partition-%d", f.configMapName, partitionCount)) - // Increase the partition count + yamlDef = strings.TrimSpace(yamlDef) + if len(yamlDef) == 0 { + continue + } + + if f.cmWriter.isEmpty() { + data := yamlSeparator + yamlDef + exceeded, err := f.cmWriter.exceedMaxLength(cmSize, data) + if err != nil { + return nil, err + } + if exceeded { + err = f.cmWriter.closeCM(cm) + if err != nil { + return nil, err + } + partitionCount++ - // Add the ConfigMap to the list of ConfigMaps - configMaps = append(configMaps, cm.DeepCopy()) - - // Create a new ConfigMap - cm = f.makeBaseConfigMap() - // Since adding this data would have made the previous - // ConfigMap too large, add it to this new one. - // No chunk of YAML from the bundle should cause - // the ConfigMap size to exceed 1 MiB and if - // somehow it does then there is a problem with the - // YAML itself. We can't reasonably break it up smaller - // since it is a single object. - cm.Data[defaultConfigMapKey] = yamlDef + + cm = f.cmWriter.newConfigMap(fmt.Sprintf("%s-partition-%d", f.configMapName, partitionCount)) + configMaps = append(configMaps, cm) + cmSize = cm.Size() + + err = f.cmWriter.addData(yamlDef) + if err != nil { + return nil, err + } } else { - // if adding the data to the ConfigMap - // doesn't make the ConfigMap exceed the - // size limit then actually add it. - cm.Data = tempCm.Data + err = f.cmWriter.continueAddData(data) + if err != nil { + return nil, err + } } } else { - // If there is no data in the ConfigMap - // then this is the first pass. Since it is - // the first pass go ahead and add the data. - cm.Data[defaultConfigMapKey] = yamlDef + err := f.cmWriter.addData(yamlDef) + if err != nil { + return nil, err + } } } - // if there aren't as many ConfigMaps as partitions AND the unadded ConfigMap has data - // then add it to the list of ConfigMaps. 
This is so we don't miss adding a ConfigMap - // after the above loop completes. - if len(configMaps) != partitionCount && len(cm.Data) != 0 { - cm.SetName(fmt.Sprintf("%s-partition-%d", f.configMapName, partitionCount)) - configMaps = append(configMaps, cm.DeepCopy()) + // write the data of the last cm + err := f.cmWriter.writeLastFragment(cm) + if err != nil { + return nil, err } - return configMaps -} - -// makeBaseConfigMap will return the base *corev1.ConfigMap -// definition that is used by various functions when creating a ConfigMap. -func (f *FBCRegistryPod) makeBaseConfigMap() *corev1.ConfigMap { - return &corev1.ConfigMap{ - TypeMeta: metav1.TypeMeta{ - APIVersion: corev1.SchemeGroupVersion.String(), - Kind: "ConfigMap", - }, - ObjectMeta: metav1.ObjectMeta{ - Namespace: f.cfg.Namespace, - }, - Data: map[string]string{}, - } + return configMaps, nil } // createOrUpdateConfigMap will create a ConfigMap if it doesn't exist or @@ -452,6 +490,7 @@ func (f *FBCRegistryPod) createOrUpdateConfigMap(cm *corev1.ConfigMap) error { } // update ConfigMap with new FBCContent tempCm.Data = cm.Data + tempCm.BinaryData = cm.BinaryData return f.cfg.Client.Update(context.TODO(), tempCm) }); err != nil { return fmt.Errorf("error updating ConfigMap: %w", err) @@ -459,20 +498,3 @@ func (f *FBCRegistryPod) createOrUpdateConfigMap(cm *corev1.ConfigMap) error { return nil } - -// getContainerCmd uses templating to construct the container command -// and throws error if unable to parse and execute the container command -func (f *FBCRegistryPod) getContainerCmd() (string, error) { - // add the custom dirname template function to the - // template's FuncMap and parse the cmdTemplate - t := template.Must(template.New("cmd").Parse(fbcCmdTemplate)) - - // execute the command by applying the parsed template to command - // and write command output to out - out := &bytes.Buffer{} - if err := t.Execute(out, f); err != nil { - return "", fmt.Errorf("parse container command: %w", err) - } 
- - return out.String(), nil -} diff --git a/internal/olm/operator/registry/fbcindex/fbc_registry_pod_test.go b/internal/olm/operator/registry/fbcindex/fbc_registry_pod_test.go index 94d11de0232..d57f84422a4 100644 --- a/internal/olm/operator/registry/fbcindex/fbc_registry_pod_test.go +++ b/internal/olm/operator/registry/fbcindex/fbc_registry_pod_test.go @@ -15,8 +15,13 @@ package fbcindex import ( + "bytes" + "compress/gzip" "context" "fmt" + "io" + "math/rand" + "regexp" "strings" "testing" "time" @@ -27,7 +32,7 @@ import ( "github.com/operator-framework/operator-sdk/internal/olm/operator" "github.com/operator-framework/operator-sdk/internal/olm/operator/registry/index" corev1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" @@ -61,9 +66,9 @@ var _ = Describe("FBCRegistryPod", func() { cs *v1alpha1.CatalogSource ) - BeforeEach(func() { + JustBeforeEach(func() { cs = &v1alpha1.CatalogSource{ - ObjectMeta: v1.ObjectMeta{ + ObjectMeta: metav1.ObjectMeta{ Name: "test-catalogsource", }, } @@ -85,60 +90,33 @@ var _ = Describe("FBCRegistryPod", func() { }) Context("with valid registry pod values", func() { + It("should create the FBCRegistryPod successfully", func() { expectedPodName := "quay-io-example-example-operator-bundle-0-2-0" Expect(rp).NotTo(BeNil()) Expect(rp.pod.Name).To(Equal(expectedPodName)) Expect(rp.pod.Namespace).To(Equal(rp.cfg.Namespace)) Expect(rp.pod.Spec.Containers[0].Name).To(Equal(defaultContainerName)) - if len(rp.pod.Spec.Containers) > 0 { - if len(rp.pod.Spec.Containers[0].Ports) > 0 { - Expect(rp.pod.Spec.Containers[0].Ports[0].ContainerPort).To(Equal(rp.GRPCPort)) - } - } + Expect(rp.pod.Spec.Containers).Should(HaveLen(1)) + Expect(rp.pod.Spec.Containers[0].Ports).Should(HaveLen(1)) + Expect(rp.pod.Spec.Containers[0].Ports[0].ContainerPort).To(Equal(rp.GRPCPort)) + 
Expect(rp.pod.Spec.Containers[0].Command).Should(HaveLen(3)) + Expect(rp.pod.Spec.Containers[0].Command).Should(ContainElements("sh", "-c", containerCommandFor(rp.FBCIndexRootDir, rp.GRPCPort))) + Expect(rp.pod.Spec.InitContainers).Should(HaveLen(1)) }) It("should create a registry pod when database path is not provided", func() { Expect(rp.FBCIndexRootDir).To(Equal(fmt.Sprintf("/%s-configs", cs.Name))) }) - - It("should return a valid container command for one image", func() { - output, err := rp.getContainerCmd() - Expect(err).To(BeNil()) - Expect(output).Should(Equal(containerCommandFor(rp.FBCIndexRootDir, rp.GRPCPort))) - }) - - It("should return a valid container command for three images", func() { - bundleItems := append(defaultBundleItems, - index.BundleItem{ - ImageTag: "quay.io/example/example-operator-bundle:0.3.0", - AddMode: index.ReplacesBundleAddMode, - }, - index.BundleItem{ - ImageTag: "quay.io/example/example-operator-bundle:1.0.1", - AddMode: index.SemverBundleAddMode, - }, - index.BundleItem{ - ImageTag: "localhost/example-operator-bundle:1.0.1", - AddMode: index.SemverBundleAddMode, - }, - ) - rp2 := FBCRegistryPod{ - GRPCPort: defaultGRPCPort, - BundleItems: bundleItems, - } - output, err := rp2.getContainerCmd() - Expect(err).To(BeNil()) - Expect(output).Should(Equal(containerCommandFor(rp2.FBCIndexRootDir, rp2.GRPCPort))) - }) }) Context("with invalid registry pod values", func() { + It("should error when bundle image is not provided", func() { expectedErr := "bundle image set cannot be empty" rp := &FBCRegistryPod{} err := rp.init(cfg, cs) - Expect(err).NotTo(BeNil()) + Expect(err).To(HaveOccurred()) Expect(err.Error()).Should(ContainSubstring(expectedErr)) }) @@ -159,58 +137,86 @@ var _ = Describe("FBCRegistryPod", func() { cancel() err := rp.checkPodStatus(ctx, mockBadPodCheck) - Expect(err).NotTo(BeNil()) + Expect(err).To(HaveOccurred()) Expect(err.Error()).Should(ContainSubstring(expectedErr)) }) }) - Context("creating a ConfigMap", 
func() { - It("makeBaseConfigMap() should return a basic ConfigMap manifest", func() { - cm := rp.makeBaseConfigMap() + Context("creating a compressed ConfigMap", func() { + It("cmWriter.makeBaseConfigMap() should return a basic ConfigMap manifest", func() { + cm := rp.cmWriter.newConfigMap("test-cm") + Expect(cm.Name).Should(Equal("test-cm")) Expect(cm.GetObjectKind().GroupVersionKind()).Should(Equal(corev1.SchemeGroupVersion.WithKind("ConfigMap"))) Expect(cm.GetNamespace()).Should(Equal(cfg.Namespace)) - Expect(cm.Data).ShouldNot(BeNil()) - Expect(len(cm.Data)).Should(Equal(0)) + Expect(cm.Data).Should(BeNil()) + Expect(cm.BinaryData).ShouldNot(BeNil()) + Expect(cm.BinaryData).Should(BeEmpty()) }) - It("partitionedConfigMaps() should return a single ConfigMap", func() { + It("partitionedConfigMaps() should return a single compressed ConfigMap", func() { rp.FBCContent = testYaml - expectedYaml := "" - for i, yaml := range strings.Split(testYaml, "---")[1:] { - if i != 0 { - expectedYaml += "\n---\n" - } + expectedYaml := strings.TrimPrefix(strings.TrimSpace(testYaml), "---\n") - expectedYaml += yaml - } - cms := rp.partitionedConfigMaps() - Expect(len(cms)).Should(Equal(1)) - Expect(cms[0].Data).Should(HaveKey("extraFBC")) - Expect(cms[0].Data["extraFBC"]).Should(Equal(expectedYaml)) + cms, err := rp.partitionedConfigMaps() + Expect(err).ShouldNot(HaveOccurred()) + Expect(cms).Should(HaveLen(1)) + Expect(cms[0].BinaryData).Should(HaveKey("extraFBC")) + + By("uncompressed the BinaryData") + uncompressed := decompressCM(cms[0]) + Expect(uncompressed).Should(Equal(expectedYaml)) }) - It("partitionedConfigMaps() should return multiple ConfigMaps", func() { - // Create a large yaml manifest - largeYaml := "" - for i := len([]byte(largeYaml)); i < maxConfigMapSize; { - largeYaml += testYaml - i = len([]byte(largeYaml)) + It("partitionedConfigMaps() should return a single compressed ConfigMap for large yaml", func() { + largeYaml := strings.Builder{} + for 
largeYaml.Len() < maxConfigMapSize { + largeYaml.WriteString(testYaml) } + rp.FBCContent = largeYaml.String() + + expectedYaml := strings.TrimPrefix(strings.TrimSpace(largeYaml.String()), "---\n") + expectedYaml = regexp.MustCompile(`\n\n+`).ReplaceAllString(expectedYaml, "\n") + cms, err := rp.partitionedConfigMaps() + Expect(err).ShouldNot(HaveOccurred()) + Expect(cms).Should(HaveLen(1)) + Expect(cms[0].BinaryData).Should(HaveKey("extraFBC")) + + actualBinaryData := cms[0].BinaryData["extraFBC"] + Expect(len(actualBinaryData)).Should(BeNumerically("<", maxConfigMapSize)) + By("uncompress the BinaryData") + uncompressed := decompressCM(cms[0]) + Expect(uncompressed).Should(Equal(expectedYaml)) + }) + + It("partitionedConfigMaps() should return a multiple compressed ConfigMaps for a huge yaml", func() { + // build completely random yamls. This is because gzip relies on duplications, and so repeated text is + // compressed very well, so we'll need a really huge input to create more than one CM. When using random + // input, gzip will create larger output, and we can get to multiple CM with much smaller input. 
+ largeYamlBuilder := strings.Builder{} + for largeYamlBuilder.Len() < maxConfigMapSize*2 { + largeYamlBuilder.WriteString(generateRandYaml()) + } + largeYaml := largeYamlBuilder.String() rp.FBCContent = largeYaml - cms := rp.partitionedConfigMaps() - Expect(len(cms)).Should(Equal(2)) - Expect(cms[0].Data).Should(HaveKey("extraFBC")) - Expect(cms[0].Data["extraFBC"]).ShouldNot(BeEmpty()) - Expect(cms[1].Data).Should(HaveKey("extraFBC")) - Expect(cms[1].Data["extraFBC"]).ShouldNot(BeEmpty()) + expectedYaml := strings.TrimPrefix(strings.TrimSpace(largeYaml), "---\n") + expectedYaml = regexp.MustCompile(`\n\n+`).ReplaceAllString(expectedYaml, "\n") + + cms, err := rp.partitionedConfigMaps() + Expect(err).ShouldNot(HaveOccurred()) + + Expect(cms).Should(HaveLen(2)) + Expect(cms[0].BinaryData).Should(HaveKey("extraFBC")) + Expect(cms[1].BinaryData).Should(HaveKey("extraFBC")) + decompressed1 := decompressCM(cms[1]) + decompressed0 := decompressCM(cms[0]) + Expect(decompressed0 + "\n---\n" + decompressed1).Should(Equal(expectedYaml)) }) - It("createOrUpdateConfigMap() should create the ConfigMap if it does not exist", func() { - cm := rp.makeBaseConfigMap() - cm.SetName("test-cm") - cm.Data["test"] = "hello test world!" + It("createOrUpdateConfigMap() should create the compressed ConfigMap if it does not exist", func() { + cm := rp.cmWriter.newConfigMap("test-cm") + cm.BinaryData["test"] = compress("hello test world!") Expect(rp.createOrUpdateConfigMap(cm)).Should(Succeed()) @@ -219,12 +225,11 @@ var _ = Describe("FBCRegistryPod", func() { Expect(testCm).Should(BeEquivalentTo(cm)) }) - It("createOrUpdateConfigMap() should update the ConfigMap if it already exists", func() { - cm := rp.makeBaseConfigMap() - cm.SetName("test-cm") - cm.Data["test"] = "hello test world!" 
+ It("createOrUpdateConfigMap() should update the compressed ConfigMap if it already exists", func() { + cm := rp.cmWriter.newConfigMap("test-cm") + cm.BinaryData["test"] = compress("hello test world!") Expect(rp.cfg.Client.Create(context.TODO(), cm)).Should(Succeed()) - cm.Data["test"] = "hello changed world!" + cm.BinaryData["test"] = compress("hello changed world!") cm.SetResourceVersion("2") Expect(rp.createOrUpdateConfigMap(cm)).Should(Succeed()) @@ -234,64 +239,90 @@ var _ = Describe("FBCRegistryPod", func() { Expect(testCm).Should(BeEquivalentTo(cm)) }) - It("createConfigMaps() should create a single ConfigMap", func() { - rp.FBCContent = testYaml - expectedYaml := "" - for i, yaml := range strings.Split(testYaml, "---")[1:] { - if i != 0 { - expectedYaml += "\n---\n" - } - - expectedYaml += yaml + It("createOrUpdateConfigMap() should update the uncompressed-old ConfigMap if it already exists", func() { + origCM := &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + APIVersion: corev1.SchemeGroupVersion.String(), + Kind: "ConfigMap", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: rp.cfg.Namespace, + Name: "test-cm", + }, + Data: map[string]string{"test": "hello test world!"}, } + Expect(rp.cfg.Client.Create(context.TODO(), origCM)).Should(Succeed()) + cm := rp.cmWriter.newConfigMap("test-cm") + cm.BinaryData["test"] = compress("hello changed world!") + cm.SetResourceVersion("2") + + Expect(rp.createOrUpdateConfigMap(cm)).Should(Succeed()) + + testCm := &corev1.ConfigMap{} + Expect(rp.cfg.Client.Get(context.TODO(), types.NamespacedName{Namespace: rp.cfg.Namespace, Name: cm.GetName()}, testCm)).Should(Succeed()) + Expect(cm.Data).Should(BeNil()) + Expect(testCm.BinaryData).Should(BeEquivalentTo(cm.BinaryData)) + }) + + It("createConfigMaps() should create a single compressed ConfigMap", func() { + rp.FBCContent = testYaml + + expectedYaml := strings.TrimPrefix(strings.TrimSpace(testYaml), "---\n") expectedName := fmt.Sprintf("%s-configmap-partition-1", 
cs.GetName()) cms, err := rp.createConfigMaps(cs) Expect(err).ShouldNot(HaveOccurred()) - Expect(len(cms)).Should(Equal(1)) + Expect(cms).Should(HaveLen(1)) Expect(cms[0].GetNamespace()).Should(Equal(rp.cfg.Namespace)) Expect(cms[0].GetName()).Should(Equal(expectedName)) - Expect(cms[0].Data).Should(HaveKey("extraFBC")) - Expect(cms[0].Data["extraFBC"]).Should(Equal(expectedYaml)) + Expect(cms[0].Data).Should(BeNil()) + Expect(cms[0].BinaryData).Should(HaveKey("extraFBC")) + uncompressed := decompressCM(cms[0]) + Expect(uncompressed).Should(Equal(expectedYaml)) testCm := &corev1.ConfigMap{} Expect(rp.cfg.Client.Get(context.TODO(), types.NamespacedName{Namespace: rp.cfg.Namespace, Name: expectedName}, testCm)).Should(Succeed()) - Expect(testCm.Data).Should(HaveKey("extraFBC")) - Expect(testCm.Data["extraFBC"]).Should(Equal(expectedYaml)) - Expect(len(testCm.OwnerReferences)).Should(Equal(1)) + Expect(testCm.BinaryData).Should(HaveKey("extraFBC")) + Expect(testCm.Data).Should(BeNil()) + uncompressed = decompressCM(testCm) + Expect(uncompressed).Should(Equal(expectedYaml)) + Expect(testCm.OwnerReferences).Should(HaveLen(1)) }) - It("createConfigMaps() should create multiple ConfigMaps", func() { - largeYaml := "" - for i := len([]byte(largeYaml)); i < maxConfigMapSize; { - largeYaml += testYaml - i = len([]byte(largeYaml)) - } - rp.FBCContent = largeYaml - - cms, err := rp.createConfigMaps(cs) - Expect(err).ShouldNot(HaveOccurred()) - Expect(len(cms)).Should(Equal(2)) - - for i, cm := range cms { - expectedName := fmt.Sprintf("%s-configmap-partition-%d", cs.GetName(), i+1) - Expect(cm.Data).Should(HaveKey("extraFBC")) - Expect(cm.Data["extraFBC"]).ShouldNot(BeEmpty()) - Expect(cm.GetNamespace()).Should(Equal(rp.cfg.Namespace)) - Expect(cm.GetName()).Should(Equal(expectedName)) - - testCm := &corev1.ConfigMap{} - Expect(rp.cfg.Client.Get(context.TODO(), types.NamespacedName{Namespace: rp.cfg.Namespace, Name: expectedName}, testCm)).Should(Succeed()) - 
Expect(testCm.Data).Should(HaveKey("extraFBC")) - Expect(testCm.Data["extraFBC"]).Should(Equal(cm.Data["extraFBC"])) - Expect(len(testCm.OwnerReferences)).Should(Equal(1)) - } + It("should create the compressed FBCRegistryPod successfully", func() { + expectedPodName := "quay-io-example-example-operator-bundle-0-2-0" + Expect(rp).NotTo(BeNil()) + Expect(rp.pod.Name).To(Equal(expectedPodName)) + Expect(rp.pod.Namespace).To(Equal(rp.cfg.Namespace)) + Expect(rp.pod.Spec.Containers[0].Name).To(Equal(defaultContainerName)) + Expect(rp.pod.Spec.Containers).Should(HaveLen(1)) + Expect(rp.pod.Spec.Containers[0].Ports).Should(HaveLen(1)) + Expect(rp.pod.Spec.Containers[0].Ports[0].ContainerPort).To(Equal(rp.GRPCPort)) + Expect(rp.pod.Spec.Containers[0].Command).Should(HaveLen(3)) + Expect(rp.pod.Spec.Containers[0].Command).Should(ContainElements("sh", "-c", containerCommandFor(rp.FBCIndexRootDir, rp.GRPCPort))) + Expect(rp.pod.Spec.InitContainers).Should(HaveLen(1)) + Expect(rp.pod.Spec.InitContainers[0].VolumeMounts).Should(HaveLen(2)) }) }) }) }) +func decompressCM(cm *corev1.ConfigMap) string { + actualBinaryData := cm.BinaryData["extraFBC"] + ExpectWithOffset(1, len(actualBinaryData)).Should(BeNumerically("<", maxConfigMapSize)) + By("uncompress the BinaryData") + compressed := bytes.NewBuffer(actualBinaryData) + reader, err := gzip.NewReader(compressed) + ExpectWithOffset(1, err).ShouldNot(HaveOccurred()) + var uncompressed bytes.Buffer + _, err = io.Copy(&uncompressed, reader) + ExpectWithOffset(1, err).ShouldNot(HaveOccurred()) + ExpectWithOffset(1, reader.Close()).Should(Succeed()) + + return uncompressed.String() +} + // containerCommandFor returns the expected container command for a db path and set of bundle items. 
func containerCommandFor(indexRootDir string, grpcPort int32) string { //nolint:unparam return fmt.Sprintf("opm serve %s -p %d", indexRootDir, grpcPort) @@ -332,3 +363,54 @@ address: postcode: '89393' country: 'French Southern Territories' ` +const charTbl = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_+=*&^%$#@!,.;~/\\|" + +var rnd = rand.New(rand.NewSource(time.Now().UnixMilli())) + +func randField() string { + + fieldNameLength := rnd.Intn(15) + 5 + fieldName := make([]byte, fieldNameLength) + for i := 0; i < fieldNameLength; i++ { + fieldName[i] = charTbl[rnd.Intn('z'-'a'+1)] + } + + // random field name between 5 and 45 + size := rnd.Intn(40) + 5 + + value := make([]byte, size) + for i := 0; i < size; i++ { + value[i] = charTbl[rnd.Intn(len(charTbl))] + } + return fmt.Sprintf("%s: %q\n", fieldName, value) +} + +func generateRandYaml() string { + numLines := rnd.Intn(45) + 5 + + b := strings.Builder{} + b.WriteString("---\n") + for i := 0; i < numLines; i++ { + b.WriteString(randField()) + } + return b.String() +} + +var ( + compressBuff = &bytes.Buffer{} + compressor = gzip.NewWriter(compressBuff) +) + +func compress(s string) []byte { + compressBuff.Reset() + compressor.Reset(compressBuff) + + input := bytes.NewBufferString(s) + _, err := io.Copy(compressor, input) + ExpectWithOffset(1, err).ShouldNot(HaveOccurred()) + + Expect(compressor.Flush()).Should(Succeed()) + Expect(compressor.Close()).Should(Succeed()) + + return compressBuff.Bytes() +} diff --git a/internal/olm/operator/registry/index/registry_pod_test.go b/internal/olm/operator/registry/index/registry_pod_test.go index 4f1f81130dd..9274f5eeebc 100644 --- a/internal/olm/operator/registry/index/registry_pod_test.go +++ b/internal/olm/operator/registry/index/registry_pod_test.go @@ -93,14 +93,14 @@ var _ = Describe("SQLiteRegistryPod", func() { It("should return a valid container command for one image", func() { output, err := rp.getContainerCmd() - Expect(err).To(BeNil()) + 
Expect(err).ToNot(HaveOccurred()) Expect(output).Should(Equal(containerCommandFor(defaultDBPath, defaultBundleItems, false, rp.SkipTLSVerify, false))) }) It("should return a container command with --ca-file", func() { rp.CASecretName = caSecretName output, err := rp.getContainerCmd() - Expect(err).To(BeNil()) + Expect(err).ToNot(HaveOccurred()) Expect(output).Should(Equal(containerCommandFor(defaultDBPath, defaultBundleItems, true, rp.SkipTLSVerify, false))) }) @@ -110,7 +110,7 @@ var _ = Describe("SQLiteRegistryPod", func() { rp.BundleItems = bundles rp.SkipTLSVerify = true output, err := rp.getContainerCmd() - Expect(err).To(BeNil()) + Expect(err).ToNot(HaveOccurred()) Expect(output).Should(Equal(containerCommandFor(defaultDBPath, bundles, false, rp.SkipTLSVerify, false))) } }) @@ -137,20 +137,20 @@ var _ = Describe("SQLiteRegistryPod", func() { SkipTLSVerify: true, } output, err := rp2.getContainerCmd() - Expect(err).To(BeNil()) + Expect(err).ToNot(HaveOccurred()) Expect(output).Should(Equal(containerCommandFor(defaultDBPath, bundleItems, false, rp2.SkipTLSVerify, false))) }) It("should return a valid container command for one image", func() { output, err := rp.getContainerCmd() - Expect(err).To(BeNil()) + Expect(err).ToNot(HaveOccurred()) Expect(output).Should(Equal(containerCommandFor(defaultDBPath, defaultBundleItems, false, false, rp.UseHTTP))) }) It("should return a container command with --ca-file", func() { rp.CASecretName = caSecretName output, err := rp.getContainerCmd() - Expect(err).To(BeNil()) + Expect(err).ToNot(HaveOccurred()) Expect(output).Should(Equal(containerCommandFor(defaultDBPath, defaultBundleItems, true, false, rp.UseHTTP))) }) @@ -160,7 +160,7 @@ var _ = Describe("SQLiteRegistryPod", func() { rp.BundleItems = bundles rp.UseHTTP = true output, err := rp.getContainerCmd() - Expect(err).To(BeNil()) + Expect(err).ToNot(HaveOccurred()) Expect(output).Should(Equal(containerCommandFor(defaultDBPath, bundles, false, false, rp.UseHTTP))) } }) @@ 
-187,7 +187,7 @@ var _ = Describe("SQLiteRegistryPod", func() { UseHTTP: true, } output, err := rp2.getContainerCmd() - Expect(err).To(BeNil()) + Expect(err).ToNot(HaveOccurred()) Expect(output).Should(Equal(containerCommandFor(defaultDBPath, bundleItems, false, false, rp2.UseHTTP))) }) @@ -196,9 +196,7 @@ var _ = Describe("SQLiteRegistryPod", func() { return true, nil }) - err := rp.checkPodStatus(context.Background(), mockGoodPodCheck) - - Expect(err).To(BeNil()) + Expect(rp.checkPodStatus(context.Background(), mockGoodPodCheck)).To(Succeed()) }) It("adds secrets and a service account to the pod", func() { @@ -244,7 +242,7 @@ var _ = Describe("SQLiteRegistryPod", func() { expectedErr := "bundle image set cannot be empty" rp := &SQLiteRegistryPod{} err := rp.init(cfg) - Expect(err).NotTo(BeNil()) + Expect(err).To(HaveOccurred()) Expect(err.Error()).Should(ContainSubstring(expectedErr)) }) @@ -254,7 +252,7 @@ var _ = Describe("SQLiteRegistryPod", func() { BundleItems: []BundleItem{{ImageTag: "quay.io/example/example-operator-bundle:0.2.0", AddMode: "invalid"}}, } err := rp.init(cfg) - Expect(err).NotTo(BeNil()) + Expect(err).To(HaveOccurred()) Expect(err.Error()).Should(ContainSubstring(expectedErr)) }) @@ -275,7 +273,7 @@ var _ = Describe("SQLiteRegistryPod", func() { cancel() err := rp.checkPodStatus(ctx, mockBadPodCheck) - Expect(err).NotTo(BeNil()) + Expect(err).To(HaveOccurred()) Expect(err.Error()).Should(ContainSubstring(expectedErr)) }) }) diff --git a/internal/olm/operator/registry/operator_installer_test.go b/internal/olm/operator/registry/operator_installer_test.go index 236f0485c3c..485cab044f2 100644 --- a/internal/olm/operator/registry/operator_installer_test.go +++ b/internal/olm/operator/registry/operator_installer_test.go @@ -196,7 +196,7 @@ var _ = Describe("OperatorInstaller", func() { Expect(err).ToNot(HaveOccurred()) Expect(ip.Name).To(Equal(name)) Expect(ip.Namespace).To(Equal(namespace)) - Expect(ip.Spec.Approved).To(Equal(true)) + 
Expect(ip.Spec.Approved).To(BeTrue()) }) It("should return an error if the install plan does not exist.", func() { oi.cfg.Client = fake.NewClientBuilder().WithScheme(sch).Build() @@ -356,11 +356,10 @@ var _ = Describe("OperatorInstaller", func() { It("should create one with the given target namespaces", func() { _ = oi.InstallMode.Set(string(v1alpha1.InstallModeTypeSingleNamespace)) oi.InstallMode.TargetNamespaces = []string{"anotherns"} - err := oi.ensureOperatorGroup(context.TODO()) - Expect(err).To(BeNil()) + Expect(oi.ensureOperatorGroup(context.TODO())).To(Succeed()) og, found, err := oi.getOperatorGroup(context.TODO()) - Expect(err).To(BeNil()) + Expect(err).ToNot(HaveOccurred()) Expect(found).To(BeTrue()) Expect(og).ToNot(BeNil()) Expect(og.Name).To(Equal("operator-sdk-og")) @@ -371,34 +370,32 @@ var _ = Describe("OperatorInstaller", func() { _ = oi.InstallMode.Set(string(v1alpha1.InstallModeTypeSingleNamespace)) oi.InstallMode.TargetNamespaces = []string{"testns"} err := oi.ensureOperatorGroup(context.TODO()) - Expect(err).ToNot(BeNil()) + Expect(err).To(HaveOccurred()) Expect(err.Error()).Should(ContainSubstring("use install mode \"OwnNamespace\"")) }) }) Context("given OwnNamespace", func() { It("should create one with the given target namespaces", func() { _ = oi.InstallMode.Set(string(v1alpha1.InstallModeTypeOwnNamespace)) - err := oi.ensureOperatorGroup(context.TODO()) - Expect(err).To(BeNil()) + Expect(oi.ensureOperatorGroup(context.TODO())).To(Succeed()) og, found, err := oi.getOperatorGroup(context.TODO()) - Expect(err).To(BeNil()) + Expect(err).ToNot(HaveOccurred()) Expect(found).To(BeTrue()) Expect(og).ToNot(BeNil()) Expect(og.Name).To(Equal("operator-sdk-og")) Expect(og.Namespace).To(Equal("testns")) - Expect(len(og.Spec.TargetNamespaces)).To(Equal(1)) + Expect(og.Spec.TargetNamespaces).To(HaveLen(1)) }) }) Context("given MultiNamespaces", func() { It("should create one with the given target namespaces", func() { _ = 
oi.InstallMode.Set(string(v1alpha1.InstallModeTypeMultiNamespace)) oi.InstallMode.TargetNamespaces = []string{"anotherns1", "anotherns2"} - err := oi.ensureOperatorGroup(context.TODO()) - Expect(err).To(BeNil()) + Expect(oi.ensureOperatorGroup(context.TODO())).To(Succeed()) og, found, err := oi.getOperatorGroup(context.TODO()) - Expect(err).To(BeNil()) + Expect(err).ToNot(HaveOccurred()) Expect(found).To(BeTrue()) Expect(og).ToNot(BeNil()) Expect(og.Name).To(Equal("operator-sdk-og")) @@ -409,16 +406,15 @@ var _ = Describe("OperatorInstaller", func() { Context("given AllNamespaces", func() { It("should create one with the given target namespaces", func() { _ = oi.InstallMode.Set(string(v1alpha1.InstallModeTypeAllNamespaces)) - err := oi.ensureOperatorGroup(context.TODO()) - Expect(err).To(BeNil()) + Expect(oi.ensureOperatorGroup(context.TODO())).To(Succeed()) og, found, err := oi.getOperatorGroup(context.TODO()) - Expect(err).To(BeNil()) + Expect(err).ToNot(HaveOccurred()) Expect(found).To(BeTrue()) Expect(og).ToNot(BeNil()) Expect(og.Name).To(Equal("operator-sdk-og")) Expect(og.Namespace).To(Equal("testns")) - Expect(len(og.Spec.TargetNamespaces)).To(Equal(0)) + Expect(og.Spec.TargetNamespaces).To(BeEmpty()) }) }) }) @@ -430,11 +426,10 @@ var _ = Describe("OperatorInstaller", func() { It("should return nil for AllNamespaces with empty targets", func() { // context, client, name, ns, targets oog := createOperatorGroupHelper(context.TODO(), client, "existing-og", "testns") - err := oi.ensureOperatorGroup(context.TODO()) - Expect(err).To(BeNil()) + Expect(oi.ensureOperatorGroup(context.TODO())).To(Succeed()) og, found, err := oi.getOperatorGroup(context.TODO()) - Expect(err).To(BeNil()) + Expect(err).ToNot(HaveOccurred()) Expect(found).To(BeTrue()) Expect(og.Name).To(Equal(oog.Name)) Expect(og.Namespace).To(Equal(oog.Namespace)) @@ -444,7 +439,7 @@ var _ = Describe("OperatorInstaller", func() { _ = createOperatorGroupHelper(context.TODO(), client, "existing-og", 
"testns", "incompatiblens") err := oi.ensureOperatorGroup(context.TODO()) - Expect(err).ShouldNot(BeNil()) + Expect(err).Should(HaveOccurred()) Expect(err.Error()).To(ContainSubstring("is not compatible")) }) }) @@ -456,10 +451,10 @@ var _ = Describe("OperatorInstaller", func() { oog := createOperatorGroupHelper(context.TODO(), client, "existing-og", "testns", "testns") err := oi.ensureOperatorGroup(context.TODO()) - Expect(err).To(BeNil()) + Expect(err).ToNot(HaveOccurred()) og, found, err := oi.getOperatorGroup(context.TODO()) - Expect(err).To(BeNil()) + Expect(err).ToNot(HaveOccurred()) Expect(found).To(BeTrue()) Expect(og.Name).To(Equal(oog.Name)) Expect(og.Namespace).To(Equal(oog.Namespace)) @@ -468,7 +463,7 @@ var _ = Describe("OperatorInstaller", func() { _ = createOperatorGroupHelper(context.TODO(), client, "existing-og", "testns", "incompatiblens") err := oi.ensureOperatorGroup(context.TODO()) - Expect(err).ShouldNot(BeNil()) + Expect(err).Should(HaveOccurred()) Expect(err.Error()).To(ContainSubstring("is not compatible")) }) }) @@ -480,11 +475,10 @@ var _ = Describe("OperatorInstaller", func() { oi.InstallMode.TargetNamespaces = []string{"anotherns"} oog := createOperatorGroupHelper(context.TODO(), client, "existing-og", "testns", "anotherns") - err := oi.ensureOperatorGroup(context.TODO()) - Expect(err).To(BeNil()) + Expect(oi.ensureOperatorGroup(context.TODO())).To(Succeed()) og, found, err := oi.getOperatorGroup(context.TODO()) - Expect(err).To(BeNil()) + Expect(err).ToNot(HaveOccurred()) Expect(found).To(BeTrue()) Expect(og.Name).To(Equal(oog.Name)) Expect(og.Namespace).To(Equal(oog.Namespace)) @@ -494,7 +488,7 @@ var _ = Describe("OperatorInstaller", func() { _ = createOperatorGroupHelper(context.TODO(), client, "existing-og", "testns", "testns") err := oi.ensureOperatorGroup(context.TODO()) - Expect(err).ShouldNot(BeNil()) + Expect(err).Should(HaveOccurred()) Expect(err.Error()).To(ContainSubstring("use install mode \"OwnNamespace\"")) }) }) @@ 
-530,7 +524,7 @@ var _ = Describe("OperatorInstaller", func() { Expect(og).ShouldNot(BeNil()) Expect(og.Name).To(Equal(operator.SDKOperatorGroupName)) Expect(og.Namespace).To(Equal("testnamespace")) - Expect(err).To(BeNil()) + Expect(err).ToNot(HaveOccurred()) }) }) @@ -550,15 +544,14 @@ var _ = Describe("OperatorInstaller", func() { } err := oi.isOperatorGroupCompatible(og, oi.InstallMode.TargetNamespaces) - Expect(err).ShouldNot(BeNil()) + Expect(err).Should(HaveOccurred()) Expect(err.Error()).Should(ContainSubstring("is not compatible")) }) It("should return nil if no installmode is empty", func() { // empty install mode oi.InstallMode = operator.InstallMode{} Expect(oi.InstallMode.IsEmpty()).To(BeTrue()) - err := oi.isOperatorGroupCompatible(og, oi.InstallMode.TargetNamespaces) - Expect(err).Should(BeNil()) + Expect(oi.isOperatorGroupCompatible(og, oi.InstallMode.TargetNamespaces)).Should(Succeed()) }) It("should return nil if namespaces match", func() { oi.InstallMode = operator.InstallMode{ @@ -566,8 +559,7 @@ var _ = Describe("OperatorInstaller", func() { TargetNamespaces: []string{"matchingns"}, } aog := createOperatorGroupHelper(context.TODO(), nil, "existing-og", "testns", "matchingns") - err := oi.isOperatorGroupCompatible(aog, oi.InstallMode.TargetNamespaces) - Expect(err).Should(BeNil()) + Expect(oi.isOperatorGroupCompatible(aog, oi.InstallMode.TargetNamespaces)).Should(Succeed()) }) }) @@ -601,7 +593,7 @@ var _ = Describe("OperatorInstaller", func() { grp, found, err := oi.getOperatorGroup(context.TODO()) Expect(grp).To(BeNil()) Expect(found).To(BeFalse()) - Expect(err).Should(BeNil()) + Expect(err).ShouldNot(HaveOccurred()) }) It("should return an error when more than OperatorGroup found", func() { _ = createOperatorGroupHelper(context.TODO(), client, "og1", "atestns") @@ -618,7 +610,7 @@ var _ = Describe("OperatorInstaller", func() { Expect(grp.Name).To(Equal(og.Name)) Expect(grp.Namespace).To(Equal(og.Namespace)) Expect(found).To(BeTrue()) - 
Expect(err).Should(BeNil()) + Expect(err).ShouldNot(HaveOccurred()) }) }) @@ -643,15 +635,15 @@ var _ = Describe("OperatorInstaller", func() { supported.Insert(string(v1alpha1.InstallModeTypeAllNamespaces)) target, err := oi.getTargetNamespaces(supported) Expect(target).To(BeNil()) - Expect(err).To(BeNil()) + Expect(err).ToNot(HaveOccurred()) }) It("should return operator's namespace when OwnNamespace is supported", func() { oi.cfg.Namespace = "test-ns" supported.Insert(string(v1alpha1.InstallModeTypeOwnNamespace)) target, err := oi.getTargetNamespaces(supported) - Expect(len(target)).To(Equal(1)) + Expect(target).To(HaveLen(1)) Expect(target[0]).To(Equal("test-ns")) - Expect(err).To(BeNil()) + Expect(err).ToNot(HaveOccurred()) }) It("should return configured namespace when SingleNamespace is passed in", func() { @@ -662,9 +654,9 @@ var _ = Describe("OperatorInstaller", func() { supported.Insert(string(v1alpha1.InstallModeTypeSingleNamespace)) target, err := oi.getTargetNamespaces(supported) - Expect(len(target)).To(Equal(1)) + Expect(target).To(HaveLen(1)) Expect(target[0]).To(Equal("test-ns")) - Expect(err).To(BeNil()) + Expect(err).ToNot(HaveOccurred()) }) It("should return configured namespace when MultiNamespace is passed in", func() { @@ -675,9 +667,9 @@ var _ = Describe("OperatorInstaller", func() { supported.Insert(string(v1alpha1.InstallModeTypeMultiNamespace)) target, err := oi.getTargetNamespaces(supported) - Expect(len(target)).To(Equal(2)) + Expect(target).To(HaveLen(2)) Expect(target).To(Equal([]string{"test-ns1", "test-ns2"})) - Expect(err).To(BeNil()) + Expect(err).ToNot(HaveOccurred()) }) }) }) diff --git a/internal/registry/labels_test.go b/internal/registry/labels_test.go index 57f80ca00a1..6c8a318bd83 100644 --- a/internal/registry/labels_test.go +++ b/internal/registry/labels_test.go @@ -45,7 +45,7 @@ var _ = Describe("Labels", func() { expPath = defaultPath writeMetadataHelper(fs, expPath, annotationsStringValidV1) metadata, path, err = 
findBundleMetadata(fs, "/bundle") - Expect(err).To(BeNil()) + Expect(err).ToNot(HaveOccurred()) Expect(path).To(Equal(expPath)) Expect(metadata).To(BeEquivalentTo(annotationsValidV1)) }) @@ -53,7 +53,7 @@ var _ = Describe("Labels", func() { expPath = "/bundle/metadata/my-metadata.yaml" writeMetadataHelper(fs, expPath, annotationsStringValidV1) metadata, path, err = findBundleMetadata(fs, "/bundle") - Expect(err).To(BeNil()) + Expect(err).ToNot(HaveOccurred()) Expect(path).To(Equal(expPath)) Expect(metadata).To(BeEquivalentTo(annotationsValidV1)) }) @@ -61,7 +61,7 @@ var _ = Describe("Labels", func() { expPath = "/bundle/my-dir/my-metadata.yaml" writeMetadataHelper(fs, expPath, annotationsStringValidV1) metadata, path, err = findBundleMetadata(fs, "/bundle") - Expect(err).To(BeNil()) + Expect(err).ToNot(HaveOccurred()) Expect(path).To(Equal(expPath)) Expect(metadata).To(BeEquivalentTo(annotationsValidV1)) }) @@ -69,7 +69,7 @@ var _ = Describe("Labels", func() { expPath = "/bundle/my-parent-dir/my-dir/annotations.yaml" writeMetadataHelper(fs, expPath, annotationsStringValidV1) metadata, path, err = findBundleMetadata(fs, "/bundle") - Expect(err).To(BeNil()) + Expect(err).ToNot(HaveOccurred()) Expect(path).To(Equal(expPath)) Expect(metadata).To(BeEquivalentTo(annotationsValidV1)) }) @@ -78,7 +78,7 @@ var _ = Describe("Labels", func() { writeMetadataHelper(fs, expPath, annotationsStringValidV1) writeMetadataHelper(fs, "/bundle/other-metadata/annotations.yaml", annotationsStringValidNoRegLabels) metadata, path, err = findBundleMetadata(fs, "/bundle") - Expect(err).To(BeNil()) + Expect(err).ToNot(HaveOccurred()) Expect(path).To(Equal(expPath)) Expect(metadata).To(BeEquivalentTo(annotationsValidV1)) }) @@ -87,7 +87,7 @@ var _ = Describe("Labels", func() { writeMetadataHelper(fs, expPath, annotationsStringValidV1) writeMetadataHelper(fs, "/bundle/custom2/annotations.yaml", annotationsStringValidNoRegLabels) metadata, path, err = findBundleMetadata(fs, "/bundle") - 
Expect(err).To(BeNil()) + Expect(err).ToNot(HaveOccurred()) Expect(path).To(Equal(expPath)) Expect(metadata).To(BeEquivalentTo(annotationsValidV1)) }) @@ -97,7 +97,7 @@ var _ = Describe("Labels", func() { expPath = defaultPath writeMetadataHelper(fs, defaultPath, annotationsStringValidNoRegLabels) metadata, path, err = findBundleMetadata(fs, "/bundle") - Expect(err).To(BeNil()) + Expect(err).ToNot(HaveOccurred()) Expect(path).To(Equal(expPath)) Expect(metadata).To(BeEquivalentTo(annotationsValidNoRegLabels)) }) diff --git a/internal/scorecard/bundle_test.go b/internal/scorecard/bundle_test.go index 5a4da152d66..df1cf532760 100644 --- a/internal/scorecard/bundle_test.go +++ b/internal/scorecard/bundle_test.go @@ -43,7 +43,7 @@ var _ = Describe("Tarring a bundle", func() { BeforeEach(func() { r = PodTestRunner{} expTarball, err = os.ReadFile(expTarPath) - Expect(err).To(BeNil()) + Expect(err).ToNot(HaveOccurred()) }) Context("with a valid on-disk bundle", func() { @@ -54,9 +54,9 @@ var _ = Describe("Tarring a bundle", func() { It("creates a tarball successfully", func() { r.BundlePath = validBundlePath r.BundleMetadata, _, err = registry.FindBundleMetadata(validBundlePath) - Expect(err).To(BeNil()) + Expect(err).ToNot(HaveOccurred()) tarredBundleData, err := r.getBundleData() - Expect(err).To(BeNil()) + Expect(err).ToNot(HaveOccurred()) cmpTarFilesHelper(expTarball, tarredBundleData) }) }) @@ -64,7 +64,7 @@ var _ = Describe("Tarring a bundle", func() { Context("with an invalid on-disk bundle", func() { It("returns an error", func() { _, err = r.getBundleData() - Expect(err).NotTo(BeNil()) + Expect(err).To(HaveOccurred()) }) }) }) @@ -76,9 +76,9 @@ var _ = Describe("Tarring a bundle", func() { func cmpTarFilesHelper(c1, c2 []byte) { r1, r2 := bytes.NewBuffer(c1), bytes.NewBuffer(c2) set1, err := untarToFileSet(r1) - ExpectWithOffset(1, err).To(BeNil()) + ExpectWithOffset(1, err).ToNot(HaveOccurred()) set2, err := untarToFileSet(r2) - ExpectWithOffset(1, 
err).To(BeNil()) + ExpectWithOffset(1, err).ToNot(HaveOccurred()) for fileName, contents1 := range set1 { contents2, hasFileName := set2[fileName] @@ -87,7 +87,7 @@ func cmpTarFilesHelper(c1, c2 []byte) { "contents of file %s differ in first and second tarballs", fileName) delete(set2, fileName) } - ExpectWithOffset(1, set2).To(HaveLen(0), "second tarball has files not in the first") + ExpectWithOffset(1, set2).To(BeEmpty(), "second tarball has files not in the first") } // untarToFileSet reads a gizpped tarball from r and writes each object's bytes to a set, keyed by header name. diff --git a/internal/scorecard/tests/bundle_test.go b/internal/scorecard/tests/bundle_test.go index 49f45757a2e..75144b54324 100644 --- a/internal/scorecard/tests/bundle_test.go +++ b/internal/scorecard/tests/bundle_test.go @@ -190,7 +190,7 @@ var _ = Describe("Basic and OLM tests", func() { }) result = checkOwnedCSVStatusDescriptor(cr, &csv, result) - Expect(len(result.Suggestions)).To(Equal(1)) + Expect(result.Suggestions).To(HaveLen(1)) Expect(result.State).To(Equal(scapiv1alpha3.PassState)) }) diff --git a/internal/validate/external_test.go b/internal/validate/external_test.go index 5970435dcf7..f894cacb12f 100644 --- a/internal/validate/external_test.go +++ b/internal/validate/external_test.go @@ -46,7 +46,7 @@ var _ = Describe("External", func() { It("should return false", func() { entrypoints, hasExternal := GetExternalValidatorEntrypoints("") Expect(hasExternal).To(BeFalse()) - Expect(entrypoints).To(HaveLen(0)) + Expect(entrypoints).To(BeEmpty()) }) }) @@ -58,7 +58,7 @@ var _ = Describe("External", func() { Expect(err).NotTo(HaveOccurred()) Expect(results).To(HaveLen(1)) Expect(results[0].Name).To(Equal("passes-bundle")) - Expect(results[0].Errors).To(HaveLen(0)) + Expect(results[0].Errors).To(BeEmpty()) }) }) @@ -85,7 +85,7 @@ var _ = Describe("External", func() { results, err := RunExternalValidators(ctx, entrypoints, "foo/bar") Expect(err).To(HaveOccurred()) 
Expect(stderrBuf.String()).To(Equal("validator runtime error")) - Expect(results).To(HaveLen(0)) + Expect(results).To(BeEmpty()) }) }) diff --git a/test/e2e/ansible/cluster_test.go b/test/e2e/ansible/cluster_test.go index 05b6a16105f..d2ceebd219a 100644 --- a/test/e2e/ansible/cluster_test.go +++ b/test/e2e/ansible/cluster_test.go @@ -280,7 +280,7 @@ var _ = Describe("Running ansible projects", func() { Expect(err).NotTo(HaveOccurred()) token, err := base64.StdEncoding.DecodeString(strings.TrimSpace(b64Token)) Expect(err).NotTo(HaveOccurred()) - Expect(len(token)).To(BeNumerically(">", 0)) + Expect(token).ToNot(BeEmpty()) By("creating a curl pod") cmdOpts := []string{ @@ -322,7 +322,7 @@ var _ = Describe("Running ansible projects", func() { fmt.Sprintf("%s-sample", strings.ToLower(tc.Kind)), "-o=jsonpath={..metadata.namespace}") Expect(err).NotTo(HaveOccurred()) - Expect(crNamespace).NotTo(HaveLen(0)) + Expect(crNamespace).NotTo(BeEmpty()) By("ensuring the operator metrics contains a `resource_created_at` metric for the Memcached CR") metricExportedMemcachedCR := fmt.Sprintf("resource_created_at_seconds{group=\"%s\","+ diff --git a/test/e2e/go/cluster_test.go b/test/e2e/go/cluster_test.go index f36381601ba..c444c197807 100644 --- a/test/e2e/go/cluster_test.go +++ b/test/e2e/go/cluster_test.go @@ -137,7 +137,7 @@ var _ = Describe("operator-sdk", func() { Expect(err).NotTo(HaveOccurred()) token, err := base64.StdEncoding.DecodeString(strings.TrimSpace(b64Token)) Expect(err).NotTo(HaveOccurred()) - Expect(len(token)).To(BeNumerically(">", 0)) + Expect(token).ToNot(BeEmpty()) By("creating a curl pod") cmdOpts := []string{ diff --git a/test/e2e/helm/cluster_test.go b/test/e2e/helm/cluster_test.go index a5241457a03..1980731aae2 100644 --- a/test/e2e/helm/cluster_test.go +++ b/test/e2e/helm/cluster_test.go @@ -226,7 +226,7 @@ var _ = Describe("Running Helm projects", func() { Expect(err).NotTo(HaveOccurred()) token, err := 
base64.StdEncoding.DecodeString(strings.TrimSpace(b64Token)) Expect(err).NotTo(HaveOccurred()) - Expect(len(token)).To(BeNumerically(">", 0)) + Expect(token).ToNot(BeEmpty()) By("creating a curl pod") cmdOpts := []string{ @@ -266,7 +266,7 @@ var _ = Describe("Running Helm projects", func() { fmt.Sprintf("%s-sample", strings.ToLower(tc.Kind)), "-o=jsonpath={..metadata.namespace}") Expect(err).NotTo(HaveOccurred()) - Expect(crNamespace).NotTo(HaveLen(0)) + Expect(crNamespace).NotTo(BeEmpty()) By("ensuring the operator metrics contains a `resource_created_at` metric for the CR") metricExportedCR := fmt.Sprintf("resource_created_at_seconds{group=\"%s\","+ diff --git a/testdata/ansible/memcached-operator/Dockerfile b/testdata/ansible/memcached-operator/Dockerfile index 123b05d3472..44cf74870b2 100644 --- a/testdata/ansible/memcached-operator/Dockerfile +++ b/testdata/ansible/memcached-operator/Dockerfile @@ -1,4 +1,4 @@ -FROM quay.io/operator-framework/ansible-operator:v1.28.0 +FROM quay.io/operator-framework/ansible-operator:v1.29.0 COPY requirements.yml ${HOME}/requirements.yml RUN ansible-galaxy collection install -r ${HOME}/requirements.yml \ diff --git a/testdata/ansible/memcached-operator/Makefile b/testdata/ansible/memcached-operator/Makefile index e82d06b0ee4..8ddcd581d38 100644 --- a/testdata/ansible/memcached-operator/Makefile +++ b/testdata/ansible/memcached-operator/Makefile @@ -147,7 +147,7 @@ ifeq (,$(shell which ansible-operator 2>/dev/null)) @{ \ set -e ;\ mkdir -p $(dir $(ANSIBLE_OPERATOR)) ;\ - curl -sSLo $(ANSIBLE_OPERATOR) https://github.com/operator-framework/operator-sdk/releases/download/v1.28.0/ansible-operator_$(OS)_$(ARCH) ;\ + curl -sSLo $(ANSIBLE_OPERATOR) https://github.com/operator-framework/operator-sdk/releases/download/v1.29.0/ansible-operator_$(OS)_$(ARCH) ;\ chmod +x $(ANSIBLE_OPERATOR) ;\ } else diff --git a/testdata/ansible/memcached-operator/bundle/tests/scorecard/config.yaml 
b/testdata/ansible/memcached-operator/bundle/tests/scorecard/config.yaml index c0ffb5abfff..4eac868c3bd 100644 --- a/testdata/ansible/memcached-operator/bundle/tests/scorecard/config.yaml +++ b/testdata/ansible/memcached-operator/bundle/tests/scorecard/config.yaml @@ -8,7 +8,7 @@ stages: - entrypoint: - scorecard-test - basic-check-spec - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: basic test: basic-check-spec-test @@ -18,7 +18,7 @@ stages: - entrypoint: - scorecard-test - olm-bundle-validation - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: olm test: olm-bundle-validation-test @@ -28,7 +28,7 @@ stages: - entrypoint: - scorecard-test - olm-crds-have-validation - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: olm test: olm-crds-have-validation-test @@ -38,7 +38,7 @@ stages: - entrypoint: - scorecard-test - olm-crds-have-resources - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: olm test: olm-crds-have-resources-test @@ -48,7 +48,7 @@ stages: - entrypoint: - scorecard-test - olm-spec-descriptors - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: olm test: olm-spec-descriptors-test @@ -58,7 +58,7 @@ stages: - entrypoint: - scorecard-test - olm-status-descriptors - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: olm test: olm-status-descriptors-test diff --git a/testdata/ansible/memcached-operator/config/scorecard/patches/basic.config.yaml b/testdata/ansible/memcached-operator/config/scorecard/patches/basic.config.yaml index 2f2774442c4..0d79582078c 100644 --- 
a/testdata/ansible/memcached-operator/config/scorecard/patches/basic.config.yaml +++ b/testdata/ansible/memcached-operator/config/scorecard/patches/basic.config.yaml @@ -4,7 +4,7 @@ entrypoint: - scorecard-test - basic-check-spec - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: basic test: basic-check-spec-test diff --git a/testdata/ansible/memcached-operator/config/scorecard/patches/olm.config.yaml b/testdata/ansible/memcached-operator/config/scorecard/patches/olm.config.yaml index ace0a1adfef..59be415b42a 100644 --- a/testdata/ansible/memcached-operator/config/scorecard/patches/olm.config.yaml +++ b/testdata/ansible/memcached-operator/config/scorecard/patches/olm.config.yaml @@ -4,7 +4,7 @@ entrypoint: - scorecard-test - olm-bundle-validation - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: olm test: olm-bundle-validation-test @@ -14,7 +14,7 @@ entrypoint: - scorecard-test - olm-crds-have-validation - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: olm test: olm-crds-have-validation-test @@ -24,7 +24,7 @@ entrypoint: - scorecard-test - olm-crds-have-resources - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: olm test: olm-crds-have-resources-test @@ -34,7 +34,7 @@ entrypoint: - scorecard-test - olm-spec-descriptors - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: olm test: olm-spec-descriptors-test @@ -44,7 +44,7 @@ entrypoint: - scorecard-test - olm-status-descriptors - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: olm test: olm-status-descriptors-test diff --git 
a/testdata/go/v3/memcached-operator/bundle/tests/scorecard/config.yaml b/testdata/go/v3/memcached-operator/bundle/tests/scorecard/config.yaml index c0ffb5abfff..4eac868c3bd 100644 --- a/testdata/go/v3/memcached-operator/bundle/tests/scorecard/config.yaml +++ b/testdata/go/v3/memcached-operator/bundle/tests/scorecard/config.yaml @@ -8,7 +8,7 @@ stages: - entrypoint: - scorecard-test - basic-check-spec - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: basic test: basic-check-spec-test @@ -18,7 +18,7 @@ stages: - entrypoint: - scorecard-test - olm-bundle-validation - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: olm test: olm-bundle-validation-test @@ -28,7 +28,7 @@ stages: - entrypoint: - scorecard-test - olm-crds-have-validation - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: olm test: olm-crds-have-validation-test @@ -38,7 +38,7 @@ stages: - entrypoint: - scorecard-test - olm-crds-have-resources - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: olm test: olm-crds-have-resources-test @@ -48,7 +48,7 @@ stages: - entrypoint: - scorecard-test - olm-spec-descriptors - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: olm test: olm-spec-descriptors-test @@ -58,7 +58,7 @@ stages: - entrypoint: - scorecard-test - olm-status-descriptors - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: olm test: olm-status-descriptors-test diff --git a/testdata/go/v3/memcached-operator/config/scorecard/patches/basic.config.yaml 
b/testdata/go/v3/memcached-operator/config/scorecard/patches/basic.config.yaml index 2f2774442c4..0d79582078c 100644 --- a/testdata/go/v3/memcached-operator/config/scorecard/patches/basic.config.yaml +++ b/testdata/go/v3/memcached-operator/config/scorecard/patches/basic.config.yaml @@ -4,7 +4,7 @@ entrypoint: - scorecard-test - basic-check-spec - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: basic test: basic-check-spec-test diff --git a/testdata/go/v3/memcached-operator/config/scorecard/patches/olm.config.yaml b/testdata/go/v3/memcached-operator/config/scorecard/patches/olm.config.yaml index ace0a1adfef..59be415b42a 100644 --- a/testdata/go/v3/memcached-operator/config/scorecard/patches/olm.config.yaml +++ b/testdata/go/v3/memcached-operator/config/scorecard/patches/olm.config.yaml @@ -4,7 +4,7 @@ entrypoint: - scorecard-test - olm-bundle-validation - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: olm test: olm-bundle-validation-test @@ -14,7 +14,7 @@ entrypoint: - scorecard-test - olm-crds-have-validation - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: olm test: olm-crds-have-validation-test @@ -24,7 +24,7 @@ entrypoint: - scorecard-test - olm-crds-have-resources - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: olm test: olm-crds-have-resources-test @@ -34,7 +34,7 @@ entrypoint: - scorecard-test - olm-spec-descriptors - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: olm test: olm-spec-descriptors-test @@ -44,7 +44,7 @@ entrypoint: - scorecard-test - olm-status-descriptors - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: 
quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: olm test: olm-status-descriptors-test diff --git a/testdata/go/v3/memcached-operator/main.go b/testdata/go/v3/memcached-operator/main.go index 26c8638aebe..a8c1a22305a 100644 --- a/testdata/go/v3/memcached-operator/main.go +++ b/testdata/go/v3/memcached-operator/main.go @@ -90,8 +90,10 @@ func main() { } if err = (&controllers.MemcachedReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + // Add a Recorder to the reconciler. + // This allows the operator author to emit events during reconcilliation. Recorder: mgr.GetEventRecorderFor("memcached-controller"), }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "Memcached") diff --git a/testdata/go/v3/monitoring/memcached-operator/bundle/tests/scorecard/config.yaml b/testdata/go/v3/monitoring/memcached-operator/bundle/tests/scorecard/config.yaml index c0ffb5abfff..4eac868c3bd 100644 --- a/testdata/go/v3/monitoring/memcached-operator/bundle/tests/scorecard/config.yaml +++ b/testdata/go/v3/monitoring/memcached-operator/bundle/tests/scorecard/config.yaml @@ -8,7 +8,7 @@ stages: - entrypoint: - scorecard-test - basic-check-spec - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: basic test: basic-check-spec-test @@ -18,7 +18,7 @@ stages: - entrypoint: - scorecard-test - olm-bundle-validation - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: olm test: olm-bundle-validation-test @@ -28,7 +28,7 @@ stages: - entrypoint: - scorecard-test - olm-crds-have-validation - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: olm test: olm-crds-have-validation-test @@ -38,7 +38,7 @@ stages: - entrypoint: - 
scorecard-test - olm-crds-have-resources - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: olm test: olm-crds-have-resources-test @@ -48,7 +48,7 @@ stages: - entrypoint: - scorecard-test - olm-spec-descriptors - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: olm test: olm-spec-descriptors-test @@ -58,7 +58,7 @@ stages: - entrypoint: - scorecard-test - olm-status-descriptors - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: olm test: olm-status-descriptors-test diff --git a/testdata/go/v3/monitoring/memcached-operator/config/scorecard/patches/basic.config.yaml b/testdata/go/v3/monitoring/memcached-operator/config/scorecard/patches/basic.config.yaml index 2f2774442c4..0d79582078c 100644 --- a/testdata/go/v3/monitoring/memcached-operator/config/scorecard/patches/basic.config.yaml +++ b/testdata/go/v3/monitoring/memcached-operator/config/scorecard/patches/basic.config.yaml @@ -4,7 +4,7 @@ entrypoint: - scorecard-test - basic-check-spec - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: basic test: basic-check-spec-test diff --git a/testdata/go/v3/monitoring/memcached-operator/config/scorecard/patches/olm.config.yaml b/testdata/go/v3/monitoring/memcached-operator/config/scorecard/patches/olm.config.yaml index ace0a1adfef..59be415b42a 100644 --- a/testdata/go/v3/monitoring/memcached-operator/config/scorecard/patches/olm.config.yaml +++ b/testdata/go/v3/monitoring/memcached-operator/config/scorecard/patches/olm.config.yaml @@ -4,7 +4,7 @@ entrypoint: - scorecard-test - olm-bundle-validation - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: olm test: 
olm-bundle-validation-test @@ -14,7 +14,7 @@ entrypoint: - scorecard-test - olm-crds-have-validation - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: olm test: olm-crds-have-validation-test @@ -24,7 +24,7 @@ entrypoint: - scorecard-test - olm-crds-have-resources - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: olm test: olm-crds-have-resources-test @@ -34,7 +34,7 @@ entrypoint: - scorecard-test - olm-spec-descriptors - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: olm test: olm-spec-descriptors-test @@ -44,7 +44,7 @@ entrypoint: - scorecard-test - olm-status-descriptors - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: olm test: olm-status-descriptors-test diff --git a/testdata/go/v3/monitoring/memcached-operator/main.go b/testdata/go/v3/monitoring/memcached-operator/main.go index 24546a6270e..39d35248801 100644 --- a/testdata/go/v3/monitoring/memcached-operator/main.go +++ b/testdata/go/v3/monitoring/memcached-operator/main.go @@ -97,8 +97,10 @@ func main() { } if err = (&controllers.MemcachedReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + // Add a Recorder to the reconciler. + // This allows the operator author to emit events during reconcilliation. 
Recorder: mgr.GetEventRecorderFor("memcached-controller"), }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "Memcached") diff --git a/testdata/go/v4-alpha/memcached-operator/bundle/tests/scorecard/config.yaml b/testdata/go/v4-alpha/memcached-operator/bundle/tests/scorecard/config.yaml index c0ffb5abfff..4eac868c3bd 100644 --- a/testdata/go/v4-alpha/memcached-operator/bundle/tests/scorecard/config.yaml +++ b/testdata/go/v4-alpha/memcached-operator/bundle/tests/scorecard/config.yaml @@ -8,7 +8,7 @@ stages: - entrypoint: - scorecard-test - basic-check-spec - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: basic test: basic-check-spec-test @@ -18,7 +18,7 @@ stages: - entrypoint: - scorecard-test - olm-bundle-validation - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: olm test: olm-bundle-validation-test @@ -28,7 +28,7 @@ stages: - entrypoint: - scorecard-test - olm-crds-have-validation - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: olm test: olm-crds-have-validation-test @@ -38,7 +38,7 @@ stages: - entrypoint: - scorecard-test - olm-crds-have-resources - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: olm test: olm-crds-have-resources-test @@ -48,7 +48,7 @@ stages: - entrypoint: - scorecard-test - olm-spec-descriptors - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: olm test: olm-spec-descriptors-test @@ -58,7 +58,7 @@ stages: - entrypoint: - scorecard-test - olm-status-descriptors - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 
labels: suite: olm test: olm-status-descriptors-test diff --git a/testdata/go/v4-alpha/memcached-operator/cmd/main.go b/testdata/go/v4-alpha/memcached-operator/cmd/main.go index b82cd901ee1..4b8ac664f2f 100644 --- a/testdata/go/v4-alpha/memcached-operator/cmd/main.go +++ b/testdata/go/v4-alpha/memcached-operator/cmd/main.go @@ -90,8 +90,10 @@ func main() { } if err = (&controller.MemcachedReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + // Add a Recorder to the reconciler. + // This allows the operator author to emit events during reconcilliation. Recorder: mgr.GetEventRecorderFor("memcached-controller"), }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "Memcached") diff --git a/testdata/go/v4-alpha/memcached-operator/config/scorecard/patches/basic.config.yaml b/testdata/go/v4-alpha/memcached-operator/config/scorecard/patches/basic.config.yaml index 2f2774442c4..0d79582078c 100644 --- a/testdata/go/v4-alpha/memcached-operator/config/scorecard/patches/basic.config.yaml +++ b/testdata/go/v4-alpha/memcached-operator/config/scorecard/patches/basic.config.yaml @@ -4,7 +4,7 @@ entrypoint: - scorecard-test - basic-check-spec - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: basic test: basic-check-spec-test diff --git a/testdata/go/v4-alpha/memcached-operator/config/scorecard/patches/olm.config.yaml b/testdata/go/v4-alpha/memcached-operator/config/scorecard/patches/olm.config.yaml index ace0a1adfef..59be415b42a 100644 --- a/testdata/go/v4-alpha/memcached-operator/config/scorecard/patches/olm.config.yaml +++ b/testdata/go/v4-alpha/memcached-operator/config/scorecard/patches/olm.config.yaml @@ -4,7 +4,7 @@ entrypoint: - scorecard-test - olm-bundle-validation - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: 
quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: olm test: olm-bundle-validation-test @@ -14,7 +14,7 @@ entrypoint: - scorecard-test - olm-crds-have-validation - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: olm test: olm-crds-have-validation-test @@ -24,7 +24,7 @@ entrypoint: - scorecard-test - olm-crds-have-resources - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: olm test: olm-crds-have-resources-test @@ -34,7 +34,7 @@ entrypoint: - scorecard-test - olm-spec-descriptors - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: olm test: olm-spec-descriptors-test @@ -44,7 +44,7 @@ entrypoint: - scorecard-test - olm-status-descriptors - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: olm test: olm-status-descriptors-test diff --git a/testdata/go/v4-alpha/monitoring/memcached-operator/bundle/tests/scorecard/config.yaml b/testdata/go/v4-alpha/monitoring/memcached-operator/bundle/tests/scorecard/config.yaml index c0ffb5abfff..4eac868c3bd 100644 --- a/testdata/go/v4-alpha/monitoring/memcached-operator/bundle/tests/scorecard/config.yaml +++ b/testdata/go/v4-alpha/monitoring/memcached-operator/bundle/tests/scorecard/config.yaml @@ -8,7 +8,7 @@ stages: - entrypoint: - scorecard-test - basic-check-spec - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: basic test: basic-check-spec-test @@ -18,7 +18,7 @@ stages: - entrypoint: - scorecard-test - olm-bundle-validation - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: olm test: olm-bundle-validation-test @@ -28,7 +28,7 @@ 
stages: - entrypoint: - scorecard-test - olm-crds-have-validation - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: olm test: olm-crds-have-validation-test @@ -38,7 +38,7 @@ stages: - entrypoint: - scorecard-test - olm-crds-have-resources - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: olm test: olm-crds-have-resources-test @@ -48,7 +48,7 @@ stages: - entrypoint: - scorecard-test - olm-spec-descriptors - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: olm test: olm-spec-descriptors-test @@ -58,7 +58,7 @@ stages: - entrypoint: - scorecard-test - olm-status-descriptors - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: olm test: olm-status-descriptors-test diff --git a/testdata/go/v4-alpha/monitoring/memcached-operator/cmd/main.go b/testdata/go/v4-alpha/monitoring/memcached-operator/cmd/main.go index 79b6a5616b4..954f8bb94ca 100644 --- a/testdata/go/v4-alpha/monitoring/memcached-operator/cmd/main.go +++ b/testdata/go/v4-alpha/monitoring/memcached-operator/cmd/main.go @@ -97,8 +97,10 @@ func main() { } if err = (&controller.MemcachedReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + // Add a Recorder to the reconciler. + // This allows the operator author to emit events during reconcilliation. 
Recorder: mgr.GetEventRecorderFor("memcached-controller"), }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "Memcached") diff --git a/testdata/go/v4-alpha/monitoring/memcached-operator/config/scorecard/patches/basic.config.yaml b/testdata/go/v4-alpha/monitoring/memcached-operator/config/scorecard/patches/basic.config.yaml index 2f2774442c4..0d79582078c 100644 --- a/testdata/go/v4-alpha/monitoring/memcached-operator/config/scorecard/patches/basic.config.yaml +++ b/testdata/go/v4-alpha/monitoring/memcached-operator/config/scorecard/patches/basic.config.yaml @@ -4,7 +4,7 @@ entrypoint: - scorecard-test - basic-check-spec - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: basic test: basic-check-spec-test diff --git a/testdata/go/v4-alpha/monitoring/memcached-operator/config/scorecard/patches/olm.config.yaml b/testdata/go/v4-alpha/monitoring/memcached-operator/config/scorecard/patches/olm.config.yaml index ace0a1adfef..59be415b42a 100644 --- a/testdata/go/v4-alpha/monitoring/memcached-operator/config/scorecard/patches/olm.config.yaml +++ b/testdata/go/v4-alpha/monitoring/memcached-operator/config/scorecard/patches/olm.config.yaml @@ -4,7 +4,7 @@ entrypoint: - scorecard-test - olm-bundle-validation - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: olm test: olm-bundle-validation-test @@ -14,7 +14,7 @@ entrypoint: - scorecard-test - olm-crds-have-validation - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: olm test: olm-crds-have-validation-test @@ -24,7 +24,7 @@ entrypoint: - scorecard-test - olm-crds-have-resources - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: olm test: 
olm-crds-have-resources-test @@ -34,7 +34,7 @@ entrypoint: - scorecard-test - olm-spec-descriptors - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: olm test: olm-spec-descriptors-test @@ -44,7 +44,7 @@ entrypoint: - scorecard-test - olm-status-descriptors - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: olm test: olm-status-descriptors-test diff --git a/testdata/helm/memcached-operator/Dockerfile b/testdata/helm/memcached-operator/Dockerfile index 15b34ffcd2e..161558a5b82 100644 --- a/testdata/helm/memcached-operator/Dockerfile +++ b/testdata/helm/memcached-operator/Dockerfile @@ -1,5 +1,5 @@ # Build the manager binary -FROM quay.io/operator-framework/helm-operator:v1.28.0 +FROM quay.io/operator-framework/helm-operator:v1.29.0 ENV HOME=/opt/helm COPY watches.yaml ${HOME}/watches.yaml diff --git a/testdata/helm/memcached-operator/Makefile b/testdata/helm/memcached-operator/Makefile index 3901d8094ad..89a0fc84ec9 100644 --- a/testdata/helm/memcached-operator/Makefile +++ b/testdata/helm/memcached-operator/Makefile @@ -146,7 +146,7 @@ ifeq (,$(shell which helm-operator 2>/dev/null)) @{ \ set -e ;\ mkdir -p $(dir $(HELM_OPERATOR)) ;\ - curl -sSLo $(HELM_OPERATOR) https://github.com/operator-framework/operator-sdk/releases/download/v1.28.0/helm-operator_$(OS)_$(ARCH) ;\ + curl -sSLo $(HELM_OPERATOR) https://github.com/operator-framework/operator-sdk/releases/download/v1.29.0/helm-operator_$(OS)_$(ARCH) ;\ chmod +x $(HELM_OPERATOR) ;\ } else diff --git a/testdata/helm/memcached-operator/bundle/tests/scorecard/config.yaml b/testdata/helm/memcached-operator/bundle/tests/scorecard/config.yaml index c0ffb5abfff..4eac868c3bd 100644 --- a/testdata/helm/memcached-operator/bundle/tests/scorecard/config.yaml +++ b/testdata/helm/memcached-operator/bundle/tests/scorecard/config.yaml @@ -8,7 +8,7 @@ stages: - 
entrypoint: - scorecard-test - basic-check-spec - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: basic test: basic-check-spec-test @@ -18,7 +18,7 @@ stages: - entrypoint: - scorecard-test - olm-bundle-validation - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: olm test: olm-bundle-validation-test @@ -28,7 +28,7 @@ stages: - entrypoint: - scorecard-test - olm-crds-have-validation - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: olm test: olm-crds-have-validation-test @@ -38,7 +38,7 @@ stages: - entrypoint: - scorecard-test - olm-crds-have-resources - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: olm test: olm-crds-have-resources-test @@ -48,7 +48,7 @@ stages: - entrypoint: - scorecard-test - olm-spec-descriptors - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: olm test: olm-spec-descriptors-test @@ -58,7 +58,7 @@ stages: - entrypoint: - scorecard-test - olm-status-descriptors - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: olm test: olm-status-descriptors-test diff --git a/testdata/helm/memcached-operator/config/scorecard/patches/basic.config.yaml b/testdata/helm/memcached-operator/config/scorecard/patches/basic.config.yaml index 2f2774442c4..0d79582078c 100644 --- a/testdata/helm/memcached-operator/config/scorecard/patches/basic.config.yaml +++ b/testdata/helm/memcached-operator/config/scorecard/patches/basic.config.yaml @@ -4,7 +4,7 @@ entrypoint: - scorecard-test - basic-check-spec - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: 
quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: basic test: basic-check-spec-test diff --git a/testdata/helm/memcached-operator/config/scorecard/patches/olm.config.yaml b/testdata/helm/memcached-operator/config/scorecard/patches/olm.config.yaml index ace0a1adfef..59be415b42a 100644 --- a/testdata/helm/memcached-operator/config/scorecard/patches/olm.config.yaml +++ b/testdata/helm/memcached-operator/config/scorecard/patches/olm.config.yaml @@ -4,7 +4,7 @@ entrypoint: - scorecard-test - olm-bundle-validation - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: olm test: olm-bundle-validation-test @@ -14,7 +14,7 @@ entrypoint: - scorecard-test - olm-crds-have-validation - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: olm test: olm-crds-have-validation-test @@ -24,7 +24,7 @@ entrypoint: - scorecard-test - olm-crds-have-resources - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: olm test: olm-crds-have-resources-test @@ -34,7 +34,7 @@ entrypoint: - scorecard-test - olm-spec-descriptors - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: olm test: olm-spec-descriptors-test @@ -44,7 +44,7 @@ entrypoint: - scorecard-test - olm-status-descriptors - image: quay.io/operator-framework/scorecard-test:v1.28.0 + image: quay.io/operator-framework/scorecard-test:v1.29.0 labels: suite: olm test: olm-status-descriptors-test diff --git a/website/config.toml b/website/config.toml index 16dbbf6b679..33d76edbdf0 100644 --- a/website/config.toml +++ b/website/config.toml @@ -107,6 +107,12 @@ url_latest_version = "https://sdk.operatorframework.io" ##RELEASE_ADDME## +[[params.versions]] + version = "v1.29" + url = 
"https://v1-29-x.sdk.operatorframework.io" + kube_version = "1.26.0" + client_go_version = "v0.26.2" + [[params.versions]] version = "v1.28" url = "https://v1-28-x.sdk.operatorframework.io" diff --git a/website/content/en/docs/best-practices/common-recommendation.md b/website/content/en/docs/best-practices/common-recommendation.md index a6eeab4a151..db1030bc027 100644 --- a/website/content/en/docs/best-practices/common-recommendation.md +++ b/website/content/en/docs/best-practices/common-recommendation.md @@ -97,7 +97,7 @@ spec: [operator-best-practices]: /docs/best-practices/best-practices [kb-gkv]: https://book.kubebuilder.io/cronjob-tutorial/gvks.html [operator-pattern]: https://kubernetes.io/docs/concepts/extend-kubernetes/operator/ -[molecule]: https://molecule.readthedocs.io/en/latest/ +[molecule]: https://molecule.readthedocs.io/ [molecule-tests]: /docs/building-operators/ansible/testing-guide [helm-chart-tests]: https://helm.sh/docs/topics/chart_tests/ [envtest]: https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/envtest diff --git a/website/content/en/docs/building-operators/ansible/migration.md b/website/content/en/docs/building-operators/ansible/migration.md index 2f3988b5999..f208b745056 100644 --- a/website/content/en/docs/building-operators/ansible/migration.md +++ b/website/content/en/docs/building-operators/ansible/migration.md @@ -301,7 +301,7 @@ For further steps regarding the deployment of the operator, creation of custom r [kube-auth-proxy]: https://github.com/brancz/kube-rbac-proxy [metrics]: https://book.kubebuilder.io/reference/metrics.html?highlight=metr#metrics [marker]: https://book.kubebuilder.io/reference/markers.html?highlight=markers#marker-syntax -[molecule]: https://molecule.readthedocs.io/en/latest/# +[molecule]: https://molecule.readthedocs.io/ [testing-guide]: /docs/building-operators/ansible/testing-guide [migration-doc]: /docs/upgrading-sdk-version/ [tutorial-deploy]: 
/docs/building-operators/ansible/tutorial/#run-the-operator diff --git a/website/content/en/docs/building-operators/ansible/reference/advanced_options.md b/website/content/en/docs/building-operators/ansible/reference/advanced_options.md index 4f8570ec68c..b81c2893bce 100644 --- a/website/content/en/docs/building-operators/ansible/reference/advanced_options.md +++ b/website/content/en/docs/building-operators/ansible/reference/advanced_options.md @@ -268,7 +268,7 @@ If you want more control over the logs that are outputted, consider using the [Z ## `ansible.sdk.operatorframework.io/reconcile-period` Custom Resource Annotation You can specify the reconcile period for an Ansible Operator by adding the ansible.sdk.operatorframework.io/reconcile-period key to the custom resource annotations. -This feature specifies the maximum interval in which a cluster will get reconciled. If changes are detected in the desired state, the cluster may be reconciled sooner than the specified interval. +This feature specifies the maximum interval in which a cluster will get reconciled, and defaults to 10 hours if not manually set. If changes are detected in the desired state, the cluster may be reconciled sooner than the specified interval. The reconcile period can be specified in the custom resource's annotations in the following manner: diff --git a/website/content/en/docs/building-operators/ansible/reference/watches.md b/website/content/en/docs/building-operators/ansible/reference/watches.md index 0bdd405cb53..79a62aa3968 100644 --- a/website/content/en/docs/building-operators/ansible/reference/watches.md +++ b/website/content/en/docs/building-operators/ansible/reference/watches.md @@ -27,7 +27,7 @@ be monitored for updates and cached. current project directory. * **vars**: This is an arbitrary map of key-value pairs. The contents will be passed as `extra_vars` to the playbook or role specified for this watch. 
-* **reconcilePeriod** (optional): The maximum interval that the operator will wait before beginning another reconcile, even if no watched events are received. When an operator watches many resources, each reconcile can become expensive, and a low value here can actually reduce performance. Typically, this option should only be used in advanced use cases where `watchDependentResources` is set to `False` and when is not possible to use the watch feature. E.g To manage external resources that don’t emit Kubernetes events. The format for the duration string is a sequence of decimal numbers, each with optional fraction and a unit suffix, such as "300ms", "1.5h" or "2h45m". Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". +* **reconcilePeriod** (optional): The maximum interval that the operator will wait before beginning another reconcile, even if no watched events are received. When an operator watches many resources, each reconcile can become expensive, and a low value here can actually reduce performance. Typically, this option should only be used in advanced use cases where `watchDependentResources` is set to `False` and when is not possible to use the watch feature. E.g To manage external resources that don’t emit Kubernetes events. The format for the duration string is a sequence of decimal numbers, each with optional fraction and a unit suffix, such as "300ms", "1.5h" or "2h45m". Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". Defaults to 10 hours. * **manageStatus** (optional): When true (default), the operator will manage the status of the CR generically. Set to false, the status of the CR is managed elsewhere, by the specified role/playbook or in a separate controller. @@ -97,7 +97,7 @@ Some features can be overridden per resource via an annotation on that CR. 
The o | Feature | Yaml Key | Description| Annotation for override | default | Documentation | |---------|----------|------------|-------------------------|---------|---------------| -| Reconcile Period | `reconcilePeriod` | time between reconcile runs for a particular CR | ansible.sdk.operatorframework.io/reconcile-period | | | +| Reconcile Period | `reconcilePeriod` | time between reconcile runs for a particular CR | ansible.sdk.operatorframework.io/reconcile-period | 10h | | | Manage Status | `manageStatus` | Allows the ansible operator to manage the conditions section of each resource's status section. | | true | | | Watching Dependent Resources | `watchDependentResources` | Allows the ansible operator to dynamically watch resources that are created by ansible | | true | [dependent watches](../dependent-watches) | | Watching Cluster-Scoped Resources | `watchClusterScopedResources` | Allows the ansible operator to watch cluster-scoped resources that are created by ansible | | false | | diff --git a/website/content/en/docs/building-operators/ansible/reference/webhooks.md b/website/content/en/docs/building-operators/ansible/reference/webhooks.md index d9dbcad1584..a5bbde32e44 100644 --- a/website/content/en/docs/building-operators/ansible/reference/webhooks.md +++ b/website/content/en/docs/building-operators/ansible/reference/webhooks.md @@ -165,5 +165,5 @@ Ansible-based Operator, you must [admission-controllers]:https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/ -[validating-webhook]:https://v1-21.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#validatingwebhookconfiguration-v1-admissionregistration-k8s-io -[mutating-webhook]:https://v1-21.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#mutatingwebhookconfiguration-v1-admissionregistration-k8s-io 
+[validating-webhook]:https://v1-26.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#validatingwebhookconfiguration-v1-admissionregistration-k8s-io +[mutating-webhook]:https://v1-26.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#mutatingwebhookconfiguration-v1-admissionregistration-k8s-io diff --git a/website/content/en/docs/building-operators/ansible/testing-guide.md b/website/content/en/docs/building-operators/ansible/testing-guide.md index 5f5fff96c77..ef2fcddd829 100644 --- a/website/content/en/docs/building-operators/ansible/testing-guide.md +++ b/website/content/en/docs/building-operators/ansible/testing-guide.md @@ -48,7 +48,7 @@ Our molecule scenarios have the following basic structure: └── verify.yml ``` -- `molecule.yml` is a configuration file for molecule. It defines what driver to use to stand up an environment and the associated configuration, linting rules, and a variety of other configuration options. For full documentation on the options available here, see the [molecule configuration documentation](https://molecule.readthedocs.io/en/latest/configuration) +- `molecule.yml` is a configuration file for molecule. It defines what driver to use to stand up an environment and the associated configuration, linting rules, and a variety of other configuration options. For full documentation on the options available here, see the [molecule configuration documentation](https://molecule.readthedocs.io/configuration/) - `prepare.yml` is an Ansible playbook that is run once during the set up of a scenario. You can put any arbitrary Ansible in this playbook. It is used for one-time configuration of your test environment, for example, creating the cluster-wide `CustomResourceDefinition` that your Operator will watch. 
diff --git a/website/content/en/docs/building-operators/golang/advanced-topics.md b/website/content/en/docs/building-operators/golang/advanced-topics.md index 7d5c6d7f8a7..ef47d4c70a9 100644 --- a/website/content/en/docs/building-operators/golang/advanced-topics.md +++ b/website/content/en/docs/building-operators/golang/advanced-topics.md @@ -113,15 +113,205 @@ func init() { * After adding new import paths to your operator project, run `go mod vendor` if a `vendor/` directory is present in the root of your project directory to fulfill these dependencies. * Your 3rd party resource needs to be added before add the controller in `"Setup all Controllers"`. -### Metrics +### Monitoring and Observability +This section covers how to create custom metrics, [alerts] and [recording rules] for your operator. It focuses on the technical aspects, and demonstrates the implementation by updating the sample [memcached-operator]. -To learn about how metrics work in the Operator SDK read the [metrics section][metrics_doc] of the Kubebuilder documentation. +For more information regarding monitoring best practices, take a look at our docs on [observability-best-practices]. +#### Prerequisites +The following steps are required in order to inspect the operator's custom metrics, alerts and recording rules: +- Install Prometheus and Prometheus Operator. We recommend using [kube-prometheus] in production if you don’t have your own monitoring system. If you are just experimenting, you can only install Prometheus and Prometheus Operator. +- Make sure Prometheus has access to the operator's namespace, by setting the corresponding RBAC rules. + + Example: [prometheus_role.yaml] and [prometheus_role_binding.yaml] + +#### Publishing Custom Metrics +If you wish to publish custom metrics for your operator, this can be easily achieved by using the global registry from `controller-runtime/pkg/metrics`. 
+One way to achieve this is to declare your collectors as global variables, register them using `RegisterMetrics()` and call it in the controller's `init()` function. + +Example custom metric: [MemcachedDeploymentSizeUndesiredCountTotal] + +```go +package monitoring + +import ( + "github.com/prometheus/client_golang/prometheus" + "sigs.k8s.io/controller-runtime/pkg/metrics" +) + +var ( + MemcachedDeploymentSizeUndesiredCountTotal = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "memcached_deployment_size_undesired_count_total", + Help: "Total number of times the deployment size was not as desired.", + }, + ) +) + +// RegisterMetrics will register metrics with the global prometheus registry +func RegisterMetrics() { + metrics.Registry.MustRegister(MemcachedDeploymentSizeUndesiredCountTotal) +} +``` + +- The above example creates a new `Counter` metric. For other metrics' types, see [Prometheus Documentation]. +- For more information regarding operators metrics best-practices, please follow [observability-best-practices]. + +[init() function example]: + +```go +package main + + +import ( + ... + "github.com/example/memcached-operator/monitoring" +) + +func init() { + ... + monitoring.RegisterMetrics() + ... +} +``` + +The next step would be to set the controller's logic according to which we update the metric's value. In this case, the new metric type is `Counter`, thus a valid update operation would be to increment its value. + +[Metric update example]: + +```go +... +size := memcached.Spec.Size +if *found.Spec.Replicas != size { + // Increment MemcachedDeploymentSizeUndesiredCountTotal metric by 1 + monitoring.MemcachedDeploymentSizeUndesiredCountTotal.Inc() +} +... +``` +Different metrics types have different valid operations. For more information, please follow [Prometheus Golang client]. 
+
+#### Publishing Alerts and Recording Rules
+In order to add alerts and recording rules, which are unique to the operator's needs, we'll create a dedicated PrometheusRule object, by using [prometheus-operator API].
+
+[PrometheusRule example]:
+
+```go
+package monitoring
+
+import (
+	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/intstr"
+)
+
+// NewPrometheusRule creates new PrometheusRule(CR) for the operator to have alerts and recording rules
+func NewPrometheusRule(namespace string) *monitoringv1.PrometheusRule {
+	return &monitoringv1.PrometheusRule{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: monitoringv1.SchemeGroupVersion.String(),
+			Kind:       "PrometheusRule",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "memcached-operator-rules",
+			Namespace: namespace,
+		},
+		Spec: *NewPrometheusRuleSpec(),
+	}
+}
+
+// NewPrometheusRuleSpec creates PrometheusRuleSpec for alerts and recording rules
+func NewPrometheusRuleSpec() *monitoringv1.PrometheusRuleSpec {
+	return &monitoringv1.PrometheusRuleSpec{
+		Groups: []monitoringv1.RuleGroup{{
+			Name: "memcached.rules",
+			Rules: []monitoringv1.Rule{
+				createOperatorUpTotalRecordingRule(),
+				createOperatorDownAlertRule(),
+			},
+		}},
+	}
+}
+
+// createOperatorUpTotalRecordingRule creates memcached_operator_up_total recording rule
+func createOperatorUpTotalRecordingRule() monitoringv1.Rule {
+	return monitoringv1.Rule{
+		Record: "memcached_operator_up_total",
+		Expr:   intstr.FromString("sum(up{pod=~'memcached-operator-controller-manager-.*'} or vector(0))"),
+	}
+}
+
+// createOperatorDownAlertRule creates MemcachedOperatorDown alert rule
+func createOperatorDownAlertRule() monitoringv1.Rule {
+	return monitoringv1.Rule{
+		Alert: "MemcachedOperatorDown",
+		Expr:  intstr.FromString("memcached_operator_up_total == 0"),
+		Annotations: map[string]string{
+			"description": "No running memcached-operator pods were detected in the last 5 min.",
+		},
+		For: "5m",
+		Labels: map[string]string{
+			"severity": "critical",
+		},
+	}
+}
+```
+
+Then, we may want to ensure that the new PrometheusRule is being created and reconciled. One way to achieve this is by expanding the existing `Reconcile()` function logic.
+
+[PrometheusRule reconciliation example]:
+
+```go
+func (r *MemcachedReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+	...
+	...
+	// Check if prometheus rule already exists, if not create a new one
+	foundRule := &monitoringv1.PrometheusRule{}
+	err := r.Get(ctx, types.NamespacedName{Name: ruleName, Namespace: namespace}, foundRule)
+	if err != nil && apierrors.IsNotFound(err) {
+		// Define a new prometheus rule
+		prometheusRule := monitoring.NewPrometheusRule(namespace)
+		if err := r.Create(ctx, prometheusRule); err != nil {
+			log.Error(err, "Failed to create prometheus rule")
+			return ctrl.Result{}, nil
+		}
+	}
+
+	if err == nil {
+		// Check if prometheus rule spec was changed, if so set as desired
+		desiredRuleSpec := monitoring.NewPrometheusRuleSpec()
+		if !reflect.DeepEqual(foundRule.Spec.DeepCopy(), desiredRuleSpec) {
+			desiredRuleSpec.DeepCopyInto(&foundRule.Spec)
+			if err := r.Update(ctx, foundRule); err != nil {
+				log.Error(err, "Failed to update prometheus rule")
+				return ctrl.Result{}, nil
+			}
+		}
+	}
+	...
+	...
+}
+```
+
+- Please review the [observability-best-practices] for additional important information regarding alerts and recording rules.
+
+
+#### Alerts Unit Testing
+It is highly recommended to implement unit tests for prometheus rules. For more information, please follow the Prometheus [unit testing documentation]. For examples of unit testing in a Golang operator, see the sample memcached-operator [alerts unit tests].
+ +#### Inspecting the metrics, alerts and recording rules with Prometheus UI +Finally, in order to inspect the exposed metrics and alerts, we need to forward the corresponding port where metrics are published by Prometheus (usually `9090`, which is the default value). This can be done with the following command: +```bash +$ kubectl -n monitoring port-forward svc/prometheus-k8s 9090 +``` + + +where we assume that the prometheus service is available in the `monitoring` namespace. + +Now you can access Prometheus UI using `http://localhost:9090`. For more details on exposing prometheus metrics, please refer [kube-prometheus docs]. ### Handle Cleanup on Deletion Operators may create objects as part of their operational duty. Object accumulation can consume unnecessary resources, slow down the API and clutter the user interface. As such it is important for operators to keep good hygiene and to clean up resources when they are not needed. Here are a few common scenarios. - + #### Internal Resources A typical example of correct resource cleanup is the [Jobs][jobs] implementation. When a Job is created, one or multiple Pods are created as child resources. When a Job is deleted, the associated Pods are deleted as well. This is a very common pattern easily achieved by setting an owner reference from the parent (Job) to the child (Pod) object. 
Here is a code snippet for doing so, where "r" is the reconcilier and "ctrl" the controller-runtime library: @@ -311,3 +501,21 @@ Authors may decide to distribute their bundles for various architectures: x86_64 [apimachinery_condition]: https://github.com/kubernetes/apimachinery/blob/d4f471b82f0a17cda946aeba446770563f92114d/pkg/apis/meta/v1/types.go#L1368 [helpers-conditions]: https://github.com/kubernetes/apimachinery/blob/master/pkg/api/meta/conditions.go [multi_arch]:/docs/advanced-topics/multi-arch +[observability-best-practices]:https://sdk.operatorframework.io/docs/best-practices/observability-best-practices/ +[alerts]:https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/ +[recording rules]:https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/ +[prometheus_role.yaml]:https://github.com/operator-framework/operator-sdk/blob/master/testdata/go/v4-alpha/monitoring/memcached-operator/config/rbac/prometheus_role.yaml +[prometheus_role_binding.yaml]:https://github.com/operator-framework/operator-sdk/blob/master/testdata/go/v4-alpha/monitoring/memcached-operator/config/rbac/prometheus_role_binding.yaml +[MemcachedDeploymentSizeUndesiredCountTotal]:https://github.com/operator-framework/operator-sdk/blob/master/testdata/go/v4-alpha/monitoring/memcached-operator/monitoring/metrics.go +[init() function example]:https://github.com/operator-framework/operator-sdk/blob/master/testdata/go/v4-alpha/monitoring/memcached-operator/cmd/main.go +[Metric update example]:https://github.com/operator-framework/operator-sdk/blob/master/testdata/go/v4-alpha/monitoring/memcached-operator/internal/controller/memcached_controller.go +[Prometheus Documentation]:https://prometheus.io/docs/concepts/metric_types/ +[Prometheus Golang client]:https://pkg.go.dev/github.com/prometheus/client_golang/prometheus +[kube-prometheus]:https://github.com/prometheus-operator/kube-prometheus 
+[memcached-operator]:https://github.com/operator-framework/operator-sdk/tree/master/testdata/go/v4-alpha/monitoring/memcached-operator +[prometheus-operator API]:https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md +[PrometheusRule example]:https://github.com/operator-framework/operator-sdk/blob/master/testdata/go/v4-alpha/monitoring/memcached-operator/monitoring/alerts.go +[PrometheusRule reconciliation example]:https://github.com/operator-framework/operator-sdk/blob/master/testdata/go/v4-alpha/monitoring/memcached-operator/internal/controller/memcached_controller.go +[unit testing documentation]:https://prometheus.io/docs/prometheus/latest/configuration/unit_testing_rules/ +[alerts unit tests]:https://github.com/operator-framework/operator-sdk/tree/master/testdata/go/v4-alpha/monitoring/memcached-operator/monitoring/prom-rule-ci +[kube-prometheus docs]:https://github.com/prometheus-operator/kube-prometheus/blob/main/docs/access-ui.md#prometheus \ No newline at end of file diff --git a/website/content/en/docs/building-operators/golang/testing.md b/website/content/en/docs/building-operators/golang/testing.md index 3d7b2d1c79b..e034359a965 100644 --- a/website/content/en/docs/building-operators/golang/testing.md +++ b/website/content/en/docs/building-operators/golang/testing.md @@ -53,7 +53,7 @@ To implement application-specific tests, the SDK's test harness, [scorecard][sco [gomega]: https://onsi.github.io/gomega/ [kuttl]: https://kuttl.dev/ [sample]: https://github.com/operator-framework/operator-sdk/tree/master/testdata/go/v3/memcached-operator -[molecule]: https://molecule.readthedocs.io/en/latest/ +[molecule]: https://molecule.readthedocs.io/ [molecule-tests]: /docs/building-operators/ansible/testing-guide [helm-chart-tests]: https://helm.sh/docs/topics/chart_tests/ [go-legacy-shell]: https://github.com/operator-framework/operator-sdk/blob/v1.0.0/hack/tests/e2e-go.sh diff --git 
a/website/content/en/docs/building-operators/golang/tutorial.md b/website/content/en/docs/building-operators/golang/tutorial.md index 4e8c5c0c0ff..1f3d6e3066c 100644 --- a/website/content/en/docs/building-operators/golang/tutorial.md +++ b/website/content/en/docs/building-operators/golang/tutorial.md @@ -190,6 +190,23 @@ For this example replace the generated controller file `controllers/memcached_co **Note**: The next two subsections explain how the controller watches resources and how the reconcile loop is triggered. If you'd like to skip this section, head to the [deploy](#run-the-operator) section to see how to run the operator. +### Setup a Recorder + +First, add a recorder when you initialize the Memcached reconciler in `main.go`. + +```Go +if err = (&controllers.MemcachedReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Recorder: mgr.GetEventRecorderFor("memcached-controller"), +}).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "Memcached") + os.Exit(1) +} +``` + +This recorder will be used within the reconcile method of the controller to emit events. + ### Resources watched by the Controller The `SetupWithManager()` function in `controllers/memcached_controller.go` specifies how the controller is built to watch a CR and other resources that are owned and managed by that controller. @@ -490,56 +507,56 @@ Next, check out the following: 1. The [advanced topics][advanced-topics] doc for more use cases and under-the-hood details. 
-[legacy-quickstart-doc]:https://v0-19-x.sdk.operatorframework.io/docs/golang/legacy/quickstart/ -[migration-guide]:/docs/building-operators/golang/migration -[install-guide]:/docs/building-operators/golang/installation -[image-reg-config]:/docs/olm-integration/cli-overview#private-bundle-and-catalog-image-registries -[enqueue_requests_from_map_func]: https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/handler#EnqueueRequestsFromMapFunc -[event_handler_godocs]: https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/handler#hdr-EventHandlers -[event_filtering]:/docs/building-operators/golang/references/event-filtering/ -[controller_options]: https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/controller#Options -[controller_godocs]: https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/controller -[operator_scope]:/docs/building-operators/golang/operator-scope/ -[kubebuilder_layout_doc]:https://book.kubebuilder.io/cronjob-tutorial/basic-project.html -[go_mod_wiki]: https://github.com/golang/go/wiki/Modules -[doc_client_api]:/docs/building-operators/golang/references/client/ -[manager_go_doc]: https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/manager#Manager -[request-go-doc]: https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/reconcile#Request -[result_go_doc]: https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/reconcile#Result -[multi-namespaced-cache-builder]: https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/cache#MultiNamespacedCacheBuilder -[kubebuilder_entrypoint_doc]: https://book.kubebuilder.io/cronjob-tutorial/empty-main.html +[API-groups]:https://kubernetes.io/docs/concepts/overview/kubernetes-api/#api-groups +[activate_modules]: https://github.com/golang/go/wiki/Modules#how-to-install-and-activate-module-support +[advanced-topics]: /docs/building-operators/golang/advanced-topics/ [api_terms_doc]: https://book.kubebuilder.io/cronjob-tutorial/gvks.html -[kb_controller_doc]: 
https://book.kubebuilder.io/cronjob-tutorial/controller-overview.html -[kb_api_doc]: https://book.kubebuilder.io/cronjob-tutorial/new-api.html +[builder_godocs]: https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/builder#example-Builder +[conditionals]: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties +[controller-runtime]: https://github.com/kubernetes-sigs/controller-runtime +[controller_godocs]: https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/controller +[controller_options]: https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/controller#Options [controller_tools]: https://sigs.k8s.io/controller-tools -[doc-validation-schema]: https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#specifying-a-structural-schema -[generating-crd]: https://book.kubebuilder.io/reference/generating-crd.html -[markers]: https://book.kubebuilder.io/reference/markers.html [crd-markers]: https://book.kubebuilder.io/reference/markers/crd-validation.html -[memcached_controller]: https://github.com/operator-framework/operator-sdk/blob/latest/testdata/go/v3/memcached-operator/controllers/memcached_controller.go -[builder_godocs]: https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/builder#example-Builder -[activate_modules]: https://github.com/golang/go/wiki/Modules#how-to-install-and-activate-module-support -[advanced-topics]: /docs/building-operators/golang/advanced-topics/ [create_a_webhook]: /docs/building-operators/golang/webhook -[status_marker]: https://book.kubebuilder.io/reference/generating-crd.html#status -[status_subresource]: https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#status-subresource -[API-groups]:https://kubernetes.io/docs/concepts/overview/kubernetes-api/#api-groups -[legacy_CLI]:https://v0-19-x.sdk.operatorframework.io/docs/cli/ -[role-based-access-control]: 
https://cloud.google.com/kubernetes-engine/docs/how-to/role-based-access-control#iam-rolebinding-bootstrap -[multigroup-kubebuilder-doc]: https://book.kubebuilder.io/migration/multi-group.html +[deploy-image-plugin-doc]: https://master.book.kubebuilder.io/plugins/deploy-image-plugin-v1-alpha.html [doc-bundle]:https://github.com/operator-framework/operator-registry/blob/v1.16.1/docs/design/operator-bundle.md#operator-bundle -[tutorial-bundle]:/docs/olm-integration/tutorial-bundle -[quickstart-bundle]:/docs/olm-integration/quickstart-bundle [doc-olm]:/docs/olm-integration/tutorial-bundle/#enabling-olm -[conditionals]: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties +[doc-validation-schema]: https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#specifying-a-structural-schema +[doc_client_api]:/docs/building-operators/golang/references/client/ +[enqueue_requests_from_map_func]: https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/handler#EnqueueRequestsFromMapFunc +[event_filtering]:/docs/building-operators/golang/references/event-filtering/ +[event_handler_godocs]: https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/handler#hdr-EventHandlers +[generating-crd]: https://book.kubebuilder.io/reference/generating-crd.html +[go_mod_wiki]: https://github.com/golang/go/wiki/Modules +[image-reg-config]:/docs/olm-integration/cli-overview#private-bundle-and-catalog-image-registries +[install-guide]:/docs/building-operators/golang/installation +[k8s-doc-deleting-cascade]: https://kubernetes.io/docs/concepts/architecture/garbage-collection/#cascading-deletion +[k8s-doc-owner-ref]: https://kubernetes.io/docs/concepts/overview/working-with-objects/owners-dependents/ +[kb-doc-gkvs]: https://book.kubebuilder.io/cronjob-tutorial/gvks.html +[kb_api_doc]: https://book.kubebuilder.io/cronjob-tutorial/new-api.html +[kb_controller_doc]: 
https://book.kubebuilder.io/cronjob-tutorial/controller-overview.html +[kubebuilder_entrypoint_doc]: https://book.kubebuilder.io/cronjob-tutorial/empty-main.html +[kubebuilder_layout_doc]:https://book.kubebuilder.io/cronjob-tutorial/basic-project.html [kubernetes-extend-api]: https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/ -[reconcile-godoc]: https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/reconcile -[rbac-k8s-doc]: https://kubernetes.io/docs/reference/access-authn-authz/rbac/ +[legacy-quickstart-doc]:https://v0-19-x.sdk.operatorframework.io/docs/golang/legacy/quickstart/ +[legacy_CLI]:https://v0-19-x.sdk.operatorframework.io/docs/cli +[manager_go_doc]: https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/manager#Manager +[markers]: https://book.kubebuilder.io/reference/markers.html +[memcached_controller]: https://github.com/operator-framework/operator-sdk/blob/latest/testdata/go/v3/memcached-operator/controllers/memcached_controller.go +[migration-guide]:/docs/building-operators/golang/migration +[multi-namespaced-cache-builder]: https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/cache#MultiNamespacedCacheBuilder +[multigroup-kubebuilder-doc]: https://book.kubebuilder.io/migration/multi-group.html [olm-integration]: /docs/olm-integration [openapi-validation]: /docs/building-operators/golang/references/openapi-validation -[controller-runtime]: https://github.com/kubernetes-sigs/controller-runtime -[kb-doc-gkvs]: https://book.kubebuilder.io/cronjob-tutorial/gvks.html +[operator_scope]:/docs/building-operators/golang/operator-scope/ +[quickstart-bundle]:/docs/olm-integration/quickstart-bundle +[rbac-k8s-doc]: https://kubernetes.io/docs/reference/access-authn-authz/rbac/ [rbac_markers]: https://book.kubebuilder.io/reference/markers/rbac.html -[k8s-doc-owner-ref]: https://kubernetes.io/docs/concepts/overview/working-with-objects/owners-dependents/ -[k8s-doc-deleting-cascade]: 
https://kubernetes.io/docs/concepts/architecture/garbage-collection/#cascading-deletion -[deploy-image-plugin-doc]: https://master.book.kubebuilder.io/plugins/deploy-image-plugin-v1-alpha.html +[reconcile-godoc]: https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/reconcile +[request-go-doc]: https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/reconcile#Request +[result_go_doc]: https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/reconcile#Result +[role-based-access-control]: https://cloud.google.com/kubernetes-engine/docs/how-to/role-based-access-control#iam-rolebinding-bootstrap +[status_marker]: https://book.kubebuilder.io/reference/generating-crd.html#status +[status_subresource]: https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#status-subresource +[tutorial-bundle]:/docs/olm-integration/tutorial-bundle diff --git a/website/content/en/docs/cli/operator-sdk_scorecard.md b/website/content/en/docs/cli/operator-sdk_scorecard.md index 5b5e85ea0eb..eb4cc2e655e 100644 --- a/website/content/en/docs/cli/operator-sdk_scorecard.md +++ b/website/content/en/docs/cli/operator-sdk_scorecard.md @@ -28,9 +28,9 @@ operator-sdk scorecard [flags] -l, --selector string label selector to determine which tests are run -s, --service-account string Service account to use for tests (default "default") -x, --skip-cleanup Disable resource cleanup after tests are run - -b, --storage-image string Storage image to be used by the Scorecard pod (default "quay.io/operator-framework/scorecard-storage:latest") + -b, --storage-image string Storage image to be used by the Scorecard pod (default "quay.io/operator-framework/scorecard-storage@sha256:a3bfda71281393c7794cabdd39c563fb050d3020fd0b642ea164646bdd39a0e2") -t, --test-output string Test output directory. 
(default "test-output") - -u, --untar-image string Untar image to be used by the Scorecard pod (default "quay.io/operator-framework/scorecard-untar:latest") + -u, --untar-image string Untar image to be used by the Scorecard pod (default "quay.io/operator-framework/scorecard-untar@sha256:2e728c5e67a7f4dec0df157a322dd5671212e8ae60f69137463bd4fdfbff8747") -w, --wait-time duration seconds to wait for tests to complete. Example: 35s (default 30s) ``` diff --git a/website/content/en/docs/contribution-guidelines/developer-guide.md b/website/content/en/docs/contribution-guidelines/developer-guide.md index 70d624587ba..cb47da034bc 100644 --- a/website/content/en/docs/contribution-guidelines/developer-guide.md +++ b/website/content/en/docs/contribution-guidelines/developer-guide.md @@ -9,7 +9,7 @@ weight: 1 ### Prerequisites - [git][git-tool] -- [go][go-tool] version 1.18 +- [go][go-tool] version 1.19 ### Download Operator SDK diff --git a/website/content/en/docs/contribution-guidelines/releasing.md b/website/content/en/docs/contribution-guidelines/releasing.md index 6fc531210ac..d99123b9334 100644 --- a/website/content/en/docs/contribution-guidelines/releasing.md +++ b/website/content/en/docs/contribution-guidelines/releasing.md @@ -7,8 +7,9 @@ weight: 4 These steps describe how to conduct a release of the operator-sdk repo using example versions. Replace these versions with the current and new version you are releasing, respectively. 
-Table of contents: +## Table of Contents: +- [Prerequisites](#prerequisites) - [Major and minor releases](#major-and-minor-releases) - [Patch releases](#patch-releases) - [`scorecard-test-kuttl` image releases](#scorecard-test-kuttl-image-releases) @@ -16,172 +17,200 @@ Table of contents: ## Prerequisites -- [`git`](https://git-scm.com/downloads) 2.2+ -- [`make`](https://www.gnu.org/software/make/) 4.2+ -- [`sed`](https://www.gnu.org/software/sed/) 4.3+ +The following tools and permissions are needed to conduct a release of the operator-sdk repo. -##### MacOS users +### Tools -Install GNU `sed` and `make` which may not be by default: +- [`git`](https://git-scm.com/downloads): version 2.2+ +- [`make`](https://www.gnu.org/software/make/): version 4.2+ +- [`sed`](https://www.gnu.org/software/sed/): version 4.3+ -```sh -brew install gnu-sed make -``` +### Permissions -Verify that the version of `make` is higher than 4.2 using command `make --version`. +- Must be a [Netlify admin][doc-owners] +- Must be an admin on the [operator-sdk repo](https://github.com/operator-framework/operator-sdk/settings/access) -Add the gnubin directory to your PATH from your `~/.bashrc`: +### Setting Up Tools for MacOS Users -```sh -echo 'export PATH="/usr/local/opt/make/libexec/gnubin:$PATH"' >> ~/.bashrc -``` +To install the prerequisite tools on MacOS, complete the following steps: -Verify that the version of `sed` is higher than 4.3 using command `gnu-sed --version`. +1. Install GNU `sed` and `make`, which may not be installed by default: -Add the `gnubin` directory to your PATH from your `~/.bashrc`: + - ```sh + brew install gnu-sed make + ``` -```sh -echo 'export PATH="/usr/local/opt/gnu-sed/libexec/gnubin:$PATH"' >> ~/.bashrc -``` - -## Major and Minor releases - -We will use the `v1.3.0` release version in this example. +1. Verify that the version of `make` is higher than 4.2 using command `make --version`. -### Before starting +1. 
Add the gnubin directory for `make` to your PATH from your `~/.bashrc` +to allow you to use `gmake` as `make`: -1. **Before creating a new branch**, a [Netlify admin][doc-owners] must add `v1.13.x` - to [Branch - Deploys](https://app.netlify.com/sites/operator-sdk/settings/deploys#branches). - This will watch for this branch when there are changes on Github - (creating the branch, or adding a commit). -1. **Before creating a new branch**, kick off a new build of the [ansible-operator-base - image](https://quay.io/repository/operator-framework/ansible-operator-base) - by running the - [deploy-manual](https://github.com/operator-framework/operator-sdk/actions/workflows/deploy-manual.yml) - GitHub action. After the image is built, check the security scan - results under `Child Manifests` and merge the autogenerated PRs. -1. A release branch must be created. If you have the proper permissions, - you can do this by running the following, assuming the upstream SDK - is the `upstream` remote repo: - ```sh - git checkout master - git pull - git checkout -b v1.3.x - git push -u upstream v1.3.x - ``` -1. Make sure that the list of supported OLM versions stated in the [Overview][overview] section of SDK docs is updated. If a new version of OLM needs to be officially supported, follow the steps in [updating OLM bindata](#updating-olm-bindata) section. - -### 0. Lock the `master` branch + - ```sh + echo 'export PATH="/usr/local/opt/make/libexec/gnubin:$PATH"' >> ~/.bashrc + ``` -1. Lock down the `master` branch to prevent further commits before the release completes: - 1. Go to `Settings -> Branches` in the SDK repo. - 1. Under `Branch protection rules`, click `Edit` on the `master` branch rule. - 1. In section `Protect matching branches` of the `Rule settings` box, increase the number of required approving reviewers to 6. +1. Verify that the version of `sed` is higher than 4.3 using command `gnu-sed --version`. -### 1. Create and push a release commit +1. 
Add the gnubin directory for `gnu-sed` to your PATH from your `~/.bashrc` +to allow you to use `gnu-sed` as `sed`: -Create a new branch to push the release commit: + - ```sh + echo 'export PATH="/usr/local/opt/gnu-sed/libexec/gnubin:$PATH"' >> ~/.bashrc + ``` -```sh -export RELEASE_VERSION=v1.3.0 -git checkout master -git pull -git checkout -b release-$RELEASE_VERSION -``` +## Major and Minor Releases -Update the top-level [Makefile] variable `IMAGE_VERSION` -to the upcoming release tag `v1.3.0`. This variable ensures sample projects have been tagged -correctly prior to the release commit. - ```sh - sed -i -E 's/(IMAGE_VERSION = ).+/\1v1\.3\.0/g' Makefile - ``` - For MAC users command will be little different. - ```sh - gsed -i -E 's/(IMAGE_VERSION = ).+/\1v1\.3\.0/g' Makefile - ``` -Run the pre-release `make` target: +We will use the `v1.3.0` release version in this example. -```sh -make prerelease -``` +**Be sure to substitute +the version you are releasing into the provided commands.** -The following changes should be present: +To perform a major or minor release, you must perform the following actions: -- `Makefile`: IMAGE_VERSION should be modified to the upcoming release tag. (This variable ensures sampleprojects have been tagged correctly prior to the release commit.) -- `changelog/generated/v1.3.0.md`: commit changes (created by changelog generation). -- `changelog/fragments/*`: commit deleted fragment files (deleted by changelog generation). -- `website/content/en/docs/upgrading-sdk-version/v1.3.0.md`: commit changes (created by changelog generation). -- `website/config.toml`: commit changes (modified by release script). -- `testdata/*`: Generated sample code. 
+- Ensure a new Netlify branch is created and a new ansible-operator-base image is built +- Create a release branch and lock down the master branch +- Create and merge a PR for the release branch +- Unlock the master branch and push a release tag to it +- Perform some clean up actions and announce the new release to the community -Commit these changes and push to your remote (assuming your remote is named `origin`): +### Procedure -```sh -git add Makefile changelog website testdata -git commit -m "Release $RELEASE_VERSION" -git push -u origin release-$RELEASE_VERSION -``` +1. **Before creating a new release branch**, it is imperative to perform the following initial setup steps: + 1. In the [Branches and deploy contexts](https://app.netlify.com/sites/operator-sdk/settings/deploys#branches) + pane in Netlify, click into the Additional branches list section and add `v1.13.x`. + - This will watch the branch when there are changes on Github (creating the branch, or adding a commit). + - NOTE: You must be a [Netlify admin][doc-owners] in order to edit the branches list. + 1. Kick off a new build of the [ansible-operator-base + image](https://quay.io/repository/operator-framework/ansible-operator-base) + by running the + [deploy-manual](https://github.com/operator-framework/operator-sdk/actions/workflows/deploy-manual.yml) + GitHub action on master. + 1. After the image is built, find the image in [quay](https://quay.io/repository/operator-framework/ansible-operator-base?tab=tags) and check the security scan results under `Child Manifests` to see if there are any errors that need to be addressed. + 1. Once the security scan results have been checked and addressed as necessary, merge the autogenerated PRs created by the Github action. +1. 
Create a release branch by running the following, assuming the upstream SDK repo is the `upstream` remote on your machine:
+
+   - ```sh
+     git checkout master
+     git fetch upstream master
+     git pull upstream master
+     git checkout -b v1.3.x
+     git push upstream v1.3.x
+     ```
+
+1. Make sure that the list of supported OLM versions is up to date:
+   1. Identify if a new version of OLM needs to be officially supported by ensuring that the latest three releases listed on the [OLM release page](https://github.com/operator-framework/operator-lifecycle-manager/releases) are all listed as supported in the [Overview][overview] section of the SDK docs.
+   1. If a new version of OLM needs to be added and an old version removed, follow the steps in the [updating OLM bindata](#updating-olm-bindata) section before moving onto the next step.
 
-### 2. Create and merge a new PR
+1. Lock down the `master` branch to prevent further commits before the release completes:
+   1. Go to `Settings -> Branches` in the SDK repo.
+   1. Under `Branch protection rules`, click `Edit` on the `master` branch rule.
+   1. In section `Protect matching branches` of the `Rule settings` box, increase the number of required approving reviewers to 6.
+   1. Scroll down to save your changes to protect the `master` branch.
+
+1. Create and push a release commit
+   1. Create a new branch to push the release commit:
+
+   - ```sh
+     export RELEASE_VERSION=v1.3.0
+     git checkout master
+     git pull upstream master
+     git checkout -b release-$RELEASE_VERSION
+     ```
+
+   1. Update the top-level [Makefile] variable `IMAGE_VERSION`
+to the upcoming release tag `v1.3.0`. This variable ensures sample projects have been tagged
+correctly prior to the release commit.
 
-Create and merge a new PR for the commit created in step 1. 
You can force-merge your PR to the locked-down `master` + - ```sh + sed -i -E 's/(IMAGE_VERSION = ).+/\1v1\.3\.0/g' Makefile + ``` + + If this command fails on MacOS with a warning "sed is not found", follow the step 5 in the [Setting Up Tools for MacOS Users](#setting-up-tools-for-macos-users) section to map `gsed` to `sed`. + 1. Run the pre-release `make` target: + + - ```sh + make prerelease + ``` + + The following changes should be present: + - `Makefile`: IMAGE_VERSION should be modified to the upcoming release tag. (This variable ensures sampleprojects have been tagged correctly prior to the release commit.) + - `changelog/generated/v1.3.0.md`: commit changes (created by changelog generation). + - `changelog/fragments/*`: commit deleted fragment files (deleted by changelog generation). + - `website/content/en/docs/upgrading-sdk-version/v1.3.0.md`: commit changes (created by changelog generation). + - `website/config.toml`: commit changes (modified by release script). + - `testdata/*`: Generated sample code. + 1. Commit these changes and push to your remote (assuming your remote is named `origin`): + + - ```sh + git add Makefile changelog website testdata + git commit -sm "Release $RELEASE_VERSION" + git push origin release-$RELEASE_VERSION + ``` + +1. Create and merge a new PR for the release-v1.3.0 branch created in step 5.4. + - You can force-merge your PR to the locked-down `master` if you have admin access to the operator-sdk repo, or ask an administrator to do so. - -### 3. Unlock the `master` branch - -Unlock the branch by changing the number of required approving reviewers in the `master` branch rule back to 1. - -### 4. Create and push a release tag on `master` - -Refresh your local `master` branch, tag the release PR commit, and push to the main operator-sdk repo -(assumes the remote's name is `upstream`): - -```sh -git checkout master -git pull -make tag -git push upstream refs/tags/$RELEASE_VERSION -``` - -### 5. 
Fast-forward the `latest` and release branches - -The `latest` branch points to the latest release tag to keep the main website subdomain up-to-date. -Run the following commands to do so: - -```sh -git checkout latest -git reset --hard refs/tags/$RELEASE_VERSION -git push -f upstream latest -``` - -Similarly, to update the release branch, run: - -```sh -git checkout v1.3.x -git reset --hard refs/tags/$RELEASE_VERSION -git push -f upstream v1.3.x -``` - -### 6. Post release steps - -- Publish the new Netlify subdomain. Assuming that the Netlify prestep - was done before the new branch was created, a new [branch - option](https://app.netlify.com/sites/operator-sdk/settings/domain#branch-subdomains) - should be visible to Netlify Admins and can be mapped to a subdomain. Please test that this subdomain - works before announcing. -- Make an [operator-framework Google Group][of-ggroup] post. -- Post to Kubernetes slack in #kubernetes-operators and #operator-sdk-dev. -- In the [GitHub milestone][gh-milestones], bump any open issues to the following release. -- Update the newly unsupported branch (1.1.x in this example) the documentation to mark it as archived. - This is done by ensuring that the following is in `website/config.toml`. This PR does not need to be - merged before the release is complete. - -```toml -version = "FIXME_1.1.x" -archived_version = true -url_latest_version = "https://sdk.operatorframework.io" -``` + - Note that the docs PR check will fail because the site isn't published yet; the PR can be merged anyways. + +1. Unlock the `master` branch + 1. Go to `Settings -> Branches` in the SDK repo. + 1. Under `Branch protection rules`, click `Edit` on the `master` branch rule. + 1. In section `Protect matching branches` of the `Rule settings` box, reduce the number of required approving reviewers back to 1. + +1. Create and push a release tag on `master` + 1. 
Refresh your local `master` branch, tag the release PR commit, and push to the main operator-sdk repo (assumes the remote's name is `upstream`):
+
+   - ```sh
+     git checkout master
+     git pull upstream master
+     make tag
+     git push upstream refs/tags/$RELEASE_VERSION
+     ```
+
+1. Fast-forward the `latest` and release branches
+   1. The `latest` branch points to the latest release tag to keep the main website subdomain up-to-date.
+   Run the following commands to do so:
+
+   - ```sh
+     git checkout latest
+     git reset --hard refs/tags/$RELEASE_VERSION
+     git push -f upstream latest
+     ```
+
+   1. Similarly, to update the release branch, run:
+
+   - ```sh
+     git checkout v1.3.x
+     git reset --hard refs/tags/$RELEASE_VERSION
+     git push -f upstream v1.3.x
+     ```
+
+1. Post release steps
+   1. Publish the new Netlify subdomain for version-specific docs.
+      1. Assuming that the Netlify prestep was done before the new branch was created, a new [branch option](https://app.netlify.com/sites/operator-sdk/settings/domain#branch-subdomains)
+   should be visible to Netlify Admins under Domain management > Branch subdomains and can be mapped to a subdomain. (Note: you may have to scroll down to the bottom of the Branch subdomains section to find the branch that is ready to be mapped.)
+      1. Please test that this subdomain works by going to the link in a browser. You can use the link in the second column to jump to the docs page for this release.
+   1. Make an [operator-framework Google Group][of-ggroup] post.
+      - You can use [this post](https://groups.google.com/g/operator-framework/c/2fBHHLQOKs8/m/VAd_zd_IAwAJ) as an example.
+   1. Post to Kubernetes slack in #kubernetes-operators and #operator-sdk-dev.
+      - You can use [this post](https://kubernetes.slack.com/archives/C017UU45SHL/p1679082546359389) as an example.
+   1. Clean up the GitHub milestone
+      1. In the [GitHub milestone][gh-milestones], bump any open issues to the following release.
+      1. Close out the milestone.
+   1. 
Update the newly unsupported branch documentation (1.1.x in this example)to mark it as archived. (Note that this step does not need to be merged before the release is complete.) + 1. Checkout the newly unsupported release branch: + + - ```sh + git checkout v1.1.x + ``` + + 1. Modify the `website/config.toml` file on lines 88-90 to be the following: + + - ```toml + version = "v1.1" + archived_version = true + url_latest_version = "https://sdk.operatorframework.io" + ``` ## Patch releases @@ -196,14 +225,14 @@ We will use the `v1.3.1` release version in this example. GitHub action. After the image is built, check the security scan results under `Child Manifests`. -#### 0. Lock down release branches on GitHub +### 0. Lock down release branches on GitHub 1. Lock down the `v1.3.x` branch to prevent further commits before the release completes: - 1. Go to `Settings -> Branches` in the SDK repo. - 1. Under `Branch protection rules`, click `Edit` on the `v*.` branch rule. - 1. In section `Protect matching branches` of the `Rule settings` box, increase the number of required approving reviewers to `6`. + 1. Go to `Settings -> Branches` in the SDK repo. + 1. Under `Branch protection rules`, click `Edit` on the `v*.` branch rule. + 1. In section `Protect matching branches` of the `Rule settings` box, increase the number of required approving reviewers to `6`. -#### 1. Branch +### 1. Branch Create a new branch from the release branch (v1.3.x in this example). This branch should already exist prior to cutting a patch release. @@ -214,8 +243,7 @@ git pull git checkout -b release-$RELEASE_VERSION ``` - -#### 2. Prepare the release commit +### 2. Prepare the release commit Using the version for your release as the IMAGE_VERSION, execute the following commands from the root of the project. @@ -242,13 +270,13 @@ git commit -sm "Release $RELEASE_VERSION" git push -u origin release-$RELEASE_VERSION ``` -#### 3. Create and merge Pull Request +### 3. 
Create and merge Pull Request - Create a pull request against the `v1.3.x` branch. - Once approving review is given, merge. You may have to unlock the branch by setting "required approving reviewers" to back to `1`. (See step 0). -#### 4. Create a release tag +### 4. Create a release tag Pull down `v1.3.x` and tag it. @@ -281,7 +309,8 @@ git push -f upstream latest **Note** In case there are non-transient errors while building the release job, you must: -1. Revert the release PR. To do so, create a PR which reverts step [2](#2-create-and-merge-a-new-pr). + +1. Revert the release PR. To do so, create a PR which reverts the patch release PR created in step [3](#3-create-and-merge-pull-request). 2. Fix what broke in the release branch. 3. Re-run the release with an incremented minor version to avoid Go module errors (ex. if v1.3.1 broke, then re-run the release as v1.3.2). Patch versions are cheap so this is not a big deal. @@ -305,19 +334,18 @@ git push upstream refs/tags/$RELEASE_VERSION The [`deploy/image-scorecard-test-kuttl`](https://github.com/operator-framework/operator-sdk/actions/workflows/deploy.yml) Action workflow will build and push this image. +## Helpful Tips and Information -## Helpful tips and information - -### Binaries and signatures +### Binaries and Signatures Binaries will be signed using our CI system's GPG key. Both binary and signature will be uploaded to the release. -### Release branches +### Release Branches Each minor release has a corresponding release branch of the form `vX.Y.x`, where `X` and `Y` are the major and minor release version numbers and the `x` is literal. This branch accepts bug fixes according to our [backport policy][backports]. -##### Cherry-picking +### Cherry-picking Once a minor release is complete, bug fixes can be merged into the release branch for the next patch release. 
Fixes can be added automatically by posting a `/cherry-pick v1.3.x` comment in the `master` PR, or manually by running: @@ -331,25 +359,39 @@ git push upstream cherrypick/some-bug Create and merge a PR from your branch to `v1.3.x`. -### GitHub release information +### GitHub Release Information GitHub releases live under the [`Releases` tab][release-page] in the operator-sdk repo. -### Updating OLM bindata +### Updating OLM Bindata Prior to an Operator SDK release, add bindata (if required) for a new OLM version by following these steps: 1. Add the new version to the [`OLM_VERSIONS`][olm_version] variable in the Makefile. -2. Remove the *lowest* version from that variable, as `operator-sdk` only supports 3 versions at a time. +2. Remove the _lowest_ version from that variable, as `operator-sdk` only supports 3 versions at a time. 3. Run `make bindata`. -4. Update the list of supported OLM versions stated in the [`Overview`][overview] section of SDK documentation is updated. +4. Check that all files were correctly updated by running this script from the root directory of the repository: + + - ```sh + ./hack/check-olm.sh + ``` + + If the check shows that files were missed by the make target, manually edit them to add the new version and remove the obsolete version. +5. Check that the list of supported OLM versions stated in the [`Overview`][overview] section of SDK documentation is updated. +6. Add the changed files to ensure that they will be committed as part of the release commit: + + - ```sh + git add -u + ``` + +### Patch Releases in Parallel -### Patch releases in parallel: +The following should be considered when doing parallel patch releases: - - Releasing in order is nice but not worth the inconvenience. Release order affects the order on GitHub releases, and which +- Releasing in order is nice but not worth the inconvenience. Release order affects the order on GitHub releases, and which is labeled "latest release". 
- - Do not unlock v.* branches while other releases are in progress. Instead, have an admin do the merges. - - Release announcements should be consolidated. +- Do not unlock v.* branches while other releases are in progress. Instead, have an admin do the merges. +- Release announcements should be consolidated. [doc-owners]: https://github.com/operator-framework/operator-sdk/blob/master/OWNERS [release-page]:https://github.com/operator-framework/operator-sdk/releases @@ -358,4 +400,4 @@ Prior to an Operator SDK release, add bindata (if required) for a new OLM versio [gh-milestones]:https://github.com/operator-framework/operator-sdk/milestones [Makefile]:https://github.com/operator-framework/operator-sdk/blob/master/Makefile [olm_version]:https://github.com/operator-framework/operator-sdk/blob/6002c70fe770cdaba9ba99da72685e0e7b6b69e8/Makefile#L45 -[overview]: /docs/overview/#olm-version-compatibility +[overview]: https://github.com/operator-framework/operator-sdk/blob/master/website/content/en/docs/overview/_index.md#olm-version-compatibility diff --git a/website/content/en/docs/installation/_index.md b/website/content/en/docs/installation/_index.md index 9c9c7b9d30e..e0ce2ca65b6 100644 --- a/website/content/en/docs/installation/_index.md +++ b/website/content/en/docs/installation/_index.md @@ -36,7 +36,7 @@ export OS=$(uname | awk '{print tolower($0)}') Download the binary for your platform: ```sh -export OPERATOR_SDK_DL_URL=https://github.com/operator-framework/operator-sdk/releases/download/v1.28.0 +export OPERATOR_SDK_DL_URL=https://github.com/operator-framework/operator-sdk/releases/download/v1.29.0 curl -LO ${OPERATOR_SDK_DL_URL}/operator-sdk_${OS}_${ARCH} ``` diff --git a/website/content/en/docs/overview/project-layout.md b/website/content/en/docs/overview/project-layout.md index a2f7e319190..aa0949edc4e 100644 --- a/website/content/en/docs/overview/project-layout.md +++ b/website/content/en/docs/overview/project-layout.md @@ -84,7 +84,7 @@ Now, let's look at 
the files and directories specific to Helm-based operators. [olm-manifests]: https://github.com/operator-framework/operator-registry/tree/v1.5.3#manifest-format [olm-metadata]: https://github.com/operator-framework/operator-registry/blob/v1.16.1/docs/design/operator-bundle.md#bundle-manifest-format [bundle]:https://github.com/operator-framework/operator-registry/blob/v1.16.1/docs/design/operator-bundle.md -[molecule]: https://molecule.readthedocs.io/en/latest/ +[molecule]: https://molecule.readthedocs.io/ [ansible-watches]: /docs/building-operators/ansible/reference/watches [ansible-test-guide]: /docs/building-operators/ansible/testing-guide [helm-watches]: /docs/building-operators/helm/reference/watches diff --git a/website/content/en/docs/upgrading-sdk-version/v1.29.0.md b/website/content/en/docs/upgrading-sdk-version/v1.29.0.md new file mode 100644 index 00000000000..47a7bb2902a --- /dev/null +++ b/website/content/en/docs/upgrading-sdk-version/v1.29.0.md @@ -0,0 +1,6 @@ +--- +title: v1.29.0 +weight: 998971000 +--- + +There are no migrations for this release! 🎉 diff --git a/website/content/en/docs/upgrading-sdk-version/version-upgrade-guide.md b/website/content/en/docs/upgrading-sdk-version/version-upgrade-guide.md index 27bcd4f9b20..8fe2baa5896 100644 --- a/website/content/en/docs/upgrading-sdk-version/version-upgrade-guide.md +++ b/website/content/en/docs/upgrading-sdk-version/version-upgrade-guide.md @@ -1288,7 +1288,7 @@ which ./bin/openapi-gen > /dev/null || go build -o ./bin/openapi-gen k8s.io/kube The Molecule version for Ansible based-operators was upgraded from `2.22` to `3.0.2`. The following changes are required in the default scaffold files. - Remove the `scenario.name` from `molecule.yaml` and then, ensure that any condition with will look for the folder name which determines the scenario name from now on -- Replace the lint with newer syntax from [documentation](https://molecule.readthedocs.io/en/latest/contributing/#linting). 
See: +- Replace the lint with newer syntax from [documentation](https://molecule.readthedocs.io/contributing/#linting). See: Replace: